author     Sergei Golubchik <serg@mariadb.org>    2014-10-15 12:59:13 +0200
committer  Sergei Golubchik <serg@mariadb.org>    2014-10-15 12:59:13 +0200
commit     f62c12b405ba7ec80b8e2490856b83c6f5899211
tree       010605c7f145da6ea6ac14b39abc4cf700d619b1
parent     f947f73b2b6d2bd246b81a9038224d2a85777520
parent     f1afc003eefe0aafd3e070c7453d9e029d8445a8
download   mariadb-git-f62c12b405ba7ec80b8e2490856b83c6f5899211.tar.gz
Merge 10.0.14 into 10.1
2117 files changed, 88154 insertions, 80359 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt index 85aadb81d5f..b018cc9a818 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,6 +26,14 @@ IF(POLICY CMP0022) CMAKE_POLICY(SET CMP0022 OLD) ENDIF() +# We use the LOCATION target property (CMP0026) +# and get_target_property() for non-existent targets (CMP0045) +IF(CMAKE_VERSION VERSION_EQUAL "3.0.0" OR + CMAKE_VERSION VERSION_GREATER "3.0.0") + CMAKE_POLICY(SET CMP0026 OLD) + CMAKE_POLICY(SET CMP0045 OLD) +ENDIF() + MESSAGE(STATUS "Running cmake version ${CMAKE_VERSION}") SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/cmake) @@ -277,16 +285,11 @@ ENDIF() # safemalloc can be enabled and disabled independently SET(WITH_SAFEMALLOC "AUTO" CACHE STRING "Use safemalloc memory debugger. Will result in slower execution. Options are: ON OFF AUTO.") -# force -DUSE_MYSYS_NEW unless already done by HAVE_CXX_NEW -IF(HAVE_CXX_NEW) - SET(DUSE_MYSYS_NEW "-DUSE_MYSYS_NEW") -ENDIF() - IF(WITH_SAFEMALLOC MATCHES "ON") - ADD_DEFINITIONS( -DSAFEMALLOC ${DUSE_MYSYS_NEW}) + ADD_DEFINITIONS( -DSAFEMALLOC) ELSEIF(WITH_SAFEMALLOC MATCHES "AUTO" AND NOT WIN32 AND NOT WITH_VALGRIND) SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC") - SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC ${DUSE_MYSYS_NEW}") + SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC") ENDIF() # Set commonly used variables @@ -306,7 +309,7 @@ ELSE() ENDIF() SET(DEFAULT_CHARSET_HOME "${DEFAULT_MYSQL_HOME}") SET(PLUGINDIR "${DEFAULT_MYSQL_HOME}/${INSTALL_PLUGINDIR}") -IF(INSTALL_SYSCONFDIR) +IF(INSTALL_SYSCONFDIR AND NOT DEFAULT_SYSCONFDIR) SET(DEFAULT_SYSCONFDIR "${INSTALL_SYSCONFDIR}") ENDIF() @@ -324,6 +327,11 @@ ENDIF() # Run platform tests INCLUDE(configure.cmake) +# force -DUSE_MYSYS_NEW unless already done by HAVE_CXX_NEW +IF(NOT HAVE_CXX_NEW) + ADD_DEFINITIONS(-DUSE_MYSYS_NEW) +ENDIF() + # Find header files from the bundled libraries # (jemalloc, yassl, readline, pcre, etc) # before the ones installed in the system @@ -360,6 +368,11 @@ ELSEIF(MYSQL_MAINTAINER_MODE MATCHES "AUTO") SET(CMAKE_CXX_FLAGS_DEBUG "${MY_MAINTAINER_CXX_WARNINGS} ${CMAKE_CXX_FLAGS_DEBUG}") ENDIF() +IF(CMAKE_CROSSCOMPILING) + SET(IMPORT_EXECUTABLES "IMPORTFILE-NOTFOUND" CACHE FILEPATH "Path to import_executables.cmake from a native build") + INCLUDE(${IMPORT_EXECUTABLES}) +ENDIF() + IF(WITH_UNIT_TESTS) ENABLE_TESTING() ADD_SUBDIRECTORY(unittest/mytap) @@ -421,10 +434,21 @@ IF(WIN32) ENDIF() ADD_SUBDIRECTORY(packaging/solaris) + INCLUDE(for_clients) ADD_SUBDIRECTORY(scripts) ADD_SUBDIRECTORY(support-files) +IF(NOT CMAKE_CROSSCOMPILING) + SET(EXPORTED comp_err comp_sql factorial) + IF(NOT WITHOUT_SERVER) + SET(EXPORTED ${EXPORTED} gen_lex_hash gen_pfs_lex_token) + ENDIF() + # minimal target to build only binaries for export + ADD_CUSTOM_TARGET(import_executables DEPENDS ${EXPORTED}) + EXPORT(TARGETS ${EXPORTED} FILE ${CMAKE_BINARY_DIR}/import_executables.cmake) +ENDIF() + CONFIGURE_FILE(config.h.cmake ${CMAKE_BINARY_DIR}/include/my_config.h) CONFIGURE_FILE(config.h.cmake ${CMAKE_BINARY_DIR}/include/config.h) CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/include/mysql_version.h.in @@ -458,8 +482,9 @@ ADD_CUSTOM_TARGET(INFO_BIN ALL INSTALL_DOCUMENTATION(README CREDITS COPYING COPYING.LESSER COPYING.thirdparty EXCEPTIONS-CLIENT COMPONENT Readme) -INSTALL_DOCUMENTATION(${CMAKE_BINARY_DIR}/Docs/INFO_SRC - ${CMAKE_BINARY_DIR}/Docs/INFO_BIN) +# MDEV-6526 these files are not installed anymore +#INSTALL_DOCUMENTATION(${CMAKE_BINARY_DIR}/Docs/INFO_SRC +# 
${CMAKE_BINARY_DIR}/Docs/INFO_BIN) IF(UNIX) INSTALL_DOCUMENTATION(Docs/INSTALL-BINARY COMPONENT Readme) @@ -1,7 +1,3 @@ -# Version number for MariaDB is maintained here. -# The version string is created from: -# MYSQL_VERSION_MAJOR.MYSQL_VERSION_MINOR.MYSQL_VERSION_PATCH-MYSQL_VERSION_EXTRA -# MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=1 MYSQL_VERSION_PATCH=1 diff --git a/client/mysql.cc b/client/mysql.cc index 510bceecac2..25e6c0255c6 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -90,7 +90,7 @@ extern "C" { #if defined(__WIN__) #include <conio.h> #else -#include <readline/readline.h> +#include <readline.h> #define HAVE_READLINE #define USE_POPEN #endif diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c index 65ea586c672..cef5e22647d 100644 --- a/client/mysql_upgrade.c +++ b/client/mysql_upgrade.c @@ -22,7 +22,7 @@ #include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */ -#define VER "1.3a" +#define VER "1.4" #ifdef HAVE_SYS_WAIT_H #include <sys/wait.h> @@ -140,21 +140,21 @@ static struct my_option my_long_options[]= #include <sslopt-longopts.h> {"tmpdir", 't', "Directory for temporary files.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"upgrade-system-tables", 's', "Only upgrade the system tables " - "do not try to upgrade the data.", + {"upgrade-system-tables", 's', "Only upgrade the system tables in the mysql database. Tables in other databases are not checked or touched.", &opt_systables_only, &opt_systables_only, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"user", 'u', "User for login if not current user.", &opt_user, &opt_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"verbose", 'v', "Display more output about the process.", + {"verbose", 'v', "Display more output about the process; Using it twice will print connection argument; Using it 3 times will print out all CHECK, RENAME and ALTER TABLE during the check phase.", &opt_not_used, &opt_not_used, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"version-check", 'k', "Run this program only if its \'server version\' " - "matches the version of the server to which it's connecting, (enabled by " - "default); use --skip-version-check to avoid this check. Note: the \'server " - "version\' of the program is the version of the MySQL server with which it " - "was built/distributed.", &opt_version_check, &opt_version_check, 0, + {"version-check", 'k', + "Run this program only if its \'server version\' " + "matches the version of the server to which it's connecting. " + "Note: the \'server version\' of the program is the version of the MariaDB " + "server with which it was built/distributed.", + &opt_version_check, &opt_version_check, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"write-binlog", OPT_WRITE_BINLOG, "All commands including those, " "issued by mysqlcheck, are written to the binary log.", @@ -206,12 +206,12 @@ static void die(const char *fmt, ...) } -static void verbose(const char *fmt, ...) +static int verbose(const char *fmt, ...) { va_list args; if (opt_silent) - return; + return 0; /* Print the verbose message */ va_start(args, fmt); @@ -222,6 +222,7 @@ static void verbose(const char *fmt, ...) 
fflush(stdout); } va_end(args); + return 0; } @@ -370,6 +371,9 @@ static int run_command(char* cmd, FILE *res_file; int error; + if (opt_verbose >= 4) + puts(cmd); + if (!(res_file= popen(cmd, "r"))) die("popen(\"%s\", \"r\") failed", cmd); @@ -750,20 +754,21 @@ static void print_conn_args(const char *tool_name) in the server using "mysqlcheck --check-upgrade .." */ -static int run_mysqlcheck_upgrade(void) +static int run_mysqlcheck_upgrade(const char *arg1, const char *arg2) { - verbose("Phase 2/3: Checking and upgrading tables"); print_conn_args("mysqlcheck"); return run_tool(mysqlcheck_path, NULL, /* Send output from mysqlcheck directly to screen */ "--no-defaults", ds_args.str, "--check-upgrade", - "--all-databases", "--auto-repair", - !opt_silent || opt_verbose ? "--verbose": "", + !opt_silent || opt_verbose >= 1 ? "--verbose" : "", + opt_verbose >= 2 ? "--verbose" : "", + opt_verbose >= 3 ? "--verbose" : "", opt_silent ? "--silent": "", opt_write_binlog ? "--write-binlog" : "--skip-write-binlog", + arg1, arg2, "2>&1", NULL); } @@ -771,7 +776,7 @@ static int run_mysqlcheck_upgrade(void) static int run_mysqlcheck_fixnames(void) { - verbose("Phase 1/3: Fixing table and database names"); + verbose("Phase 3/4: Fixing table and database names"); print_conn_args("mysqlcheck"); return run_tool(mysqlcheck_path, NULL, /* Send output from mysqlcheck directly to screen */ @@ -780,7 +785,9 @@ static int run_mysqlcheck_fixnames(void) "--all-databases", "--fix-db-names", "--fix-table-names", - opt_verbose ? "--verbose": "", + opt_verbose >= 1 ? "--verbose" : "", + opt_verbose >= 2 ? "--verbose" : "", + opt_verbose >= 3 ? "--verbose" : "", opt_silent ? "--silent": "", opt_write_binlog ? "--write-binlog" : "--skip-write-binlog", "2>&1", @@ -857,7 +864,6 @@ static int run_sql_fix_privilege_tables(void) if (init_dynamic_string(&ds_result, "", 512, 512)) die("Out of memory"); - verbose("Phase 3/3: Running 'mysql_fix_privilege_tables'..."); /* Individual queries can not be executed independently by invoking a forked mysql client, because the script uses session variables @@ -1004,16 +1010,12 @@ int main(int argc, char **argv) /* Find mysql */ find_tool(mysql_path, IF_WIN("mysql.exe", "mysql"), self_name); - if (!opt_systables_only) - { - /* Find mysqlcheck */ - find_tool(mysqlcheck_path, IF_WIN("mysqlcheck.exe", "mysqlcheck"), self_name); - } - else - { - if (!opt_silent) - printf("The --upgrade-system-tables option was used, databases won't be touched.\n"); - } + /* Find mysqlcheck */ + find_tool(mysqlcheck_path, IF_WIN("mysqlcheck.exe", "mysqlcheck"), self_name); + + if (opt_systables_only && !opt_silent) + printf("The --upgrade-system-tables option was used, user tables won't be touched.\n"); + /* Read the mysql_upgrade_info file to check if mysql_upgrade @@ -1033,16 +1035,19 @@ int main(int argc, char **argv) /* Run "mysqlcheck" and "mysql_fix_privilege_tables.sql" */ - if ((!opt_systables_only && - (run_mysqlcheck_fixnames() || run_mysqlcheck_upgrade())) || - run_sql_fix_privilege_tables()) - { - /* - The upgrade failed to complete in some way or another, - significant error message should have been printed to the screen - */ + verbose("Phase 1/4: Checking mysql database"); + if (run_mysqlcheck_upgrade("--databases", "mysql")) die("Upgrade failed" ); - } + verbose("Phase 2/4: Running 'mysql_fix_privilege_tables'..."); + if (run_sql_fix_privilege_tables()) + die("Upgrade failed" ); + + if (!opt_systables_only && + (run_mysqlcheck_fixnames() || + verbose("Phase 4/4: Checking and upgrading tables") || 
+ run_mysqlcheck_upgrade("--all-databases","--skip-database=mysql"))) + die("Upgrade failed" ); + verbose("OK"); /* Create a file indicating upgrade has been performed */ diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index 5ba463f8d6f..ee3ab7f0864 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -1,6 +1,6 @@ /* - Copyright (c) 2000, 2012, Oracle and/or its affiliates. - Copyright (c) 2010, 2012, Monty Program Ab. + Copyright (c) 2000, 2014, Oracle and/or its affiliates. + Copyright (c) 2010, 2014, Monty Program Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -71,6 +71,7 @@ extern "C" my_bool get_one_option(int optid, const struct my_option *opt, char *argument); static my_bool sql_connect(MYSQL *mysql, uint wait); static int execute_commands(MYSQL *mysql,int argc, char **argv); +static char **mask_password(int argc, char ***argv); static int drop_db(MYSQL *mysql,const char *db); extern "C" sig_handler endprog(int signal_number); static void nice_time(ulong sec,char *buff); @@ -306,9 +307,9 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), int main(int argc,char *argv[]) { - int error= 0; + int error= 0, temp_argc; MYSQL mysql; - char **commands, **save_argv; + char **commands, **save_argv, **temp_argv; MY_INIT(argv[0]); mysql_init(&mysql); @@ -316,8 +317,12 @@ int main(int argc,char *argv[]) if ((error= load_defaults("my",load_default_groups,&argc,&argv))) goto err1; save_argv = argv; /* Save for free_defaults */ + if ((error=handle_options(&argc, &argv, my_long_options, get_one_option))) goto err2; + temp_argv= mask_password(argc, &argv); + temp_argc= argc; + if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; if (debug_check_flag) @@ -328,7 +333,7 @@ int main(int argc,char *argv[]) usage(); exit(1); } - commands = argv; + commands = temp_argv; if (tty_password) opt_password = get_tty_password(NullS); @@ -475,6 +480,13 @@ int main(int argc,char *argv[]) } /* got connection */ mysql_close(&mysql); + temp_argc--; + while(temp_argc >= 0) + { + my_free(temp_argv[temp_argc]); + temp_argc--; + } + my_free(temp_argv); err2: mysql_library_end(); my_free(opt_password); @@ -1213,6 +1225,47 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) return 0; } +/** + @brief Masking the password if it is passed as command line argument. + + @details It works in Linux and changes cmdline in ps and /proc/pid/cmdline, + but it won't work for history file of shell. + The command line arguments are copied to another array and the + password in the argv is masked. This function is called just after + "handle_options" because in "handle_options", the agrv pointers + are altered which makes freeing of dynamically allocated memory + difficult. The password masking is done before all other operations + in order to minimise the time frame of password visibility via cmdline. 
+ + @param argc command line options (count) + @param argv command line options (values) + + @return temp_argv copy of argv +*/ + +static char **mask_password(int argc, char ***argv) +{ + char **temp_argv; + temp_argv= (char **)(my_malloc(sizeof(char *) * argc, MYF(MY_WME))); + argc--; + while (argc > 0) + { + temp_argv[argc]= my_strdup((*argv)[argc], MYF(MY_FAE)); + if (find_type((*argv)[argc - 1],&command_typelib, FIND_TYPE_BASIC) == ADMIN_PASSWORD || + find_type((*argv)[argc - 1],&command_typelib, FIND_TYPE_BASIC) == ADMIN_OLD_PASSWORD) + { + char *start= (*argv)[argc]; + while (*start) + *start++= 'x'; + start= (*argv)[argc]; + if (*start) + start[1]= 0; /* Cut length of argument */ + } + argc--; + } + temp_argv[argc]= my_strdup((*argv)[argc], MYF(MY_FAE)); + return(temp_argv); +} static void print_version(void) { diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index fc196d2f27a..295affe9b1a 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -18,7 +18,7 @@ /* By Jani Tolonen, 2001-04-20, MySQL Development Team */ -#define CHECK_VERSION "2.7.2-MariaDB" +#define CHECK_VERSION "2.7.3-MariaDB" #include "client_priv.h" #include <m_ctype.h> @@ -51,6 +51,7 @@ static char *opt_password = 0, *current_user = 0, *default_charset= 0, *current_host= 0; static char *opt_plugin_dir= 0, *opt_default_auth= 0; static int first_error = 0; +static char *opt_skip_database; DYNAMIC_ARRAY tables4repair, tables4rebuild, alter_table_cmds; static char *shared_memory_base_name=0; static uint opt_protocol=0; @@ -178,6 +179,9 @@ static struct my_option my_long_options[] = #endif {"silent", 's', "Print only error messages.", &opt_silent, &opt_silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"skip_database", 0, "Don't process the database specified as argument", + &opt_skip_database, &opt_skip_database, 0, GET_STR, REQUIRED_ARG, + 0, 0, 0, 0, 0, 0}, {"socket", 'S', "The socket file to use for connection.", &opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -192,8 +196,8 @@ static struct my_option my_long_options[] = {"user", 'u', "User for login if not current user.", ¤t_user, ¤t_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif - {"verbose", 'v', "Print info about the various stages.", 0, 0, 0, GET_NO_ARG, - NO_ARG, 0, 0, 0, 0, 0, 0}, + {"verbose", 'v', "Print info about the various stages; Using it 3 times will print out all CHECK, RENAME and ALTER TABLE during the check phase.", + 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} @@ -246,6 +250,9 @@ static void usage(void) puts("mysqlrepair: The default option will be -r"); puts("mysqlanalyze: The default option will be -a"); puts("mysqloptimize: The default option will be -o\n"); + printf("Usage: %s [OPTIONS] database [tables]\n", my_progname); + printf("OR %s [OPTIONS] --databases DB1 [DB2 DB3...]\n", + my_progname); puts("Please consult the MariaDB/MySQL knowledgebase at"); puts("http://kb.askmonty.org/v/mysqlcheck for latest information about"); puts("this program."); @@ -625,8 +632,10 @@ static int process_all_tables_in_db(char *database) } /* process_all_tables_in_db */ -static int run_query(const char *query) +static int run_query(const char *query, my_bool log_query) { + if (verbose >=3 && log_query) + puts(query); if (mysql_query(sock, query)) { fprintf(stderr, "Failed to %s\n", query); @@ -646,7 +655,7 @@ static int 
fix_table_storage_name(const char *name) if (strncmp(name, "#mysql50#", 9)) DBUG_RETURN(1); sprintf(qbuf, "RENAME TABLE `%s` TO `%s`", name, name + 9); - rc= run_query(qbuf); + rc= run_query(qbuf, 1); if (verbose) printf("%-50s %s\n", name, rc ? "FAILED" : "OK"); DBUG_RETURN(rc); @@ -661,7 +670,7 @@ static int fix_database_storage_name(const char *name) if (strncmp(name, "#mysql50#", 9)) DBUG_RETURN(1); sprintf(qbuf, "ALTER DATABASE `%s` UPGRADE DATA DIRECTORY NAME", name); - rc= run_query(qbuf); + rc= run_query(qbuf, 1); if (verbose) printf("%-50s %s\n", name, rc ? "FAILED" : "OK"); DBUG_RETURN(rc); @@ -680,6 +689,8 @@ static int rebuild_table(char *name) ptr= strmov(query, "ALTER TABLE "); ptr= fix_table_name(ptr, name); ptr= strxmov(ptr, " FORCE", NullS); + if (verbose >= 3) + puts(query); if (mysql_real_query(sock, query, (uint)(ptr - query))) { fprintf(stderr, "Failed to %s\n", query); @@ -696,6 +707,9 @@ static int process_one_db(char *database) { DBUG_ENTER("process_one_db"); + if (opt_skip_database && !strcmp(database, opt_skip_database)) + DBUG_RETURN(0); + if (verbose) puts(database); if (what_to_do == DO_UPGRADE) @@ -735,7 +749,7 @@ static int use_db(char *database) static int disable_binlog() { mysql_query(sock, "SET WSREP_ON=0"); /* ignore the error, if any */ - return run_query("SET SQL_LOG_BIN=0"); + return run_query("SET SQL_LOG_BIN=0", 0); } static int handle_request_for_tables(char *tables, uint length) @@ -794,6 +808,8 @@ static int handle_request_for_tables(char *tables, uint length) ptr= strxmov(ptr, " ", options, NullS); query_length= (uint) (ptr - query); } + if (verbose >= 3) + puts(query); if (mysql_real_query(sock, query, query_length)) { sprintf(message, "when executing '%s TABLE ... %s'", op, options); @@ -1046,7 +1062,7 @@ int main(int argc, char **argv) for (i = 0; i < tables4rebuild.elements ; i++) rebuild_table((char*) dynamic_array_ptr(&tables4rebuild, i)); for (i = 0; i < alter_table_cmds.elements ; i++) - run_query((char*) dynamic_array_ptr(&alter_table_cmds, i)); + run_query((char*) dynamic_array_ptr(&alter_table_cmds, i), 1); } ret= MY_TEST(first_error); diff --git a/client/mysqldump.c b/client/mysqldump.c index 98f6f15b46e..dfda2f35f71 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -87,6 +87,9 @@ /* Chars needed to store LONGLONG, excluding trailing '\0'. */ #define LONGLONG_LEN 20 +/* Max length GTID position that we will output. */ +#define MAX_GTID_LENGTH 1024 + static void add_load_option(DYNAMIC_STRING *str, const char *option, const char *option_value); static ulong find_set(TYPELIB *lib, const char *x, uint length, @@ -135,6 +138,7 @@ static ulong opt_compatible_mode= 0; #define MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL 2 static uint opt_mysql_port= 0, opt_master_data; static uint opt_slave_data; +static uint opt_use_gtid; static uint my_end_arg; static char * opt_mysql_unix_port=0; static int first_error=0; @@ -355,6 +359,13 @@ static struct my_option my_long_options[] = "server receiving the resulting dump.", &opt_galera_sst_mode, &opt_galera_sst_mode, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"gtid", 0, "Used together with --master-data=1 or --dump-slave=1." + "When enabled, the output from those options will set the GTID position " + "instead of the binlog file and offset; the file/offset will appear only as " + "a comment. 
When disabled, the GTID position will still appear in the " + "output, but only commented.", + &opt_use_gtid, &opt_use_gtid, 0, GET_BOOL, NO_ARG, + 0, 0, 0, 0, 0, 0}, {"help", '?', "Display this help message and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"hex-blob", OPT_HEXBLOB, "Dump binary strings (BINARY, " @@ -1192,7 +1203,7 @@ check_consistent_binlog_pos(char *binlog_pos_file, char *binlog_pos_offset) if (mysql_query_with_error_report(mysql, &res, "SHOW STATUS LIKE 'binlog_snapshot_%'")) - return 1; + return 0; found= 0; while ((row= mysql_fetch_row(res))) @@ -1215,6 +1226,90 @@ check_consistent_binlog_pos(char *binlog_pos_file, char *binlog_pos_offset) return (found == 2); } + +/* + Get the GTID position corresponding to a given old-style binlog position. + This uses BINLOG_GTID_POS(). The advantage is that the GTID position can + be obtained completely non-blocking in this way (without the need for + FLUSH TABLES WITH READ LOCK), as the old-style position can be obtained + with START TRANSACTION WITH CONSISTENT SNAPSHOT. + + Returns 0 if ok, non-zero if error. +*/ +static int +get_binlog_gtid_pos(char *binlog_pos_file, char *binlog_pos_offset, + char *out_gtid_pos) +{ + DYNAMIC_STRING query; + MYSQL_RES *res; + MYSQL_ROW row; + int err; + char file_buf[FN_REFLEN*2+1], offset_buf[LONGLONG_LEN*2+1]; + size_t len_pos_file= strlen(binlog_pos_file); + size_t len_pos_offset= strlen(binlog_pos_offset); + + if (len_pos_file >= FN_REFLEN || len_pos_offset > LONGLONG_LEN) + return 0; + mysql_real_escape_string(mysql, file_buf, binlog_pos_file, len_pos_file); + mysql_real_escape_string(mysql, offset_buf, binlog_pos_offset, len_pos_offset); + init_dynamic_string_checked(&query, "SELECT BINLOG_GTID_POS('", 256, 1024); + dynstr_append_checked(&query, file_buf); + dynstr_append_checked(&query, "', '"); + dynstr_append_checked(&query, offset_buf); + dynstr_append_checked(&query, "')"); + + err= mysql_query_with_error_report(mysql, &res, query.str); + dynstr_free(&query); + if (err) + return err; + + err= 1; + if ((row= mysql_fetch_row(res))) + { + strmake(out_gtid_pos, row[0], MAX_GTID_LENGTH-1); + err= 0; + } + mysql_free_result(res); + + return err; +} + + +/* + Get the GTID position on a master or slave. + The parameter MASTER is non-zero to get the position on a master + (@@gtid_binlog_pos) or zero for a slave (@@gtid_slave_pos). + + This uses the @@gtid_binlog_pos or @@gtid_slave_pos, so requires FLUSH TABLES + WITH READ LOCK or similar to be consistent. + + Returns 0 if ok, non-zero for error. +*/ +static int +get_gtid_pos(char *out_gtid_pos, int master) +{ + MYSQL_RES *res; + MYSQL_ROW row; + int found; + + if (mysql_query_with_error_report(mysql, &res, + (master ? 
+ "SELECT @@GLOBAL.gtid_binlog_pos" : + "SELECT @@GLOBAL.gtid_slave_pos"))) + return 1; + + found= 0; + if ((row= mysql_fetch_row(res))) + { + strmake(out_gtid_pos, row[0], MAX_GTID_LENGTH-1); + found++; + } + mysql_free_result(res); + + return (found != 1); +} + + static char *my_case_str(const char *str, uint str_len, const char *token, @@ -4850,12 +4945,15 @@ static int wsrep_set_sst_cmds(MYSQL *mysql) { return 0; } -static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos) + +static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos, + int have_mariadb_gtid, int use_gtid) { MYSQL_ROW row; MYSQL_RES *UNINIT_VAR(master); char binlog_pos_file[FN_REFLEN]; char binlog_pos_offset[LONGLONG_LEN+1]; + char gtid_pos[MAX_GTID_LENGTH]; char *file, *offset; const char *comment_prefix= (opt_master_data == MYSQL_OPT_MASTER_DATA_COMMENTED_SQL) ? "-- " : ""; @@ -4866,6 +4964,9 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos) return 1; file= binlog_pos_file; offset= binlog_pos_offset; + if (have_mariadb_gtid && + get_binlog_gtid_pos(binlog_pos_file, binlog_pos_offset, gtid_pos)) + return 1; } else { @@ -4895,6 +4996,9 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos) return 0; } } + + if (have_mariadb_gtid && get_gtid_pos(gtid_pos, 1)) + return 1; } /* SHOW MASTER STATUS reports file and position */ @@ -4903,7 +5007,19 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos) "recovery from\n--\n\n"); fprintf(md_result_file, "%sCHANGE MASTER TO MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n", - comment_prefix, file, offset); + (use_gtid ? "-- " : comment_prefix), file, offset); + if (have_mariadb_gtid) + { + print_comment(md_result_file, 0, + "\n--\n-- GTID to start replication from\n--\n\n"); + if (use_gtid) + fprintf(md_result_file, + "%sCHANGE MASTER TO MASTER_USE_GTID=slave_pos;\n", + comment_prefix); + fprintf(md_result_file, + "%sSET GLOBAL gtid_slave_pos='%s';\n", + (!use_gtid ? "-- " : comment_prefix), gtid_pos); + } check_io(md_result_file); if (!consistent_binlog_pos) @@ -4973,12 +5089,16 @@ static int add_slave_statements(void) return(0); } -static int do_show_slave_status(MYSQL *mysql_con) +static int do_show_slave_status(MYSQL *mysql_con, int use_gtid, + int have_mariadb_gtid) { MYSQL_RES *UNINIT_VAR(slave); MYSQL_ROW row; const char *comment_prefix= (opt_slave_data == MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL) ? "-- " : ""; + const char *gtid_comment_prefix= (use_gtid ? comment_prefix : "-- "); + const char *nogtid_comment_prefix= (!use_gtid ? comment_prefix : "-- "); + int set_gtid_done= 0; if (mysql_query_with_error_report(mysql_con, &slave, multi_source ? 
@@ -4996,8 +5116,30 @@ static int do_show_slave_status(MYSQL *mysql_con) while ((row= mysql_fetch_row(slave))) { + if (multi_source && !set_gtid_done) + { + char gtid_pos[MAX_GTID_LENGTH]; + if (have_mariadb_gtid && get_gtid_pos(gtid_pos, 0)) + return 1; + if (opt_comments) + fprintf(md_result_file, "\n--\n-- Gtid position to start replication " + "from\n--\n\n"); + fprintf(md_result_file, "%sSET GLOBAL gtid_slave_pos='%s';\n", + gtid_comment_prefix, gtid_pos); + set_gtid_done= 1; + } if (row[9 + multi_source] && row[21 + multi_source]) { + if (use_gtid) + { + if (multi_source) + fprintf(md_result_file, "%sCHANGE MASTER '%.80s' TO " + "MASTER_USE_GTID=slave_pos;\n", gtid_comment_prefix, row[0]); + else + fprintf(md_result_file, "%sCHANGE MASTER TO " + "MASTER_USE_GTID=slave_pos;\n", gtid_comment_prefix); + } + /* SHOW MASTER STATUS reports file and position */ if (opt_comments) fprintf(md_result_file, @@ -5006,9 +5148,9 @@ static int do_show_slave_status(MYSQL *mysql_con) if (multi_source) fprintf(md_result_file, "%sCHANGE MASTER '%.80s' TO ", - comment_prefix, row[0]); + nogtid_comment_prefix, row[0]); else - fprintf(md_result_file, "%sCHANGE MASTER TO ", comment_prefix); + fprintf(md_result_file, "%sCHANGE MASTER TO ", nogtid_comment_prefix); if (opt_include_master_host_port) { @@ -5081,12 +5223,13 @@ static int do_flush_tables_read_lock(MYSQL *mysql_con) FLUSH TABLES is to lower the probability of a stage where both mysqldump and most client connections are stalled. Of course, if a second long update starts between the two FLUSHes, we have that bad stall. + + We use the LOCAL option, as we do not want the FLUSH TABLES replicated to + other servers. */ return - ( mysql_query_with_error_report(mysql_con, 0, - ((opt_master_data != 0) ? - "FLUSH /*!40101 LOCAL */ TABLES" : - "FLUSH TABLES")) || + ( mysql_query_with_error_report(mysql_con, 0, + "FLUSH /*!40101 LOCAL */ TABLES") || mysql_query_with_error_report(mysql_con, 0, "FLUSH TABLES WITH READ LOCK") ); } @@ -5707,6 +5850,7 @@ int main(int argc, char **argv) char bin_log_name[FN_REFLEN]; int exit_code; int consistent_binlog_pos= 0; + int have_mariadb_gtid= 0; MY_INIT(argv[0]); sf_leaking_memory=1; /* don't report memory leaks on early exits */ @@ -5747,7 +5891,10 @@ int main(int argc, char **argv) /* Check if the server support multi source */ if (mysql_get_server_version(mysql) >= 100000) + { multi_source= 2; + have_mariadb_gtid= 1; + } if (opt_slave_data && do_stop_slave_sql(mysql)) goto err; @@ -5798,9 +5945,11 @@ int main(int argc, char **argv) if (opt_galera_sst_mode && wsrep_set_sst_cmds(mysql)) goto err; - if (opt_master_data && do_show_master_status(mysql, consistent_binlog_pos)) + if (opt_master_data && do_show_master_status(mysql, consistent_binlog_pos, + have_mariadb_gtid, opt_use_gtid)) goto err; - if (opt_slave_data && do_show_slave_status(mysql)) + if (opt_slave_data && do_show_slave_status(mysql, opt_use_gtid, + have_mariadb_gtid)) goto err; if (opt_single_transaction && do_unlock_tables(mysql)) /* unlock but no commit! 
*/ goto err; @@ -5817,19 +5966,36 @@ int main(int argc, char **argv) dump_all_tablespaces(); dump_all_databases(); } - else if (argc > 1 && !opt_databases) - { - /* Only one database and selected table(s) */ - if (!opt_alltspcs && !opt_notspcs) - dump_tablespaces_for_tables(*argv, (argv + 1), (argc -1)); - dump_selected_tables(*argv, (argv + 1), (argc - 1)); - } else { - /* One or more databases, all tables */ - if (!opt_alltspcs && !opt_notspcs) - dump_tablespaces_for_databases(argv); - dump_databases(argv); + // Check all arguments meet length condition. Currently database and table + // names are limited to NAME_LEN bytes and stack-based buffers assumes + // that escaped name will be not longer than NAME_LEN*2 + 2 bytes long. + int argument; + for (argument= 0; argument < argc; argument++) + { + size_t argument_length= strlen(argv[argument]); + if (argument_length > NAME_LEN) + { + die(EX_CONSCHECK, "[ERROR] Argument '%s' is too long, it cannot be " + "name for any table or database.\n", argv[argument]); + } + } + + if (argc > 1 && !opt_databases) + { + /* Only one database and selected table(s) */ + if (!opt_alltspcs && !opt_notspcs) + dump_tablespaces_for_tables(*argv, (argv + 1), (argc - 1)); + dump_selected_tables(*argv, (argv + 1), (argc - 1)); + } + else + { + /* One or more databases, all tables */ + if (!opt_alltspcs && !opt_notspcs) + dump_tablespaces_for_databases(argv); + dump_databases(argv); + } } /* add 'START SLAVE' to end of dump */ diff --git a/client/mysqlslap.c b/client/mysqlslap.c index 3ba5eb80a07..01064f74261 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -1796,8 +1796,8 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit) pthread_mutex_lock(&sleeper_mutex); master_wakeup= 0; - pthread_mutex_unlock(&sleeper_mutex); pthread_cond_broadcast(&sleep_threshhold); + pthread_mutex_unlock(&sleeper_mutex); gettimeofday(&start_time, NULL); diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 635b9b51cda..6c11cd234a9 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -7577,6 +7577,7 @@ int append_warnings(DYNAMIC_STRING *ds, MYSQL* mysql) { uint count; MYSQL_RES *warn_res; + DYNAMIC_STRING res; DBUG_ENTER("append_warnings"); if (!(count= mysql_warning_count(mysql))) @@ -7596,11 +7597,18 @@ int append_warnings(DYNAMIC_STRING *ds, MYSQL* mysql) die("Warning count is %u but didn't get any warnings", count); - append_result(ds, warn_res); + init_dynamic_string(&res, "", 1024, 1024); + + append_result(&res, warn_res); mysql_free_result(warn_res); - DBUG_PRINT("warnings", ("%s", ds->str)); + DBUG_PRINT("warnings", ("%s", res.str)); + if (display_result_sorted) + dynstr_append_sorted(ds, &res, 0); + else + dynstr_append_mem(ds, res.str, res.length); + dynstr_free(&res); DBUG_RETURN(count); } @@ -8848,6 +8856,10 @@ int main(int argc, char **argv) 128, 0, 0, get_var_key, 0, var_free, MYF(0))) die("Variable hash initialization failed"); + { + char path_separator[]= { FN_LIBCHAR, 0 }; + var_set_string("SYSTEM_PATH_SEPARATOR", path_separator); + } var_set_string("MYSQL_SERVER_VERSION", MYSQL_SERVER_VERSION); var_set_string("MYSQL_SYSTEM_TYPE", SYSTEM_TYPE); var_set_string("MYSQL_MACHINE_TYPE", MACHINE_TYPE); @@ -9768,36 +9780,34 @@ struct st_regex int reg_replace(char** buf_p, int* buf_len_p, char *pattern, char *replace, char *string, int icase); +bool parse_re_part(char *start_re, char *end_re, + char **p, char *end, char **buf) +{ + if (*start_re != *end_re) + { + switch ((*start_re= *(*p)++)) { + case '(': *end_re= ')'; break; + 
case '[': *end_re= ']'; break; + case '{': *end_re= '}'; break; + case '<': *end_re= '>'; break; + default: *end_re= *start_re; + } + } + while (*p < end && **p != *end_re) + { + if ((*p)[0] == '\\' && *p + 1 < end && (*p)[1] == *end_re) + (*p)++; -/* - Finds the next (non-escaped) '/' in the expression. - (If the character '/' is needed, it can be escaped using '\'.) -*/ + *(*buf)++= *(*p)++; + } + *(*buf)++= 0; + + (*p)++; + + return *p > end; +} -#define PARSE_REGEX_ARG \ - while (p < expr_end) \ - { \ - char c= *p; \ - if (c == '/') \ - { \ - if (last_c == '\\') \ - { \ - buf_p[-1]= '/'; \ - } \ - else \ - { \ - *buf_p++ = 0; \ - break; \ - } \ - } \ - else \ - *buf_p++ = c; \ - \ - last_c= c; \ - p++; \ - } \ - \ /* Initializes the regular substitution expression to be used in the result output of test. @@ -9809,10 +9819,9 @@ struct st_replace_regex* init_replace_regex(char* expr) { struct st_replace_regex* res; char* buf,*expr_end; - char* p; + char* p, start_re, end_re= 1; char* buf_p; uint expr_len= strlen(expr); - char last_c = 0; struct st_regex reg; /* my_malloc() will die on fail with MY_FAE */ @@ -9830,44 +9839,32 @@ struct st_replace_regex* init_replace_regex(char* expr) { bzero(®,sizeof(reg)); /* find the start of the statement */ - while (p < expr_end) - { - if (*p == '/') - break; + while (my_isspace(charset_info, *p) && p < expr_end) p++; - } - if (p == expr_end || ++p == expr_end) + if (p >= expr_end) { if (res->regex_arr.elements) break; else goto err; } - /* we found the start */ - reg.pattern= buf_p; - - /* Find first argument -- pattern string to be removed */ - PARSE_REGEX_ARG - if (p == expr_end || ++p == expr_end) - goto err; + start_re= 0; + reg.pattern= buf_p; + if (parse_re_part(&start_re, &end_re, &p, expr_end, &buf_p)) + goto err; - /* buf_p now points to the replacement pattern terminated with \0 */ reg.replace= buf_p; - - /* Find second argument -- replace string to replace pattern */ - PARSE_REGEX_ARG - - if (p == expr_end) - goto err; - - /* skip the ending '/' in the statement */ - p++; + if (parse_re_part(&start_re, &end_re, &p, expr_end, &buf_p)) + goto err; /* Check if we should do matching case insensitive */ if (p < expr_end && *p == 'i') + { + p++; reg.icase= 1; + } /* done parsing the statement, now place it in regex_arr */ if (insert_dynamic(&res->regex_arr,(uchar*) ®)) diff --git a/cmake/build_configurations/mysql_release.cmake b/cmake/build_configurations/mysql_release.cmake index 6673cdc438c..434e1cfda0e 100644 --- a/cmake/build_configurations/mysql_release.cmake +++ b/cmake/build_configurations/mysql_release.cmake @@ -109,14 +109,9 @@ ENDIF() IF(UNIX) SET(WITH_EXTRA_CHARSETS all CACHE STRING "") - IF(EXISTS "${CMAKE_SOURCE_DIR}/COPYING") - OPTION(WITH_READLINE "" ON) - ELSE() - OPTION(WITH_LIBEDIT "" ON) - ENDIF() - IF(CMAKE_SYSTEM_NAME STREQUAL "Linux") + SET(WITH_JEMALLOC "static" CACHE STRING "") IF(NOT IGNORE_AIO_CHECK) # Ensure aio is available on Linux (required by InnoDB) diff --git a/cmake/dtrace.cmake b/cmake/dtrace.cmake index 1fc87cfcbef..5d0bb7ff8c9 100644 --- a/cmake/dtrace.cmake +++ b/cmake/dtrace.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved. 
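
Editor's note: the mysqlslap hunk above moves pthread_cond_broadcast() so that it runs while sleeper_mutex is still held, i.e. the master_wakeup predicate is cleared and the waiters are woken inside the same critical section. A minimal standalone sketch of that wait/broadcast pattern follows; the names worker() and the thread count are illustrative only, not taken from mysqlslap.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t sleeper_mutex= PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  sleep_threshold= PTHREAD_COND_INITIALIZER;
    static int master_wakeup= 1;   /* workers wait while this is non-zero */

    static void *worker(void *arg)
    {
      (void) arg;
      pthread_mutex_lock(&sleeper_mutex);
      while (master_wakeup)        /* re-check the predicate on every wakeup */
        pthread_cond_wait(&sleep_threshold, &sleeper_mutex);
      pthread_mutex_unlock(&sleeper_mutex);
      return NULL;
    }

    int main(void)
    {
      pthread_t threads[4];
      int i;
      for (i= 0; i < 4; i++)
        pthread_create(&threads[i], NULL, worker, NULL);

      /* Clear the predicate and broadcast under the same mutex,
         mirroring the ordering used in the fixed run_scheduler(). */
      pthread_mutex_lock(&sleeper_mutex);
      master_wakeup= 0;
      pthread_cond_broadcast(&sleep_threshold);
      pthread_mutex_unlock(&sleeper_mutex);

      for (i= 0; i < 4; i++)
        pthread_join(threads[i], NULL);
      puts("all workers released");
      return 0;
    }

Compile with -pthread; the key point is that the predicate change and the broadcast are published together with respect to any waiter holding or acquiring the mutex.
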
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -80,13 +80,6 @@ IF(ENABLE_DTRACE) ${CMAKE_BINARY_DIR}/include/probes_mysql_dtrace.h ${CMAKE_BINARY_DIR}/include/probes_mysql_nodtrace.h ) - IF(CMAKE_SYSTEM_NAME MATCHES "Linux") - # Systemtap object - EXECUTE_PROCESS( - COMMAND ${DTRACE} -G -s ${CMAKE_SOURCE_DIR}/include/probes_mysql.d.base - -o ${CMAKE_BINARY_DIR}/probes_mysql.o - ) - ENDIF() ADD_CUSTOM_TARGET(gen_dtrace_header DEPENDS ${CMAKE_BINARY_DIR}/include/probes_mysql.d @@ -105,12 +98,7 @@ FUNCTION(DTRACE_INSTRUMENT target) IF(ENABLE_DTRACE) ADD_DEPENDENCIES(${target} gen_dtrace_header) - IF(CMAKE_SYSTEM_NAME MATCHES "Linux") - TARGET_LINK_LIBRARIES(${target} ${CMAKE_BINARY_DIR}/probes_mysql.o) - ENDIF() - - # On Solaris, invoke dtrace -G to generate object file and - # link it together with target. + # Invoke dtrace to generate object file and link it together with target. IF(CMAKE_SYSTEM_NAME MATCHES "SunOS") SET(objdir ${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${target}.dir) SET(outfile ${objdir}/${target}_dtrace.o) @@ -127,6 +115,21 @@ FUNCTION(DTRACE_INSTRUMENT target) -P ${CMAKE_SOURCE_DIR}/cmake/dtrace_prelink.cmake WORKING_DIRECTORY ${objdir} ) + ELSEIF(CMAKE_SYSTEM_NAME MATCHES "Linux") + # dtrace on Linux runs gcc and uses flags from environment + SET(CFLAGS_SAVED $ENV{CFLAGS}) + SET(ENV{CFLAGS} ${CMAKE_C_FLAGS}) + SET(outfile "${CMAKE_BINARY_DIR}/probes_mysql.o") + # Systemtap object + EXECUTE_PROCESS( + COMMAND ${DTRACE} -G -s ${CMAKE_SOURCE_DIR}/include/probes_mysql.d.base + -o ${outfile} + ) + SET(ENV{CFLAGS} ${CFLAGS_SAVED}) + ENDIF() + + # Do not try to extend the library if we have not built the .o file + IF(outfile) # Add full object path to linker flags GET_TARGET_PROPERTY(target_type ${target} TYPE) IF(NOT target_type MATCHES "STATIC") @@ -138,12 +141,12 @@ FUNCTION(DTRACE_INSTRUMENT target) # but maybe one day this will be fixed. 
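
Editor's note: the dtrace.cmake changes above generate probes_mysql.o with SystemTap's dtrace -G emulation on Linux and link it into each instrumented target. As a rough illustration of what a user-level static probe looks like on the C side, here is a sketch using the DTRACE_PROBE macros from <sys/sdt.h> (shipped with systemtap-sdt-dev); it bypasses the separate .d file and -G step used by the build and only shows the concept. The provider name demo and probe name factorial_done are made up for the example.

    /* Minimal user-level static probe; assumes <sys/sdt.h> is available. */
    #include <stdio.h>
    #include <sys/sdt.h>

    static long factorial(long n)
    {
      long result= 1;
      long i;
      for (i= 2; i <= n; i++)
        result*= i;
      /* Fires probe "factorial_done" in provider "demo"; this is effectively
         a no-op unless a tracer (SystemTap, perf, bpftrace) attaches to it. */
      DTRACE_PROBE2(demo, factorial_done, n, result);
      return result;
    }

    int main(void)
    {
      printf("5! = %ld\n", factorial(5));
      return 0;
    }

When a tracer attaches to the marker the two arguments become visible at the probe site; otherwise the marker costs only a few inert instructions.
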
GET_TARGET_PROPERTY(target_location ${target} LOCATION) ADD_CUSTOM_COMMAND( - TARGET ${target} POST_BUILD - COMMAND ${CMAKE_AR} r ${target_location} ${outfile} - COMMAND ${CMAKE_RANLIB} ${target_location} - ) - # Used in DTRACE_INSTRUMENT_WITH_STATIC_LIBS - SET(TARGET_OBJECT_DIRECTORY_${target} ${objdir} CACHE INTERNAL "") + TARGET ${target} POST_BUILD + COMMAND ${CMAKE_AR} r ${target_location} ${outfile} + COMMAND ${CMAKE_RANLIB} ${target_location} + ) + # Used in DTRACE_INSTRUMENT_WITH_STATIC_LIBS + SET(TARGET_OBJECT_DIRECTORY_${target} ${objdir} CACHE INTERNAL "") ENDIF() ENDIF() ENDIF() diff --git a/cmake/install_macros.cmake b/cmake/install_macros.cmake index 8226f0f705b..2680d9ccbb4 100644 --- a/cmake/install_macros.cmake +++ b/cmake/install_macros.cmake @@ -38,7 +38,9 @@ FUNCTION (INSTALL_DEBUG_SYMBOLS) STRING(REPLACE ".dll" ".pdb" pdb_location ${pdb_location}) STRING(REPLACE ".lib" ".pdb" pdb_location ${pdb_location}) IF(CMAKE_GENERATOR MATCHES "Visual Studio") - STRING(REPLACE "${CMAKE_CFG_INTDIR}" "\${CMAKE_INSTALL_CONFIG_NAME}" pdb_location ${pdb_location}) + STRING(REPLACE + "${CMAKE_CFG_INTDIR}" "\${CMAKE_INSTALL_CONFIG_NAME}" + pdb_location ${pdb_location}) ENDIF() set(comp "") diff --git a/cmake/jemalloc.cmake b/cmake/jemalloc.cmake index b677f226266..876688f02f6 100644 --- a/cmake/jemalloc.cmake +++ b/cmake/jemalloc.cmake @@ -1,67 +1,37 @@ -# old cmake does not have ExternalProject file -IF(CMAKE_VERSION VERSION_LESS "2.8.6") - MACRO (CHECK_JEMALLOC) - ENDMACRO() - RETURN() -ENDIF() +INCLUDE (CheckLibraryExists) -INCLUDE(ExternalProject) +SET(WITH_JEMALLOC auto CACHE STRING + "Build with jemalloc. Possible values are 'yes', 'no', 'auto'") -MACRO (USE_BUNDLED_JEMALLOC) - SET(SOURCE_DIR "${CMAKE_SOURCE_DIR}/extra/jemalloc") - SET(BINARY_DIR "${CMAKE_BINARY_DIR}/${CMAKE_CFG_INTDIR}/extra/jemalloc/build") - SET(LIBJEMALLOC "libjemalloc") - SET(JEMALLOC_CONFIGURE_OPTS "CC=${CMAKE_C_COMPILER} ${CMAKE_C_COMPILER_ARG1}" "--with-private-namespace=jemalloc_internal_" "--enable-cc-silence") - IF (CMAKE_BUILD_TYPE MATCHES "Debug" AND NOT APPLE) # see the comment in CMakeLists.txt - LIST(APPEND JEMALLOC_CONFIGURE_OPTS --enable-debug) +MACRO (CHECK_JEMALLOC) + # compatibility with old WITH_JEMALLOC values + IF(WITH_JEMALLOC STREQUAL "bundled") + MESSAGE(FATAL_ERROR "MariaDB no longer bundles jemalloc") ENDIF() - - IF(CMAKE_GENERATOR MATCHES "Makefiles") - SET(MAKE_COMMAND ${CMAKE_MAKE_PROGRAM}) - ELSE() # Xcode/Ninja generators - SET(MAKE_COMMAND make) + IF(WITH_JEMALLOC STREQUAL "system") + SET(WITH_JEMALLOC "yes") ENDIF() - - ExternalProject_Add(jemalloc - PREFIX extra/jemalloc - SOURCE_DIR ${SOURCE_DIR} - BINARY_DIR ${BINARY_DIR} - STAMP_DIR ${BINARY_DIR} - CONFIGURE_COMMAND "${SOURCE_DIR}/configure" ${JEMALLOC_CONFIGURE_OPTS} - BUILD_COMMAND ${MAKE_COMMAND} "build_lib_static" - INSTALL_COMMAND "" - ) - ADD_LIBRARY(libjemalloc STATIC IMPORTED) - SET_TARGET_PROPERTIES(libjemalloc PROPERTIES IMPORTED_LOCATION "${BINARY_DIR}/lib/libjemalloc_pic.a") - ADD_DEPENDENCIES(libjemalloc jemalloc) -ENDMACRO() -IF(CMAKE_SYSTEM_NAME MATCHES "Linux" OR APPLE) - # Linux and OSX are the only systems where bundled jemalloc can be built without problems, - # as they both have GNU make and jemalloc actually compiles. 
- # Also, BSDs use jemalloc as malloc already - SET(WITH_JEMALLOC_DEFAULT "yes") -ELSE() - SET(WITH_JEMALLOC_DEFAULT "no") -ENDIF() + IF(WITH_JEMALLOC STREQUAL "yes" OR WITH_JEMALLOC STREQUAL "auto" OR + WITH_JEMALLOC STREQUAL "static") + + IF(WITH_JEMALLOC STREQUAL "static") + SET(libname jemalloc_pic) + SET(CMAKE_REQUIRED_LIBRARIES pthread dl m) + SET(what bundled) + ELSE() + SET(libname jemalloc) + SET(what system) + ENDIF() -SET(WITH_JEMALLOC ${WITH_JEMALLOC_DEFAULT} CACHE STRING - "Which jemalloc to use. Possible values are 'no', 'bundled', 'system', 'yes' (system if possible, otherwise bundled)") + CHECK_LIBRARY_EXISTS(${libname} malloc_stats_print "" HAVE_JEMALLOC) + SET(CMAKE_REQUIRED_LIBRARIES) -MACRO (CHECK_JEMALLOC) - IF(WITH_JEMALLOC STREQUAL "system" OR WITH_JEMALLOC STREQUAL "yes") - CHECK_LIBRARY_EXISTS(jemalloc malloc_stats_print "" HAVE_JEMALLOC) IF (HAVE_JEMALLOC) - SET(LIBJEMALLOC jemalloc) - SET(MALLOC_LIBRARY "system jemalloc") - ELSEIF (WITH_JEMALLOC STREQUAL "system") - MESSAGE(FATAL_ERROR "system jemalloc is not found") - ELSEIF (WITH_JEMALLOC STREQUAL "yes") - SET(trybundled 1) + SET(LIBJEMALLOC ${libname}) + SET(MALLOC_LIBRARY "${what} jemalloc") + ELSEIF (NOT WITH_JEMALLOC STREQUAL "auto") + MESSAGE(FATAL_ERROR "${libname} is not found") ENDIF() ENDIF() - IF(WITH_JEMALLOC STREQUAL "bundled" OR trybundled) - USE_BUNDLED_JEMALLOC() - SET(MALLOC_LIBRARY "bundled jemalloc") - ENDIF() ENDMACRO() diff --git a/cmake/os/Windows.cmake b/cmake/os/Windows.cmake index 53c3bad7fed..5a6e51e4120 100644 --- a/cmake/os/Windows.cmake +++ b/cmake/os/Windows.cmake @@ -1,4 +1,4 @@ -# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -61,22 +61,30 @@ IF(MINGW AND CMAKE_SIZEOF_VOID_P EQUAL 4) ENDIF() IF(MSVC) - # Enable debug info also in Release build, and create PDB to be able to analyze - # crashes - FOREACH(lang C CXX) - SET(CMAKE_${lang}_FLAGS_RELEASE "${CMAKE_${lang}_FLAGS_RELEASE} /Zi") - ENDFOREACH() + # Enable debug info also in Release build, + # and create PDB to be able to analyze crashes. FOREACH(type EXE SHARED MODULE) - SET(CMAKE_{type}_LINKER_FLAGS_RELEASE "${CMAKE_${type}_LINKER_FLAGS_RELEASE} /debug") + SET(CMAKE_{type}_LINKER_FLAGS_RELEASE + "${CMAKE_${type}_LINKER_FLAGS_RELEASE} /debug") ENDFOREACH() # Force static runtime libraries + # - Choose debugging information: + # /Z7 + # Produces an .obj file containing full symbolic debugging + # information for use with the debugger. The symbolic debugging + # information includes the names and types of variables, as well as + # functions and line numbers. No .pdb file is produced by the compiler. 
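
Editor's note: the rewritten cmake/jemalloc.cmake above selects between system and static jemalloc by probing for the malloc_stats_print symbol via CHECK_LIBRARY_EXISTS. A small C program that exercises the same entry point, assuming jemalloc is installed with its usual <jemalloc/jemalloc.h> header and linked with -ljemalloc:

    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>  /* assumption: default header location */

    int main(void)
    {
      /* Allocate something so the reported statistics are non-trivial. */
      void *p= malloc(1024 * 1024);
      if (!p)
        return 1;

      /* Same symbol the CMake probe checks for; with a NULL write callback
         the statistics go to stderr.  If jemalloc was built with a je_
         symbol prefix, the function is je_malloc_stats_print instead. */
      malloc_stats_print(NULL, NULL, NULL);

      free(p);
      return 0;
    }
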
+ FOREACH(lang C CXX) + SET(CMAKE_${lang}_FLAGS_RELEASE "${CMAKE_${lang}_FLAGS_RELEASE} /Z7") + ENDFOREACH() FOREACH(flag - CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_RELWITHDEBINFO - CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_DEBUG_INIT + CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_RELWITHDEBINFO + CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_DEBUG_INIT CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELWITHDEBINFO - CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG_INIT) + CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG_INIT) STRING(REPLACE "/MD" "/MT" "${flag}" "${${flag}}") + STRING(REPLACE "/Zi" "/Z7" "${flag}" "${${flag}}") ENDFOREACH() @@ -104,7 +112,6 @@ IF(MSVC) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4800 /wd4805 /wd4996") SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4800 /wd4805 /wd4996 /we4099") - IF(CMAKE_SIZEOF_VOID_P MATCHES 8) # _WIN64 is defined by the compiler itself. # Yet, we define it here again to work around a bug with Intellisense diff --git a/cmake/plugin.cmake b/cmake/plugin.cmake index 632c4cfe442..ccd744cee6b 100644 --- a/cmake/plugin.cmake +++ b/cmake/plugin.cmake @@ -211,7 +211,7 @@ MACRO(MYSQL_ADD_PLUGIN) IF(CPACK_COMPONENTS_ALL AND NOT CPACK_COMPONENTS_ALL MATCHES ${ARG_COMPONENT}) SET(CPACK_COMPONENTS_ALL ${CPACK_COMPONENTS_ALL} ${ARG_COMPONENT} PARENT_SCOPE) - SET(CPACK_RPM_${ARG_COMPONENT}_PACKAGE_REQUIRES "MariaDB-server" PARENT_SCOPE) + SET(CPACK_RPM_${ARG_COMPONENT}_PACKAGE_REQUIRES "MariaDB" PARENT_SCOPE) IF (NOT ARG_CONFIG) SET(ARG_CONFIG "${CMAKE_CURRENT_BINARY_DIR}/${target}.cnf") diff --git a/cmake/readline.cmake b/cmake/readline.cmake index ad6a7382e2a..f570c91c1b5 100644 --- a/cmake/readline.cmake +++ b/cmake/readline.cmake @@ -116,24 +116,23 @@ ENDMACRO() MACRO (MYSQL_USE_BUNDLED_READLINE) SET(USE_NEW_READLINE_INTERFACE 1) SET(HAVE_HIST_ENTRY 0 CACHE INTERNAL "" FORCE) - SET(READLINE_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/extra) - SET(READLINE_LIBRARY readline) + SET(MY_READLINE_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/extra/readline) + SET(MY_READLINE_LIBRARY readline) ADD_SUBDIRECTORY(${CMAKE_SOURCE_DIR}/extra/readline) ENDMACRO() MACRO (MYSQL_FIND_SYSTEM_READLINE) - FIND_PATH(READLINE_INCLUDE_DIR readline/readline.h ) + FIND_PATH(READLINE_INCLUDE_DIR readline.h PATH_SUFFIXES readline) FIND_LIBRARY(READLINE_LIBRARY NAMES readline) MARK_AS_ADVANCED(READLINE_INCLUDE_DIR READLINE_LIBRARY) - SET(CMAKE_REQUIRES_LIBRARIES ${READLINE_LIBRARY} ${CURSES_LIBRARY}) - IF(READLINE_LIBRARY AND READLINE_INCLUDE_DIR) SET(CMAKE_REQUIRED_LIBRARIES ${READLINE_LIBRARY} ${CURSES_LIBRARY}) + SET(CMAKE_REQUIRED_INCLUDES ${READLINE_INCLUDE_DIR}) CHECK_CXX_SOURCE_COMPILES(" #include <stdio.h> - #include <readline/readline.h> + #include <readline.h> int main(int argc, char **argv) { rl_completion_func_t *func1= (rl_completion_func_t*)0; @@ -141,19 +140,9 @@ MACRO (MYSQL_FIND_SYSTEM_READLINE) }" NEW_READLINE_INTERFACE) - CHECK_CXX_SOURCE_COMPILES(" - #include <stdio.h> - #include <readline/readline.h> - int main(int argc, char **argv) - { - HIST_ENTRY entry; - return 0; - }" - HAVE_HIST_ENTRY) - CHECK_C_SOURCE_COMPILES(" #include <stdio.h> - #include <readline/readline.h> + #include <readline.h> #if RL_VERSION_MAJOR > 5 #error #endif @@ -176,30 +165,27 @@ MACRO (MYSQL_FIND_SYSTEM_READLINE) ENDIF(READLINE_V5) ENDIF(NEW_READLINE_INTERFACE) ENDIF() - SET(CMAKE_REQUIRES_LIBRARIES ) ENDMACRO() MACRO (MYSQL_FIND_SYSTEM_LIBEDIT) - - FIND_PATH(READLINE_INCLUDE_DIR readline/readline.h ) - FIND_LIBRARY(READLINE_LIBRARY NAMES readline) - MARK_AS_ADVANCED(READLINE_INCLUDE_DIR READLINE_LIBRARY) - - SET(CMAKE_REQUIRES_LIBRARIES ${READLINE_LIBRARY}) + 
FIND_PATH(LIBEDIT_INCLUDE_DIR readline.h PATH_SUFFIXES editline edit/readline) + FIND_LIBRARY(LIBEDIT_LIBRARY edit) + MARK_AS_ADVANCED(LIBEDIT_INCLUDE_DIR LIBEDIT_LIBRARY) - IF(READLINE_LIBRARY AND READLINE_INCLUDE_DIR) + IF(LIBEDIT_LIBRARY AND LIBEDIT_INCLUDE_DIR) + SET(CMAKE_REQUIRED_LIBRARIES ${LIBEDIT_LIBRARY}) + SET(CMAKE_REQUIRED_INCLUDES ${LIBEDIT_INCLUDE_DIR}) CHECK_CXX_SOURCE_COMPILES(" #include <stdio.h> - #include <readline/readline.h> + #include <readline.h> int main(int argc, char **argv) { - char res= *(*rl_completion_entry_function)(0,0); + int res= (*rl_completion_entry_function)(0,0); completion_matches(0,0); }" LIBEDIT_INTERFACE) SET(USE_LIBEDIT_INTERFACE ${LIBEDIT_INTERFACE}) ENDIF() - SET(CMAKE_REQUIRES_LIBRARIES) ENDMACRO() @@ -216,15 +202,33 @@ MACRO (MYSQL_CHECK_READLINE) IF (NOT APPLE) MYSQL_FIND_SYSTEM_READLINE() ENDIF() - IF(NOT USE_NEW_READLINE_INTERFACE) + IF(USE_NEW_READLINE_INTERFACE) + SET(MY_READLINE_INCLUDE_DIR ${READLINE_INCLUDE_DIR}) + SET(MY_READLINE_LIBRARY ${READLINE_LIBRARY} ${CURSES_LIBRARY}) + ELSE() MYSQL_FIND_SYSTEM_LIBEDIT() - IF(NOT USE_LIBEDIT_INTERFACE) + IF(USE_LIBEDIT_INTERFACE) + SET(MY_READLINE_INCLUDE_DIR ${LIBEDIT_INCLUDE_DIR}) + SET(MY_READLINE_LIBRARY ${LIBEDIT_LIBRARY} ${CURSES_LIBRARY}) + ELSE() MYSQL_USE_BUNDLED_READLINE() ENDIF() ENDIF() ENDIF() - SET(MY_READLINE_INCLUDE_DIR ${READLINE_INCLUDE_DIR}) - SET(MY_READLINE_LIBRARY ${READLINE_LIBRARY} ${CURSES_LIBRARY}) + + SET(CMAKE_REQUIRED_LIBRARIES ${MY_READLINE_LIBRARY}) + SET(CMAKE_REQUIRED_INCLUDES ${MY_READLINE_INCLUDE_DIR}) + CHECK_CXX_SOURCE_COMPILES(" + #include <stdio.h> + #include <readline.h> + int main(int argc, char **argv) + { + HIST_ENTRY entry; + return 0; + }" + HAVE_HIST_ENTRY) + SET(CMAKE_REQUIRED_LIBRARIES) + SET(CMAKE_REQUIRED_INCLUDES) ENDIF(NOT WIN32) ENDMACRO() diff --git a/config.h.cmake b/config.h.cmake index e0e19f37261..8982976a0bd 100644 --- a/config.h.cmake +++ b/config.h.cmake @@ -521,6 +521,11 @@ #endif #define PSAPI_VERSION 1 /* for GetProcessMemoryInfo() */ +/* We don't want the min/max macros */ +#ifdef __WIN__ +#define NOMINMAX +#endif + /* MySQL features */ diff --git a/configure.cmake b/configure.cmake index 6997a3c3478..d8b219f0e01 100644 --- a/configure.cmake +++ b/configure.cmake @@ -962,7 +962,7 @@ IF(CMAKE_COMPILER_IS_GNUCXX) IF(WITH_ATOMIC_OPS STREQUAL "up") SET(MY_ATOMIC_MODE_DUMMY 1 CACHE BOOL "Assume single-CPU mode, no concurrency") ELSEIF(WITH_ATOMIC_OPS STREQUAL "rwlocks") - SET(MY_ATOMIC_MODE_RWLOCK 1 CACHE BOOL "Use pthread rwlocks for atomic ops") + SET(MY_ATOMIC_MODE_RWLOCKS 1 CACHE BOOL "Use pthread rwlocks for atomic ops") ELSEIF(WITH_ATOMIC_OPS STREQUAL "smp") ELSEIF(NOT WITH_ATOMIC_OPS) CHECK_CXX_SOURCE_COMPILES(" @@ -994,12 +994,12 @@ ELSE() ENDIF() ENDIF() -SET(WITH_ATOMIC_LOCKS "${WITH_ATOMIC_LOCKS}" CACHE STRING -"Implement atomic operations using pthread rwlocks or atomic CPU -instructions for multi-processor or uniprocessor +SET(WITH_ATOMIC_OPS "${WITH_ATOMIC_OPS}" CACHE STRING + "Implement atomic operations using pthread rwlocks (rwlocks); or atomic CPU +instructions for multi-processor (smp) or uniprocessor (up) configuration. 
By default gcc built-in sync functions are used, if available and 'smp' configuration otherwise.") -MARK_AS_ADVANCED(WITH_ATOMIC_LOCKS MY_ATOMIC_MODE_RWLOCK MY_ATOMIC_MODE_DUMMY) +MARK_AS_ADVANCED(WITH_ATOMIC_OPS MY_ATOMIC_MODE_RWLOCK MY_ATOMIC_MODE_DUMMY) IF(WITH_VALGRIND) SET(HAVE_valgrind 1) diff --git a/dbug/CMakeLists.txt b/dbug/CMakeLists.txt index 3d0b0801132..c40c70b684d 100644 --- a/dbug/CMakeLists.txt +++ b/dbug/CMakeLists.txt @@ -24,8 +24,10 @@ TARGET_LINK_LIBRARIES(dbug mysys) ADD_EXECUTABLE(tests tests.c) TARGET_LINK_LIBRARIES(tests dbug) -ADD_EXECUTABLE(factorial my_main.c factorial.c) -TARGET_LINK_LIBRARIES(factorial dbug) +IF(NOT CMAKE_CROSSCOMPILING) + ADD_EXECUTABLE(factorial my_main.c factorial.c) + TARGET_LINK_LIBRARIES(factorial dbug) +ENDIF() IF(NOT WIN32 AND NOT CMAKE_GENERATOR MATCHES Xcode) FIND_PROGRAM(GROFF groff) @@ -36,11 +38,11 @@ IF(NOT WIN32 AND NOT CMAKE_GENERATOR MATCHES Xcode) SET(SOURCE_INC factorial.r main.r example1.r example2.r example3.r) ADD_CUSTOM_COMMAND(OUTPUT ${OUTPUT_INC} DEPENDS factorial - COMMAND ./factorial 1 2 3 4 5 > output1.r - COMMAND ./factorial -\#t:o 2 3 > output2.r - COMMAND ./factorial -\#d:t:o 3 > output3.r - COMMAND ./factorial -\#d,result:o 4 > output4.r - COMMAND ./factorial -\#d:f,factorial:F:L:o 3 > output5.r) + COMMAND factorial 1 2 3 4 5 > output1.r + COMMAND factorial -\#t:o 2 3 > output2.r + COMMAND factorial -\#d:t:o 3 > output3.r + COMMAND factorial -\#d,result:o 4 > output4.r + COMMAND factorial -\#d:f,factorial:F:L:o 3 > output5.r) FOREACH(file ${SOURCE_INC}) STRING(REGEX REPLACE "\\.r" ".c" srcfile ${file}) ADD_CUSTOM_COMMAND(OUTPUT ${file} DEPENDS ${srcfile} diff --git a/debian/dist/Debian/control b/debian/dist/Debian/control index 4468917ca34..e68f0277076 100644 --- a/debian/dist/Debian/control +++ b/debian/dist/Debian/control @@ -4,7 +4,7 @@ Priority: optional Maintainer: MariaDB Developers <maria-developers@lists.launchpad.net> XSBC-Original-Maintainer: Maria Developers <maria-developers@lists.launchpad.net> Uploaders: MariaDB Developers <maria-developers@lists.launchpad.net> -Build-Depends: libtool (>= 1.4.2-7), procps | hurd, debhelper, file (>= 3.28), libncurses5-dev (>= 5.0-6), perl (>= 5.6.0), libwrap0-dev (>= 7.6-8.3), zlib1g-dev (>= 1:1.1.3-5), ${LIBREADLINE_DEV}, libssl-dev, libpam0g-dev, psmisc, po-debconf, chrpath, automake1.9, doxygen, texlive-latex-base, ghostscript | gs-gpl, dpatch, gawk, bison, lsb-release, hardening-wrapper, ${CMAKE_DEP}libaio-dev +Build-Depends: libtool (>= 1.4.2-7), procps | hurd, debhelper, file (>= 3.28), libncurses5-dev (>= 5.0-6), perl (>= 5.6.0), libwrap0-dev (>= 7.6-8.3), zlib1g-dev (>= 1:1.1.3-5), ${LIBREADLINE_DEV}, libssl-dev, libpam0g-dev, psmisc, po-debconf, chrpath, automake1.9, doxygen, texlive-latex-base, ghostscript | gs-gpl, dpatch, gawk, bison, lsb-release, hardening-wrapper, ${CMAKE_DEP}libaio-dev, libjemalloc-dev (>= 3.0.0) Standards-Version: 3.8.3 Homepage: http://mariadb.org/ Vcs-Browser: https://github.com/MariaDB/server/tree/10.1/ diff --git a/debian/dist/Ubuntu/control b/debian/dist/Ubuntu/control index 698b81eae00..85a46e8a47f 100644 --- a/debian/dist/Ubuntu/control +++ b/debian/dist/Ubuntu/control @@ -4,7 +4,7 @@ Priority: optional Maintainer: MariaDB Developers <maria-developers@lists.launchpad.net> XSBC-Original-Maintainer: Maria Developers <maria-developers@lists.launchpad.net> Uploaders: MariaDB Developers <maria-developers@lists.launchpad.net> -Build-Depends: libtool (>= 1.4.2-7), procps | hurd, debhelper, file (>= 3.28), libncurses5-dev (>= 5.0-6), 
perl (>= 5.6.0), libwrap0-dev (>= 7.6-8.3), zlib1g-dev (>= 1:1.1.3-5), ${LIBREADLINE_DEV}, libssl-dev, libpam0g-dev, psmisc, po-debconf, chrpath, automake1.9, doxygen, texlive-latex-base, ghostscript | gs-gpl, dpatch, gawk, bison, lsb-release, hardening-wrapper, ${CMAKE_DEP}libaio-dev +Build-Depends: libtool (>= 1.4.2-7), procps | hurd, debhelper, file (>= 3.28), libncurses5-dev (>= 5.0-6), perl (>= 5.6.0), libwrap0-dev (>= 7.6-8.3), zlib1g-dev (>= 1:1.1.3-5), ${LIBREADLINE_DEV}, libssl-dev, libpam0g-dev, psmisc, po-debconf, chrpath, automake1.9, doxygen, texlive-latex-base, ghostscript | gs-gpl, dpatch, gawk, bison, lsb-release, hardening-wrapper, ${CMAKE_DEP}libaio-dev, libjemalloc-dev (>= 3.0.0) Standards-Version: 3.8.2 Homepage: http://mariadb.org/ Vcs-Browser: http://bazaar.launchpad.net/~maria-captains/maria/10.1/files diff --git a/extra/jemalloc/COPYING b/extra/jemalloc/COPYING deleted file mode 100644 index 019e8132275..00000000000 --- a/extra/jemalloc/COPYING +++ /dev/null @@ -1,27 +0,0 @@ -Unless otherwise specified, files in the jemalloc source distribution are -subject to the following license: --------------------------------------------------------------------------------- -Copyright (C) 2002-2013 Jason Evans <jasone@canonware.com>. -All rights reserved. -Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. -Copyright (C) 2009-2013 Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice(s), - this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice(s), - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- diff --git a/extra/jemalloc/ChangeLog b/extra/jemalloc/ChangeLog deleted file mode 100644 index 4f03fc141cb..00000000000 --- a/extra/jemalloc/ChangeLog +++ /dev/null @@ -1,434 +0,0 @@ -Following are change highlights associated with official releases. Important -bug fixes are all mentioned, but internal enhancements are omitted here for -brevity (even though they are more fun to write about). 
Much more detail can be -found in the git revision history: - - http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git - git://canonware.com/jemalloc.git - -* 3.3.1a (December 27, 2013) - - Bug fixes from 3.4.1 - - Fix Valgrind integration flaws that caused Valgrind warnings about reads of - uninitialized memory in: - + arena chunk headers - + internal zero-initialized data structures (relevant to tcache and prof - code) - -* 3.3.1 (March 6, 2013) - - This version fixes bugs that are typically encountered only when utilizing - custom run-time options. - - Bug fixes: - - Fix a locking order bug that could cause deadlock during fork if heap - profiling were enabled. - - Fix a chunk recycling bug that could cause the allocator to lose track of - whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause - corruption if allocating via sbrk(2) (unlikely unless running with the - "dss:primary" option specified). This was completely harmless on Linux - unless using mlockall(2) (and unlikely even then, unless the - --disable-munmap configure option or the "dss:primary" option was - specified). This regression was introduced in 3.1.0 by the - mlockall(2)/madvise(2) interaction fix. - - Fix TLS-related memory corruption that could occur during thread exit if the - thread never allocated memory. Only the quarantine and prof facilities were - susceptible. - - Fix two quarantine bugs: - + Internal reallocation of the quarantined object array leaked the old - array. - + Reallocation failure for internal reallocation of the quarantined object - array (very unlikely) resulted in memory corruption. - - Fix Valgrind integration to annotate all internally allocated memory in a - way that keeps Valgrind happy about internal data structure access. - - Fix building for s390 systems. - -* 3.3.0 (January 23, 2013) - - This version includes a few minor performance improvements in addition to the - listed new features and bug fixes. - - New features: - - Add clipping support to lg_chunk option processing. - - Add the --enable-ivsalloc option. - - Add the --without-export option. - - Add the --disable-zone-allocator option. - - Bug fixes: - - Fix "arenas.extend" mallctl to output the number of arenas. - - Fix chunk_recycyle() to unconditionally inform Valgrind that returned memory - is undefined. - - Fix build break on FreeBSD related to alloca.h. - -* 3.2.0 (November 9, 2012) - - In addition to a couple of bug fixes, this version modifies page run - allocation and dirty page purging algorithms in order to better control - page-level virtual memory fragmentation. - - Incompatible changes: - - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1). - - Bug fixes: - - Fix dss/mmap allocation precedence code to use recyclable mmap memory only - after primary dss allocation fails. - - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced - in 3.1.0 by the addition of the "arena.<i>.purge" mallctl. - -* 3.1.0 (October 16, 2012) - - New features: - - Auto-detect whether running inside Valgrind, thus removing the need to - manually specify MALLOC_CONF=valgrind:true. - - Add the "arenas.extend" mallctl, which allows applications to create - manually managed arenas. - - Add the ALLOCM_ARENA() flag for {,r,d}allocm(). - - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls, - which provide control over dss/mmap precedence. - - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge". - - Define LG_QUANTUM for hppa. 
- - Incompatible changes: - - Disable tcache by default if running inside Valgrind, in order to avoid - making unallocated objects appear reachable to Valgrind. - - Drop const from malloc_usable_size() argument on Linux. - - Bug fixes: - - Fix heap profiling crash if sampled object is freed via realloc(p, 0). - - Remove const from __*_hook variable declarations, so that glibc can modify - them during process forking. - - Fix mlockall(2)/madvise(2) interaction. - - Fix fork(2)-related deadlocks. - - Fix error return value for "thread.tcache.enabled" mallctl. - -* 3.0.0 (May 11, 2012) - - Although this version adds some major new features, the primary focus is on - internal code cleanup that facilitates maintainability and portability, most - of which is not reflected in the ChangeLog. This is the first release to - incorporate substantial contributions from numerous other developers, and the - result is a more broadly useful allocator (see the git revision history for - contribution details). Note that the license has been unified, thanks to - Facebook granting a license under the same terms as the other copyright - holders (see COPYING). - - New features: - - Implement Valgrind support, redzones, and quarantine. - - Add support for additional platforms: - + FreeBSD - + Mac OS X Lion - + MinGW - + Windows (no support yet for replacing the system malloc) - - Add support for additional architectures: - + MIPS - + SH4 - + Tilera - - Add support for cross compiling. - - Add nallocm(), which rounds a request size up to the nearest size class - without actually allocating. - - Implement aligned_alloc() (blame C11). - - Add the "thread.tcache.enabled" mallctl. - - Add the "opt.prof_final" mallctl. - - Update pprof (from gperftools 2.0). - - Add the --with-mangling option. - - Add the --disable-experimental option. - - Add the --disable-munmap option, and make it the default on Linux. - - Add the --enable-mremap option, which disables use of mremap(2) by default. - - Incompatible changes: - - Enable stats by default. - - Enable fill by default. - - Disable lazy locking by default. - - Rename the "tcache.flush" mallctl to "thread.tcache.flush". - - Rename the "arenas.pagesize" mallctl to "arenas.page". - - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB). - - Change the "opt.prof_accum" default from true to false. - - Removed features: - - Remove the swap feature, including the "config.swap", "swap.avail", - "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls. - - Remove highruns statistics, including the - "stats.arenas.<i>.bins.<j>.highruns" and - "stats.arenas.<i>.lruns.<j>.highruns" mallctls. - - As part of small size class refactoring, remove the "opt.lg_[qc]space_max", - "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and - "arenas.[tqcs]bins" mallctls. - - Remove the "arenas.chunksize" mallctl. - - Remove the "opt.lg_prof_tcmax" option. - - Remove the "opt.lg_prof_bt_max" option. - - Remove the "opt.lg_tcache_gc_sweep" option. - - Remove the --disable-tiny option, including the "config.tiny" mallctl. - - Remove the --enable-dynamic-page-shift configure option. - - Remove the --enable-sysv configure option. - - Bug fixes: - - Fix a statistics-related bug in the "thread.arena" mallctl that could cause - invalid statistics and crashes. - - Work around TLS deallocation via free() on Linux. This bug could cause - write-after-free memory corruption. - - Fix a potential deadlock that could occur during interval- and - growth-triggered heap profile dumps. 
- - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags. - - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could - cause memory corruption and crashes with --enable-dss specified. - - Fix fork-related bugs that could cause deadlock in children between fork - and exec. - - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter. - - Fix realloc(p, 0) to act like free(p). - - Do not enforce minimum alignment in memalign(). - - Check for NULL pointer in malloc_usable_size(). - - Fix an off-by-one heap profile statistics bug that could be observed in - interval- and growth-triggered heap profiles. - - Fix the "epoch" mallctl to update cached stats even if the passed in epoch - is 0. - - Fix bin->runcur management to fix a layout policy bug. This bug did not - affect correctness. - - Fix a bug in choose_arena_hard() that potentially caused more arenas to be - initialized than necessary. - - Add missing "opt.lg_tcache_max" mallctl implementation. - - Use glibc allocator hooks to make mixed allocator usage less likely. - - Fix build issues for --disable-tcache. - - Don't mangle pthread_create() when --with-private-namespace is specified. - -* 2.2.5 (November 14, 2011) - - Bug fixes: - - Fix huge_ralloc() race when using mremap(2). This is a serious bug that - could cause memory corruption and/or crashes. - - Fix huge_ralloc() to maintain chunk statistics. - - Fix malloc_stats_print(..., "a") output. - -* 2.2.4 (November 5, 2011) - - Bug fixes: - - Initialize arenas_tsd before using it. This bug existed for 2.2.[0-3], as - well as for --disable-tls builds in earlier releases. - - Do not assume a 4 KiB page size in test/rallocm.c. - -* 2.2.3 (August 31, 2011) - - This version fixes numerous bugs related to heap profiling. - - Bug fixes: - - Fix a prof-related race condition. This bug could cause memory corruption, - but only occurred in non-default configurations (prof_accum:false). - - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is - excluded from backtraces). - - Fix a prof-related bug in realloc() (only triggered by OOM errors). - - Fix prof-related bugs in allocm() and rallocm(). - - Fix prof_tdata_cleanup() for --disable-tls builds. - - Fix a relative include path, to fix objdir builds. - -* 2.2.2 (July 30, 2011) - - Bug fixes: - - Fix a build error for --disable-tcache. - - Fix assertions in arena_purge() (for real this time). - - Add the --with-private-namespace option. This is a workaround for symbol - conflicts that can inadvertently arise when using static libraries. - -* 2.2.1 (March 30, 2011) - - Bug fixes: - - Implement atomic operations for x86/x64. This fixes compilation failures - for versions of gcc that are still in wide use. - - Fix an assertion in arena_purge(). - -* 2.2.0 (March 22, 2011) - - This version incorporates several improvements to algorithms and data - structures that tend to reduce fragmentation and increase speed. - - New features: - - Add the "stats.cactive" mallctl. - - Update pprof (from google-perftools 1.7). - - Improve backtracing-related configuration logic, and add the - --disable-prof-libgcc option. - - Bug fixes: - - Change default symbol visibility from "internal", to "hidden", which - decreases the overhead of library-internal function calls. - - Fix symbol visibility so that it is also set on OS X. - - Fix a build dependency regression caused by the introduction of the .pic.o - suffix for PIC object files. - - Add missing checks for mutex initialization failures. 
- - Don't use libgcc-based backtracing except on x64, where it is known to work. - - Fix deadlocks on OS X that were due to memory allocation in - pthread_mutex_lock(). - - Heap profiling-specific fixes: - + Fix memory corruption due to integer overflow in small region index - computation, when using a small enough sample interval that profiling - context pointers are stored in small run headers. - + Fix a bootstrap ordering bug that only occurred with TLS disabled. - + Fix a rallocm() rsize bug. - + Fix error detection bugs for aligned memory allocation. - -* 2.1.3 (March 14, 2011) - - Bug fixes: - - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix - for OS X in 2.1.2). - - Fix a "thread.arena" mallctl bug. - - Fix a thread cache stats merging bug. - -* 2.1.2 (March 2, 2011) - - Bug fixes: - - Fix "thread.{de,}allocatedp" mallctl for OS X. - - Add missing jemalloc.a to build system. - -* 2.1.1 (January 31, 2011) - - Bug fixes: - - Fix aligned huge reallocation (affected allocm()). - - Fix the ALLOCM_LG_ALIGN macro definition. - - Fix a heap dumping deadlock. - - Fix a "thread.arena" mallctl bug. - -* 2.1.0 (December 3, 2010) - - This version incorporates some optimizations that can't quite be considered - bug fixes. - - New features: - - Use Linux's mremap(2) for huge object reallocation when possible. - - Avoid locking in mallctl*() when possible. - - Add the "thread.[de]allocatedp" mallctl's. - - Convert the manual page source from roff to DocBook, and generate both roff - and HTML manuals. - - Bug fixes: - - Fix a crash due to incorrect bootstrap ordering. This only impacted - --enable-debug --enable-dss configurations. - - Fix a minor statistics bug for mallctl("swap.avail", ...). - -* 2.0.1 (October 29, 2010) - - Bug fixes: - - Fix a race condition in heap profiling that could cause undefined behavior - if "opt.prof_accum" were disabled. - - Add missing mutex unlocks for some OOM error paths in the heap profiling - code. - - Fix a compilation error for non-C99 builds. - -* 2.0.0 (October 24, 2010) - - This version focuses on the experimental *allocm() API, and on improved - run-time configuration/introspection. Nonetheless, numerous performance - improvements are also included. - - New features: - - Implement the experimental {,r,s,d}allocm() API, which provides a superset - of the functionality available via malloc(), calloc(), posix_memalign(), - realloc(), malloc_usable_size(), and free(). These functions can be used to - allocate/reallocate aligned zeroed memory, ask for optional extra memory - during reallocation, prevent object movement during reallocation, etc. - - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is - more human-readable, and more flexible. For example: - JEMALLOC_OPTIONS=AJP - is now: - MALLOC_CONF=abort:true,fill:true,stats_print:true - - Port to Apple OS X. Sponsored by Mozilla. - - Make it possible for the application to control thread-->arena mappings via - the "thread.arena" mallctl. - - Add compile-time support for all TLS-related functionality via pthreads TSD. - This is mainly of interest for OS X, which does not support TLS, but has a - TSD implementation with similar performance. - - Override memalign() and valloc() if they are provided by the system. - - Add the "arenas.purge" mallctl, which can be used to synchronously purge all - dirty unused pages. - - Make cumulative heap profiling data optional, so that it is possible to - limit the amount of memory consumed by heap profiling data structures. 
- - Add per thread allocation counters that can be accessed via the - "thread.allocated" and "thread.deallocated" mallctls. - - Incompatible changes: - - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above). - - Increase default backtrace depth from 4 to 128 for heap profiling. - - Disable interval-based profile dumps by default. - - Bug fixes: - - Remove bad assertions in fork handler functions. These assertions could - cause aborts for some combinations of configure settings. - - Fix strerror_r() usage to deal with non-standard semantics in GNU libc. - - Fix leak context reporting. This bug tended to cause the number of contexts - to be underreported (though the reported number of objects and bytes were - correct). - - Fix a realloc() bug for large in-place growing reallocation. This bug could - cause memory corruption, but it was hard to trigger. - - Fix an allocation bug for small allocations that could be triggered if - multiple threads raced to create a new run of backing pages. - - Enhance the heap profiler to trigger samples based on usable size, rather - than request size. - - Fix a heap profiling bug due to sometimes losing track of requested object - size for sampled objects. - -* 1.0.3 (August 12, 2010) - - Bug fixes: - - Fix the libunwind-based implementation of stack backtracing (used for heap - profiling). This bug could cause zero-length backtraces to be reported. - - Add a missing mutex unlock in library initialization code. If multiple - threads raced to initialize malloc, some of them could end up permanently - blocked. - -* 1.0.2 (May 11, 2010) - - Bug fixes: - - Fix junk filling of large objects, which could cause memory corruption. - - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual - memory limits could cause swap file configuration to fail. Contributed by - Jordan DeLong. - -* 1.0.1 (April 14, 2010) - - Bug fixes: - - Fix compilation when --enable-fill is specified. - - Fix threads-related profiling bugs that affected accuracy and caused memory - to be leaked during thread exit. - - Fix dirty page purging race conditions that could cause crashes. - - Fix crash in tcache flushing code during thread destruction. - -* 1.0.0 (April 11, 2010) - - This release focuses on speed and run-time introspection. Numerous - algorithmic improvements make this release substantially faster than its - predecessors. - - New features: - - Implement autoconf-based configuration system. - - Add mallctl*(), for the purposes of introspection and run-time - configuration. - - Make it possible for the application to manually flush a thread's cache, via - the "tcache.flush" mallctl. - - Base maximum dirty page count on proportion of active memory. - - Compute various addtional run-time statistics, including per size class - statistics for large objects. - - Expose malloc_stats_print(), which can be called repeatedly by the - application. - - Simplify the malloc_message() signature to only take one string argument, - and incorporate an opaque data pointer argument for use by the application - in combination with malloc_stats_print(). - - Add support for allocation backed by one or more swap files, and allow the - application to disable over-commit if swap files are in use. - - Implement allocation profiling and leak checking. - - Removed features: - - Remove the dynamic arena rebalancing code, since thread-specific caching - reduces its utility. - - Bug fixes: - - Modify chunk allocation to work when address space layout randomization - (ASLR) is in use. 
- - Fix thread cleanup bugs related to TLS destruction. - - Handle 0-size allocation requests in posix_memalign(). - - Fix a chunk leak. The leaked chunks were never touched, so this impacted - virtual memory usage, but not physical memory usage. - -* linux_2008082[78]a (August 27/28, 2008) - - These snapshot releases are the simple result of incorporating Linux-specific - support into the FreeBSD malloc sources. - --------------------------------------------------------------------------------- -vim:filetype=text:textwidth=80 diff --git a/extra/jemalloc/INSTALL b/extra/jemalloc/INSTALL deleted file mode 100644 index 6e371ce5095..00000000000 --- a/extra/jemalloc/INSTALL +++ /dev/null @@ -1,293 +0,0 @@ -Building and installing jemalloc can be as simple as typing the following while -in the root directory of the source tree: - - ./configure - make - make install - -=== Advanced configuration ===================================================== - -The 'configure' script supports numerous options that allow control of which -functionality is enabled, where jemalloc is installed, etc. Optionally, pass -any of the following arguments (not a definitive list) to 'configure': - ---help - Print a definitive list of options. - ---prefix=<install-root-dir> - Set the base directory in which to install. For example: - - ./configure --prefix=/usr/local - - will cause files to be installed into /usr/local/include, /usr/local/lib, - and /usr/local/man. - ---with-rpath=<colon-separated-rpath> - Embed one or more library paths, so that libjemalloc can find the libraries - it is linked to. This works only on ELF-based systems. - ---with-mangling=<map> - Mangle public symbols specified in <map> which is a comma-separated list of - name:mangled pairs. - - For example, to use ld's --wrap option as an alternative method for - overriding libc's malloc implementation, specify something like: - - --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...] - - Note that mangling happens prior to application of the prefix specified by - --with-jemalloc-prefix, and mangled symbols are then ignored when applying - the prefix. - ---with-jemalloc-prefix=<prefix> - Prefix all public APIs with <prefix>. For example, if <prefix> is - "prefix_", API changes like the following occur: - - malloc() --> prefix_malloc() - malloc_conf --> prefix_malloc_conf - /etc/malloc.conf --> /etc/prefix_malloc.conf - MALLOC_CONF --> PREFIX_MALLOC_CONF - - This makes it possible to use jemalloc at the same time as the system - allocator, or even to use multiple copies of jemalloc simultaneously. - - By default, the prefix is "", except on OS X, where it is "je_". On OS X, - jemalloc overlays the default malloc zone, but makes no attempt to actually - replace the "malloc", "calloc", etc. symbols. - ---without-export - Don't export public APIs. This can be useful when building jemalloc as a - static library, or to avoid exporting public APIs when using the zone - allocator on OSX. - ---with-private-namespace=<prefix> - Prefix all library-private APIs with <prefix>. For shared libraries, - symbol visibility mechanisms prevent these symbols from being exported, but - for static libraries, naming collisions are a real possibility. By - default, the prefix is "" (empty string). - ---with-install-suffix=<suffix> - Append <suffix> to the base name of all installed files, such that multiple - versions of jemalloc can coexist in the same installation directory. For - example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0. 
- ---enable-cc-silence - Enable code that silences non-useful compiler warnings. This is helpful - when trying to tell serious warnings from those due to compiler - limitations, but it potentially incurs a performance penalty. - ---enable-debug - Enable assertions and validation code. This incurs a substantial - performance hit, but is very useful during application development. - Implies --enable-ivsalloc. - ---enable-ivsalloc - Enable validation code, which verifies that pointers reside within - jemalloc-owned chunks before dereferencing them. This incurs a substantial - performance hit. - ---disable-stats - Disable statistics gathering functionality. See the "opt.stats_print" - option documentation for usage details. - ---enable-prof - Enable heap profiling and leak detection functionality. See the "opt.prof" - option documentation for usage details. When enabled, there are several - approaches to backtracing, and the configure script chooses the first one - in the following list that appears to function correctly: - - + libunwind (requires --enable-prof-libunwind) - + libgcc (unless --disable-prof-libgcc) - + gcc intrinsics (unless --disable-prof-gcc) - ---enable-prof-libunwind - Use the libunwind library (http://www.nongnu.org/libunwind/) for stack - backtracing. - ---disable-prof-libgcc - Disable the use of libgcc's backtracing functionality. - ---disable-prof-gcc - Disable the use of gcc intrinsics for backtracing. - ---with-static-libunwind=<libunwind.a> - Statically link against the specified libunwind.a rather than dynamically - linking with -lunwind. - ---disable-tcache - Disable thread-specific caches for small objects. Objects are cached and - released in bulk, thus reducing the total number of mutex operations. See - the "opt.tcache" option for usage details. - ---enable-mremap - Enable huge realloc() via mremap(2). mremap() is disabled by default - because the flavor used is specific to Linux, which has a quirk in its - virtual memory allocation algorithm that causes semi-permanent VM map holes - under normal jemalloc operation. - ---disable-munmap - Disable virtual memory deallocation via munmap(2); instead keep track of - the virtual memory for later use. munmap() is disabled by default (i.e. - --disable-munmap is implied) on Linux, which has a quirk in its virtual - memory allocation algorithm that causes semi-permanent VM map holes under - normal jemalloc operation. - ---enable-dss - Enable support for page allocation/deallocation via sbrk(2), in addition to - mmap(2). - ---disable-fill - Disable support for junk/zero filling of memory, quarantine, and redzones. - See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone" option - documentation for usage details. - ---disable-valgrind - Disable support for Valgrind. - ---disable-experimental - Disable support for the experimental API (*allocm()). - ---disable-zone-allocator - Disable zone allocator for Darwin. This means jemalloc won't be hooked as - the default allocator on OSX/iOS. - ---enable-utrace - Enable utrace(2)-based allocation tracing. This feature is not broadly - portable (FreeBSD has it, but Linux and OS X do not). - ---enable-xmalloc - Enable support for optional immediate termination due to out-of-memory - errors, as is commonly implemented by "xmalloc" wrapper function for malloc. - See the "opt.xmalloc" option documentation for usage details. 
- ---enable-lazy-lock - Enable code that wraps pthread_create() to detect when an application - switches from single-threaded to multi-threaded mode, so that it can avoid - mutex locking/unlocking operations while in single-threaded mode. In - practice, this feature usually has little impact on performance unless - thread-specific caching is disabled. - ---disable-tls - Disable thread-local storage (TLS), which allows for fast access to - thread-local variables via the __thread keyword. If TLS is available, - jemalloc uses it for several purposes. - ---with-xslroot=<path> - Specify where to find DocBook XSL stylesheets when building the - documentation. - -The following environment variables (not a definitive list) impact configure's -behavior: - -CFLAGS="?" - Pass these flags to the compiler. You probably shouldn't define this unless - you know what you are doing. (Use EXTRA_CFLAGS instead.) - -EXTRA_CFLAGS="?" - Append these flags to CFLAGS. This makes it possible to add flags such as - -Werror, while allowing the configure script to determine what other flags - are appropriate for the specified configuration. - - The configure script specifically checks whether an optimization flag (-O*) - is specified in EXTRA_CFLAGS, and refrains from specifying an optimization - level if it finds that one has already been specified. - -CPPFLAGS="?" - Pass these flags to the C preprocessor. Note that CFLAGS is not passed to - 'cpp' when 'configure' is looking for include files, so you must use - CPPFLAGS instead if you need to help 'configure' find header files. - -LD_LIBRARY_PATH="?" - 'ld' uses this colon-separated list to find libraries. - -LDFLAGS="?" - Pass these flags when linking. - -PATH="?" - 'configure' uses this to find programs. - -=== Advanced compilation ======================================================= - -To build only parts of jemalloc, use the following targets: - - build_lib_shared - build_lib_static - build_lib - build_doc_html - build_doc_man - build_doc - -To install only parts of jemalloc, use the following targets: - - install_bin - install_include - install_lib_shared - install_lib_static - install_lib - install_doc_html - install_doc_man - install_doc - -To clean up build results to varying degrees, use the following make targets: - - clean - distclean - relclean - -=== Advanced installation ====================================================== - -Optionally, define make variables when invoking make, including (not -exclusively): - -INCLUDEDIR="?" - Use this as the installation prefix for header files. - -LIBDIR="?" - Use this as the installation prefix for libraries. - -MANDIR="?" - Use this as the installation prefix for man pages. - -DESTDIR="?" - Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR. This is useful - when installing to a different path than was specified via --prefix. - -CC="?" - Use this to invoke the C compiler. - -CFLAGS="?" - Pass these flags to the compiler. - -CPPFLAGS="?" - Pass these flags to the C preprocessor. - -LDFLAGS="?" - Pass these flags when linking. - -PATH="?" - Use this to search for programs used during configuration and building. - -=== Development ================================================================ - -If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh' -script rather than 'configure'. This re-generates 'configure', enables -configuration dependency rules, and enables re-generation of automatically -generated source files. 
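A rough sketch of how the configure options, environment variables, and partial make targets
described above fit together (paths, prefix, and flags here are arbitrary examples, not values
taken from this tree):

  EXTRA_CFLAGS="-Werror" ./configure --prefix=/usr/local --with-jemalloc-prefix=je_
  make build_lib
  make DESTDIR=/tmp/stage install_lib install_include

With a symbol prefix such as je_, the staged library can coexist with the system allocator, as
noted in the --with-jemalloc-prefix description above; DESTDIR only relocates the staged install
and does not change the configured --prefix.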
- -The build system supports using an object directory separate from the source -tree. For example, you can create an 'obj' directory, and from within that -directory, issue configuration and build commands: - - autoconf - mkdir obj - cd obj - ../configure --enable-autogen - make - -=== Documentation ============================================================== - -The manual page is generated in both html and roff formats. Any web browser -can be used to view the html manual. The roff manual page can be formatted -prior to installation via the following command: - - nroff -man -t doc/jemalloc.3 diff --git a/extra/jemalloc/Makefile.in b/extra/jemalloc/Makefile.in deleted file mode 100644 index 74810472d11..00000000000 --- a/extra/jemalloc/Makefile.in +++ /dev/null @@ -1,324 +0,0 @@ -# Clear out all vpaths, then set just one (default vpath) for the main build -# directory. -vpath -vpath % . - -# Clear the default suffixes, so that built-in rules are not used. -.SUFFIXES : - -SHELL := /bin/sh - -CC := @CC@ - -# Configuration parameters. -DESTDIR = -BINDIR := $(DESTDIR)@BINDIR@ -INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@ -LIBDIR := $(DESTDIR)@LIBDIR@ -DATADIR := $(DESTDIR)@DATADIR@ -MANDIR := $(DESTDIR)@MANDIR@ -srcroot := @srcroot@ -objroot := @objroot@ -abs_srcroot := @abs_srcroot@ -abs_objroot := @abs_objroot@ - -# Build parameters. -CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include -CFLAGS := @CFLAGS@ -LDFLAGS := @LDFLAGS@ -EXTRA_LDFLAGS := @EXTRA_LDFLAGS@ -LIBS := @LIBS@ -RPATH_EXTRA := @RPATH_EXTRA@ -SO := @so@ -IMPORTLIB := @importlib@ -O := @o@ -A := @a@ -EXE := @exe@ -LIBPREFIX := @libprefix@ -REV := @rev@ -install_suffix := @install_suffix@ -ABI := @abi@ -XSLTPROC := @XSLTPROC@ -AUTOCONF := @AUTOCONF@ -_RPATH = @RPATH@ -RPATH = $(if $(1),$(call _RPATH,$(1))) -cfghdrs_in := @cfghdrs_in@ -cfghdrs_out := @cfghdrs_out@ -cfgoutputs_in := @cfgoutputs_in@ -cfgoutputs_out := @cfgoutputs_out@ -enable_autogen := @enable_autogen@ -enable_experimental := @enable_experimental@ -enable_zone_allocator := @enable_zone_allocator@ -DSO_LDFLAGS = @DSO_LDFLAGS@ -SOREV = @SOREV@ -PIC_CFLAGS = @PIC_CFLAGS@ -CTARGET = @CTARGET@ -LDTARGET = @LDTARGET@ -MKLIB = @MKLIB@ -CC_MM = @CC_MM@ - -ifeq (macho, $(ABI)) -TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib" -else -ifeq (pecoff, $(ABI)) -TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib" -else -TEST_LIBRARY_PATH := -endif -endif - -LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix) - -# Lists of files. 
-BINS := $(srcroot)bin/pprof $(objroot)bin/jemalloc.sh -CHDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h \ - $(objroot)include/jemalloc/jemalloc_defs$(install_suffix).h -CSRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c $(srcroot)src/atomic.c \ - $(srcroot)src/base.c $(srcroot)src/bitmap.c $(srcroot)src/chunk.c \ - $(srcroot)src/chunk_dss.c $(srcroot)src/chunk_mmap.c \ - $(srcroot)src/ckh.c $(srcroot)src/ctl.c $(srcroot)src/extent.c \ - $(srcroot)src/hash.c $(srcroot)src/huge.c $(srcroot)src/mb.c \ - $(srcroot)src/mutex.c $(srcroot)src/prof.c $(srcroot)src/quarantine.c \ - $(srcroot)src/rtree.c $(srcroot)src/stats.c $(srcroot)src/tcache.c \ - $(srcroot)src/util.c $(srcroot)src/tsd.c -ifeq ($(enable_zone_allocator), 1) -CSRCS += $(srcroot)src/zone.c -endif -ifeq ($(IMPORTLIB),$(SO)) -STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A) -endif -ifdef PIC_CFLAGS -STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A) -else -STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A) -endif -DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV) -ifneq ($(SOREV),$(SO)) -DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO) -endif -MAN3 := $(objroot)doc/jemalloc$(install_suffix).3 -DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml -DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.html) -DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.3) -DOCS := $(DOCS_HTML) $(DOCS_MAN3) -CTESTS := $(srcroot)test/aligned_alloc.c $(srcroot)test/allocated.c \ - $(srcroot)test/ALLOCM_ARENA.c $(srcroot)test/bitmap.c \ - $(srcroot)test/mremap.c $(srcroot)test/posix_memalign.c \ - $(srcroot)test/thread_arena.c $(srcroot)test/thread_tcache_enabled.c -ifeq ($(enable_experimental), 1) -CTESTS += $(srcroot)test/allocm.c $(srcroot)test/rallocm.c -endif - -COBJS := $(CSRCS:$(srcroot)%.c=$(objroot)%.$(O)) -CPICOBJS := $(CSRCS:$(srcroot)%.c=$(objroot)%.pic.$(O)) -CTESTOBJS := $(CTESTS:$(srcroot)%.c=$(objroot)%.$(O)) - -.PHONY: all dist build_doc_html build_doc_man build_doc -.PHONY: install_bin install_include install_lib -.PHONY: install_doc_html install_doc_man install_doc install -.PHONY: tests check clean distclean relclean - -.SECONDARY : $(CTESTOBJS) - -# Default target. -all: build - -dist: build_doc - -$(srcroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl - $(XSLTPROC) -o $@ $(objroot)doc/html.xsl $< - -$(srcroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl - $(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $< - -build_doc_html: $(DOCS_HTML) -build_doc_man: $(DOCS_MAN3) -build_doc: $(DOCS) - -# -# Include generated dependency files. 
-# -ifdef CC_MM --include $(COBJS:%.$(O)=%.d) --include $(CPICOBJS:%.$(O)=%.d) --include $(CTESTOBJS:%.$(O)=%.d) -endif - -$(COBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c -$(CPICOBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c -$(CPICOBJS): CFLAGS += $(PIC_CFLAGS) -$(CTESTOBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c -$(CTESTOBJS): CPPFLAGS += -I$(objroot)test -ifneq ($(IMPORTLIB),$(SO)) -$(COBJS): CPPFLAGS += -DDLLEXPORT -endif - -ifndef CC_MM -# Dependencies -HEADER_DIRS = $(srcroot)include/jemalloc/internal \ - $(objroot)include/jemalloc $(objroot)include/jemalloc/internal -HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h)) -$(COBJS) $(CPICOBJS) $(CTESTOBJS): $(HEADERS) -$(CTESTOBJS): $(objroot)test/jemalloc_test.h -endif - -$(COBJS) $(CPICOBJS) $(CTESTOBJS): %.$(O): - @mkdir -p $(@D) - $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $< -ifdef CC_MM - @$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< -endif - -ifneq ($(SOREV),$(SO)) -%.$(SO) : %.$(SOREV) - @mkdir -p $(@D) - ln -sf $(<F) $@ -endif - -$(objroot)lib/$(LIBJEMALLOC).$(SOREV) : $(if $(PIC_CFLAGS),$(CPICOBJS),$(COBJS)) - @mkdir -p $(@D) - $(CC) $(DSO_LDFLAGS) $(call RPATH,$(RPATH_EXTRA)) $(LDTARGET) $+ $(LDFLAGS) $(LIBS) $(EXTRA_LDFLAGS) - -$(objroot)lib/$(LIBJEMALLOC)_pic.$(A) : $(CPICOBJS) -$(objroot)lib/$(LIBJEMALLOC).$(A) : $(COBJS) -$(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(COBJS) - -$(STATIC_LIBS): - @mkdir -p $(@D) - $(MKLIB) $+ - -$(objroot)test/bitmap$(EXE): $(objroot)src/bitmap.$(O) - -$(objroot)test/%$(EXE): $(objroot)test/%.$(O) $(objroot)src/util.$(O) $(DSOS) - @mkdir -p $(@D) - $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(filter -lpthread,$(LIBS)) $(EXTRA_LDFLAGS) - -build_lib_shared: $(DSOS) -build_lib_static: $(STATIC_LIBS) -build: build_lib_shared build_lib_static - -install_bin: - install -d $(BINDIR) - @for b in $(BINS); do \ - echo "install -m 755 $$b $(BINDIR)"; \ - install -m 755 $$b $(BINDIR); \ -done - -install_include: - install -d $(INCLUDEDIR)/jemalloc - @for h in $(CHDRS); do \ - echo "install -m 644 $$h $(INCLUDEDIR)/jemalloc"; \ - install -m 644 $$h $(INCLUDEDIR)/jemalloc; \ -done - -install_lib_shared: $(DSOS) - install -d $(LIBDIR) - install -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR) -ifneq ($(SOREV),$(SO)) - ln -sf $(LIBJEMALLOC).$(SOREV) $(LIBDIR)/$(LIBJEMALLOC).$(SO) -endif - -install_lib_static: $(STATIC_LIBS) - install -d $(LIBDIR) - @for l in $(STATIC_LIBS); do \ - echo "install -m 755 $$l $(LIBDIR)"; \ - install -m 755 $$l $(LIBDIR); \ -done - -install_lib: install_lib_shared install_lib_static - -install_doc_html: - install -d $(DATADIR)/doc/jemalloc$(install_suffix) - @for d in $(DOCS_HTML); do \ - echo "install -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix)"; \ - install -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \ -done - -install_doc_man: - install -d $(MANDIR)/man3 - @for d in $(DOCS_MAN3); do \ - echo "install -m 644 $$d $(MANDIR)/man3"; \ - install -m 644 $$d $(MANDIR)/man3; \ -done - -install_doc: install_doc_html install_doc_man - -install: install_bin install_include install_lib install_doc - -tests: $(CTESTS:$(srcroot)%.c=$(objroot)%$(EXE)) - -check: tests - @mkdir -p $(objroot)test - @$(SHELL) -c 'total=0; \ - failures=0; \ - echo "========================================="; \ - for t in $(CTESTS:$(srcroot)%.c=$(objroot)%); do \ - total=`expr $$total + 1`; \ - /bin/echo -n "$${t} ... 
"; \ - $(TEST_LIBRARY_PATH) $${t}$(EXE) $(abs_srcroot) \ - $(abs_objroot) > $(objroot)$${t}.out 2>&1; \ - if test -e "$(srcroot)$${t}.exp"; then \ - diff -w -u $(srcroot)$${t}.exp \ - $(objroot)$${t}.out >/dev/null 2>&1; \ - fail=$$?; \ - if test "$${fail}" -eq "1" ; then \ - failures=`expr $${failures} + 1`; \ - echo "*** FAIL ***"; \ - else \ - echo "pass"; \ - fi; \ - else \ - echo "*** FAIL *** (.exp file is missing)"; \ - failures=`expr $${failures} + 1`; \ - fi; \ - done; \ - echo "========================================="; \ - echo "Failures: $${failures}/$${total}"' - -clean: - rm -f $(COBJS) - rm -f $(CPICOBJS) - rm -f $(COBJS:%.$(O)=%.d) - rm -f $(CPICOBJS:%.$(O)=%.d) - rm -f $(CTESTOBJS:%.$(O)=%$(EXE)) - rm -f $(CTESTOBJS) - rm -f $(CTESTOBJS:%.$(O)=%.d) - rm -f $(CTESTOBJS:%.$(O)=%.out) - rm -f $(DSOS) $(STATIC_LIBS) - -distclean: clean - rm -rf $(objroot)autom4te.cache - rm -f $(objroot)config.log - rm -f $(objroot)config.status - rm -f $(objroot)config.stamp - rm -f $(cfghdrs_out) - rm -f $(cfgoutputs_out) - -relclean: distclean - rm -f $(objroot)configure - rm -f $(srcroot)VERSION - rm -f $(DOCS_HTML) - rm -f $(DOCS_MAN3) - -#=============================================================================== -# Re-configuration rules. - -ifeq ($(enable_autogen), 1) -$(srcroot)configure : $(srcroot)configure.ac - cd ./$(srcroot) && $(AUTOCONF) - -$(objroot)config.status : $(srcroot)configure - ./$(objroot)config.status --recheck - -$(srcroot)config.stamp.in : $(srcroot)configure.ac - echo stamp > $(srcroot)config.stamp.in - -$(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure - ./$(objroot)config.status - @touch $@ - -# There must be some action in order for make to re-read Makefile when it is -# out of date. -$(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp - @true -endif diff --git a/extra/jemalloc/README b/extra/jemalloc/README deleted file mode 100644 index 7661683bae7..00000000000 --- a/extra/jemalloc/README +++ /dev/null @@ -1,16 +0,0 @@ -jemalloc is a general-purpose scalable concurrent malloc(3) implementation. -This distribution is a "portable" implementation that currently targets -FreeBSD, Linux, Apple OS X, and MinGW. jemalloc is included as the default -allocator in the FreeBSD and NetBSD operating systems, and it is used by the -Mozilla Firefox web browser on Microsoft Windows-related platforms. Depending -on your needs, one of the other divergent versions may suit your needs better -than this distribution. - -The COPYING file contains copyright and licensing information. - -The INSTALL file contains information on how to configure, build, and install -jemalloc. - -The ChangeLog file contains a brief summary of changes for each release. - -URL: http://www.canonware.com/jemalloc/ diff --git a/extra/jemalloc/VERSION b/extra/jemalloc/VERSION deleted file mode 100644 index 900c82d1043..00000000000 --- a/extra/jemalloc/VERSION +++ /dev/null @@ -1 +0,0 @@ -3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784 diff --git a/extra/jemalloc/autogen.sh b/extra/jemalloc/autogen.sh deleted file mode 100755 index 75f32da6873..00000000000 --- a/extra/jemalloc/autogen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -for i in autoconf; do - echo "$i" - $i - if [ $? -ne 0 ]; then - echo "Error $? in $i" - exit 1 - fi -done - -echo "./configure --enable-autogen $@" -./configure --enable-autogen $@ -if [ $? -ne 0 ]; then - echo "Error $? 
in ./configure" - exit 1 -fi diff --git a/extra/jemalloc/bin/jemalloc.sh b/extra/jemalloc/bin/jemalloc.sh deleted file mode 100644 index 7c9f1b530da..00000000000 --- a/extra/jemalloc/bin/jemalloc.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh - -prefix=/usr/local -exec_prefix=/usr/local -libdir=${exec_prefix}/lib - -LD_PRELOAD=${libdir}/libjemalloc.so.1 -export LD_PRELOAD -exec "$@" diff --git a/extra/jemalloc/bin/jemalloc.sh.in b/extra/jemalloc/bin/jemalloc.sh.in deleted file mode 100644 index cdf36737591..00000000000 --- a/extra/jemalloc/bin/jemalloc.sh.in +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh - -prefix=@prefix@ -exec_prefix=@exec_prefix@ -libdir=@libdir@ - -@LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@ -export @LD_PRELOAD_VAR@ -exec "$@" diff --git a/extra/jemalloc/bin/pprof b/extra/jemalloc/bin/pprof deleted file mode 100755 index 727eb43704f..00000000000 --- a/extra/jemalloc/bin/pprof +++ /dev/null @@ -1,5348 +0,0 @@ -#! /usr/bin/env perl - -# Copyright (c) 1998-2007, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# --- -# Program for printing the profile generated by common/profiler.cc, -# or by the heap profiler (common/debugallocation.cc) -# -# The profile contains a sequence of entries of the form: -# <count> <stack trace> -# This program parses the profile, and generates user-readable -# output. 
-# -# Examples: -# -# % tools/pprof "program" "profile" -# Enters "interactive" mode -# -# % tools/pprof --text "program" "profile" -# Generates one line per procedure -# -# % tools/pprof --gv "program" "profile" -# Generates annotated call-graph and displays via "gv" -# -# % tools/pprof --gv --focus=Mutex "program" "profile" -# Restrict to code paths that involve an entry that matches "Mutex" -# -# % tools/pprof --gv --focus=Mutex --ignore=string "program" "profile" -# Restrict to code paths that involve an entry that matches "Mutex" -# and does not match "string" -# -# % tools/pprof --list=IBF_CheckDocid "program" "profile" -# Generates disassembly listing of all routines with at least one -# sample that match the --list=<regexp> pattern. The listing is -# annotated with the flat and cumulative sample counts at each line. -# -# % tools/pprof --disasm=IBF_CheckDocid "program" "profile" -# Generates disassembly listing of all routines with at least one -# sample that match the --disasm=<regexp> pattern. The listing is -# annotated with the flat and cumulative sample counts at each PC value. -# -# TODO: Use color to indicate files? - -use strict; -use warnings; -use Getopt::Long; - -my $PPROF_VERSION = "2.0"; - -# These are the object tools we use which can come from a -# user-specified location using --tools, from the PPROF_TOOLS -# environment variable, or from the environment. -my %obj_tool_map = ( - "objdump" => "objdump", - "nm" => "nm", - "addr2line" => "addr2line", - "c++filt" => "c++filt", - ## ConfigureObjTools may add architecture-specific entries: - #"nm_pdb" => "nm-pdb", # for reading windows (PDB-format) executables - #"addr2line_pdb" => "addr2line-pdb", # ditto - #"otool" => "otool", # equivalent of objdump on OS X -); -# NOTE: these are lists, so you can put in commandline flags if you want. -my @DOT = ("dot"); # leave non-absolute, since it may be in /usr/local -my @GV = ("gv"); -my @EVINCE = ("evince"); # could also be xpdf or perhaps acroread -my @KCACHEGRIND = ("kcachegrind"); -my @PS2PDF = ("ps2pdf"); -# These are used for dynamic profiles -my @URL_FETCHER = ("curl", "-s"); - -# These are the web pages that servers need to support for dynamic profiles -my $HEAP_PAGE = "/pprof/heap"; -my $PROFILE_PAGE = "/pprof/profile"; # must support cgi-param "?seconds=#" -my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param - # ?seconds=#&event=x&period=n -my $GROWTH_PAGE = "/pprof/growth"; -my $CONTENTION_PAGE = "/pprof/contention"; -my $WALL_PAGE = "/pprof/wall(?:\\?.*)?"; # accepts options like namefilter -my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?"; -my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param - # "?seconds=#", - # "?tags_regexp=#" and - # "?type=#". -my $SYMBOL_PAGE = "/pprof/symbol"; # must support symbol lookup via POST -my $PROGRAM_NAME_PAGE = "/pprof/cmdline"; - -# These are the web pages that can be named on the command line. -# All the alternatives must begin with /. -my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" . - "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" . - "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)"; - -# default binary name -my $UNKNOWN_BINARY = "(unknown)"; - -# There is a pervasive dependency on the length (in hex characters, -# i.e., nibbles) of an address, distinguishing between 32-bit and -# 64-bit profiles. To err on the safe size, default to 64-bit here: -my $address_length = 16; - -my $dev_null = "/dev/null"; -if (! 
-e $dev_null && $^O =~ /MSWin/) { # $^O is the OS perl was built for - $dev_null = "nul"; -} - -# A list of paths to search for shared object files -my @prefix_list = (); - -# Special routine name that should not have any symbols. -# Used as separator to parse "addr2line -i" output. -my $sep_symbol = '_fini'; -my $sep_address = undef; - -##### Argument parsing ##### - -sub usage_string { - return <<EOF; -Usage: -pprof [options] <program> <profiles> - <profiles> is a space separated list of profile names. -pprof [options] <symbolized-profiles> - <symbolized-profiles> is a list of profile files where each file contains - the necessary symbol mappings as well as profile data (likely generated - with --raw). -pprof [options] <profile> - <profile> is a remote form. Symbols are obtained from host:port$SYMBOL_PAGE - - Each name can be: - /path/to/profile - a path to a profile file - host:port[/<service>] - a location of a service to get profile from - - The /<service> can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile, - $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall, - $CENSUSPROFILE_PAGE, or /pprof/filteredprofile. - For instance: - pprof http://myserver.com:80$HEAP_PAGE - If /<service> is omitted, the service defaults to $PROFILE_PAGE (cpu profiling). -pprof --symbols <program> - Maps addresses to symbol names. In this mode, stdin should be a - list of library mappings, in the same format as is found in the heap- - and cpu-profile files (this loosely matches that of /proc/self/maps - on linux), followed by a list of hex addresses to map, one per line. - - For more help with querying remote servers, including how to add the - necessary server-side support code, see this filename (or one like it): - - /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html - -Options: - --cum Sort by cumulative data - --base=<base> Subtract <base> from <profile> before display - --interactive Run in interactive mode (interactive "help" gives help) [default] - --seconds=<n> Length of time for dynamic profiles [default=30 secs] - --add_lib=<file> Read additional symbols and line info from the given library - --lib_prefix=<dir> Comma separated list of library path prefixes - -Reporting Granularity: - --addresses Report at address level - --lines Report at source line level - --functions Report at function level [default] - --files Report at source file level - -Output type: - --text Generate text report - --callgrind Generate callgrind format to stdout - --gv Generate Postscript and display - --evince Generate PDF and display - --web Generate SVG and display - --list=<regexp> Generate source listing of matching routines - --disasm=<regexp> Generate disassembly of matching routines - --symbols Print demangled symbol names found at given addresses - --dot Generate DOT file to stdout - --ps Generate Postcript to stdout - --pdf Generate PDF to stdout - --svg Generate SVG to stdout - --gif Generate GIF to stdout - --raw Generate symbolized pprof data (useful with remote fetch) - -Heap-Profile Options: - --inuse_space Display in-use (mega)bytes [default] - --inuse_objects Display in-use objects - --alloc_space Display allocated (mega)bytes - --alloc_objects Display allocated objects - --show_bytes Display space in bytes - --drop_negative Ignore negative differences - -Contention-profile options: - --total_delay Display total delay at each region [default] - --contentions Display number of delays at each region - --mean_delay Display mean delay at each region - -Call-graph Options: - --nodecount=<n> Show at most so 
many nodes [default=80] - --nodefraction=<f> Hide nodes below <f>*total [default=.005] - --edgefraction=<f> Hide edges below <f>*total [default=.001] - --maxdegree=<n> Max incoming/outgoing edges per node [default=8] - --focus=<regexp> Focus on nodes matching <regexp> - --ignore=<regexp> Ignore nodes matching <regexp> - --scale=<n> Set GV scaling [default=0] - --heapcheck Make nodes with non-0 object counts - (i.e. direct leak generators) more visible - -Miscellaneous: - --tools=<prefix or binary:fullpath>[,...] \$PATH for object tool pathnames - --test Run unit tests - --help This message - --version Version information - -Environment Variables: - PPROF_TMPDIR Profiles directory. Defaults to \$HOME/pprof - PPROF_TOOLS Prefix for object tools pathnames - -Examples: - -pprof /bin/ls ls.prof - Enters "interactive" mode -pprof --text /bin/ls ls.prof - Outputs one line per procedure -pprof --web /bin/ls ls.prof - Displays annotated call-graph in web browser -pprof --gv /bin/ls ls.prof - Displays annotated call-graph via 'gv' -pprof --gv --focus=Mutex /bin/ls ls.prof - Restricts to code paths including a .*Mutex.* entry -pprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof - Code paths including Mutex but not string -pprof --list=getdir /bin/ls ls.prof - (Per-line) annotated source listing for getdir() -pprof --disasm=getdir /bin/ls ls.prof - (Per-PC) annotated disassembly for getdir() - -pprof http://localhost:1234/ - Enters "interactive" mode -pprof --text localhost:1234 - Outputs one line per procedure for localhost:1234 -pprof --raw localhost:1234 > ./local.raw -pprof --text ./local.raw - Fetches a remote profile for later analysis and then - analyzes it in text mode. -EOF -} - -sub version_string { - return <<EOF -pprof (part of gperftools $PPROF_VERSION) - -Copyright 1998-2007 Google Inc. - -This is BSD licensed software; see the source for copying conditions -and license information. -There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A -PARTICULAR PURPOSE. -EOF -} - -sub usage { - my $msg = shift; - print STDERR "$msg\n\n"; - print STDERR usage_string(); - print STDERR "\nFATAL ERROR: $msg\n"; # just as a reminder - exit(1); -} - -sub Init() { - # Setup tmp-file name and handler to clean it up. - # We do this in the very beginning so that we can use - # error() and cleanup() function anytime here after. 
- $main::tmpfile_sym = "/tmp/pprof$$.sym"; - $main::tmpfile_ps = "/tmp/pprof$$"; - $main::next_tmpfile = 0; - $SIG{'INT'} = \&sighandler; - - # Cache from filename/linenumber to source code - $main::source_cache = (); - - $main::opt_help = 0; - $main::opt_version = 0; - - $main::opt_cum = 0; - $main::opt_base = ''; - $main::opt_addresses = 0; - $main::opt_lines = 0; - $main::opt_functions = 0; - $main::opt_files = 0; - $main::opt_lib_prefix = ""; - - $main::opt_text = 0; - $main::opt_callgrind = 0; - $main::opt_list = ""; - $main::opt_disasm = ""; - $main::opt_symbols = 0; - $main::opt_gv = 0; - $main::opt_evince = 0; - $main::opt_web = 0; - $main::opt_dot = 0; - $main::opt_ps = 0; - $main::opt_pdf = 0; - $main::opt_gif = 0; - $main::opt_svg = 0; - $main::opt_raw = 0; - - $main::opt_nodecount = 80; - $main::opt_nodefraction = 0.005; - $main::opt_edgefraction = 0.001; - $main::opt_maxdegree = 8; - $main::opt_focus = ''; - $main::opt_ignore = ''; - $main::opt_scale = 0; - $main::opt_heapcheck = 0; - $main::opt_seconds = 30; - $main::opt_lib = ""; - - $main::opt_inuse_space = 0; - $main::opt_inuse_objects = 0; - $main::opt_alloc_space = 0; - $main::opt_alloc_objects = 0; - $main::opt_show_bytes = 0; - $main::opt_drop_negative = 0; - $main::opt_interactive = 0; - - $main::opt_total_delay = 0; - $main::opt_contentions = 0; - $main::opt_mean_delay = 0; - - $main::opt_tools = ""; - $main::opt_debug = 0; - $main::opt_test = 0; - - # These are undocumented flags used only by unittests. - $main::opt_test_stride = 0; - - # Are we using $SYMBOL_PAGE? - $main::use_symbol_page = 0; - - # Files returned by TempName. - %main::tempnames = (); - - # Type of profile we are dealing with - # Supported types: - # cpu - # heap - # growth - # contention - $main::profile_type = ''; # Empty type means "unknown" - - GetOptions("help!" => \$main::opt_help, - "version!" => \$main::opt_version, - "cum!" => \$main::opt_cum, - "base=s" => \$main::opt_base, - "seconds=i" => \$main::opt_seconds, - "add_lib=s" => \$main::opt_lib, - "lib_prefix=s" => \$main::opt_lib_prefix, - "functions!" => \$main::opt_functions, - "lines!" => \$main::opt_lines, - "addresses!" => \$main::opt_addresses, - "files!" => \$main::opt_files, - "text!" => \$main::opt_text, - "callgrind!" => \$main::opt_callgrind, - "list=s" => \$main::opt_list, - "disasm=s" => \$main::opt_disasm, - "symbols!" => \$main::opt_symbols, - "gv!" => \$main::opt_gv, - "evince!" => \$main::opt_evince, - "web!" => \$main::opt_web, - "dot!" => \$main::opt_dot, - "ps!" => \$main::opt_ps, - "pdf!" => \$main::opt_pdf, - "svg!" => \$main::opt_svg, - "gif!" => \$main::opt_gif, - "raw!" => \$main::opt_raw, - "interactive!" => \$main::opt_interactive, - "nodecount=i" => \$main::opt_nodecount, - "nodefraction=f" => \$main::opt_nodefraction, - "edgefraction=f" => \$main::opt_edgefraction, - "maxdegree=i" => \$main::opt_maxdegree, - "focus=s" => \$main::opt_focus, - "ignore=s" => \$main::opt_ignore, - "scale=i" => \$main::opt_scale, - "heapcheck" => \$main::opt_heapcheck, - "inuse_space!" => \$main::opt_inuse_space, - "inuse_objects!" => \$main::opt_inuse_objects, - "alloc_space!" => \$main::opt_alloc_space, - "alloc_objects!" => \$main::opt_alloc_objects, - "show_bytes!" => \$main::opt_show_bytes, - "drop_negative!" => \$main::opt_drop_negative, - "total_delay!" => \$main::opt_total_delay, - "contentions!" => \$main::opt_contentions, - "mean_delay!" => \$main::opt_mean_delay, - "tools=s" => \$main::opt_tools, - "test!" => \$main::opt_test, - "debug!" 
=> \$main::opt_debug, - # Undocumented flags used only by unittests: - "test_stride=i" => \$main::opt_test_stride, - ) || usage("Invalid option(s)"); - - # Deal with the standard --help and --version - if ($main::opt_help) { - print usage_string(); - exit(0); - } - - if ($main::opt_version) { - print version_string(); - exit(0); - } - - # Disassembly/listing/symbols mode requires address-level info - if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) { - $main::opt_functions = 0; - $main::opt_lines = 0; - $main::opt_addresses = 1; - $main::opt_files = 0; - } - - # Check heap-profiling flags - if ($main::opt_inuse_space + - $main::opt_inuse_objects + - $main::opt_alloc_space + - $main::opt_alloc_objects > 1) { - usage("Specify at most on of --inuse/--alloc options"); - } - - # Check output granularities - my $grains = - $main::opt_functions + - $main::opt_lines + - $main::opt_addresses + - $main::opt_files + - 0; - if ($grains > 1) { - usage("Only specify one output granularity option"); - } - if ($grains == 0) { - $main::opt_functions = 1; - } - - # Check output modes - my $modes = - $main::opt_text + - $main::opt_callgrind + - ($main::opt_list eq '' ? 0 : 1) + - ($main::opt_disasm eq '' ? 0 : 1) + - ($main::opt_symbols == 0 ? 0 : 1) + - $main::opt_gv + - $main::opt_evince + - $main::opt_web + - $main::opt_dot + - $main::opt_ps + - $main::opt_pdf + - $main::opt_svg + - $main::opt_gif + - $main::opt_raw + - $main::opt_interactive + - 0; - if ($modes > 1) { - usage("Only specify one output mode"); - } - if ($modes == 0) { - if (-t STDOUT) { # If STDOUT is a tty, activate interactive mode - $main::opt_interactive = 1; - } else { - $main::opt_text = 1; - } - } - - if ($main::opt_test) { - RunUnitTests(); - # Should not return - exit(1); - } - - # Binary name and profile arguments list - $main::prog = ""; - @main::pfile_args = (); - - # Remote profiling without a binary (using $SYMBOL_PAGE instead) - if (@ARGV > 0) { - if (IsProfileURL($ARGV[0])) { - $main::use_symbol_page = 1; - } elsif (IsSymbolizedProfileFile($ARGV[0])) { - $main::use_symbolized_profile = 1; - $main::prog = $UNKNOWN_BINARY; # will be set later from the profile file - } - } - - if ($main::use_symbol_page || $main::use_symbolized_profile) { - # We don't need a binary! - my %disabled = ('--lines' => $main::opt_lines, - '--disasm' => $main::opt_disasm); - for my $option (keys %disabled) { - usage("$option cannot be used without a binary") if $disabled{$option}; - } - # Set $main::prog later... - scalar(@ARGV) || usage("Did not specify profile file"); - } elsif ($main::opt_symbols) { - # --symbols needs a binary-name (to run nm on, etc) but not profiles - $main::prog = shift(@ARGV) || usage("Did not specify program"); - } else { - $main::prog = shift(@ARGV) || usage("Did not specify program"); - scalar(@ARGV) || usage("Did not specify profile file"); - } - - # Parse profile file/location arguments - foreach my $farg (@ARGV) { - if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) { - my $machine = $1; - my $num_machines = $2; - my $path = $3; - for (my $i = 0; $i < $num_machines; $i++) { - unshift(@main::pfile_args, "$i.$machine$path"); - } - } else { - unshift(@main::pfile_args, $farg); - } - } - - if ($main::use_symbol_page) { - unless (IsProfileURL($main::pfile_args[0])) { - error("The first profile should be a remote form to use $SYMBOL_PAGE\n"); - } - CheckSymbolPage(); - $main::prog = FetchProgramName(); - } elsif (!$main::use_symbolized_profile) { # may not need objtools! 
- ConfigureObjTools($main::prog) - } - - # Break the opt_lib_prefix into the prefix_list array - @prefix_list = split (',', $main::opt_lib_prefix); - - # Remove trailing / from the prefixes, in the list to prevent - # searching things like /my/path//lib/mylib.so - foreach (@prefix_list) { - s|/+$||; - } -} - -sub Main() { - Init(); - $main::collected_profile = undef; - @main::profile_files = (); - $main::op_time = time(); - - # Printing symbols is special and requires a lot less info that most. - if ($main::opt_symbols) { - PrintSymbols(*STDIN); # Get /proc/maps and symbols output from stdin - return; - } - - # Fetch all profile data - FetchDynamicProfiles(); - - # this will hold symbols that we read from the profile files - my $symbol_map = {}; - - # Read one profile, pick the last item on the list - my $data = ReadProfile($main::prog, pop(@main::profile_files)); - my $profile = $data->{profile}; - my $pcs = $data->{pcs}; - my $libs = $data->{libs}; # Info about main program and shared libraries - $symbol_map = MergeSymbols($symbol_map, $data->{symbols}); - - # Add additional profiles, if available. - if (scalar(@main::profile_files) > 0) { - foreach my $pname (@main::profile_files) { - my $data2 = ReadProfile($main::prog, $pname); - $profile = AddProfile($profile, $data2->{profile}); - $pcs = AddPcs($pcs, $data2->{pcs}); - $symbol_map = MergeSymbols($symbol_map, $data2->{symbols}); - } - } - - # Subtract base from profile, if specified - if ($main::opt_base ne '') { - my $base = ReadProfile($main::prog, $main::opt_base); - $profile = SubtractProfile($profile, $base->{profile}); - $pcs = AddPcs($pcs, $base->{pcs}); - $symbol_map = MergeSymbols($symbol_map, $base->{symbols}); - } - - # Get total data in profile - my $total = TotalProfile($profile); - - # Collect symbols - my $symbols; - if ($main::use_symbolized_profile) { - $symbols = FetchSymbols($pcs, $symbol_map); - } elsif ($main::use_symbol_page) { - $symbols = FetchSymbols($pcs); - } else { - # TODO(csilvers): $libs uses the /proc/self/maps data from profile1, - # which may differ from the data from subsequent profiles, especially - # if they were run on different machines. Use appropriate libs for - # each pc somehow. - $symbols = ExtractSymbols($libs, $pcs); - } - - # Remove uniniteresting stack items - $profile = RemoveUninterestingFrames($symbols, $profile); - - # Focus? - if ($main::opt_focus ne '') { - $profile = FocusProfile($symbols, $profile, $main::opt_focus); - } - - # Ignore? - if ($main::opt_ignore ne '') { - $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore); - } - - my $calls = ExtractCalls($symbols, $profile); - - # Reduce profiles to required output granularity, and also clean - # each stack trace so a given entry exists at most once. 
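The profile object handed between these stages is, as the FlatProfile and CumulativeProfile helpers further down in this file show, a plain hash mapping a newline-joined stack of hex addresses to a sample count. A minimal sketch of that shape and of the flat/cumulative derivation; the addresses and counts here are made up for illustration:

    # Fabricated two-stack profile: both leaves are called from 0x4007f0.
    my %profile = (
        "0x400a10\n0x4007f0" => 7,
        "0x400b20\n0x4007f0" => 3,
    );

    my (%flat, %cumulative);
    while (my ($stack, $count) = each %profile) {
        my @addrs = split(/\n/, $stack);
        $flat{$addrs[0]} += $count;            # flat: charge the leaf frame only
        $cumulative{$_} += $count for @addrs;  # cumulative: charge every frame
    }
    # %flat       = (0x400a10 => 7, 0x400b20 => 3)
    # %cumulative = (0x400a10 => 7, 0x400b20 => 3, 0x4007f0 => 10)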
- my $reduced = ReduceProfile($symbols, $profile); - - # Get derived profiles - my $flat = FlatProfile($reduced); - my $cumulative = CumulativeProfile($reduced); - - # Print - if (!$main::opt_interactive) { - if ($main::opt_disasm) { - PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm); - } elsif ($main::opt_list) { - PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0); - } elsif ($main::opt_text) { - # Make sure the output is empty when have nothing to report - # (only matters when --heapcheck is given but we must be - # compatible with old branches that did not pass --heapcheck always): - if ($total != 0) { - printf("Total: %s %s\n", Unparse($total), Units()); - } - PrintText($symbols, $flat, $cumulative, -1); - } elsif ($main::opt_raw) { - PrintSymbolizedProfile($symbols, $profile, $main::prog); - } elsif ($main::opt_callgrind) { - PrintCallgrind($calls); - } else { - if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { - if ($main::opt_gv) { - RunGV(TempName($main::next_tmpfile, "ps"), ""); - } elsif ($main::opt_evince) { - RunEvince(TempName($main::next_tmpfile, "pdf"), ""); - } elsif ($main::opt_web) { - my $tmp = TempName($main::next_tmpfile, "svg"); - RunWeb($tmp); - # The command we run might hand the file name off - # to an already running browser instance and then exit. - # Normally, we'd remove $tmp on exit (right now), - # but fork a child to remove $tmp a little later, so that the - # browser has time to load it first. - delete $main::tempnames{$tmp}; - if (fork() == 0) { - sleep 5; - unlink($tmp); - exit(0); - } - } - } else { - cleanup(); - exit(1); - } - } - } else { - InteractiveMode($profile, $symbols, $libs, $total); - } - - cleanup(); - exit(0); -} - -##### Entry Point ##### - -Main(); - -# Temporary code to detect if we're running on a Goobuntu system. -# These systems don't have the right stuff installed for the special -# Readline libraries to work, so as a temporary workaround, we default -# to using the normal stdio code, rather than the fancier readline-based -# code -sub ReadlineMightFail { - if (-e '/lib/libtermcap.so.2') { - return 0; # libtermcap exists, so readline should be okay - } else { - return 1; - } -} - -sub RunGV { - my $fname = shift; - my $bg = shift; # "" or " &" if we should run in background - if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) { - # Options using double dash are supported by this gv version. - # Also, turn on noantialias to better handle bug in gv for - # postscript files with large dimensions. - # TODO: Maybe we should not pass the --noantialias flag - # if the gv version is known to work properly without the flag. - system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname) - . $bg); - } else { - # Old gv version - only supports options that use single dash. - print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n"; - system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg); - } -} - -sub RunEvince { - my $fname = shift; - my $bg = shift; # "" or " &" if we should run in background - system(ShellEscape(@EVINCE, $fname) . $bg); -} - -sub RunWeb { - my $fname = shift; - print STDERR "Loading web page file:///$fname\n"; - - if (`uname` =~ /Darwin/) { - # OS X: open will use standard preference for SVG files. - system("/usr/bin/open", $fname); - return; - } - - # Some kind of Unix; try generic symlinks, then specific browsers. - # (Stop once we find one.) - # Works best if the browser is already running. 
- my @alt = ( - "/etc/alternatives/gnome-www-browser", - "/etc/alternatives/x-www-browser", - "google-chrome", - "firefox", - ); - foreach my $b (@alt) { - if (system($b, $fname) == 0) { - return; - } - } - - print STDERR "Could not load web browser.\n"; -} - -sub RunKcachegrind { - my $fname = shift; - my $bg = shift; # "" or " &" if we should run in background - print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n"; - system(ShellEscape(@KCACHEGRIND, $fname) . $bg); -} - - -##### Interactive helper routines ##### - -sub InteractiveMode { - $| = 1; # Make output unbuffered for interactive mode - my ($orig_profile, $symbols, $libs, $total) = @_; - - print STDERR "Welcome to pprof! For help, type 'help'.\n"; - - # Use ReadLine if it's installed and input comes from a console. - if ( -t STDIN && - !ReadlineMightFail() && - defined(eval {require Term::ReadLine}) ) { - my $term = new Term::ReadLine 'pprof'; - while ( defined ($_ = $term->readline('(pprof) '))) { - $term->addhistory($_) if /\S/; - if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { - last; # exit when we get an interactive command to quit - } - } - } else { # don't have readline - while (1) { - print STDERR "(pprof) "; - $_ = <STDIN>; - last if ! defined $_ ; - s/\r//g; # turn windows-looking lines into unix-looking lines - - # Save some flags that might be reset by InteractiveCommand() - my $save_opt_lines = $main::opt_lines; - - if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { - last; # exit when we get an interactive command to quit - } - - # Restore flags - $main::opt_lines = $save_opt_lines; - } - } -} - -# Takes two args: orig profile, and command to run. -# Returns 1 if we should keep going, or 0 if we were asked to quit -sub InteractiveCommand { - my($orig_profile, $symbols, $libs, $total, $command) = @_; - $_ = $command; # just to make future m//'s easier - if (!defined($_)) { - print STDERR "\n"; - return 0; - } - if (m/^\s*quit/) { - return 0; - } - if (m/^\s*help/) { - InteractiveHelpMessage(); - return 1; - } - # Clear all the mode options -- mode is controlled by "$command" - $main::opt_text = 0; - $main::opt_callgrind = 0; - $main::opt_disasm = 0; - $main::opt_list = 0; - $main::opt_gv = 0; - $main::opt_evince = 0; - $main::opt_cum = 0; - - if (m/^\s*(text|top)(\d*)\s*(.*)/) { - $main::opt_text = 1; - - my $line_limit = ($2 ne "") ? 
int($2) : 10; - - my $routine; - my $ignore; - ($routine, $ignore) = ParseInteractiveArgs($3); - - my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); - my $reduced = ReduceProfile($symbols, $profile); - - # Get derived profiles - my $flat = FlatProfile($reduced); - my $cumulative = CumulativeProfile($reduced); - - PrintText($symbols, $flat, $cumulative, $line_limit); - return 1; - } - if (m/^\s*callgrind\s*([^ \n]*)/) { - $main::opt_callgrind = 1; - - # Get derived profiles - my $calls = ExtractCalls($symbols, $orig_profile); - my $filename = $1; - if ( $1 eq '' ) { - $filename = TempName($main::next_tmpfile, "callgrind"); - } - PrintCallgrind($calls, $filename); - if ( $1 eq '' ) { - RunKcachegrind($filename, " & "); - $main::next_tmpfile++; - } - - return 1; - } - if (m/^\s*(web)?list\s*(.+)/) { - my $html = (defined($1) && ($1 eq "web")); - $main::opt_list = 1; - - my $routine; - my $ignore; - ($routine, $ignore) = ParseInteractiveArgs($2); - - my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); - my $reduced = ReduceProfile($symbols, $profile); - - # Get derived profiles - my $flat = FlatProfile($reduced); - my $cumulative = CumulativeProfile($reduced); - - PrintListing($total, $libs, $flat, $cumulative, $routine, $html); - return 1; - } - if (m/^\s*disasm\s*(.+)/) { - $main::opt_disasm = 1; - - my $routine; - my $ignore; - ($routine, $ignore) = ParseInteractiveArgs($1); - - # Process current profile to account for various settings - my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); - my $reduced = ReduceProfile($symbols, $profile); - - # Get derived profiles - my $flat = FlatProfile($reduced); - my $cumulative = CumulativeProfile($reduced); - - PrintDisassembly($libs, $flat, $cumulative, $routine); - return 1; - } - if (m/^\s*(gv|web|evince)\s*(.*)/) { - $main::opt_gv = 0; - $main::opt_evince = 0; - $main::opt_web = 0; - if ($1 eq "gv") { - $main::opt_gv = 1; - } elsif ($1 eq "evince") { - $main::opt_evince = 1; - } elsif ($1 eq "web") { - $main::opt_web = 1; - } - - my $focus; - my $ignore; - ($focus, $ignore) = ParseInteractiveArgs($2); - - # Process current profile to account for various settings - my $profile = ProcessProfile($total, $orig_profile, $symbols, - $focus, $ignore); - my $reduced = ReduceProfile($symbols, $profile); - - # Get derived profiles - my $flat = FlatProfile($reduced); - my $cumulative = CumulativeProfile($reduced); - - if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { - if ($main::opt_gv) { - RunGV(TempName($main::next_tmpfile, "ps"), " &"); - } elsif ($main::opt_evince) { - RunEvince(TempName($main::next_tmpfile, "pdf"), " &"); - } elsif ($main::opt_web) { - RunWeb(TempName($main::next_tmpfile, "svg")); - } - $main::next_tmpfile++; - } - return 1; - } - if (m/^\s*$/) { - return 1; - } - print STDERR "Unknown command: try 'help'.\n"; - return 1; -} - - -sub ProcessProfile { - my $total_count = shift; - my $orig_profile = shift; - my $symbols = shift; - my $focus = shift; - my $ignore = shift; - - # Process current profile to account for various settings - my $profile = $orig_profile; - printf("Total: %s %s\n", Unparse($total_count), Units()); - if ($focus ne '') { - $profile = FocusProfile($symbols, $profile, $focus); - my $focus_count = TotalProfile($profile); - printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n", - $focus, - Unparse($focus_count), Units(), - Unparse($total_count), ($focus_count*100.0) / $total_count); - } - if ($ignore ne '') { - 
$profile = IgnoreProfile($symbols, $profile, $ignore); - my $ignore_count = TotalProfile($profile); - printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n", - $ignore, - Unparse($ignore_count), Units(), - Unparse($total_count), - ($ignore_count*100.0) / $total_count); - } - - return $profile; -} - -sub InteractiveHelpMessage { - print STDERR <<ENDOFHELP; -Interactive pprof mode - -Commands: - gv - gv [focus] [-ignore1] [-ignore2] - Show graphical hierarchical display of current profile. Without - any arguments, shows all samples in the profile. With the optional - "focus" argument, restricts the samples shown to just those where - the "focus" regular expression matches a routine name on the stack - trace. - - web - web [focus] [-ignore1] [-ignore2] - Like GV, but displays profile in your web browser instead of using - Ghostview. Works best if your web browser is already running. - To change the browser that gets used: - On Linux, set the /etc/alternatives/gnome-www-browser symlink. - On OS X, change the Finder association for SVG files. - - list [routine_regexp] [-ignore1] [-ignore2] - Show source listing of routines whose names match "routine_regexp" - - weblist [routine_regexp] [-ignore1] [-ignore2] - Displays a source listing of routines whose names match "routine_regexp" - in a web browser. You can click on source lines to view the - corresponding disassembly. - - top [--cum] [-ignore1] [-ignore2] - top20 [--cum] [-ignore1] [-ignore2] - top37 [--cum] [-ignore1] [-ignore2] - Show top lines ordered by flat profile count, or cumulative count - if --cum is specified. If a number is present after 'top', the - top K routines will be shown (defaults to showing the top 10) - - disasm [routine_regexp] [-ignore1] [-ignore2] - Show disassembly of routines whose names match "routine_regexp", - annotated with sample counts. - - callgrind - callgrind [filename] - Generates callgrind file. If no filename is given, kcachegrind is called. - - help - This listing - quit or ^D - End pprof - -For commands that accept optional -ignore tags, samples where any routine in -the stack trace matches the regular expression in any of the -ignore -parameters will be ignored. - -Further pprof details are available at this location (or one similar): - - /usr/doc/gperftools-$PPROF_VERSION/cpu_profiler.html - /usr/doc/gperftools-$PPROF_VERSION/heap_profiler.html - -ENDOFHELP -} -sub ParseInteractiveArgs { - my $args = shift; - my $focus = ""; - my $ignore = ""; - my @x = split(/ +/, $args); - foreach $a (@x) { - if ($a =~ m/^(--|-)lines$/) { - $main::opt_lines = 1; - } elsif ($a =~ m/^(--|-)cum$/) { - $main::opt_cum = 1; - } elsif ($a =~ m/^-(.*)/) { - $ignore .= (($ignore ne "") ? "|" : "" ) . $1; - } else { - $focus .= (($focus ne "") ? "|" : "" ) . 
$a; - } - } - if ($ignore ne "") { - print STDERR "Ignoring samples in call stacks that match '$ignore'\n"; - } - return ($focus, $ignore); -} - -##### Output code ##### - -sub TempName { - my $fnum = shift; - my $ext = shift; - my $file = "$main::tmpfile_ps.$fnum.$ext"; - $main::tempnames{$file} = 1; - return $file; -} - -# Print profile data in packed binary format (64-bit) to standard out -sub PrintProfileData { - my $profile = shift; - - # print header (64-bit style) - # (zero) (header-size) (version) (sample-period) (zero) - print pack('L*', 0, 0, 3, 0, 0, 0, 1, 0, 0, 0); - - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - if ($#addrs >= 0) { - my $depth = $#addrs + 1; - # int(foo / 2**32) is the only reliable way to get rid of bottom - # 32 bits on both 32- and 64-bit systems. - print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32)); - print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32)); - - foreach my $full_addr (@addrs) { - my $addr = $full_addr; - $addr =~ s/0x0*//; # strip off leading 0x, zeroes - if (length($addr) > 16) { - print STDERR "Invalid address in profile: $full_addr\n"; - next; - } - my $low_addr = substr($addr, -8); # get last 8 hex chars - my $high_addr = substr($addr, -16, 8); # get up to 8 more hex chars - print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr)); - } - } - } -} - -# Print symbols and profile data -sub PrintSymbolizedProfile { - my $symbols = shift; - my $profile = shift; - my $prog = shift; - - $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $symbol_marker = $&; - - print '--- ', $symbol_marker, "\n"; - if (defined($prog)) { - print 'binary=', $prog, "\n"; - } - while (my ($pc, $name) = each(%{$symbols})) { - my $sep = ' '; - print '0x', $pc; - # We have a list of function names, which include the inlined - # calls. They are separated (and terminated) by --, which is - # illegal in function names. - for (my $j = 2; $j <= $#{$name}; $j += 3) { - print $sep, $name->[$j]; - $sep = '--'; - } - print "\n"; - } - print '---', "\n"; - - $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $profile_marker = $&; - print '--- ', $profile_marker, "\n"; - if (defined($main::collected_profile)) { - # if used with remote fetch, simply dump the collected profile to output. - open(SRC, "<$main::collected_profile"); - while (<SRC>) { - print $_; - } - close(SRC); - } else { - # dump a cpu-format profile to standard out - PrintProfileData($profile); - } -} - -# Print text output -sub PrintText { - my $symbols = shift; - my $flat = shift; - my $cumulative = shift; - my $line_limit = shift; - - my $total = TotalProfile($flat); - - # Which profile to sort by? - my $s = $main::opt_cum ? $cumulative : $flat; - - my $running_sum = 0; - my $lines = 0; - foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b } - keys(%{$cumulative})) { - my $f = GetEntry($flat, $k); - my $c = GetEntry($cumulative, $k); - $running_sum += $f; - - my $sym = $k; - if (exists($symbols->{$k})) { - $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1]; - if ($main::opt_addresses) { - $sym = $k . " " . $sym; - } - } - - if ($f != 0 || $c != 0) { - printf("%8s %6s %6s %8s %6s %s\n", - Unparse($f), - Percent($f, $total), - Percent($running_sum, $total), - Unparse($c), - Percent($c, $total), - $sym); - } - $lines++; - last if ($line_limit >= 0 && $lines >= $line_limit); - } -} - -# Callgrind format has a compression for repeated function and file -# names. 
You show the name the first time, and just use its number -# subsequently. This can cut down the file to about a third or a -# quarter of its uncompressed size. $key and $val are the key/value -# pair that would normally be printed by callgrind; $map is a map from -# value to number. -sub CompressedCGName { - my($key, $val, $map) = @_; - my $idx = $map->{$val}; - # For very short keys, providing an index hurts rather than helps. - if (length($val) <= 3) { - return "$key=$val\n"; - } elsif (defined($idx)) { - return "$key=($idx)\n"; - } else { - # scalar(keys $map) gives the number of items in the map. - $idx = scalar(keys(%{$map})) + 1; - $map->{$val} = $idx; - return "$key=($idx) $val\n"; - } -} - -# Print the call graph in a way that's suiteable for callgrind. -sub PrintCallgrind { - my $calls = shift; - my $filename; - my %filename_to_index_map; - my %fnname_to_index_map; - - if ($main::opt_interactive) { - $filename = shift; - print STDERR "Writing callgrind file to '$filename'.\n" - } else { - $filename = "&STDOUT"; - } - open(CG, ">$filename"); - printf CG ("events: Hits\n\n"); - foreach my $call ( map { $_->[0] } - sort { $a->[1] cmp $b ->[1] || - $a->[2] <=> $b->[2] } - map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/; - [$_, $1, $2] } - keys %$calls ) { - my $count = int($calls->{$call}); - $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/; - my ( $caller_file, $caller_line, $caller_function, - $callee_file, $callee_line, $callee_function ) = - ( $1, $2, $3, $5, $6, $7 ); - - # TODO(csilvers): for better compression, collect all the - # caller/callee_files and functions first, before printing - # anything, and only compress those referenced more than once. - printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map); - printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map); - if (defined $6) { - printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map); - printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map); - printf CG ("calls=$count $callee_line\n"); - } - printf CG ("$caller_line $count\n\n"); - } -} - -# Print disassembly for all all routines that match $main::opt_disasm -sub PrintDisassembly { - my $libs = shift; - my $flat = shift; - my $cumulative = shift; - my $disasm_opts = shift; - - my $total = TotalProfile($flat); - - foreach my $lib (@{$libs}) { - my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts); - my $offset = AddressSub($lib->[1], $lib->[3]); - foreach my $routine (sort ByName keys(%{$symbol_table})) { - my $start_addr = $symbol_table->{$routine}->[0]; - my $end_addr = $symbol_table->{$routine}->[1]; - # See if there are any samples in this routine - my $length = hex(AddressSub($end_addr, $start_addr)); - my $addr = AddressAdd($start_addr, $offset); - for (my $i = 0; $i < $length; $i++) { - if (defined($cumulative->{$addr})) { - PrintDisassembledFunction($lib->[0], $offset, - $routine, $flat, $cumulative, - $start_addr, $end_addr, $total); - last; - } - $addr = AddressInc($addr); - } - } - } -} - -# Return reference to array of tuples of the form: -# [start_address, filename, linenumber, instruction, limit_address] -# E.g., -# ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"] -sub Disassemble { - my $prog = shift; - my $offset = shift; - my $start_addr = shift; - my $end_addr = shift; - - my $objdump = $obj_tool_map{"objdump"}; - my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn", - "--start-address=0x$start_addr", - 
"--stop-address=0x$end_addr", $prog); - open(OBJDUMP, "$cmd |") || error("$cmd: $!\n"); - my @result = (); - my $filename = ""; - my $linenumber = -1; - my $last = ["", "", "", ""]; - while (<OBJDUMP>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - chop; - if (m|\s*([^:\s]+):(\d+)\s*$|) { - # Location line of the form: - # <filename>:<linenumber> - $filename = $1; - $linenumber = $2; - } elsif (m/^ +([0-9a-f]+):\s*(.*)/) { - # Disassembly line -- zero-extend address to full length - my $addr = HexExtend($1); - my $k = AddressAdd($addr, $offset); - $last->[4] = $k; # Store ending address for previous instruction - $last = [$k, $filename, $linenumber, $2, $end_addr]; - push(@result, $last); - } - } - close(OBJDUMP); - return @result; -} - -# The input file should contain lines of the form /proc/maps-like -# output (same format as expected from the profiles) or that looks -# like hex addresses (like "0xDEADBEEF"). We will parse all -# /proc/maps output, and for all the hex addresses, we will output -# "short" symbol names, one per line, in the same order as the input. -sub PrintSymbols { - my $maps_and_symbols_file = shift; - - # ParseLibraries expects pcs to be in a set. Fine by us... - my @pclist = (); # pcs in sorted order - my $pcs = {}; - my $map = ""; - foreach my $line (<$maps_and_symbols_file>) { - $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines - if ($line =~ /\b(0x[0-9a-f]+)\b/i) { - push(@pclist, HexExtend($1)); - $pcs->{$pclist[-1]} = 1; - } else { - $map .= $line; - } - } - - my $libs = ParseLibraries($main::prog, $map, $pcs); - my $symbols = ExtractSymbols($libs, $pcs); - - foreach my $pc (@pclist) { - # ->[0] is the shortname, ->[2] is the full name - print(($symbols->{$pc}->[0] || "??") . "\n"); - } -} - - -# For sorting functions by name -sub ByName { - return ShortFunctionName($a) cmp ShortFunctionName($b); -} - -# Print source-listing for all all routines that match $list_opts -sub PrintListing { - my $total = shift; - my $libs = shift; - my $flat = shift; - my $cumulative = shift; - my $list_opts = shift; - my $html = shift; - - my $output = \*STDOUT; - my $fname = ""; - - if ($html) { - # Arrange to write the output to a temporary file - $fname = TempName($main::next_tmpfile, "html"); - $main::next_tmpfile++; - if (!open(TEMP, ">$fname")) { - print STDERR "$fname: $!\n"; - return; - } - $output = \*TEMP; - print $output HtmlListingHeader(); - printf $output ("<div class=\"legend\">%s<br>Total: %s %s</div>\n", - $main::prog, Unparse($total), Units()); - } - - my $listed = 0; - foreach my $lib (@{$libs}) { - my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts); - my $offset = AddressSub($lib->[1], $lib->[3]); - foreach my $routine (sort ByName keys(%{$symbol_table})) { - # Print if there are any samples in this routine - my $start_addr = $symbol_table->{$routine}->[0]; - my $end_addr = $symbol_table->{$routine}->[1]; - my $length = hex(AddressSub($end_addr, $start_addr)); - my $addr = AddressAdd($start_addr, $offset); - for (my $i = 0; $i < $length; $i++) { - if (defined($cumulative->{$addr})) { - $listed += PrintSource( - $lib->[0], $offset, - $routine, $flat, $cumulative, - $start_addr, $end_addr, - $html, - $output); - last; - } - $addr = AddressInc($addr); - } - } - } - - if ($html) { - if ($listed > 0) { - print $output HtmlListingFooter(); - close($output); - RunWeb($fname); - } else { - close($output); - unlink($fname); - } - } -} - -sub HtmlListingHeader { - return <<'EOF'; -<DOCTYPE html> -<html> -<head> 
-<title>Pprof listing</title> -<style type="text/css"> -body { - font-family: sans-serif; -} -h1 { - font-size: 1.5em; - margin-bottom: 4px; -} -.legend { - font-size: 1.25em; -} -.line { - color: #aaaaaa; -} -.nop { - color: #aaaaaa; -} -.unimportant { - color: #cccccc; -} -.disasmloc { - color: #000000; -} -.deadsrc { - cursor: pointer; -} -.deadsrc:hover { - background-color: #eeeeee; -} -.livesrc { - color: #0000ff; - cursor: pointer; -} -.livesrc:hover { - background-color: #eeeeee; -} -.asm { - color: #008800; - display: none; -} -</style> -<script type="text/javascript"> -function pprof_toggle_asm(e) { - var target; - if (!e) e = window.event; - if (e.target) target = e.target; - else if (e.srcElement) target = e.srcElement; - - if (target) { - var asm = target.nextSibling; - if (asm && asm.className == "asm") { - asm.style.display = (asm.style.display == "block" ? "" : "block"); - e.preventDefault(); - return false; - } - } -} -</script> -</head> -<body> -EOF -} - -sub HtmlListingFooter { - return <<'EOF'; -</body> -</html> -EOF -} - -sub HtmlEscape { - my $text = shift; - $text =~ s/&/&/g; - $text =~ s/</</g; - $text =~ s/>/>/g; - return $text; -} - -# Returns the indentation of the line, if it has any non-whitespace -# characters. Otherwise, returns -1. -sub Indentation { - my $line = shift; - if (m/^(\s*)\S/) { - return length($1); - } else { - return -1; - } -} - -# If the symbol table contains inlining info, Disassemble() may tag an -# instruction with a location inside an inlined function. But for -# source listings, we prefer to use the location in the function we -# are listing. So use MapToSymbols() to fetch full location -# information for each instruction and then pick out the first -# location from a location list (location list contains callers before -# callees in case of inlining). 
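To make the location-list convention concrete (the function names, files and the address below are invented): MapToSymbols yields caller-before-callee triplets of (shortname, file:line, fullname) per PC, so for an instruction inside Bar() that was inlined into Foo() the data might look like the sketch below, and GetTopLevelLineNumbers then copies the outermost file and line into the instruction while preserving the original, most specific location:

    my $symbols = {
        "0x400a10" => [ "Foo", "foo.cc:42", "Foo()",     # outermost: the routine being listed
                        "Bar", "bar.h:7",   "Bar()" ],   # innermost: the inlined callee
    };
    # After GetTopLevelLineNumbers(), the instruction entry for 0x400a10 carries
    #   [1] = "foo.cc", [2] = 42   -- location used for the source listing
    #   [5] = "bar.h",  [6] = 7    -- most specific location, kept for the disassembly column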
-# -# After this routine has run, each entry in $instructions contains: -# [0] start address -# [1] filename for function we are listing -# [2] line number for function we are listing -# [3] disassembly -# [4] limit address -# [5] most specific filename (may be different from [1] due to inlining) -# [6] most specific line number (may be different from [2] due to inlining) -sub GetTopLevelLineNumbers { - my ($lib, $offset, $instructions) = @_; - my $pcs = []; - for (my $i = 0; $i <= $#{$instructions}; $i++) { - push(@{$pcs}, $instructions->[$i]->[0]); - } - my $symbols = {}; - MapToSymbols($lib, $offset, $pcs, $symbols); - for (my $i = 0; $i <= $#{$instructions}; $i++) { - my $e = $instructions->[$i]; - push(@{$e}, $e->[1]); - push(@{$e}, $e->[2]); - my $addr = $e->[0]; - my $sym = $symbols->{$addr}; - if (defined($sym)) { - if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) { - $e->[1] = $1; # File name - $e->[2] = $2; # Line number - } - } - } -} - -# Print source-listing for one routine -sub PrintSource { - my $prog = shift; - my $offset = shift; - my $routine = shift; - my $flat = shift; - my $cumulative = shift; - my $start_addr = shift; - my $end_addr = shift; - my $html = shift; - my $output = shift; - - # Disassemble all instructions (just to get line numbers) - my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); - GetTopLevelLineNumbers($prog, $offset, \@instructions); - - # Hack 1: assume that the first source file encountered in the - # disassembly contains the routine - my $filename = undef; - for (my $i = 0; $i <= $#instructions; $i++) { - if ($instructions[$i]->[2] >= 0) { - $filename = $instructions[$i]->[1]; - last; - } - } - if (!defined($filename)) { - print STDERR "no filename found in $routine\n"; - return 0; - } - - # Hack 2: assume that the largest line number from $filename is the - # end of the procedure. This is typically safe since if P1 contains - # an inlined call to P2, then P2 usually occurs earlier in the - # source file. If this does not work, we might have to compute a - # density profile or just print all regions we find. - my $lastline = 0; - for (my $i = 0; $i <= $#instructions; $i++) { - my $f = $instructions[$i]->[1]; - my $l = $instructions[$i]->[2]; - if (($f eq $filename) && ($l > $lastline)) { - $lastline = $l; - } - } - - # Hack 3: assume the first source location from "filename" is the start of - # the source code. - my $firstline = 1; - for (my $i = 0; $i <= $#instructions; $i++) { - if ($instructions[$i]->[1] eq $filename) { - $firstline = $instructions[$i]->[2]; - last; - } - } - - # Hack 4: Extend last line forward until its indentation is less than - # the indentation we saw on $firstline - my $oldlastline = $lastline; - { - if (!open(FILE, "<$filename")) { - print STDERR "$filename: $!\n"; - return 0; - } - my $l = 0; - my $first_indentation = -1; - while (<FILE>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - $l++; - my $indent = Indentation($_); - if ($l >= $firstline) { - if ($first_indentation < 0 && $indent >= 0) { - $first_indentation = $indent; - last if ($first_indentation == 0); - } - } - if ($l >= $lastline && $indent >= 0) { - if ($indent >= $first_indentation) { - $lastline = $l+1; - } else { - last; - } - } - } - close(FILE); - } - - # Assign all samples to the range $firstline,$lastline, - # Hack 4: If an instruction does not occur in the range, its samples - # are moved to the next instruction that occurs in the range. 
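A small, made-up illustration of that rollover rule: if the routine being listed spans lines 10..20 of foo.cc and the instruction stream charges 4, 6 and 5 counts to foo.cc:10, some_header.h:200 (an inlined location outside the range) and foo.cc:12 respectively, the 6 out-of-range counts ride along in the running totals and land on line 12 together with its own 5; anything still unassigned when the loop ends is credited to the last line of the range. In sketch form:

    my @per_insn = ( [ "foo.cc", 10, 4 ],           # (file, line, count) -- invented numbers
                     [ "some_header.h", 200, 6 ],   # inlined location, outside 10..20
                     [ "foo.cc", 12, 5 ] );
    my ($filename, $firstline, $lastline) = ("foo.cc", 10, 20);
    my %samples;
    my $running = 0;
    foreach my $e (@per_insn) {
        my ($f, $l, $c) = @$e;
        $running += $c;
        if ($f eq $filename && $l >= $firstline && $l <= $lastline) {
            $samples{$l} += $running;    # out-of-range counts roll into this line
            $running = 0;
        }
    }
    $samples{$lastline} += $running if $running;    # leftovers go to the last line
    # %samples = (10 => 4, 12 => 11)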
- my $samples1 = {}; # Map from line number to flat count - my $samples2 = {}; # Map from line number to cumulative count - my $running1 = 0; # Unassigned flat counts - my $running2 = 0; # Unassigned cumulative counts - my $total1 = 0; # Total flat counts - my $total2 = 0; # Total cumulative counts - my %disasm = (); # Map from line number to disassembly - my $running_disasm = ""; # Unassigned disassembly - my $skip_marker = "---\n"; - if ($html) { - $skip_marker = ""; - for (my $l = $firstline; $l <= $lastline; $l++) { - $disasm{$l} = ""; - } - } - my $last_dis_filename = ''; - my $last_dis_linenum = -1; - my $last_touched_line = -1; # To detect gaps in disassembly for a line - foreach my $e (@instructions) { - # Add up counts for all address that fall inside this instruction - my $c1 = 0; - my $c2 = 0; - for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { - $c1 += GetEntry($flat, $a); - $c2 += GetEntry($cumulative, $a); - } - - if ($html) { - my $dis = sprintf(" %6s %6s \t\t%8s: %s ", - HtmlPrintNumber($c1), - HtmlPrintNumber($c2), - UnparseAddress($offset, $e->[0]), - CleanDisassembly($e->[3])); - - # Append the most specific source line associated with this instruction - if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) }; - $dis = HtmlEscape($dis); - my $f = $e->[5]; - my $l = $e->[6]; - if ($f ne $last_dis_filename) { - $dis .= sprintf("<span class=disasmloc>%s:%d</span>", - HtmlEscape(CleanFileName($f)), $l); - } elsif ($l ne $last_dis_linenum) { - # De-emphasize the unchanged file name portion - $dis .= sprintf("<span class=unimportant>%s</span>" . - "<span class=disasmloc>:%d</span>", - HtmlEscape(CleanFileName($f)), $l); - } else { - # De-emphasize the entire location - $dis .= sprintf("<span class=unimportant>%s:%d</span>", - HtmlEscape(CleanFileName($f)), $l); - } - $last_dis_filename = $f; - $last_dis_linenum = $l; - $running_disasm .= $dis; - $running_disasm .= "\n"; - } - - $running1 += $c1; - $running2 += $c2; - $total1 += $c1; - $total2 += $c2; - my $file = $e->[1]; - my $line = $e->[2]; - if (($file eq $filename) && - ($line >= $firstline) && - ($line <= $lastline)) { - # Assign all accumulated samples to this line - AddEntry($samples1, $line, $running1); - AddEntry($samples2, $line, $running2); - $running1 = 0; - $running2 = 0; - if ($html) { - if ($line != $last_touched_line && $disasm{$line} ne '') { - $disasm{$line} .= "\n"; - } - $disasm{$line} .= $running_disasm; - $running_disasm = ''; - $last_touched_line = $line; - } - } - } - - # Assign any leftover samples to $lastline - AddEntry($samples1, $lastline, $running1); - AddEntry($samples2, $lastline, $running2); - if ($html) { - if ($lastline != $last_touched_line && $disasm{$lastline} ne '') { - $disasm{$lastline} .= "\n"; - } - $disasm{$lastline} .= $running_disasm; - } - - if ($html) { - printf $output ( - "<h1>%s</h1>%s\n<pre onClick=\"pprof_toggle_asm()\">\n" . - "Total:%6s %6s (flat / cumulative %s)\n", - HtmlEscape(ShortFunctionName($routine)), - HtmlEscape(CleanFileName($filename)), - Unparse($total1), - Unparse($total2), - Units()); - } else { - printf $output ( - "ROUTINE ====================== %s in %s\n" . 
- "%6s %6s Total %s (flat / cumulative)\n", - ShortFunctionName($routine), - CleanFileName($filename), - Unparse($total1), - Unparse($total2), - Units()); - } - if (!open(FILE, "<$filename")) { - print STDERR "$filename: $!\n"; - return 0; - } - my $l = 0; - while (<FILE>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - $l++; - if ($l >= $firstline - 5 && - (($l <= $oldlastline + 5) || ($l <= $lastline))) { - chop; - my $text = $_; - if ($l == $firstline) { print $output $skip_marker; } - my $n1 = GetEntry($samples1, $l); - my $n2 = GetEntry($samples2, $l); - if ($html) { - # Emit a span that has one of the following classes: - # livesrc -- has samples - # deadsrc -- has disassembly, but with no samples - # nop -- has no matching disasembly - # Also emit an optional span containing disassembly. - my $dis = $disasm{$l}; - my $asm = ""; - if (defined($dis) && $dis ne '') { - $asm = "<span class=\"asm\">" . $dis . "</span>"; - } - my $source_class = (($n1 + $n2 > 0) - ? "livesrc" - : (($asm ne "") ? "deadsrc" : "nop")); - printf $output ( - "<span class=\"line\">%5d</span> " . - "<span class=\"%s\">%6s %6s %s</span>%s\n", - $l, $source_class, - HtmlPrintNumber($n1), - HtmlPrintNumber($n2), - HtmlEscape($text), - $asm); - } else { - printf $output( - "%6s %6s %4d: %s\n", - UnparseAlt($n1), - UnparseAlt($n2), - $l, - $text); - } - if ($l == $lastline) { print $output $skip_marker; } - }; - } - close(FILE); - if ($html) { - print $output "</pre>\n"; - } - return 1; -} - -# Return the source line for the specified file/linenumber. -# Returns undef if not found. -sub SourceLine { - my $file = shift; - my $line = shift; - - # Look in cache - if (!defined($main::source_cache{$file})) { - if (100 < scalar keys(%main::source_cache)) { - # Clear the cache when it gets too big - $main::source_cache = (); - } - - # Read all lines from the file - if (!open(FILE, "<$file")) { - print STDERR "$file: $!\n"; - $main::source_cache{$file} = []; # Cache the negative result - return undef; - } - my $lines = []; - push(@{$lines}, ""); # So we can use 1-based line numbers as indices - while (<FILE>) { - push(@{$lines}, $_); - } - close(FILE); - - # Save the lines in the cache - $main::source_cache{$file} = $lines; - } - - my $lines = $main::source_cache{$file}; - if (($line < 0) || ($line > $#{$lines})) { - return undef; - } else { - return $lines->[$line]; - } -} - -# Print disassembly for one routine with interspersed source if available -sub PrintDisassembledFunction { - my $prog = shift; - my $offset = shift; - my $routine = shift; - my $flat = shift; - my $cumulative = shift; - my $start_addr = shift; - my $end_addr = shift; - my $total = shift; - - # Disassemble all instructions - my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); - - # Make array of counts per instruction - my @flat_count = (); - my @cum_count = (); - my $flat_total = 0; - my $cum_total = 0; - foreach my $e (@instructions) { - # Add up counts for all address that fall inside this instruction - my $c1 = 0; - my $c2 = 0; - for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { - $c1 += GetEntry($flat, $a); - $c2 += GetEntry($cumulative, $a); - } - push(@flat_count, $c1); - push(@cum_count, $c2); - $flat_total += $c1; - $cum_total += $c2; - } - - # Print header with total counts - printf("ROUTINE ====================== %s\n" . 
- "%6s %6s %s (flat, cumulative) %.1f%% of total\n", - ShortFunctionName($routine), - Unparse($flat_total), - Unparse($cum_total), - Units(), - ($cum_total * 100.0) / $total); - - # Process instructions in order - my $current_file = ""; - for (my $i = 0; $i <= $#instructions; ) { - my $e = $instructions[$i]; - - # Print the new file name whenever we switch files - if ($e->[1] ne $current_file) { - $current_file = $e->[1]; - my $fname = $current_file; - $fname =~ s|^\./||; # Trim leading "./" - - # Shorten long file names - if (length($fname) >= 58) { - $fname = "..." . substr($fname, -55); - } - printf("-------------------- %s\n", $fname); - } - - # TODO: Compute range of lines to print together to deal with - # small reorderings. - my $first_line = $e->[2]; - my $last_line = $first_line; - my %flat_sum = (); - my %cum_sum = (); - for (my $l = $first_line; $l <= $last_line; $l++) { - $flat_sum{$l} = 0; - $cum_sum{$l} = 0; - } - - # Find run of instructions for this range of source lines - my $first_inst = $i; - while (($i <= $#instructions) && - ($instructions[$i]->[2] >= $first_line) && - ($instructions[$i]->[2] <= $last_line)) { - $e = $instructions[$i]; - $flat_sum{$e->[2]} += $flat_count[$i]; - $cum_sum{$e->[2]} += $cum_count[$i]; - $i++; - } - my $last_inst = $i - 1; - - # Print source lines - for (my $l = $first_line; $l <= $last_line; $l++) { - my $line = SourceLine($current_file, $l); - if (!defined($line)) { - $line = "?\n"; - next; - } else { - $line =~ s/^\s+//; - } - printf("%6s %6s %5d: %s", - UnparseAlt($flat_sum{$l}), - UnparseAlt($cum_sum{$l}), - $l, - $line); - } - - # Print disassembly - for (my $x = $first_inst; $x <= $last_inst; $x++) { - my $e = $instructions[$x]; - printf("%6s %6s %8s: %6s\n", - UnparseAlt($flat_count[$x]), - UnparseAlt($cum_count[$x]), - UnparseAddress($offset, $e->[0]), - CleanDisassembly($e->[3])); - } - } -} - -# Print DOT graph -sub PrintDot { - my $prog = shift; - my $symbols = shift; - my $raw = shift; - my $flat = shift; - my $cumulative = shift; - my $overall_total = shift; - - # Get total - my $local_total = TotalProfile($flat); - my $nodelimit = int($main::opt_nodefraction * $local_total); - my $edgelimit = int($main::opt_edgefraction * $local_total); - my $nodecount = $main::opt_nodecount; - - # Find nodes to include - my @list = (sort { abs(GetEntry($cumulative, $b)) <=> - abs(GetEntry($cumulative, $a)) - || $a cmp $b } - keys(%{$cumulative})); - my $last = $nodecount - 1; - if ($last > $#list) { - $last = $#list; - } - while (($last >= 0) && - (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) { - $last--; - } - if ($last < 0) { - print STDERR "No nodes to print\n"; - return 0; - } - - if ($nodelimit > 0 || $edgelimit > 0) { - printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n", - Unparse($nodelimit), Units(), - Unparse($edgelimit), Units()); - } - - # Open DOT output file - my $output; - my $escaped_dot = ShellEscape(@DOT); - my $escaped_ps2pdf = ShellEscape(@PS2PDF); - if ($main::opt_gv) { - my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps")); - $output = "| $escaped_dot -Tps2 >$escaped_outfile"; - } elsif ($main::opt_evince) { - my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf")); - $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile"; - } elsif ($main::opt_ps) { - $output = "| $escaped_dot -Tps2"; - } elsif ($main::opt_pdf) { - $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -"; - } elsif ($main::opt_web || $main::opt_svg) { - # We need to 
post-process the SVG, so write to a temporary file always. - my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg")); - $output = "| $escaped_dot -Tsvg >$escaped_outfile"; - } elsif ($main::opt_gif) { - $output = "| $escaped_dot -Tgif"; - } else { - $output = ">&STDOUT"; - } - open(DOT, $output) || error("$output: $!\n"); - - # Title - printf DOT ("digraph \"%s; %s %s\" {\n", - $prog, - Unparse($overall_total), - Units()); - if ($main::opt_pdf) { - # The output is more printable if we set the page size for dot. - printf DOT ("size=\"8,11\"\n"); - } - printf DOT ("node [width=0.375,height=0.25];\n"); - - # Print legend - printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," . - "label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n", - $prog, - sprintf("Total %s: %s", Units(), Unparse($overall_total)), - sprintf("Focusing on: %s", Unparse($local_total)), - sprintf("Dropped nodes with <= %s abs(%s)", - Unparse($nodelimit), Units()), - sprintf("Dropped edges with <= %s %s", - Unparse($edgelimit), Units()) - ); - - # Print nodes - my %node = (); - my $nextnode = 1; - foreach my $a (@list[0..$last]) { - # Pick font size - my $f = GetEntry($flat, $a); - my $c = GetEntry($cumulative, $a); - - my $fs = 8; - if ($local_total > 0) { - $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total))); - } - - $node{$a} = $nextnode++; - my $sym = $a; - $sym =~ s/\s+/\\n/g; - $sym =~ s/::/\\n/g; - - # Extra cumulative info to print for non-leaves - my $extra = ""; - if ($f != $c) { - $extra = sprintf("\\rof %s (%s)", - Unparse($c), - Percent($c, $local_total)); - } - my $style = ""; - if ($main::opt_heapcheck) { - if ($f > 0) { - # make leak-causing nodes more visible (add a background) - $style = ",style=filled,fillcolor=gray" - } elsif ($f < 0) { - # make anti-leak-causing nodes (which almost never occur) - # stand out as well (triple border) - $style = ",peripheries=3" - } - } - - printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" . - "\",shape=box,fontsize=%.1f%s];\n", - $node{$a}, - $sym, - Unparse($f), - Percent($f, $local_total), - $extra, - $fs, - $style, - ); - } - - # Get edges and counts per edge - my %edge = (); - my $n; - my $fullname_to_shortname_map = {}; - FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); - foreach my $k (keys(%{$raw})) { - # TODO: omit low %age edges - $n = $raw->{$k}; - my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); - for (my $i = 1; $i <= $#translated; $i++) { - my $src = $translated[$i]; - my $dst = $translated[$i-1]; - #next if ($src eq $dst); # Avoid self-edges? 
- if (exists($node{$src}) && exists($node{$dst})) { - my $edge_label = "$src\001$dst"; - if (!exists($edge{$edge_label})) { - $edge{$edge_label} = 0; - } - $edge{$edge_label} += $n; - } - } - } - - # Print edges (process in order of decreasing counts) - my %indegree = (); # Number of incoming edges added per node so far - my %outdegree = (); # Number of outgoing edges added per node so far - foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) { - my @x = split(/\001/, $e); - $n = $edge{$e}; - - # Initialize degree of kept incoming and outgoing edges if necessary - my $src = $x[0]; - my $dst = $x[1]; - if (!exists($outdegree{$src})) { $outdegree{$src} = 0; } - if (!exists($indegree{$dst})) { $indegree{$dst} = 0; } - - my $keep; - if ($indegree{$dst} == 0) { - # Keep edge if needed for reachability - $keep = 1; - } elsif (abs($n) <= $edgelimit) { - # Drop if we are below --edgefraction - $keep = 0; - } elsif ($outdegree{$src} >= $main::opt_maxdegree || - $indegree{$dst} >= $main::opt_maxdegree) { - # Keep limited number of in/out edges per node - $keep = 0; - } else { - $keep = 1; - } - - if ($keep) { - $outdegree{$src}++; - $indegree{$dst}++; - - # Compute line width based on edge count - my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0); - if ($fraction > 1) { $fraction = 1; } - my $w = $fraction * 2; - if ($w < 1 && ($main::opt_web || $main::opt_svg)) { - # SVG output treats line widths < 1 poorly. - $w = 1; - } - - # Dot sometimes segfaults if given edge weights that are too large, so - # we cap the weights at a large value - my $edgeweight = abs($n) ** 0.7; - if ($edgeweight > 100000) { $edgeweight = 100000; } - $edgeweight = int($edgeweight); - - my $style = sprintf("setlinewidth(%f)", $w); - if ($x[1] =~ m/\(inline\)/) { - $style .= ",dashed"; - } - - # Use a slightly squashed function of the edge count as the weight - printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n", - $node{$x[0]}, - $node{$x[1]}, - Unparse($n), - $edgeweight, - $style); - } - } - - print DOT ("}\n"); - close(DOT); - - if ($main::opt_web || $main::opt_svg) { - # Rewrite SVG to be more usable inside web browser. - RewriteSvg(TempName($main::next_tmpfile, "svg")); - } - - return 1; -} - -sub RewriteSvg { - my $svgfile = shift; - - open(SVG, $svgfile) || die "open temp svg: $!"; - my @svg = <SVG>; - close(SVG); - unlink $svgfile; - my $svg = join('', @svg); - - # Dot's SVG output is - # - # <svg width="___" height="___" - # viewBox="___" xmlns=...> - # <g id="graph0" transform="..."> - # ... - # </g> - # </svg> - # - # Change it to - # - # <svg width="100%" height="100%" - # xmlns=...> - # $svg_javascript - # <g id="viewport" transform="translate(0,0)"> - # <g id="graph0" transform="..."> - # ... - # </g> - # </g> - # </svg> - - # Fix width, height; drop viewBox. - $svg =~ s/(?s)<svg width="[^"]+" height="[^"]+"(.*?)viewBox="[^"]+"/<svg width="100%" height="100%"$1/; - - # Insert script, viewport <g> above first <g> - my $svg_javascript = SvgJavascript(); - my $viewport = "<g id=\"viewport\" transform=\"translate(0,0)\">\n"; - $svg =~ s/<g id="graph\d"/$svg_javascript$viewport$&/; - - # Insert final </g> above </svg>. - $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/; - $svg =~ s/<g id="graph\d"(.*?)/<g id="viewport"$1/; - - if ($main::opt_svg) { - # --svg: write to standard output. - print $svg; - } else { - # Write back to temporary file. 
- open(SVG, ">$svgfile") || die "open $svgfile: $!"; - print SVG $svg; - close(SVG); - } -} - -sub SvgJavascript { - return <<'EOF'; -<script type="text/ecmascript"><![CDATA[ -// SVGPan -// http://www.cyberz.org/blog/2009/12/08/svgpan-a-javascript-svg-panzoomdrag-library/ -// Local modification: if(true || ...) below to force panning, never moving. - -/** - * SVGPan library 1.2 - * ==================== - * - * Given an unique existing element with id "viewport", including the - * the library into any SVG adds the following capabilities: - * - * - Mouse panning - * - Mouse zooming (using the wheel) - * - Object dargging - * - * Known issues: - * - * - Zooming (while panning) on Safari has still some issues - * - * Releases: - * - * 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui - * Fixed a bug with browser mouse handler interaction - * - * 1.1, Wed Feb 3 17:39:33 GMT 2010, Zeng Xiaohui - * Updated the zoom code to support the mouse wheel on Safari/Chrome - * - * 1.0, Andrea Leofreddi - * First release - * - * This code is licensed under the following BSD license: - * - * Copyright 2009-2010 Andrea Leofreddi <a.leofreddi@itcharm.com>. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without modification, are - * permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this list of - * conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, this list - * of conditions and the following disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY Andrea Leofreddi ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Andrea Leofreddi OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are those of the - * authors and should not be interpreted as representing official policies, either expressed - * or implied, of Andrea Leofreddi. 
- */ - -var root = document.documentElement; - -var state = 'none', stateTarget, stateOrigin, stateTf; - -setupHandlers(root); - -/** - * Register handlers - */ -function setupHandlers(root){ - setAttributes(root, { - "onmouseup" : "add(evt)", - "onmousedown" : "handleMouseDown(evt)", - "onmousemove" : "handleMouseMove(evt)", - "onmouseup" : "handleMouseUp(evt)", - //"onmouseout" : "handleMouseUp(evt)", // Decomment this to stop the pan functionality when dragging out of the SVG element - }); - - if(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0) - window.addEventListener('mousewheel', handleMouseWheel, false); // Chrome/Safari - else - window.addEventListener('DOMMouseScroll', handleMouseWheel, false); // Others - - var g = svgDoc.getElementById("svg"); - g.width = "100%"; - g.height = "100%"; -} - -/** - * Instance an SVGPoint object with given event coordinates. - */ -function getEventPoint(evt) { - var p = root.createSVGPoint(); - - p.x = evt.clientX; - p.y = evt.clientY; - - return p; -} - -/** - * Sets the current transform matrix of an element. - */ -function setCTM(element, matrix) { - var s = "matrix(" + matrix.a + "," + matrix.b + "," + matrix.c + "," + matrix.d + "," + matrix.e + "," + matrix.f + ")"; - - element.setAttribute("transform", s); -} - -/** - * Dumps a matrix to a string (useful for debug). - */ -function dumpMatrix(matrix) { - var s = "[ " + matrix.a + ", " + matrix.c + ", " + matrix.e + "\n " + matrix.b + ", " + matrix.d + ", " + matrix.f + "\n 0, 0, 1 ]"; - - return s; -} - -/** - * Sets attributes of an element. - */ -function setAttributes(element, attributes){ - for (i in attributes) - element.setAttributeNS(null, i, attributes[i]); -} - -/** - * Handle mouse move event. - */ -function handleMouseWheel(evt) { - if(evt.preventDefault) - evt.preventDefault(); - - evt.returnValue = false; - - var svgDoc = evt.target.ownerDocument; - - var delta; - - if(evt.wheelDelta) - delta = evt.wheelDelta / 3600; // Chrome/Safari - else - delta = evt.detail / -90; // Mozilla - - var z = 1 + delta; // Zoom factor: 0.9/1.1 - - var g = svgDoc.getElementById("viewport"); - - var p = getEventPoint(evt); - - p = p.matrixTransform(g.getCTM().inverse()); - - // Compute new scale matrix in current mouse position - var k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y); - - setCTM(g, g.getCTM().multiply(k)); - - stateTf = stateTf.multiply(k.inverse()); -} - -/** - * Handle mouse move event. - */ -function handleMouseMove(evt) { - if(evt.preventDefault) - evt.preventDefault(); - - evt.returnValue = false; - - var svgDoc = evt.target.ownerDocument; - - var g = svgDoc.getElementById("viewport"); - - if(state == 'pan') { - // Pan mode - var p = getEventPoint(evt).matrixTransform(stateTf); - - setCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y)); - } else if(state == 'move') { - // Move mode - var p = getEventPoint(evt).matrixTransform(g.getCTM().inverse()); - - setCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM())); - - stateOrigin = p; - } -} - -/** - * Handle click event. 
- */ -function handleMouseDown(evt) { - if(evt.preventDefault) - evt.preventDefault(); - - evt.returnValue = false; - - var svgDoc = evt.target.ownerDocument; - - var g = svgDoc.getElementById("viewport"); - - if(true || evt.target.tagName == "svg") { - // Pan mode - state = 'pan'; - - stateTf = g.getCTM().inverse(); - - stateOrigin = getEventPoint(evt).matrixTransform(stateTf); - } else { - // Move mode - state = 'move'; - - stateTarget = evt.target; - - stateTf = g.getCTM().inverse(); - - stateOrigin = getEventPoint(evt).matrixTransform(stateTf); - } -} - -/** - * Handle mouse button release event. - */ -function handleMouseUp(evt) { - if(evt.preventDefault) - evt.preventDefault(); - - evt.returnValue = false; - - var svgDoc = evt.target.ownerDocument; - - if(state == 'pan' || state == 'move') { - // Quit pan mode - state = ''; - } -} - -]]></script> -EOF -} - -# Provides a map from fullname to shortname for cases where the -# shortname is ambiguous. The symlist has both the fullname and -# shortname for all symbols, which is usually fine, but sometimes -- -# such as overloaded functions -- two different fullnames can map to -# the same shortname. In that case, we use the address of the -# function to disambiguate the two. This function fills in a map that -# maps fullnames to modified shortnames in such cases. If a fullname -# is not present in the map, the 'normal' shortname provided by the -# symlist is the appropriate one to use. -sub FillFullnameToShortnameMap { - my $symbols = shift; - my $fullname_to_shortname_map = shift; - my $shortnames_seen_once = {}; - my $shortnames_seen_more_than_once = {}; - - foreach my $symlist (values(%{$symbols})) { - # TODO(csilvers): deal with inlined symbols too. - my $shortname = $symlist->[0]; - my $fullname = $symlist->[2]; - if ($fullname !~ /<[0-9a-fA-F]+>$/) { # fullname doesn't end in an address - next; # the only collisions we care about are when addresses differ - } - if (defined($shortnames_seen_once->{$shortname}) && - $shortnames_seen_once->{$shortname} ne $fullname) { - $shortnames_seen_more_than_once->{$shortname} = 1; - } else { - $shortnames_seen_once->{$shortname} = $fullname; - } - } - - foreach my $symlist (values(%{$symbols})) { - my $shortname = $symlist->[0]; - my $fullname = $symlist->[2]; - # TODO(csilvers): take in a list of addresses we care about, and only - # store in the map if $symlist->[1] is in that list. Saves space. - next if defined($fullname_to_shortname_map->{$fullname}); - if (defined($shortnames_seen_more_than_once->{$shortname})) { - if ($fullname =~ /<0*([^>]*)>$/) { # fullname has address at end of it - $fullname_to_shortname_map->{$fullname} = "$shortname\@$1"; - } - } - } -} - -# Return a small number that identifies the argument. -# Multiple calls with the same argument will return the same number. -# Calls with different arguments will return different numbers. 
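A concrete (invented) case of the disambiguation FillFullnameToShortnameMap performs above: two overloads share the shortname Foo::bar but carry distinct full names ending in their addresses, so both receive address-qualified shortnames in the map; full names whose shortname never collides are left out of the map and keep the normal shortname from the symlist.

    # Input symlists (shortname, file:line, fullname) -- names and addresses invented:
    #   [ "Foo::bar", "foo.cc:10", "Foo::bar(int) <000000000040a120>" ]
    #   [ "Foo::bar", "foo.cc:30", "Foo::bar(double) <000000000040a2c0>" ]
    # Resulting entries in $fullname_to_shortname_map:
    #   "Foo::bar(int) <000000000040a120>"    => "Foo::bar@40a120"
    #   "Foo::bar(double) <000000000040a2c0>" => "Foo::bar@40a2c0"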
-sub ShortIdFor { - my $key = shift; - my $id = $main::uniqueid{$key}; - if (!defined($id)) { - $id = keys(%main::uniqueid) + 1; - $main::uniqueid{$key} = $id; - } - return $id; -} - -# Translate a stack of addresses into a stack of symbols -sub TranslateStack { - my $symbols = shift; - my $fullname_to_shortname_map = shift; - my $k = shift; - - my @addrs = split(/\n/, $k); - my @result = (); - for (my $i = 0; $i <= $#addrs; $i++) { - my $a = $addrs[$i]; - - # Skip large addresses since they sometimes show up as fake entries on RH9 - if (length($a) > 8 && $a gt "7fffffffffffffff") { - next; - } - - if ($main::opt_disasm || $main::opt_list) { - # We want just the address for the key - push(@result, $a); - next; - } - - my $symlist = $symbols->{$a}; - if (!defined($symlist)) { - $symlist = [$a, "", $a]; - } - - # We can have a sequence of symbols for a particular entry - # (more than one symbol in the case of inlining). Callers - # come before callees in symlist, so walk backwards since - # the translated stack should contain callees before callers. - for (my $j = $#{$symlist}; $j >= 2; $j -= 3) { - my $func = $symlist->[$j-2]; - my $fileline = $symlist->[$j-1]; - my $fullfunc = $symlist->[$j]; - if (defined($fullname_to_shortname_map->{$fullfunc})) { - $func = $fullname_to_shortname_map->{$fullfunc}; - } - if ($j > 2) { - $func = "$func (inline)"; - } - - # Do not merge nodes corresponding to Callback::Run since that - # causes confusing cycles in dot display. Instead, we synthesize - # a unique name for this frame per caller. - if ($func =~ m/Callback.*::Run$/) { - my $caller = ($i > 0) ? $addrs[$i-1] : 0; - $func = "Run#" . ShortIdFor($caller); - } - - if ($main::opt_addresses) { - push(@result, "$a $func $fileline"); - } elsif ($main::opt_lines) { - if ($func eq '??' && $fileline eq '??:0') { - push(@result, "$a"); - } else { - push(@result, "$func $fileline"); - } - } elsif ($main::opt_functions) { - if ($func eq '??') { - push(@result, "$a"); - } else { - push(@result, $func); - } - } elsif ($main::opt_files) { - if ($fileline eq '??:0' || $fileline eq '') { - push(@result, "$a"); - } else { - my $f = $fileline; - $f =~ s/:\d+$//; - push(@result, $f); - } - } else { - push(@result, $a); - last; # Do not print inlined info - } - } - } - - # print join(",", @addrs), " => ", join(",", @result), "\n"; - return @result; -} - -# Generate percent string for a number and a total -sub Percent { - my $num = shift; - my $tot = shift; - if ($tot != 0) { - return sprintf("%.1f%%", $num * 100.0 / $tot); - } else { - return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf"); - } -} - -# Generate pretty-printed form of number -sub Unparse { - my $num = shift; - if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { - if ($main::opt_inuse_objects || $main::opt_alloc_objects) { - return sprintf("%d", $num); - } else { - if ($main::opt_show_bytes) { - return sprintf("%d", $num); - } else { - return sprintf("%.1f", $num / 1048576.0); - } - } - } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { - return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds - } else { - return sprintf("%d", $num); - } -} - -# Alternate pretty-printed form: 0 maps to "." 
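Worked examples of how Unparse above and Units (further below) render the same raw count for different profile types, assuming default flags; the count 5242880 is an arbitrary example:

    profile type    flags in effect                      Unparse(5242880)   Units()
    heap/growth     space mode (default)                 "5.0"              "MB"
    heap/growth     --show_bytes                         "5242880"          "B"
    heap/growth     --inuse_objects / --alloc_objects    "5242880"          "objects"
    contention      without --contentions                "0.005"            "seconds"  (input is nanoseconds)
    cpu             any                                  "5242880"          "samples"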
-sub UnparseAlt { - my $num = shift; - if ($num == 0) { - return "."; - } else { - return Unparse($num); - } -} - -# Alternate pretty-printed form: 0 maps to "" -sub HtmlPrintNumber { - my $num = shift; - if ($num == 0) { - return ""; - } else { - return Unparse($num); - } -} - -# Return output units -sub Units { - if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { - if ($main::opt_inuse_objects || $main::opt_alloc_objects) { - return "objects"; - } else { - if ($main::opt_show_bytes) { - return "B"; - } else { - return "MB"; - } - } - } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { - return "seconds"; - } else { - return "samples"; - } -} - -##### Profile manipulation code ##### - -# Generate flattened profile: -# If count is charged to stack [a,b,c,d], in generated profile, -# it will be charged to [a] -sub FlatProfile { - my $profile = shift; - my $result = {}; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - if ($#addrs >= 0) { - AddEntry($result, $addrs[0], $count); - } - } - return $result; -} - -# Generate cumulative profile: -# If count is charged to stack [a,b,c,d], in generated profile, -# it will be charged to [a], [b], [c], [d] -sub CumulativeProfile { - my $profile = shift; - my $result = {}; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - foreach my $a (@addrs) { - AddEntry($result, $a, $count); - } - } - return $result; -} - -# If the second-youngest PC on the stack is always the same, returns -# that pc. Otherwise, returns undef. -sub IsSecondPcAlwaysTheSame { - my $profile = shift; - - my $second_pc = undef; - foreach my $k (keys(%{$profile})) { - my @addrs = split(/\n/, $k); - if ($#addrs < 1) { - return undef; - } - if (not defined $second_pc) { - $second_pc = $addrs[1]; - } else { - if ($second_pc ne $addrs[1]) { - return undef; - } - } - } - return $second_pc; -} - -sub ExtractSymbolLocation { - my $symbols = shift; - my $address = shift; - # 'addr2line' outputs "??:0" for unknown locations; we do the - # same to be consistent. - my $location = "??:0:unknown"; - if (exists $symbols->{$address}) { - my $file = $symbols->{$address}->[1]; - if ($file eq "?") { - $file = "??:0" - } - $location = $file . ":" . $symbols->{$address}->[0]; - } - return $location; -} - -# Extracts a graph of calls. 
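The call-graph extraction that follows charges each stack to its leaf location and to every caller -> callee edge. A minimal standalone sketch of the same idea on a hypothetical two-frame profile (addresses and locations are invented):

#!/usr/bin/perl -w
use strict;

# Hypothetical profile: key is a newline-joined stack (leaf first), value is a count.
my %profile  = ( join("\n", "0x4001", "0x4002") => 7 );
my %location = ( "0x4001" => "foo.cc:10:leaf", "0x4002" => "main.cc:3:main" );

my %calls;
while (my ($stack, $count) = each %profile) {
  my @addrs = split(/\n/, $stack);
  my $dest  = $location{$addrs[0]};
  $calls{$dest} += $count;                  # charge the leaf itself
  for (my $i = 1; $i <= $#addrs; $i++) {
    my $src = $location{$addrs[$i]};
    $calls{"$src -> $dest"} += $count;      # one edge per caller/callee pair
    $dest = $src;
  }
}
print "$_ = $calls{$_}\n" foreach sort keys %calls;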
-sub ExtractCalls { - my $symbols = shift; - my $profile = shift; - - my $calls = {}; - while( my ($stack_trace, $count) = each %$profile ) { - my @address = split(/\n/, $stack_trace); - my $destination = ExtractSymbolLocation($symbols, $address[0]); - AddEntry($calls, $destination, $count); - for (my $i = 1; $i <= $#address; $i++) { - my $source = ExtractSymbolLocation($symbols, $address[$i]); - my $call = "$source -> $destination"; - AddEntry($calls, $call, $count); - $destination = $source; - } - } - - return $calls; -} - -sub RemoveUninterestingFrames { - my $symbols = shift; - my $profile = shift; - - # List of function names to skip - my %skip = (); - my $skip_regexp = 'NOMATCH'; - if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { - foreach my $name ('calloc', - 'cfree', - 'malloc', - 'free', - 'memalign', - 'posix_memalign', - 'pvalloc', - 'valloc', - 'realloc', - 'tc_calloc', - 'tc_cfree', - 'tc_malloc', - 'tc_free', - 'tc_memalign', - 'tc_posix_memalign', - 'tc_pvalloc', - 'tc_valloc', - 'tc_realloc', - 'tc_new', - 'tc_delete', - 'tc_newarray', - 'tc_deletearray', - 'tc_new_nothrow', - 'tc_newarray_nothrow', - 'do_malloc', - '::do_malloc', # new name -- got moved to an unnamed ns - '::do_malloc_or_cpp_alloc', - 'DoSampledAllocation', - 'simple_alloc::allocate', - '__malloc_alloc_template::allocate', - '__builtin_delete', - '__builtin_new', - '__builtin_vec_delete', - '__builtin_vec_new', - 'operator new', - 'operator new[]', - # The entry to our memory-allocation routines on OS X - 'malloc_zone_malloc', - 'malloc_zone_calloc', - 'malloc_zone_valloc', - 'malloc_zone_realloc', - 'malloc_zone_memalign', - 'malloc_zone_free', - # These mark the beginning/end of our custom sections - '__start_google_malloc', - '__stop_google_malloc', - '__start_malloc_hook', - '__stop_malloc_hook') { - $skip{$name} = 1; - $skip{"_" . $name} = 1; # Mach (OS X) adds a _ prefix to everything - } - # TODO: Remove TCMalloc once everything has been - # moved into the tcmalloc:: namespace and we have flushed - # old code out of the system. - $skip_regexp = "TCMalloc|^tcmalloc::"; - } elsif ($main::profile_type eq 'contention') { - foreach my $vname ('base::RecordLockProfileData', - 'base::SubmitMutexProfileData', - 'base::SubmitSpinLockProfileData', - 'Mutex::Unlock', - 'Mutex::UnlockSlow', - 'Mutex::ReaderUnlock', - 'MutexLock::~MutexLock', - 'SpinLock::Unlock', - 'SpinLock::SlowUnlock', - 'SpinLockHolder::~SpinLockHolder') { - $skip{$vname} = 1; - } - } elsif ($main::profile_type eq 'cpu') { - # Drop signal handlers used for CPU profile collection - # TODO(dpeng): this should not be necessary; it's taken - # care of by the general 2nd-pc mechanism below. - foreach my $name ('ProfileData::Add', # historical - 'ProfileData::prof_handler', # historical - 'CpuProfiler::prof_handler', - '__FRAME_END__', - '__pthread_sighandler', - '__restore') { - $skip{$name} = 1; - } - } else { - # Nothing skipped for unknown types - } - - if ($main::profile_type eq 'cpu') { - # If all the second-youngest program counters are the same, - # this STRONGLY suggests that it is an artifact of measurement, - # i.e., stack frames pushed by the CPU profiler signal handler. - # Hence, we delete them. - # (The topmost PC is read from the signal structure, not from - # the stack, so it does not get involved.) 
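As a standalone sketch of that clean-up (hypothetical addresses, not part of the removed script): every stack below carries the same frame in slot 1, so that slot is spliced out of each key.

#!/usr/bin/perl -w
use strict;

# Hypothetical stacks: the second-youngest frame 0xffff is the same everywhere.
my %profile = (
  join("\n", "0xaaaa", "0xffff", "0x1111") => 3,
  join("\n", "0xbbbb", "0xffff", "0x2222") => 4,
);

my %cleaned;
foreach my $stack (keys %profile) {
  my @addrs = split(/\n/, $stack);
  splice(@addrs, 1, 1);                      # drop the artificial frame
  $cleaned{join("\n", @addrs)} += $profile{$stack};
}
foreach my $k (sort keys %cleaned) {
  print join(",", split(/\n/, $k)), " => $cleaned{$k}\n";
}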
- while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) { - my $result = {}; - my $func = ''; - if (exists($symbols->{$second_pc})) { - $second_pc = $symbols->{$second_pc}->[0]; - } - print STDERR "Removing $second_pc from all stack traces.\n"; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - splice @addrs, 1, 1; - my $reduced_path = join("\n", @addrs); - AddEntry($result, $reduced_path, $count); - } - $profile = $result; - } - } - - my $result = {}; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - my @path = (); - foreach my $a (@addrs) { - if (exists($symbols->{$a})) { - my $func = $symbols->{$a}->[0]; - if ($skip{$func} || ($func =~ m/$skip_regexp/)) { - next; - } - } - push(@path, $a); - } - my $reduced_path = join("\n", @path); - AddEntry($result, $reduced_path, $count); - } - return $result; -} - -# Reduce profile to granularity given by user -sub ReduceProfile { - my $symbols = shift; - my $profile = shift; - my $result = {}; - my $fullname_to_shortname_map = {}; - FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); - my @path = (); - my %seen = (); - $seen{''} = 1; # So that empty keys are skipped - foreach my $e (@translated) { - # To avoid double-counting due to recursion, skip a stack-trace - # entry if it has already been seen - if (!$seen{$e}) { - $seen{$e} = 1; - push(@path, $e); - } - } - my $reduced_path = join("\n", @path); - AddEntry($result, $reduced_path, $count); - } - return $result; -} - -# Does the specified symbol array match the regexp? -sub SymbolMatches { - my $sym = shift; - my $re = shift; - if (defined($sym)) { - for (my $i = 0; $i < $#{$sym}; $i += 3) { - if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) { - return 1; - } - } - } - return 0; -} - -# Focus only on paths involving specified regexps -sub FocusProfile { - my $symbols = shift; - my $profile = shift; - my $focus = shift; - my $result = {}; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - foreach my $a (@addrs) { - # Reply if it matches either the address/shortname/fileline - if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) { - AddEntry($result, $k, $count); - last; - } - } - } - return $result; -} - -# Focus only on paths not involving specified regexps -sub IgnoreProfile { - my $symbols = shift; - my $profile = shift; - my $ignore = shift; - my $result = {}; - foreach my $k (keys(%{$profile})) { - my $count = $profile->{$k}; - my @addrs = split(/\n/, $k); - my $matched = 0; - foreach my $a (@addrs) { - # Reply if it matches either the address/shortname/fileline - if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) { - $matched = 1; - last; - } - } - if (!$matched) { - AddEntry($result, $k, $count); - } - } - return $result; -} - -# Get total count in profile -sub TotalProfile { - my $profile = shift; - my $result = 0; - foreach my $k (keys(%{$profile})) { - $result += $profile->{$k}; - } - return $result; -} - -# Add A to B -sub AddProfile { - my $A = shift; - my $B = shift; - - my $R = {}; - # add all keys in A - foreach my $k (keys(%{$A})) { - my $v = $A->{$k}; - AddEntry($R, $k, $v); - } - # add all keys in B - foreach my $k (keys(%{$B})) { - my $v = $B->{$k}; - AddEntry($R, $k, $v); - } - return $R; -} - -# Merges symbol maps -sub MergeSymbols { - 
my $A = shift; - my $B = shift; - - my $R = {}; - foreach my $k (keys(%{$A})) { - $R->{$k} = $A->{$k}; - } - if (defined($B)) { - foreach my $k (keys(%{$B})) { - $R->{$k} = $B->{$k}; - } - } - return $R; -} - - -# Add A to B -sub AddPcs { - my $A = shift; - my $B = shift; - - my $R = {}; - # add all keys in A - foreach my $k (keys(%{$A})) { - $R->{$k} = 1 - } - # add all keys in B - foreach my $k (keys(%{$B})) { - $R->{$k} = 1 - } - return $R; -} - -# Subtract B from A -sub SubtractProfile { - my $A = shift; - my $B = shift; - - my $R = {}; - foreach my $k (keys(%{$A})) { - my $v = $A->{$k} - GetEntry($B, $k); - if ($v < 0 && $main::opt_drop_negative) { - $v = 0; - } - AddEntry($R, $k, $v); - } - if (!$main::opt_drop_negative) { - # Take care of when subtracted profile has more entries - foreach my $k (keys(%{$B})) { - if (!exists($A->{$k})) { - AddEntry($R, $k, 0 - $B->{$k}); - } - } - } - return $R; -} - -# Get entry from profile; zero if not present -sub GetEntry { - my $profile = shift; - my $k = shift; - if (exists($profile->{$k})) { - return $profile->{$k}; - } else { - return 0; - } -} - -# Add entry to specified profile -sub AddEntry { - my $profile = shift; - my $k = shift; - my $n = shift; - if (!exists($profile->{$k})) { - $profile->{$k} = 0; - } - $profile->{$k} += $n; -} - -# Add a stack of entries to specified profile, and add them to the $pcs -# list. -sub AddEntries { - my $profile = shift; - my $pcs = shift; - my $stack = shift; - my $count = shift; - my @k = (); - - foreach my $e (split(/\s+/, $stack)) { - my $pc = HexExtend($e); - $pcs->{$pc} = 1; - push @k, $pc; - } - AddEntry($profile, (join "\n", @k), $count); -} - -##### Code to profile a server dynamically ##### - -sub CheckSymbolPage { - my $url = SymbolPageURL(); - my $command = ShellEscape(@URL_FETCHER, $url); - open(SYMBOL, "$command |") or error($command); - my $line = <SYMBOL>; - $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines - close(SYMBOL); - unless (defined($line)) { - error("$url doesn't exist\n"); - } - - if ($line =~ /^num_symbols:\s+(\d+)$/) { - if ($1 == 0) { - error("Stripped binary. No symbols available.\n"); - } - } else { - error("Failed to get the number of symbols from $url\n"); - } -} - -sub IsProfileURL { - my $profile_name = shift; - if (-f $profile_name) { - printf STDERR "Using local file $profile_name.\n"; - return 0; - } - return 1; -} - -sub ParseProfileURL { - my $profile_name = shift; - - if (!defined($profile_name) || $profile_name eq "") { - return (); - } - - # Split profile URL - matches all non-empty strings, so no test. - $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,; - - my $proto = $1 || "http://"; - my $hostport = $2; - my $prefix = $3; - my $profile = $4 || "/"; - - my $host = $hostport; - $host =~ s/:.*//; - - my $baseurl = "$proto$hostport$prefix"; - return ($host, $baseurl, $profile); -} - -# We fetch symbols from the first profile argument. 
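Before the symbol-page helpers below, a sketch of what ParseProfileURL above extracts from a hypothetical URL; the $PROFILES alternation is simplified to "/pprof/.*" here, so the pattern is only an approximation of the real one.

#!/usr/bin/perl -w
use strict;

my $profile_name = "http://myhost:8080/pprof/heap";
$profile_name =~ m,^(https?://)?([^/]+)(.*?)(/pprof/.*|/)?$,;
my $proto    = $1 || "http://";
my $hostport = $2;
my $prefix   = $3;
my $profile  = $4 || "/";
(my $host = $hostport) =~ s/:.*//;

print "host=$host baseurl=$proto$hostport$prefix profile=$profile\n";
# host=myhost baseurl=http://myhost:8080 profile=/pprof/heap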
-sub SymbolPageURL { - my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]); - return "$baseURL$SYMBOL_PAGE"; -} - -sub FetchProgramName() { - my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]); - my $url = "$baseURL$PROGRAM_NAME_PAGE"; - my $command_line = ShellEscape(@URL_FETCHER, $url); - open(CMDLINE, "$command_line |") or error($command_line); - my $cmdline = <CMDLINE>; - $cmdline =~ s/\r//g; # turn windows-looking lines into unix-looking lines - close(CMDLINE); - error("Failed to get program name from $url\n") unless defined($cmdline); - $cmdline =~ s/\x00.+//; # Remove argv[1] and latters. - $cmdline =~ s!\n!!g; # Remove LFs. - return $cmdline; -} - -# Gee, curl's -L (--location) option isn't reliable at least -# with its 7.12.3 version. Curl will forget to post data if -# there is a redirection. This function is a workaround for -# curl. Redirection happens on borg hosts. -sub ResolveRedirectionForCurl { - my $url = shift; - my $command_line = ShellEscape(@URL_FETCHER, "--head", $url); - open(CMDLINE, "$command_line |") or error($command_line); - while (<CMDLINE>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - if (/^Location: (.*)/) { - $url = $1; - } - } - close(CMDLINE); - return $url; -} - -# Add a timeout flat to URL_FETCHER. Returns a new list. -sub AddFetchTimeout { - my $timeout = shift; - my @fetcher = shift; - if (defined($timeout)) { - if (join(" ", @fetcher) =~ m/\bcurl -s/) { - push(@fetcher, "--max-time", sprintf("%d", $timeout)); - } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) { - push(@fetcher, sprintf("--deadline=%d", $timeout)); - } - } - return @fetcher; -} - -# Reads a symbol map from the file handle name given as $1, returning -# the resulting symbol map. Also processes variables relating to symbols. -# Currently, the only variable processed is 'binary=<value>' which updates -# $main::prog to have the correct program name. -sub ReadSymbols { - my $in = shift; - my $map = {}; - while (<$in>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - # Removes all the leading zeroes from the symbols, see comment below. - if (m/^0x0*([0-9a-f]+)\s+(.+)/) { - $map->{$1} = $2; - } elsif (m/^---/) { - last; - } elsif (m/^([a-z][^=]*)=(.*)$/ ) { - my ($variable, $value) = ($1, $2); - for ($variable, $value) { - s/^\s+//; - s/\s+$//; - } - if ($variable eq "binary") { - if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) { - printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n", - $main::prog, $value); - } - $main::prog = $value; - } else { - printf STDERR ("Ignoring unknown variable in symbols list: " . - "'%s' = '%s'\n", $variable, $value); - } - } - } - return $map; -} - -# Fetches and processes symbols to prepare them for use in the profile output -# code. If the optional 'symbol_map' arg is not given, fetches symbols from -# $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols -# are assumed to have already been fetched into 'symbol_map' and are simply -# extracted and processed. -sub FetchSymbols { - my $pcset = shift; - my $symbol_map = shift; - - my %seen = (); - my @pcs = grep { !$seen{$_}++ } keys(%$pcset); # uniq - - if (!defined($symbol_map)) { - my $post_data = join("+", sort((map {"0x" . 
"$_"} @pcs))); - - open(POSTFILE, ">$main::tmpfile_sym"); - print POSTFILE $post_data; - close(POSTFILE); - - my $url = SymbolPageURL(); - - my $command_line; - if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) { - $url = ResolveRedirectionForCurl($url); - $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym", - $url); - } else { - $command_line = (ShellEscape(@URL_FETCHER, "--post", $url) - . " < " . ShellEscape($main::tmpfile_sym)); - } - # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols. - my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"}); - open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line); - $symbol_map = ReadSymbols(*SYMBOL{IO}); - close(SYMBOL); - } - - my $symbols = {}; - foreach my $pc (@pcs) { - my $fullname; - # For 64 bits binaries, symbols are extracted with 8 leading zeroes. - # Then /symbol reads the long symbols in as uint64, and outputs - # the result with a "0x%08llx" format which get rid of the zeroes. - # By removing all the leading zeroes in both $pc and the symbols from - # /symbol, the symbols match and are retrievable from the map. - my $shortpc = $pc; - $shortpc =~ s/^0*//; - # Each line may have a list of names, which includes the function - # and also other functions it has inlined. They are separated (in - # PrintSymbolizedProfile), by --, which is illegal in function names. - my $fullnames; - if (defined($symbol_map->{$shortpc})) { - $fullnames = $symbol_map->{$shortpc}; - } else { - $fullnames = "0x" . $pc; # Just use addresses - } - my $sym = []; - $symbols->{$pc} = $sym; - foreach my $fullname (split("--", $fullnames)) { - my $name = ShortFunctionName($fullname); - push(@{$sym}, $name, "?", $fullname); - } - } - return $symbols; -} - -sub BaseName { - my $file_name = shift; - $file_name =~ s!^.*/!!; # Remove directory name - return $file_name; -} - -sub MakeProfileBaseName { - my ($binary_name, $profile_name) = @_; - my ($host, $baseURL, $path) = ParseProfileURL($profile_name); - my $binary_shortname = BaseName($binary_name); - return sprintf("%s.%s.%s", - $binary_shortname, $main::op_time, $host); -} - -sub FetchDynamicProfile { - my $binary_name = shift; - my $profile_name = shift; - my $fetch_name_only = shift; - my $encourage_patience = shift; - - if (!IsProfileURL($profile_name)) { - return $profile_name; - } else { - my ($host, $baseURL, $path) = ParseProfileURL($profile_name); - if ($path eq "" || $path eq "/") { - # Missing type specifier defaults to cpu-profile - $path = $PROFILE_PAGE; - } - - my $profile_file = MakeProfileBaseName($binary_name, $profile_name); - - my $url = "$baseURL$path"; - my $fetch_timeout = undef; - if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) { - if ($path =~ m/[?]/) { - $url .= "&"; - } else { - $url .= "?"; - } - $url .= sprintf("seconds=%d", $main::opt_seconds); - $fetch_timeout = $main::opt_seconds * 1.01 + 60; - } else { - # For non-CPU profiles, we add a type-extension to - # the target profile file name. - my $suffix = $path; - $suffix =~ s,/,.,g; - $profile_file .= $suffix; - } - - my $profile_dir = $ENV{"PPROF_TMPDIR"} || ($ENV{HOME} . "/pprof"); - if (! -d $profile_dir) { - mkdir($profile_dir) - || die("Unable to create profile directory $profile_dir: $!\n"); - } - my $tmp_profile = "$profile_dir/.tmp.$profile_file"; - my $real_profile = "$profile_dir/$profile_file"; - - if ($fetch_name_only > 0) { - return $real_profile; - } - - my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER); - my $cmd = ShellEscape(@fetcher, $url) . " > " . 
ShellEscape($tmp_profile); - if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){ - print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n ${real_profile}\n"; - if ($encourage_patience) { - print STDERR "Be patient...\n"; - } - } else { - print STDERR "Fetching $path profile from $url to\n ${real_profile}\n"; - } - - (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n"); - (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n"); - print STDERR "Wrote profile to $real_profile\n"; - $main::collected_profile = $real_profile; - return $main::collected_profile; - } -} - -# Collect profiles in parallel -sub FetchDynamicProfiles { - my $items = scalar(@main::pfile_args); - my $levels = log($items) / log(2); - - if ($items == 1) { - $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1); - } else { - # math rounding issues - if ((2 ** $levels) < $items) { - $levels++; - } - my $count = scalar(@main::pfile_args); - for (my $i = 0; $i < $count; $i++) { - $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0); - } - print STDERR "Fetching $count profiles, Be patient...\n"; - FetchDynamicProfilesRecurse($levels, 0, 0); - $main::collected_profile = join(" \\\n ", @main::profile_files); - } -} - -# Recursively fork a process to get enough processes -# collecting profiles -sub FetchDynamicProfilesRecurse { - my $maxlevel = shift; - my $level = shift; - my $position = shift; - - if (my $pid = fork()) { - $position = 0 | ($position << 1); - TryCollectProfile($maxlevel, $level, $position); - wait; - } else { - $position = 1 | ($position << 1); - TryCollectProfile($maxlevel, $level, $position); - cleanup(); - exit(0); - } -} - -# Collect a single profile -sub TryCollectProfile { - my $maxlevel = shift; - my $level = shift; - my $position = shift; - - if ($level >= ($maxlevel - 1)) { - if ($position < scalar(@main::pfile_args)) { - FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0); - } - } else { - FetchDynamicProfilesRecurse($maxlevel, $level+1, $position); - } -} - -##### Parsing code ##### - -# Provide a small streaming-read module to handle very large -# cpu-profile files. Stream in chunks along a sliding window. -# Provides an interface to get one 'slot', correctly handling -# endian-ness differences. A slot is one 32-bit or 64-bit word -# (depending on the input profile). We tell endianness and bit-size -# for the profile by looking at the first 8 bytes: in cpu profiles, -# the second slot is always 3 (we'll accept anything that's not 0). -BEGIN { - package CpuProfileStream; - - sub new { - my ($class, $file, $fname) = @_; - my $self = { file => $file, - base => 0, - stride => 512 * 1024, # must be a multiple of bitsize/8 - slots => [], - unpack_code => "", # N for big-endian, V for little - perl_is_64bit => 1, # matters if profile is 64-bit - }; - bless $self, $class; - # Let unittests adjust the stride - if ($main::opt_test_stride > 0) { - $self->{stride} = $main::opt_test_stride; - } - # Read the first two slots to figure out bitsize and endianness. - my $slots = $self->{slots}; - my $str; - read($self->{file}, $str, 8); - # Set the global $address_length based on what we see here. - # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars). - $address_length = ($str eq (chr(0)x8)) ? 16 : 8; - if ($address_length == 8) { - if (substr($str, 6, 2) eq chr(0)x2) { - $self->{unpack_code} = 'V'; # Little-endian. 
- } elsif (substr($str, 4, 2) eq chr(0)x2) { - $self->{unpack_code} = 'N'; # Big-endian - } else { - ::error("$fname: header size >= 2**16\n"); - } - @$slots = unpack($self->{unpack_code} . "*", $str); - } else { - # If we're a 64-bit profile, check if we're a 64-bit-capable - # perl. Otherwise, each slot will be represented as a float - # instead of an int64, losing precision and making all the - # 64-bit addresses wrong. We won't complain yet, but will - # later if we ever see a value that doesn't fit in 32 bits. - my $has_q = 0; - eval { $has_q = pack("Q", "1") ? 1 : 1; }; - if (!$has_q) { - $self->{perl_is_64bit} = 0; - } - read($self->{file}, $str, 8); - if (substr($str, 4, 4) eq chr(0)x4) { - # We'd love to use 'Q', but it's a) not universal, b) not endian-proof. - $self->{unpack_code} = 'V'; # Little-endian. - } elsif (substr($str, 0, 4) eq chr(0)x4) { - $self->{unpack_code} = 'N'; # Big-endian - } else { - ::error("$fname: header size >= 2**32\n"); - } - my @pair = unpack($self->{unpack_code} . "*", $str); - # Since we know one of the pair is 0, it's fine to just add them. - @$slots = (0, $pair[0] + $pair[1]); - } - return $self; - } - - # Load more data when we access slots->get(X) which is not yet in memory. - sub overflow { - my ($self) = @_; - my $slots = $self->{slots}; - $self->{base} += $#$slots + 1; # skip over data we're replacing - my $str; - read($self->{file}, $str, $self->{stride}); - if ($address_length == 8) { # the 32-bit case - # This is the easy case: unpack provides 32-bit unpacking primitives. - @$slots = unpack($self->{unpack_code} . "*", $str); - } else { - # We need to unpack 32 bits at a time and combine. - my @b32_values = unpack($self->{unpack_code} . "*", $str); - my @b64_values = (); - for (my $i = 0; $i < $#b32_values; $i += 2) { - # TODO(csilvers): if this is a 32-bit perl, the math below - # could end up in a too-large int, which perl will promote - # to a double, losing necessary precision. Deal with that. - # Right now, we just die. - my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]); - if ($self->{unpack_code} eq 'N') { # big-endian - ($lo, $hi) = ($hi, $lo); - } - my $value = $lo + $hi * (2**32); - if (!$self->{perl_is_64bit} && # check value is exactly represented - (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) { - ::error("Need a 64-bit perl to process this 64-bit profile.\n"); - } - push(@b64_values, $value); - } - @$slots = @b64_values; - } - } - - # Access the i-th long in the file (logically), or -1 at EOF. - sub get { - my ($self, $idx) = @_; - my $slots = $self->{slots}; - while ($#$slots >= 0) { - if ($idx < $self->{base}) { - # The only time we expect a reference to $slots[$i - something] - # after referencing $slots[$i] is reading the very first header. - # Since $stride > |header|, that shouldn't cause any lookback - # errors. And everything after the header is sequential. - print STDERR "Unexpected look-back reading CPU profile"; - return -1; # shrug, don't know what better to return - } elsif ($idx > $self->{base} + $#$slots) { - $self->overflow(); - } else { - return $slots->[$idx - $self->{base}]; - } - } - # If we get here, $slots is [], which means we've reached EOF - return -1; # unique since slots is supposed to hold unsigned numbers - } -} - -# Reads the top, 'header' section of a profile, and returns the last -# line of the header, commonly called a 'header line'. 
The header -# section of a profile consists of zero or more 'command' lines that -# are instructions to pprof, which pprof executes when reading the -# header. All 'command' lines start with a %. After the command -# lines is the 'header line', which is a profile-specific line that -# indicates what type of profile it is, and perhaps other global -# information about the profile. For instance, here's a header line -# for a heap profile: -# heap profile: 53: 38236 [ 5525: 1284029] @ heapprofile -# For historical reasons, the CPU profile does not contain a text- -# readable header line. If the profile looks like a CPU profile, -# this function returns "". If no header line could be found, this -# function returns undef. -# -# The following commands are recognized: -# %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:' -# -# The input file should be in binmode. -sub ReadProfileHeader { - local *PROFILE = shift; - my $firstchar = ""; - my $line = ""; - read(PROFILE, $firstchar, 1); - seek(PROFILE, -1, 1); # unread the firstchar - if ($firstchar !~ /[[:print:]]/) { # is not a text character - return ""; - } - while (defined($line = <PROFILE>)) { - $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines - if ($line =~ /^%warn\s+(.*)/) { # 'warn' command - # Note this matches both '%warn blah\n' and '%warn\n'. - print STDERR "WARNING: $1\n"; # print the rest of the line - } elsif ($line =~ /^%/) { - print STDERR "Ignoring unknown command from profile header: $line"; - } else { - # End of commands, must be the header line. - return $line; - } - } - return undef; # got to EOF without seeing a header line -} - -sub IsSymbolizedProfileFile { - my $file_name = shift; - if (!(-e $file_name) || !(-r $file_name)) { - return 0; - } - # Check if the file contains a symbol-section marker. - open(TFILE, "<$file_name"); - binmode TFILE; - my $firstline = ReadProfileHeader(*TFILE); - close(TFILE); - if (!$firstline) { - return 0; - } - $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $symbol_marker = $&; - return $firstline =~ /^--- *$symbol_marker/; -} - -# Parse profile generated by common/profiler.cc and return a reference -# to a map: -# $result->{version} Version number of profile file -# $result->{period} Sampling period (in microseconds) -# $result->{profile} Profile object -# $result->{map} Memory map info from profile -# $result->{pcs} Hash of all PC values seen, key is hex address -sub ReadProfile { - my $prog = shift; - my $fname = shift; - my $result; # return value - - $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $contention_marker = $&; - $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $growth_marker = $&; - $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $symbol_marker = $&; - $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $profile_marker = $&; - - # Look at first line to see if it is a heap or a CPU profile. - # CPU profile may start with no header at all, and just binary data - # (starting with \0\0\0\0) -- in that case, don't try to read the - # whole firstline, since it may be gigabytes(!) of data. 
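For a concrete (hypothetical) picture of the header handling described above, the sketch below feeds a two-line header section from a string: the %warn command is echoed to stderr and the heap header line is returned as the 'header line'.

#!/usr/bin/perl -w
use strict;

my $data = "%warn profiler overhead was high\n"
         . "heap profile:     53:    38236 [  5525:  1284029] \@ heapprofile\n";
open(my $fh, "<", \$data) or die $!;
while (defined(my $line = <$fh>)) {
  if ($line =~ /^%warn\s+(.*)/) { print STDERR "WARNING: $1\n"; next; }
  print "header line: $line";               # the first non-command line
  last;
}
close($fh);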
- open(PROFILE, "<$fname") || error("$fname: $!\n"); - binmode PROFILE; # New perls do UTF-8 processing - my $header = ReadProfileHeader(*PROFILE); - if (!defined($header)) { # means "at EOF" - error("Profile is empty.\n"); - } - - my $symbols; - if ($header =~ m/^--- *$symbol_marker/o) { - # Verify that the user asked for a symbolized profile - if (!$main::use_symbolized_profile) { - # we have both a binary and symbolized profiles, abort - error("FATAL ERROR: Symbolized profile\n $fname\ncannot be used with " . - "a binary arg. Try again without passing\n $prog\n"); - } - # Read the symbol section of the symbolized profile file. - $symbols = ReadSymbols(*PROFILE{IO}); - # Read the next line to get the header for the remaining profile. - $header = ReadProfileHeader(*PROFILE) || ""; - } - - $main::profile_type = ''; - if ($header =~ m/^heap profile:.*$growth_marker/o) { - $main::profile_type = 'growth'; - $result = ReadHeapProfile($prog, *PROFILE, $header); - } elsif ($header =~ m/^heap profile:/) { - $main::profile_type = 'heap'; - $result = ReadHeapProfile($prog, *PROFILE, $header); - } elsif ($header =~ m/^--- *$contention_marker/o) { - $main::profile_type = 'contention'; - $result = ReadSynchProfile($prog, *PROFILE); - } elsif ($header =~ m/^--- *Stacks:/) { - print STDERR - "Old format contention profile: mistakenly reports " . - "condition variable signals as lock contentions.\n"; - $main::profile_type = 'contention'; - $result = ReadSynchProfile($prog, *PROFILE); - } elsif ($header =~ m/^--- *$profile_marker/) { - # the binary cpu profile data starts immediately after this line - $main::profile_type = 'cpu'; - $result = ReadCPUProfile($prog, $fname, *PROFILE); - } else { - if (defined($symbols)) { - # a symbolized profile contains a format we don't recognize, bail out - error("$fname: Cannot recognize profile section after symbols.\n"); - } - # no ascii header present -- must be a CPU profile - $main::profile_type = 'cpu'; - $result = ReadCPUProfile($prog, $fname, *PROFILE); - } - - close(PROFILE); - - # if we got symbols along with the profile, return those as well - if (defined($symbols)) { - $result->{symbols} = $symbols; - } - - return $result; -} - -# Subtract one from caller pc so we map back to call instr. -# However, don't do this if we're reading a symbolized profile -# file, in which case the subtract-one was done when the file -# was written. -# -# We apply the same logic to all readers, though ReadCPUProfile uses an -# independent implementation. -sub FixCallerAddresses { - my $stack = shift; - if ($main::use_symbolized_profile) { - return $stack; - } else { - $stack =~ /(\s)/; - my $delimiter = $1; - my @addrs = split(' ', $stack); - my @fixedaddrs; - $#fixedaddrs = $#addrs; - if ($#addrs >= 0) { - $fixedaddrs[0] = $addrs[0]; - } - for (my $i = 1; $i <= $#addrs; $i++) { - $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1"); - } - return join $delimiter, @fixedaddrs; - } -} - -# CPU profile reader -sub ReadCPUProfile { - my $prog = shift; - my $fname = shift; # just used for logging - local *PROFILE = shift; - my $version; - my $period; - my $i; - my $profile = {}; - my $pcs = {}; - - # Parse string into array of slots. - my $slots = CpuProfileStream->new(*PROFILE, $fname); - - # Read header. 
The current header version is a 5-element structure - # containing: - # 0: header count (always 0) - # 1: header "words" (after this one: 3) - # 2: format version (0) - # 3: sampling period (usec) - # 4: unused padding (always 0) - if ($slots->get(0) != 0 ) { - error("$fname: not a profile file, or old format profile file\n"); - } - $i = 2 + $slots->get(1); - $version = $slots->get(2); - $period = $slots->get(3); - # Do some sanity checking on these header values. - if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) { - error("$fname: not a profile file, or corrupted profile file\n"); - } - - # Parse profile - while ($slots->get($i) != -1) { - my $n = $slots->get($i++); - my $d = $slots->get($i++); - if ($d > (2**16)) { # TODO(csilvers): what's a reasonable max-stack-depth? - my $addr = sprintf("0%o", $i * ($address_length == 8 ? 4 : 8)); - print STDERR "At index $i (address $addr):\n"; - error("$fname: stack trace depth >= 2**32\n"); - } - if ($slots->get($i) == 0) { - # End of profile data marker - $i += $d; - last; - } - - # Make key out of the stack entries - my @k = (); - for (my $j = 0; $j < $d; $j++) { - my $pc = $slots->get($i+$j); - # Subtract one from caller pc so we map back to call instr. - # However, don't do this if we're reading a symbolized profile - # file, in which case the subtract-one was done when the file - # was written. - if ($j > 0 && !$main::use_symbolized_profile) { - $pc--; - } - $pc = sprintf("%0*x", $address_length, $pc); - $pcs->{$pc} = 1; - push @k, $pc; - } - - AddEntry($profile, (join "\n", @k), $n); - $i += $d; - } - - # Parse map - my $map = ''; - seek(PROFILE, $i * 4, 0); - read(PROFILE, $map, (stat PROFILE)[7]); - - my $r = {}; - $r->{version} = $version; - $r->{period} = $period; - $r->{profile} = $profile; - $r->{libs} = ParseLibraries($prog, $map, $pcs); - $r->{pcs} = $pcs; - - return $r; -} - -sub ReadHeapProfile { - my $prog = shift; - local *PROFILE = shift; - my $header = shift; - - my $index = 1; - if ($main::opt_inuse_space) { - $index = 1; - } elsif ($main::opt_inuse_objects) { - $index = 0; - } elsif ($main::opt_alloc_space) { - $index = 3; - } elsif ($main::opt_alloc_objects) { - $index = 2; - } - - # Find the type of this profile. The header line looks like: - # heap profile: 1246: 8800744 [ 1246: 8800744] @ <heap-url>/266053 - # There are two pairs <count: size>, the first inuse objects/space, and the - # second allocated objects/space. This is followed optionally by a profile - # type, and if that is present, optionally by a sampling frequency. - # For remote heap profiles (v1): - # The interpretation of the sampling frequency is that the profiler, for - # each sample, calculates a uniformly distributed random integer less than - # the given value, and records the next sample after that many bytes have - # been allocated. Therefore, the expected sample interval is half of the - # given frequency. By default, if not specified, the expected sample - # interval is 128KB. Only remote-heap-page profiles are adjusted for - # sample size. - # For remote heap profiles (v2): - # The sampling frequency is the rate of a Poisson process. This means that - # the probability of sampling an allocation of size X with sampling rate Y - # is 1 - exp(-X/Y) - # For version 2, a typical header line might look like this: - # heap profile: 1922: 127792360 [ 1922: 127792360] @ <heap-url>_v2/524288 - # the trailing number (524288) is the sampling rate. 
(Version 1 showed - # double the 'rate' here) - my $sampling_algorithm = 0; - my $sample_adjustment = 0; - chomp($header); - my $type = "unknown"; - if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") { - if (defined($6) && ($6 ne '')) { - $type = $6; - my $sample_period = $8; - # $type is "heapprofile" for profiles generated by the - # heap-profiler, and either "heap" or "heap_v2" for profiles - # generated by sampling directly within tcmalloc. It can also - # be "growth" for heap-growth profiles. The first is typically - # found for profiles generated locally, and the others for - # remote profiles. - if (($type eq "heapprofile") || ($type !~ /heap/) ) { - # No need to adjust for the sampling rate with heap-profiler-derived data - $sampling_algorithm = 0; - } elsif ($type =~ /_v2/) { - $sampling_algorithm = 2; # version 2 sampling - if (defined($sample_period) && ($sample_period ne '')) { - $sample_adjustment = int($sample_period); - } - } else { - $sampling_algorithm = 1; # version 1 sampling - if (defined($sample_period) && ($sample_period ne '')) { - $sample_adjustment = int($sample_period)/2; - } - } - } else { - # We detect whether or not this is a remote-heap profile by checking - # that the total-allocated stats ($n2,$s2) are exactly the - # same as the in-use stats ($n1,$s1). It is remotely conceivable - # that a non-remote-heap profile may pass this check, but it is hard - # to imagine how that could happen. - # In this case it's so old it's guaranteed to be remote-heap version 1. - my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4); - if (($n1 == $n2) && ($s1 == $s2)) { - # This is likely to be a remote-heap based sample profile - $sampling_algorithm = 1; - } - } - } - - if ($sampling_algorithm > 0) { - # For remote-heap generated profiles, adjust the counts and sizes to - # account for the sample rate (we sample once every 128KB by default). - if ($sample_adjustment == 0) { - # Turn on profile adjustment. - $sample_adjustment = 128*1024; - print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n"; - } else { - printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n", - $sample_adjustment); - } - if ($sampling_algorithm > 1) { - # We don't bother printing anything for the original version (version 1) - printf STDERR "Heap version $sampling_algorithm\n"; - } - } - - my $profile = {}; - my $pcs = {}; - my $map = ""; - - while (<PROFILE>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - if (/^MAPPED_LIBRARIES:/) { - # Read the /proc/self/maps data - while (<PROFILE>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - $map .= $_; - } - last; - } - - if (/^--- Memory map:/) { - # Read /proc/self/maps data as formatted by DumpAddressMap() - my $buildvar = ""; - while (<PROFILE>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - # Parse "build=<dir>" specification if supplied - if (m/^\s*build=(.*)\n/) { - $buildvar = $1; - } - - # Expand "$build" variable if available - $_ =~ s/\$build\b/$buildvar/g; - - $map .= $_; - } - last; - } - - # Read entry of the form: - # <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an - s/^\s*//; - s/\s*$//; - if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) { - my $stack = $5; - my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4); - - if ($sample_adjustment) { - if ($sampling_algorithm == 2) { - # Remote-heap version 2 - # The sampling frequency is the rate of a Poisson process. 
- # This means that the probability of sampling an allocation of - # size X with sampling rate Y is 1 - exp(-X/Y) - if ($n1 != 0) { - my $ratio = (($s1*1.0)/$n1)/($sample_adjustment); - my $scale_factor = 1/(1 - exp(-$ratio)); - $n1 *= $scale_factor; - $s1 *= $scale_factor; - } - if ($n2 != 0) { - my $ratio = (($s2*1.0)/$n2)/($sample_adjustment); - my $scale_factor = 1/(1 - exp(-$ratio)); - $n2 *= $scale_factor; - $s2 *= $scale_factor; - } - } else { - # Remote-heap version 1 - my $ratio; - $ratio = (($s1*1.0)/$n1)/($sample_adjustment); - if ($ratio < 1) { - $n1 /= $ratio; - $s1 /= $ratio; - } - $ratio = (($s2*1.0)/$n2)/($sample_adjustment); - if ($ratio < 1) { - $n2 /= $ratio; - $s2 /= $ratio; - } - } - } - - my @counts = ($n1, $s1, $n2, $s2); - AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]); - } - } - - my $r = {}; - $r->{version} = "heap"; - $r->{period} = 1; - $r->{profile} = $profile; - $r->{libs} = ParseLibraries($prog, $map, $pcs); - $r->{pcs} = $pcs; - return $r; -} - -sub ReadSynchProfile { - my $prog = shift; - local *PROFILE = shift; - my $header = shift; - - my $map = ''; - my $profile = {}; - my $pcs = {}; - my $sampling_period = 1; - my $cyclespernanosec = 2.8; # Default assumption for old binaries - my $seen_clockrate = 0; - my $line; - - my $index = 0; - if ($main::opt_total_delay) { - $index = 0; - } elsif ($main::opt_contentions) { - $index = 1; - } elsif ($main::opt_mean_delay) { - $index = 2; - } - - while ( $line = <PROFILE> ) { - $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines - if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) { - my ($cycles, $count, $stack) = ($1, $2, $3); - - # Convert cycles to nanoseconds - $cycles /= $cyclespernanosec; - - # Adjust for sampling done by application - $cycles *= $sampling_period; - $count *= $sampling_period; - - my @values = ($cycles, $count, $cycles / $count); - AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]); - - } elsif ( $line =~ /^(slow release).*thread \d+ \@\s*(.*?)\s*$/ || - $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) { - my ($cycles, $stack) = ($1, $2); - if ($cycles !~ /^\d+$/) { - next; - } - - # Convert cycles to nanoseconds - $cycles /= $cyclespernanosec; - - # Adjust for sampling done by application - $cycles *= $sampling_period; - - AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles); - - } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) { - my ($variable, $value) = ($1,$2); - for ($variable, $value) { - s/^\s+//; - s/\s+$//; - } - if ($variable eq "cycles/second") { - $cyclespernanosec = $value / 1e9; - $seen_clockrate = 1; - } elsif ($variable eq "sampling period") { - $sampling_period = $value; - } elsif ($variable eq "ms since reset") { - # Currently nothing is done with this value in pprof - # So we just silently ignore it for now - } elsif ($variable eq "discarded samples") { - # Currently nothing is done with this value in pprof - # So we just silently ignore it for now - } else { - printf STDERR ("Ignoring unnknown variable in /contention output: " . 
- "'%s' = '%s'\n",$variable,$value); - } - } else { - # Memory map entry - $map .= $line; - } - } - - if (!$seen_clockrate) { - printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n", - $cyclespernanosec); - } - - my $r = {}; - $r->{version} = 0; - $r->{period} = $sampling_period; - $r->{profile} = $profile; - $r->{libs} = ParseLibraries($prog, $map, $pcs); - $r->{pcs} = $pcs; - return $r; -} - -# Given a hex value in the form "0x1abcd" or "1abcd", return either -# "0001abcd" or "000000000001abcd", depending on the current (global) -# address length. -sub HexExtend { - my $addr = shift; - - $addr =~ s/^(0x)?0*//; - my $zeros_needed = $address_length - length($addr); - if ($zeros_needed < 0) { - printf STDERR "Warning: address $addr is longer than address length $address_length\n"; - return $addr; - } - return ("0" x $zeros_needed) . $addr; -} - -##### Symbol extraction ##### - -# Aggressively search the lib_prefix values for the given library -# If all else fails, just return the name of the library unmodified. -# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so" -# it will search the following locations in this order, until it finds a file: -# /my/path/lib/dir/mylib.so -# /other/path/lib/dir/mylib.so -# /my/path/dir/mylib.so -# /other/path/dir/mylib.so -# /my/path/mylib.so -# /other/path/mylib.so -# /lib/dir/mylib.so (returned as last resort) -sub FindLibrary { - my $file = shift; - my $suffix = $file; - - # Search for the library as described above - do { - foreach my $prefix (@prefix_list) { - my $fullpath = $prefix . $suffix; - if (-e $fullpath) { - return $fullpath; - } - } - } while ($suffix =~ s|^/[^/]+/|/|); - return $file; -} - -# Return path to library with debugging symbols. -# For libc libraries, the copy in /usr/lib/debug contains debugging symbols -sub DebuggingLibrary { - my $file = shift; - if ($file =~ m|^/| && -f "/usr/lib/debug$file") { - return "/usr/lib/debug$file"; - } - return undef; -} - -# Parse text section header of a library using objdump -sub ParseTextSectionHeaderFromObjdump { - my $lib = shift; - - my $size = undef; - my $vma; - my $file_offset; - # Get objdump output from the library file to figure out how to - # map between mapped addresses and addresses in the library. - my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib); - open(OBJDUMP, "$cmd |") || error("$cmd: $!\n"); - while (<OBJDUMP>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - # Idx Name Size VMA LMA File off Algn - # 10 .text 00104b2c 420156f0 420156f0 000156f0 2**4 - # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file - # offset may still be 8. But AddressSub below will still handle that. - my @x = split; - if (($#x >= 6) && ($x[1] eq '.text')) { - $size = $x[2]; - $vma = $x[3]; - $file_offset = $x[5]; - last; - } - } - close(OBJDUMP); - - if (!defined($size)) { - return undef; - } - - my $r = {}; - $r->{size} = $size; - $r->{vma} = $vma; - $r->{file_offset} = $file_offset; - - return $r; -} - -# Parse text section header of a library using otool (on OS X) -sub ParseTextSectionHeaderFromOtool { - my $lib = shift; - - my $size = undef; - my $vma = undef; - my $file_offset = undef; - # Get otool output from the library file to figure out how to - # map between mapped addresses and addresses in the library. 
- my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib); - open(OTOOL, "$command |") || error("$command: $!\n"); - my $cmd = ""; - my $sectname = ""; - my $segname = ""; - foreach my $line (<OTOOL>) { - $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines - # Load command <#> - # cmd LC_SEGMENT - # [...] - # Section - # sectname __text - # segname __TEXT - # addr 0x000009f8 - # size 0x00018b9e - # offset 2552 - # align 2^2 (4) - # We will need to strip off the leading 0x from the hex addresses, - # and convert the offset into hex. - if ($line =~ /Load command/) { - $cmd = ""; - $sectname = ""; - $segname = ""; - } elsif ($line =~ /Section/) { - $sectname = ""; - $segname = ""; - } elsif ($line =~ /cmd (\w+)/) { - $cmd = $1; - } elsif ($line =~ /sectname (\w+)/) { - $sectname = $1; - } elsif ($line =~ /segname (\w+)/) { - $segname = $1; - } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") && - $sectname eq "__text" && - $segname eq "__TEXT")) { - next; - } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) { - $vma = $1; - } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) { - $size = $1; - } elsif ($line =~ /\boffset ([0-9]+)/) { - $file_offset = sprintf("%016x", $1); - } - if (defined($vma) && defined($size) && defined($file_offset)) { - last; - } - } - close(OTOOL); - - if (!defined($vma) || !defined($size) || !defined($file_offset)) { - return undef; - } - - my $r = {}; - $r->{size} = $size; - $r->{vma} = $vma; - $r->{file_offset} = $file_offset; - - return $r; -} - -sub ParseTextSectionHeader { - # obj_tool_map("otool") is only defined if we're in a Mach-O environment - if (defined($obj_tool_map{"otool"})) { - my $r = ParseTextSectionHeaderFromOtool(@_); - if (defined($r)){ - return $r; - } - } - # If otool doesn't work, or we don't have it, fall back to objdump - return ParseTextSectionHeaderFromObjdump(@_); -} - -# Split /proc/pid/maps dump into a list of libraries -sub ParseLibraries { - return if $main::use_symbol_page; # We don't need libraries info. - my $prog = shift; - my $map = shift; - my $pcs = shift; - - my $result = []; - my $h = "[a-f0-9]+"; - my $zero_offset = HexExtend("0"); - - my $buildvar = ""; - foreach my $l (split("\n", $map)) { - if ($l =~ m/^\s*build=(.*)$/) { - $buildvar = $1; - } - - my $start; - my $finish; - my $offset; - my $lib; - if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) { - # Full line from /proc/self/maps. Example: - # 40000000-40015000 r-xp 00000000 03:01 12845071 /lib/ld-2.3.2.so - $start = HexExtend($1); - $finish = HexExtend($2); - $offset = HexExtend($3); - $lib = $4; - $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths - } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) { - # Cooked line from DumpAddressMap. Example: - # 40000000-40015000: /lib/ld-2.3.2.so - $start = HexExtend($1); - $finish = HexExtend($2); - $offset = $zero_offset; - $lib = $3; - } else { - next; - } - - # Expand "$build" variable if available - $lib =~ s/\$build\b/$buildvar/g; - - $lib = FindLibrary($lib); - - # Check for pre-relocated libraries, which use pre-relocated symbol tables - # and thus require adjusting the offset that we'll use to translate - # VM addresses into symbol table addresses. - # Only do this if we're not going to fetch the symbol table from a - # debugging copy of the library. 
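To make the /proc/self/maps parsing above concrete, here is the example line quoted in the comments run through a simplified version of the same pattern (the shared-object suffix alternatives are trimmed for brevity):

#!/usr/bin/perl -w
use strict;

my $h = "[a-f0-9]+";
my $l = "40000000-40015000 r-xp 00000000 03:01 12845071 /lib/ld-2.3.2.so";
if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.so(\.\d+)*)$/i) {
  print "start=$1 finish=$2 offset=$3 lib=$4\n";
}
# start=40000000 finish=40015000 offset=00000000 lib=/lib/ld-2.3.2.so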
- if (!DebuggingLibrary($lib)) { - my $text = ParseTextSectionHeader($lib); - if (defined($text)) { - my $vma_offset = AddressSub($text->{vma}, $text->{file_offset}); - $offset = AddressAdd($offset, $vma_offset); - } - } - - push(@{$result}, [$lib, $start, $finish, $offset]); - } - - # Append special entry for additional library (not relocated) - if ($main::opt_lib ne "") { - my $text = ParseTextSectionHeader($main::opt_lib); - if (defined($text)) { - my $start = $text->{vma}; - my $finish = AddressAdd($start, $text->{size}); - - push(@{$result}, [$main::opt_lib, $start, $finish, $start]); - } - } - - # Append special entry for the main program. This covers - # 0..max_pc_value_seen, so that we assume pc values not found in one - # of the library ranges will be treated as coming from the main - # program binary. - my $min_pc = HexExtend("0"); - my $max_pc = $min_pc; # find the maximal PC value in any sample - foreach my $pc (keys(%{$pcs})) { - if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); } - } - push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]); - - return $result; -} - -# Add two hex addresses of length $address_length. -# Run pprof --test for unit test if this is changed. -sub AddressAdd { - my $addr1 = shift; - my $addr2 = shift; - my $sum; - - if ($address_length == 8) { - # Perl doesn't cope with wraparound arithmetic, so do it explicitly: - $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16); - return sprintf("%08x", $sum); - - } else { - # Do the addition in 7-nibble chunks to trivialize carry handling. - - if ($main::opt_debug and $main::opt_test) { - print STDERR "AddressAdd $addr1 + $addr2 = "; - } - - my $a1 = substr($addr1,-7); - $addr1 = substr($addr1,0,-7); - my $a2 = substr($addr2,-7); - $addr2 = substr($addr2,0,-7); - $sum = hex($a1) + hex($a2); - my $c = 0; - if ($sum > 0xfffffff) { - $c = 1; - $sum -= 0x10000000; - } - my $r = sprintf("%07x", $sum); - - $a1 = substr($addr1,-7); - $addr1 = substr($addr1,0,-7); - $a2 = substr($addr2,-7); - $addr2 = substr($addr2,0,-7); - $sum = hex($a1) + hex($a2) + $c; - $c = 0; - if ($sum > 0xfffffff) { - $c = 1; - $sum -= 0x10000000; - } - $r = sprintf("%07x", $sum) . $r; - - $sum = hex($addr1) + hex($addr2) + $c; - if ($sum > 0xff) { $sum -= 0x100; } - $r = sprintf("%02x", $sum) . $r; - - if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; } - - return $r; - } -} - - -# Subtract two hex addresses of length $address_length. -# Run pprof --test for unit test if this is changed. -sub AddressSub { - my $addr1 = shift; - my $addr2 = shift; - my $diff; - - if ($address_length == 8) { - # Perl doesn't cope with wraparound arithmetic, so do it explicitly: - $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16); - return sprintf("%08x", $diff); - - } else { - # Do the addition in 7-nibble chunks to trivialize borrow handling. - # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; } - - my $a1 = hex(substr($addr1,-7)); - $addr1 = substr($addr1,0,-7); - my $a2 = hex(substr($addr2,-7)); - $addr2 = substr($addr2,0,-7); - my $b = 0; - if ($a2 > $a1) { - $b = 1; - $a1 += 0x10000000; - } - $diff = $a1 - $a2; - my $r = sprintf("%07x", $diff); - - $a1 = hex(substr($addr1,-7)); - $addr1 = substr($addr1,0,-7); - $a2 = hex(substr($addr2,-7)) + $b; - $addr2 = substr($addr2,0,-7); - $b = 0; - if ($a2 > $a1) { - $b = 1; - $a1 += 0x10000000; - } - $diff = $a1 - $a2; - $r = sprintf("%07x", $diff) . 
$r; - - $a1 = hex($addr1); - $a2 = hex($addr2) + $b; - if ($a2 > $a1) { $a1 += 0x100; } - $diff = $a1 - $a2; - $r = sprintf("%02x", $diff) . $r; - - # if ($main::opt_debug) { print STDERR "$r\n"; } - - return $r; - } -} - -# Increment a hex addresses of length $address_length. -# Run pprof --test for unit test if this is changed. -sub AddressInc { - my $addr = shift; - my $sum; - - if ($address_length == 8) { - # Perl doesn't cope with wraparound arithmetic, so do it explicitly: - $sum = (hex($addr)+1) % (0x10000000 * 16); - return sprintf("%08x", $sum); - - } else { - # Do the addition in 7-nibble chunks to trivialize carry handling. - # We are always doing this to step through the addresses in a function, - # and will almost never overflow the first chunk, so we check for this - # case and exit early. - - # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; } - - my $a1 = substr($addr,-7); - $addr = substr($addr,0,-7); - $sum = hex($a1) + 1; - my $r = sprintf("%07x", $sum); - if ($sum <= 0xfffffff) { - $r = $addr . $r; - # if ($main::opt_debug) { print STDERR "$r\n"; } - return HexExtend($r); - } else { - $r = "0000000"; - } - - $a1 = substr($addr,-7); - $addr = substr($addr,0,-7); - $sum = hex($a1) + 1; - $r = sprintf("%07x", $sum) . $r; - if ($sum <= 0xfffffff) { - $r = $addr . $r; - # if ($main::opt_debug) { print STDERR "$r\n"; } - return HexExtend($r); - } else { - $r = "00000000000000"; - } - - $sum = hex($addr) + 1; - if ($sum > 0xff) { $sum -= 0x100; } - $r = sprintf("%02x", $sum) . $r; - - # if ($main::opt_debug) { print STDERR "$r\n"; } - return $r; - } -} - -# Extract symbols for all PC values found in profile -sub ExtractSymbols { - my $libs = shift; - my $pcset = shift; - - my $symbols = {}; - - # Map each PC value to the containing library. To make this faster, - # we sort libraries by their starting pc value (highest first), and - # advance through the libraries as we advance the pc. Sometimes the - # addresses of libraries may overlap with the addresses of the main - # binary, so to make sure the libraries 'win', we iterate over the - # libraries in reverse order (which assumes the binary doesn't start - # in the middle of a library, which seems a fair assumption). - my @pcs = (sort { $a cmp $b } keys(%{$pcset})); # pcset is 0-extended strings - foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) { - my $libname = $lib->[0]; - my $start = $lib->[1]; - my $finish = $lib->[2]; - my $offset = $lib->[3]; - - # Get list of pcs that belong in this library. - my $contained = []; - my ($start_pc_index, $finish_pc_index); - # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index]. - for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0; - $finish_pc_index--) { - last if $pcs[$finish_pc_index - 1] le $finish; - } - # Find smallest start_pc_index such that $start <= $pc[$start_pc_index]. - for ($start_pc_index = $finish_pc_index; $start_pc_index > 0; - $start_pc_index--) { - last if $pcs[$start_pc_index - 1] lt $start; - } - # This keeps PC values higher than $pc[$finish_pc_index] in @pcs, - # in case there are overlaps in libraries and the main binary. 
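As a condensed sketch of the 7-nibble chunking used by AddressAdd above (assuming two 16-digit hex operands, i.e. the 64-bit case, so that each hex() call stays within 32 bits):

#!/usr/bin/perl -w
use strict;

sub add16 {
  my ($a1, $a2) = @_;                                          # two 16-digit hex strings
  my $sum = hex(substr($a1, -7)) + hex(substr($a2, -7));       # low 7 nibbles
  my $c   = $sum > 0xfffffff ? 1 : 0;  $sum -= 0x10000000 if $c;
  my $r   = sprintf("%07x", $sum);
  $sum = hex(substr($a1, 2, 7)) + hex(substr($a2, 2, 7)) + $c; # middle 7 nibbles
  $c   = $sum > 0xfffffff ? 1 : 0;  $sum -= 0x10000000 if $c;
  $r   = sprintf("%07x", $sum) . $r;
  $sum = hex(substr($a1, 0, 2)) + hex(substr($a2, 0, 2)) + $c; # top 2 nibbles
  $sum -= 0x100 if $sum > 0xff;                                # wrap around, as in the original
  return sprintf("%02x", $sum) . $r;
}

print add16("00000000ffffffff", "0000000000000001"), "\n";     # 0000000100000000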
- @{$contained} = splice(@pcs, $start_pc_index, - $finish_pc_index - $start_pc_index); - # Map to symbols - MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols); - } - - return $symbols; -} - -# Map list of PC values to symbols for a given image -sub MapToSymbols { - my $image = shift; - my $offset = shift; - my $pclist = shift; - my $symbols = shift; - - my $debug = 0; - - # Ignore empty binaries - if ($#{$pclist} < 0) { return; } - - # Figure out the addr2line command to use - my $addr2line = $obj_tool_map{"addr2line"}; - my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image); - if (exists $obj_tool_map{"addr2line_pdb"}) { - $addr2line = $obj_tool_map{"addr2line_pdb"}; - $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image); - } - - # If "addr2line" isn't installed on the system at all, just use - # nm to get what info we can (function names, but not line numbers). - if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) { - MapSymbolsWithNM($image, $offset, $pclist, $symbols); - return; - } - - # "addr2line -i" can produce a variable number of lines per input - # address, with no separator that allows us to tell when data for - # the next address starts. So we find the address for a special - # symbol (_fini) and interleave this address between all real - # addresses passed to addr2line. The name of this special symbol - # can then be used as a separator. - $sep_address = undef; # May be filled in by MapSymbolsWithNM() - my $nm_symbols = {}; - MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols); - if (defined($sep_address)) { - # Only add " -i" to addr2line if the binary supports it. - # addr2line --help returns 0, but not if it sees an unknown flag first. - if (system("$cmd -i --help >$dev_null 2>&1") == 0) { - $cmd .= " -i"; - } else { - $sep_address = undef; # no need for sep_address if we don't support -i - } - } - - # Make file with all PC values with intervening 'sep_address' so - # that we can reliably detect the end of inlined function list - open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n"); - if ($debug) { print("---- $image ---\n"); } - for (my $i = 0; $i <= $#{$pclist}; $i++) { - # addr2line always reads hex addresses, and does not need '0x' prefix. - if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); } - printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset)); - if (defined($sep_address)) { - printf ADDRESSES ("%s\n", $sep_address); - } - } - close(ADDRESSES); - if ($debug) { - print("----\n"); - system("cat", $main::tmpfile_sym); - print("----\n"); - system("$cmd < " . ShellEscape($main::tmpfile_sym)); - print("----\n"); - } - - open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . " |") - || error("$cmd: $!\n"); - my $count = 0; # Index in pclist - while (<SYMBOLS>) { - # Read fullfunction and filelineinfo from next pair of lines - s/\r?\n$//g; - my $fullfunction = $_; - $_ = <SYMBOLS>; - s/\r?\n$//g; - my $filelinenum = $_; - - if (defined($sep_address) && $fullfunction eq $sep_symbol) { - # Terminating marker for data for this address - $count++; - next; - } - - $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths - - my $pcstr = $pclist->[$count]; - my $function = ShortFunctionName($fullfunction); - my $nms = $nm_symbols->{$pcstr}; - if (defined($nms)) { - if ($fullfunction eq '??') { - # nm found a symbol for us. 
- $function = $nms->[0]; - $fullfunction = $nms->[2]; - } else { - # MapSymbolsWithNM tags each routine with its starting address, - # useful in case the image has multiple occurrences of this - # routine. (It uses a syntax that resembles template parameters, - # that are automatically stripped out by ShortFunctionName().) - # addr2line does not provide the same information. So we check - # if nm disambiguated our symbol, and if so take the annotated - # (nm) version of the routine-name. TODO(csilvers): this won't - # catch overloaded, inlined symbols, which nm doesn't see. - # Better would be to do a check similar to nm's, in this fn. - if ($nms->[2] =~ m/^\Q$function\E/) { # sanity check it's the right fn - $function = $nms->[0]; - $fullfunction = $nms->[2]; - } - } - } - - # Prepend to accumulated symbols for pcstr - # (so that caller comes before callee) - my $sym = $symbols->{$pcstr}; - if (!defined($sym)) { - $sym = []; - $symbols->{$pcstr} = $sym; - } - unshift(@{$sym}, $function, $filelinenum, $fullfunction); - if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); } - if (!defined($sep_address)) { - # Inlining is off, so this entry ends immediately - $count++; - } - } - close(SYMBOLS); -} - -# Use nm to map the list of referenced PCs to symbols. Return true iff we -# are able to read procedure information via nm. -sub MapSymbolsWithNM { - my $image = shift; - my $offset = shift; - my $pclist = shift; - my $symbols = shift; - - # Get nm output sorted by increasing address - my $symbol_table = GetProcedureBoundaries($image, "."); - if (!%{$symbol_table}) { - return 0; - } - # Start addresses are already the right length (8 or 16 hex digits). - my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] } - keys(%{$symbol_table}); - - if ($#names < 0) { - # No symbols: just use addresses - foreach my $pc (@{$pclist}) { - my $pcstr = "0x" . $pc; - $symbols->{$pc} = [$pcstr, "?", $pcstr]; - } - return 0; - } - - # Sort addresses so we can do a join against nm output - my $index = 0; - my $fullname = $names[0]; - my $name = ShortFunctionName($fullname); - foreach my $pc (sort { $a cmp $b } @{$pclist}) { - # Adjust for mapped offset - my $mpc = AddressSub($pc, $offset); - while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){ - $index++; - $fullname = $names[$index]; - $name = ShortFunctionName($fullname); - } - if ($mpc lt $symbol_table->{$fullname}->[1]) { - $symbols->{$pc} = [$name, "?", $fullname]; - } else { - my $pcstr = "0x" . 
$pc; - $symbols->{$pc} = [$pcstr, "?", $pcstr]; - } - } - return 1; -} - -sub ShortFunctionName { - my $function = shift; - while ($function =~ s/\([^()]*\)(\s*const)?//g) { } # Argument types - while ($function =~ s/<[^<>]*>//g) { } # Remove template arguments - $function =~ s/^.*\s+(\w+::)/$1/; # Remove leading type - return $function; -} - -# Trim overly long symbols found in disassembler output -sub CleanDisassembly { - my $d = shift; - while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax) - while ($d =~ s/(\w+)<[^<>]*>/$1/g) { } # Remove template arguments - return $d; -} - -# Clean file name for display -sub CleanFileName { - my ($f) = @_; - $f =~ s|^/proc/self/cwd/||; - $f =~ s|^\./||; - return $f; -} - -# Make address relative to section and clean up for display -sub UnparseAddress { - my ($offset, $address) = @_; - $address = AddressSub($address, $offset); - $address =~ s/^0x//; - $address =~ s/^0*//; - return $address; -} - -##### Miscellaneous ##### - -# Find the right versions of the above object tools to use. The -# argument is the program file being analyzed, and should be an ELF -# 32-bit or ELF 64-bit executable file. The location of the tools -# is determined by considering the following options in this order: -# 1) --tools option, if set -# 2) PPROF_TOOLS environment variable, if set -# 3) the environment -sub ConfigureObjTools { - my $prog_file = shift; - - # Check for the existence of $prog_file because /usr/bin/file does not - # predictably return error status in prod. - (-e $prog_file) || error("$prog_file does not exist.\n"); - - my $file_type = undef; - if (-e "/usr/bin/file") { - # Follow symlinks (at least for systems where "file" supports that). - my $escaped_prog_file = ShellEscape($prog_file); - $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null || - /usr/bin/file $escaped_prog_file`; - } elsif ($^O == "MSWin32") { - $file_type = "MS Windows"; - } else { - print STDERR "WARNING: Can't determine the file type of $prog_file"; - } - - if ($file_type =~ /64-bit/) { - # Change $address_length to 16 if the program file is ELF 64-bit. - # We can't detect this from many (most?) heap or lock contention - # profiles, since the actual addresses referenced are generally in low - # memory even for 64-bit programs. - $address_length = 16; - } - - if ($file_type =~ /MS Windows/) { - # For windows, we provide a version of nm and addr2line as part of - # the opensource release, which is capable of parsing - # Windows-style PDB executables. It should live in the path, or - # in the same directory as pprof. - $obj_tool_map{"nm_pdb"} = "nm-pdb"; - $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb"; - } - - if ($file_type =~ /Mach-O/) { - # OS X uses otool to examine Mach-O files, rather than objdump. - $obj_tool_map{"otool"} = "otool"; - $obj_tool_map{"addr2line"} = "false"; # no addr2line - $obj_tool_map{"objdump"} = "false"; # no objdump - } - - # Go fill in %obj_tool_map with the pathnames to use: - foreach my $tool (keys %obj_tool_map) { - $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool}); - } -} - -# Returns the path of a caller-specified object tool. If --tools or -# PPROF_TOOLS are specified, then returns the full path to the tool -# with that prefix. Otherwise, returns the path unmodified (which -# means we will look for it on PATH). 
-sub ConfigureTool { - my $tool = shift; - my $path; - - # --tools (or $PPROF_TOOLS) is a comma separated list, where each - # item is either a) a pathname prefix, or b) a map of the form - # <tool>:<path>. First we look for an entry of type (b) for our - # tool. If one is found, we use it. Otherwise, we consider all the - # pathname prefixes in turn, until one yields an existing file. If - # none does, we use a default path. - my $tools = $main::opt_tools || $ENV{"PPROF_TOOLS"} || ""; - if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) { - $path = $2; - # TODO(csilvers): sanity-check that $path exists? Hard if it's relative. - } elsif ($tools ne '') { - foreach my $prefix (split(',', $tools)) { - next if ($prefix =~ /:/); # ignore "tool:fullpath" entries in the list - if (-x $prefix . $tool) { - $path = $prefix . $tool; - last; - } - } - if (!$path) { - error("No '$tool' found with prefix specified by " . - "--tools (or \$PPROF_TOOLS) '$tools'\n"); - } - } else { - # ... otherwise use the version that exists in the same directory as - # pprof. If there's nothing there, use $PATH. - $0 =~ m,[^/]*$,; # this is everything after the last slash - my $dirname = $`; # this is everything up to and including the last slash - if (-x "$dirname$tool") { - $path = "$dirname$tool"; - } else { - $path = $tool; - } - } - if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; } - return $path; -} - -sub ShellEscape { - my @escaped_words = (); - foreach my $word (@_) { - my $escaped_word = $word; - if ($word =~ m![^a-zA-Z0-9/.,_=-]!) { # check for anything not in whitelist - $escaped_word =~ s/'/'\\''/; - $escaped_word = "'$escaped_word'"; - } - push(@escaped_words, $escaped_word); - } - return join(" ", @escaped_words); -} - -sub cleanup { - unlink($main::tmpfile_sym); - unlink(keys %main::tempnames); - - # We leave any collected profiles in $HOME/pprof in case the user wants - # to look at them later. We print a message informing them of this. - if ((scalar(@main::profile_files) > 0) && - defined($main::collected_profile)) { - if (scalar(@main::profile_files) == 1) { - print STDERR "Dynamically gathered profile is in $main::collected_profile\n"; - } - print STDERR "If you want to investigate this profile further, you can do:\n"; - print STDERR "\n"; - print STDERR " pprof \\\n"; - print STDERR " $main::prog \\\n"; - print STDERR " $main::collected_profile\n"; - print STDERR "\n"; - } -} - -sub sighandler { - cleanup(); - exit(1); -} - -sub error { - my $msg = shift; - print STDERR $msg; - cleanup(); - exit(1); -} - - -# Run $nm_command and get all the resulting procedure boundaries whose -# names match "$regexp" and returns them in a hashtable mapping from -# procedure name to a two-element vector of [start address, end address] -sub GetProcedureBoundariesViaNm { - my $escaped_nm_command = shift; # shell-escaped - my $regexp = shift; - - my $symbol_table = {}; - open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n"); - my $last_start = "0"; - my $routine = ""; - while (<NM>) { - s/\r//g; # turn windows-looking lines into unix-looking lines - if (m/^\s*([0-9a-f]+) (.) (..*)/) { - my $start_val = $1; - my $type = $2; - my $this_routine = $3; - - # It's possible for two symbols to share the same address, if - # one is a zero-length variable (like __start_google_malloc) or - # one symbol is a weak alias to another (like __libc_malloc). - # In such cases, we want to ignore all values except for the - # actual symbol, which in nm-speak has type "T". 
The logic - # below does this, though it's a bit tricky: what happens when - # we have a series of lines with the same address, is the first - # one gets queued up to be processed. However, it won't - # *actually* be processed until later, when we read a line with - # a different address. That means that as long as we're reading - # lines with the same address, we have a chance to replace that - # item in the queue, which we do whenever we see a 'T' entry -- - # that is, a line with type 'T'. If we never see a 'T' entry, - # we'll just go ahead and process the first entry (which never - # got touched in the queue), and ignore the others. - if ($start_val eq $last_start && $type =~ /t/i) { - # We are the 'T' symbol at this address, replace previous symbol. - $routine = $this_routine; - next; - } elsif ($start_val eq $last_start) { - # We're not the 'T' symbol at this address, so ignore us. - next; - } - - if ($this_routine eq $sep_symbol) { - $sep_address = HexExtend($start_val); - } - - # Tag this routine with the starting address in case the image - # has multiple occurrences of this routine. We use a syntax - # that resembles template parameters that are automatically - # stripped out by ShortFunctionName() - $this_routine .= "<$start_val>"; - - if (defined($routine) && $routine =~ m/$regexp/) { - $symbol_table->{$routine} = [HexExtend($last_start), - HexExtend($start_val)]; - } - $last_start = $start_val; - $routine = $this_routine; - } elsif (m/^Loaded image name: (.+)/) { - # The win32 nm workalike emits information about the binary it is using. - if ($main::opt_debug) { print STDERR "Using Image $1\n"; } - } elsif (m/^PDB file name: (.+)/) { - # The win32 nm workalike emits information about the pdb it is using. - if ($main::opt_debug) { print STDERR "Using PDB $1\n"; } - } - } - close(NM); - # Handle the last line in the nm output. Unfortunately, we don't know - # how big this last symbol is, because we don't know how big the file - # is. For now, we just give it a size of 0. - # TODO(csilvers): do better here. - if (defined($routine) && $routine =~ m/$regexp/) { - $symbol_table->{$routine} = [HexExtend($last_start), - HexExtend($last_start)]; - } - return $symbol_table; -} - -# Gets the procedure boundaries for all routines in "$image" whose names -# match "$regexp" and returns them in a hashtable mapping from procedure -# name to a two-element vector of [start address, end address]. -# Will return an empty map if nm is not installed or not working properly. -sub GetProcedureBoundaries { - my $image = shift; - my $regexp = shift; - - # If $image doesn't start with /, then put ./ in front of it. This works - # around an obnoxious bug in our probing of nm -f behavior. - # "nm -f $image" is supposed to fail on GNU nm, but if: - # - # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND - # b. you have a.out in your current directory (a not uncommon occurrence) - # - # then "nm -f $image" succeeds because -f only looks at the first letter of - # the argument, which looks valid because it's [BbSsPp], and then since - # there's no image provided, it looks for a.out and finds it. - # - # This regex makes sure that $image starts with . or /, forcing the -f - # parsing to fail since . and / are not valid formats. 
- $image =~ s#^[^/]#./$&#; - - # For libc libraries, the copy in /usr/lib/debug contains debugging symbols - my $debugging = DebuggingLibrary($image); - if ($debugging) { - $image = $debugging; - } - - my $nm = $obj_tool_map{"nm"}; - my $cppfilt = $obj_tool_map{"c++filt"}; - - # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm - # binary doesn't support --demangle. In addition, for OS X we need - # to use the -f flag to get 'flat' nm output (otherwise we don't sort - # properly and get incorrect results). Unfortunately, GNU nm uses -f - # in an incompatible way. So first we test whether our nm supports - # --demangle and -f. - my $demangle_flag = ""; - my $cppfilt_flag = ""; - my $to_devnull = ">$dev_null 2>&1"; - if (system(ShellEscape($nm, "--demangle", "image") . $to_devnull) == 0) { - # In this mode, we do "nm --demangle <foo>" - $demangle_flag = "--demangle"; - $cppfilt_flag = ""; - } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) { - # In this mode, we do "nm <foo> | c++filt" - $cppfilt_flag = " | " . ShellEscape($cppfilt); - }; - my $flatten_flag = ""; - if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) { - $flatten_flag = "-f"; - } - - # Finally, in the case $image isn't a debug library, we try again with - # -D to at least get *exported* symbols. If we can't use --demangle, - # we use c++filt instead, if it exists on this system. - my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag, - $image) . " 2>$dev_null $cppfilt_flag", - ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag, - $image) . " 2>$dev_null $cppfilt_flag", - # 6nm is for Go binaries - ShellEscape("6nm", "$image") . " 2>$dev_null | sort", - ); - - # If the executable is an MS Windows PDB-format executable, we'll - # have set up obj_tool_map("nm_pdb"). In this case, we actually - # want to use both unix nm and windows-specific nm_pdb, since - # PDB-format executables can apparently include dwarf .o files. - if (exists $obj_tool_map{"nm_pdb"}) { - push(@nm_commands, - ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image) - . " 2>$dev_null"); - } - - foreach my $nm_command (@nm_commands) { - my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp); - return $symbol_table if (%{$symbol_table}); - } - my $symbol_table = {}; - return $symbol_table; -} - - -# The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings. -# To make them more readable, we add underscores at interesting places. -# This routine removes the underscores, producing the canonical representation -# used by pprof to represent addresses, particularly in the tested routines. -sub CanonicalHex { - my $arg = shift; - return join '', (split '_',$arg); -} - - -# Unit test for AddressAdd: -sub AddressAddUnitTest { - my $test_data_8 = shift; - my $test_data_16 = shift; - my $error_count = 0; - my $fail_count = 0; - my $pass_count = 0; - # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n"; - - # First a few 8-nibble addresses. 
Note that this implementation uses - # plain old arithmetic, so a quick sanity check along with verifying what - # happens to overflow (we want it to wrap): - $address_length = 8; - foreach my $row (@{$test_data_8}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressAdd ($row->[0], $row->[1]); - if ($sum ne $row->[2]) { - printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, - $row->[0], $row->[1], $row->[2]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count = $fail_count; - $fail_count = 0; - $pass_count = 0; - - # Now 16-nibble addresses. - $address_length = 16; - foreach my $row (@{$test_data_16}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1])); - my $expected = join '', (split '_',$row->[2]); - if ($sum ne CanonicalHex($row->[2])) { - printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, - $row->[0], $row->[1], $row->[2]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count += $fail_count; - - return $error_count; -} - - -# Unit test for AddressSub: -sub AddressSubUnitTest { - my $test_data_8 = shift; - my $test_data_16 = shift; - my $error_count = 0; - my $fail_count = 0; - my $pass_count = 0; - # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n"; - - # First a few 8-nibble addresses. Note that this implementation uses - # plain old arithmetic, so a quick sanity check along with verifying what - # happens to overflow (we want it to wrap): - $address_length = 8; - foreach my $row (@{$test_data_8}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressSub ($row->[0], $row->[1]); - if ($sum ne $row->[3]) { - printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, - $row->[0], $row->[1], $row->[3]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count = $fail_count; - $fail_count = 0; - $pass_count = 0; - - # Now 16-nibble addresses. - $address_length = 16; - foreach my $row (@{$test_data_16}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1])); - if ($sum ne CanonicalHex($row->[3])) { - printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, - $row->[0], $row->[1], $row->[3]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count += $fail_count; - - return $error_count; -} - - -# Unit test for AddressInc: -sub AddressIncUnitTest { - my $test_data_8 = shift; - my $test_data_16 = shift; - my $error_count = 0; - my $fail_count = 0; - my $pass_count = 0; - # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n"; - - # First a few 8-nibble addresses. 
Note that this implementation uses - # plain old arithmetic, so a quick sanity check along with verifying what - # happens to overflow (we want it to wrap): - $address_length = 8; - foreach my $row (@{$test_data_8}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressInc ($row->[0]); - if ($sum ne $row->[4]) { - printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, - $row->[0], $row->[4]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count = $fail_count; - $fail_count = 0; - $pass_count = 0; - - # Now 16-nibble addresses. - $address_length = 16; - foreach my $row (@{$test_data_16}) { - if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } - my $sum = AddressInc (CanonicalHex($row->[0])); - if ($sum ne CanonicalHex($row->[4])) { - printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, - $row->[0], $row->[4]; - ++$fail_count; - } else { - ++$pass_count; - } - } - printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n", - $pass_count, $fail_count; - $error_count += $fail_count; - - return $error_count; -} - - -# Driver for unit tests. -# Currently just the address add/subtract/increment routines for 64-bit. -sub RunUnitTests { - my $error_count = 0; - - # This is a list of tuples [a, b, a+b, a-b, a+1] - my $unit_test_data_8 = [ - [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)], - [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)], - [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)], - [qw(00000001 ffffffff 00000000 00000002 00000002)], - [qw(00000001 fffffff0 fffffff1 00000011 00000002)], - ]; - my $unit_test_data_16 = [ - # The implementation handles data in 7-nibble chunks, so those are the - # interesting boundaries. - [qw(aaaaaaaa 50505050 - 00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)], - [qw(50505050 aaaaaaaa - 00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)], - [qw(ffffffff aaaaaaaa - 00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)], - [qw(00000001 ffffffff - 00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)], - [qw(00000001 fffffff0 - 00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)], - - [qw(00_a00000a_aaaaaaa 50505050 - 00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)], - [qw(0f_fff0005_0505050 aaaaaaaa - 0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)], - [qw(00_000000f_fffffff 01_800000a_aaaaaaa - 01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)], - [qw(00_0000000_0000001 ff_fffffff_fffffff - 00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)], - [qw(00_0000000_0000001 ff_fffffff_ffffff0 - ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)], - ]; - - $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16); - $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16); - $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16); - if ($error_count > 0) { - print STDERR $error_count, " errors: FAILED\n"; - } else { - print STDERR "PASS\n"; - } - exit ($error_count); -} diff --git a/extra/jemalloc/config.guess b/extra/jemalloc/config.guess deleted file mode 100755 index d622a44e551..00000000000 --- a/extra/jemalloc/config.guess +++ /dev/null @@ -1,1530 +0,0 @@ -#! /bin/sh -# Attempt to guess a canonical system name. 
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012 Free Software Foundation, Inc. - -timestamp='2012-02-10' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see <http://www.gnu.org/licenses/>. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - - -# Originally written by Per Bothner. Please send patches (context -# diff format) to <config-patches@gnu.org> and include a ChangeLog -# entry. -# -# This script attempts to guess a canonical system name similar to -# config.sub. If it succeeds, it prints the system name on stdout, and -# exits with 0. Otherwise, it exits with 1. -# -# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] - -Output the configuration name of the system \`$me' is run on. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to <config-patches@gnu.org>." - -version="\ -GNU config.guess ($timestamp) - -Originally written by Per Bothner. -Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, -2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 -Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. - break ;; - -* ) - echo "$me: invalid option $1$help" >&2 - exit 1 ;; - * ) - break ;; - esac -done - -if test $# != 0; then - echo "$me: too many arguments$help" >&2 - exit 1 -fi - -trap 'exit 1' 1 2 15 - -# CC_FOR_BUILD -- compiler used by this script. Note that the use of a -# compiler to aid in system detection is discouraged as it requires -# temporary files to be created and, as you can see below, it is a -# headache to deal with in a portable fashion. - -# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still -# use `HOST_CC' if defined, but it is deprecated. - -# Portable tmp directory creation inspired by the Autoconf team. 
- -set_cc_for_build=' -trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; -trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; -: ${TMPDIR=/tmp} ; - { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || - { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || - { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || - { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; -dummy=$tmp/dummy ; -tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; -case $CC_FOR_BUILD,$HOST_CC,$CC in - ,,) echo "int x;" > $dummy.c ; - for c in cc gcc c89 c99 ; do - if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then - CC_FOR_BUILD="$c"; break ; - fi ; - done ; - if test x"$CC_FOR_BUILD" = x ; then - CC_FOR_BUILD=no_compiler_found ; - fi - ;; - ,,*) CC_FOR_BUILD=$CC ;; - ,*,*) CC_FOR_BUILD=$HOST_CC ;; -esac ; set_cc_for_build= ;' - -# This is needed to find uname on a Pyramid OSx when run in the BSD universe. -# (ghazi@noc.rutgers.edu 1994-08-24) -if (test -f /.attbin/uname) >/dev/null 2>&1 ; then - PATH=$PATH:/.attbin ; export PATH -fi - -UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown -UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown -UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown -UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown - -# Note: order is significant - the case branches are not exclusive. - -case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in - *:NetBSD:*:*) - # NetBSD (nbsd) targets should (where applicable) match one or - # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, - # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently - # switched to ELF, *-*-netbsd* would select the old - # object file format. This provides both forward - # compatibility and a consistent mechanism for selecting the - # object file format. - # - # Note: NetBSD doesn't particularly care about the vendor - # portion of the name. We always set it to "unknown". - sysctl="sysctl -n hw.machine_arch" - UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ - /usr/sbin/$sysctl 2>/dev/null || echo unknown)` - case "${UNAME_MACHINE_ARCH}" in - armeb) machine=armeb-unknown ;; - arm*) machine=arm-unknown ;; - sh3el) machine=shl-unknown ;; - sh3eb) machine=sh-unknown ;; - sh5el) machine=sh5le-unknown ;; - *) machine=${UNAME_MACHINE_ARCH}-unknown ;; - esac - # The Operating System including object format, if it has switched - # to ELF recently, or will in the future. - case "${UNAME_MACHINE_ARCH}" in - arm*|i386|m68k|ns32k|sh3*|sparc|vax) - eval $set_cc_for_build - if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ELF__ - then - # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). - # Return netbsd for either. FIX? - os=netbsd - else - os=netbsdelf - fi - ;; - *) - os=netbsd - ;; - esac - # The OS release - # Debian GNU/NetBSD machines have a different userland, and - # thus, need a distinct triplet. However, they do not need - # kernel version information, so it can be replaced with a - # suitable tag, in the style of linux-gnu. 
- case "${UNAME_VERSION}" in - Debian*) - release='-gnu' - ;; - *) - release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` - ;; - esac - # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: - # contains redundant information, the shorter form: - # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. - echo "${machine}-${os}${release}" - exit ;; - *:OpenBSD:*:*) - UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} - exit ;; - *:ekkoBSD:*:*) - echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} - exit ;; - *:SolidBSD:*:*) - echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} - exit ;; - macppc:MirBSD:*:*) - echo powerpc-unknown-mirbsd${UNAME_RELEASE} - exit ;; - *:MirBSD:*:*) - echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} - exit ;; - alpha:OSF1:*:*) - case $UNAME_RELEASE in - *4.0) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` - ;; - *5.*) - UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` - ;; - esac - # According to Compaq, /usr/sbin/psrinfo has been available on - # OSF/1 and Tru64 systems produced since 1995. I hope that - # covers most systems running today. This code pipes the CPU - # types through head -n 1, so we only detect the type of CPU 0. - ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` - case "$ALPHA_CPU_TYPE" in - "EV4 (21064)") - UNAME_MACHINE="alpha" ;; - "EV4.5 (21064)") - UNAME_MACHINE="alpha" ;; - "LCA4 (21066/21068)") - UNAME_MACHINE="alpha" ;; - "EV5 (21164)") - UNAME_MACHINE="alphaev5" ;; - "EV5.6 (21164A)") - UNAME_MACHINE="alphaev56" ;; - "EV5.6 (21164PC)") - UNAME_MACHINE="alphapca56" ;; - "EV5.7 (21164PC)") - UNAME_MACHINE="alphapca57" ;; - "EV6 (21264)") - UNAME_MACHINE="alphaev6" ;; - "EV6.7 (21264A)") - UNAME_MACHINE="alphaev67" ;; - "EV6.8CB (21264C)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8AL (21264B)") - UNAME_MACHINE="alphaev68" ;; - "EV6.8CX (21264D)") - UNAME_MACHINE="alphaev68" ;; - "EV6.9A (21264/EV69A)") - UNAME_MACHINE="alphaev69" ;; - "EV7 (21364)") - UNAME_MACHINE="alphaev7" ;; - "EV7.9 (21364A)") - UNAME_MACHINE="alphaev79" ;; - esac - # A Pn.n version is a patched version. - # A Vn.n version is a released version. - # A Tn.n version is a released field test version. - # A Xn.n version is an unreleased experimental baselevel. - # 1.2 uses "1.2" for uname -r. - echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - # Reset EXIT trap before exiting to avoid spurious non-zero exit code. - exitcode=$? - trap '' 0 - exit $exitcode ;; - Alpha\ *:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # Should we change UNAME_MACHINE based on the output of uname instead - # of the specific Alpha model? 
- echo alpha-pc-interix - exit ;; - 21064:Windows_NT:50:3) - echo alpha-dec-winnt3.5 - exit ;; - Amiga*:UNIX_System_V:4.0:*) - echo m68k-unknown-sysv4 - exit ;; - *:[Aa]miga[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-amigaos - exit ;; - *:[Mm]orph[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-morphos - exit ;; - *:OS/390:*:*) - echo i370-ibm-openedition - exit ;; - *:z/VM:*:*) - echo s390-ibm-zvmoe - exit ;; - *:OS400:*:*) - echo powerpc-ibm-os400 - exit ;; - arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix${UNAME_RELEASE} - exit ;; - arm:riscos:*:*|arm:RISCOS:*:*) - echo arm-unknown-riscos - exit ;; - SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) - echo hppa1.1-hitachi-hiuxmpp - exit ;; - Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) - # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. - if test "`(/bin/universe) 2>/dev/null`" = att ; then - echo pyramid-pyramid-sysv3 - else - echo pyramid-pyramid-bsd - fi - exit ;; - NILE*:*:*:dcosx) - echo pyramid-pyramid-svr4 - exit ;; - DRS?6000:unix:4.0:6*) - echo sparc-icl-nx6 - exit ;; - DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) - case `/usr/bin/uname -p` in - sparc) echo sparc-icl-nx7; exit ;; - esac ;; - s390x:SunOS:*:*) - echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) - echo i386-pc-auroraux${UNAME_RELEASE} - exit ;; - i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) - eval $set_cc_for_build - SUN_ARCH="i386" - # If there is a compiler, see if it is configured for 64-bit objects. - # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. - # This test works for both compilers. - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - SUN_ARCH="x86_64" - fi - fi - echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:6*:*) - # According to config.sub, this is the proper way to canonicalize - # SunOS6. Hard to guess exactly what SunOS6 will be like, but - # it's likely to be more like Solaris than SunOS4. - echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - sun4*:SunOS:*:*) - case "`/usr/bin/arch -k`" in - Series*|S4*) - UNAME_RELEASE=`uname -v` - ;; - esac - # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` - exit ;; - sun3*:SunOS:*:*) - echo m68k-sun-sunos${UNAME_RELEASE} - exit ;; - sun*:*:4.2BSD:*) - UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` - test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 - case "`/bin/arch`" in - sun3) - echo m68k-sun-sunos${UNAME_RELEASE} - ;; - sun4) - echo sparc-sun-sunos${UNAME_RELEASE} - ;; - esac - exit ;; - aushp:SunOS:*:*) - echo sparc-auspex-sunos${UNAME_RELEASE} - exit ;; - # The situation for MiNT is a little confusing. The machine name - # can be virtually everything (everything which is not - # "atarist" or "atariste" at least should have a processor - # > m68000). The system name ranges from "MiNT" over "FreeMiNT" - # to the lowercase version "mint" (or "freemint"). 
Finally - # the system name "TOS" denotes a system which is actually not - # MiNT. But MiNT is downward compatible to TOS, so this should - # be no problem. - atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} - exit ;; - milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint${UNAME_RELEASE} - exit ;; - hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint${UNAME_RELEASE} - exit ;; - *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint${UNAME_RELEASE} - exit ;; - m68k:machten:*:*) - echo m68k-apple-machten${UNAME_RELEASE} - exit ;; - powerpc:machten:*:*) - echo powerpc-apple-machten${UNAME_RELEASE} - exit ;; - RISC*:Mach:*:*) - echo mips-dec-mach_bsd4.3 - exit ;; - RISC*:ULTRIX:*:*) - echo mips-dec-ultrix${UNAME_RELEASE} - exit ;; - VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix${UNAME_RELEASE} - exit ;; - 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix${UNAME_RELEASE} - exit ;; - mips:*:*:UMIPS | mips:*:*:RISCos) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c -#ifdef __cplusplus -#include <stdio.h> /* for printf() prototype */ - int main (int argc, char *argv[]) { -#else - int main (argc, argv) int argc; char *argv[]; { -#endif - #if defined (host_mips) && defined (MIPSEB) - #if defined (SYSTYPE_SYSV) - printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_SVR4) - printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); - #endif - #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) - printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); - #endif - #endif - exit (-1); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && - dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && - SYSTEM_NAME=`$dummy $dummyarg` && - { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos${UNAME_RELEASE} - exit ;; - Motorola:PowerMAX_OS:*:*) - echo powerpc-motorola-powermax - exit ;; - Motorola:*:4.3:PL8-*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) - echo powerpc-harris-powermax - exit ;; - Night_Hawk:Power_UNIX:*:*) - echo powerpc-harris-powerunix - exit ;; - m88k:CX/UX:7*:*) - echo m88k-harris-cxux7 - exit ;; - m88k:*:4*:R4*) - echo m88k-motorola-sysv4 - exit ;; - m88k:*:3*:R3*) - echo m88k-motorola-sysv3 - exit ;; - AViiON:dgux:*:*) - # DG/UX returns AViiON for all architectures - UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] - then - if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ - [ ${TARGET_BINARY_INTERFACE}x = x ] - then - echo m88k-dg-dgux${UNAME_RELEASE} - else - echo m88k-dg-dguxbcs${UNAME_RELEASE} - fi - else - echo i586-dg-dgux${UNAME_RELEASE} - fi - exit ;; - M88*:DolphinOS:*:*) # DolphinOS (SVR3) - echo m88k-dolphin-sysv3 - exit ;; - M88*:*:R3*:*) - # Delta 88k system running SVR3 - echo m88k-motorola-sysv3 - exit ;; - XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) - echo m88k-tektronix-sysv3 - exit ;; - Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) - echo m68k-tektronix-bsd - exit ;; - *:IRIX*:*:*) - echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` - exit ;; - ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id - exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' - i*86:AIX:*:*) - echo i386-ibm-aix - exit ;; - ia64:AIX:*:*) - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} - exit ;; - *:AIX:2:3) - if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include <sys/systemcfg.h> - - main() - { - if (!__power_pc()) - exit(1); - puts("powerpc-ibm-aix3.2.5"); - exit(0); - } -EOF - if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` - then - echo "$SYSTEM_NAME" - else - echo rs6000-ibm-aix3.2.5 - fi - elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then - echo rs6000-ibm-aix3.2.4 - else - echo rs6000-ibm-aix3.2 - fi - exit ;; - *:AIX:*:[4567]) - IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` - if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then - IBM_ARCH=rs6000 - else - IBM_ARCH=powerpc - fi - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` - else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} - fi - echo ${IBM_ARCH}-ibm-aix${IBM_REV} - exit ;; - *:AIX:*:*) - echo rs6000-ibm-aix - exit ;; - ibmrt:4.4BSD:*|romp-ibm:BSD:*) - echo romp-ibm-bsd4.4 - exit ;; - ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to - exit ;; # report: romp-ibm BSD 4.3 - *:BOSX:*:*) - echo rs6000-bull-bosx - exit ;; - DPX/2?00:B.O.S.:*:*) - echo m68k-bull-sysv3 - exit ;; - 9000/[34]??:4.3bsd:1.*:*) - echo m68k-hp-bsd - exit ;; - hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) - echo m68k-hp-bsd4.4 - exit ;; - 9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - case "${UNAME_MACHINE}" in - 9000/31? ) HP_ARCH=m68000 ;; - 9000/[34]?? ) HP_ARCH=m68k ;; - 9000/[678][0-9][0-9]) - if [ -x /usr/bin/getconf ]; then - sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` - sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "${sc_cpu_version}" in - 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 - 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 - 532) # CPU_PA_RISC2_0 - case "${sc_kernel_bits}" in - 32) HP_ARCH="hppa2.0n" ;; - 64) HP_ARCH="hppa2.0w" ;; - '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 - esac ;; - esac - fi - if [ "${HP_ARCH}" = "" ]; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - - #define _HPUX_SOURCE - #include <stdlib.h> - #include <unistd.h> - - int main () - { - #if defined(_SC_KERNEL_BITS) - long bits = sysconf(_SC_KERNEL_BITS); - #endif - long cpu = sysconf (_SC_CPU_VERSION); - - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1"); break; - case CPU_PA_RISC2_0: - #if defined(_SC_KERNEL_BITS) - switch (bits) - { - case 64: puts ("hppa2.0w"); break; - case 32: puts ("hppa2.0n"); break; - default: puts ("hppa2.0"); break; - } break; - #else /* !defined(_SC_KERNEL_BITS) */ - puts ("hppa2.0"); break; - #endif - default: puts ("hppa1.0"); break; - } - exit (0); - } -EOF - (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` - test -z "$HP_ARCH" && HP_ARCH=hppa - fi ;; - esac - if [ ${HP_ARCH} = "hppa2.0w" ] - then - eval $set_cc_for_build - - # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating - # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler - # generating 64-bit code. 
GNU and HP use different nomenclature: - # - # $ CC_FOR_BUILD=cc ./config.guess - # => hppa2.0w-hp-hpux11.23 - # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess - # => hppa64-hp-hpux11.23 - - if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | - grep -q __LP64__ - then - HP_ARCH="hppa2.0w" - else - HP_ARCH="hppa64" - fi - fi - echo ${HP_ARCH}-hp-hpux${HPUX_REV} - exit ;; - ia64:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux${HPUX_REV} - exit ;; - 3050*:HI-UX:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #include <unistd.h> - int - main () - { - long cpu = sysconf (_SC_CPU_VERSION); - /* The order matters, because CPU_IS_HP_MC68K erroneously returns - true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct - results, however. */ - if (CPU_IS_PA_RISC (cpu)) - { - switch (cpu) - { - case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; - case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; - case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; - default: puts ("hppa-hitachi-hiuxwe2"); break; - } - } - else if (CPU_IS_HP_MC68K (cpu)) - puts ("m68k-hitachi-hiuxwe2"); - else puts ("unknown-hitachi-hiuxwe2"); - exit (0); - } -EOF - $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - echo unknown-hitachi-hiuxwe2 - exit ;; - 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) - echo hppa1.1-hp-bsd - exit ;; - 9000/8??:4.3bsd:*:*) - echo hppa1.0-hp-bsd - exit ;; - *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) - echo hppa1.0-hp-mpeix - exit ;; - hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) - echo hppa1.1-hp-osf - exit ;; - hp8??:OSF1:*:*) - echo hppa1.0-hp-osf - exit ;; - i*86:OSF1:*:*) - if [ -x /usr/sbin/sysversion ] ; then - echo ${UNAME_MACHINE}-unknown-osf1mk - else - echo ${UNAME_MACHINE}-unknown-osf1 - fi - exit ;; - parisc*:Lites*:*:*) - echo hppa1.1-hp-lites - exit ;; - C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) - echo c1-convex-bsd - exit ;; - C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) - echo c34-convex-bsd - exit ;; - C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) - echo c38-convex-bsd - exit ;; - C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) - echo c4-convex-bsd - exit ;; - CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*[A-Z]90:*:*:*) - echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ - | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ - -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ - -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*TS:*:*:*) - echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - CRAY*SV1:*:*:*) - echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - *:UNICOS/mp:*:*) - echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' - exit ;; - F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) - FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` - echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - 5000:UNIX_System_V:4.*:*) - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 
's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` - echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; - i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) - echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} - exit ;; - sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi${UNAME_RELEASE} - exit ;; - *:BSD/OS:*:*) - echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} - exit ;; - *:FreeBSD:*:*) - UNAME_PROCESSOR=`/usr/bin/uname -p` - case ${UNAME_PROCESSOR} in - amd64) - echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - *) - echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - esac - exit ;; - i*:CYGWIN*:*) - echo ${UNAME_MACHINE}-pc-cygwin - exit ;; - *:MINGW*:*) - echo ${UNAME_MACHINE}-pc-mingw32 - exit ;; - i*:MSYS*:*) - echo ${UNAME_MACHINE}-pc-msys - exit ;; - i*:windows32*:*) - # uname -m includes "-pc" on this system. - echo ${UNAME_MACHINE}-mingw32 - exit ;; - i*:PW*:*) - echo ${UNAME_MACHINE}-pc-pw32 - exit ;; - *:Interix*:*) - case ${UNAME_MACHINE} in - x86) - echo i586-pc-interix${UNAME_RELEASE} - exit ;; - authenticamd | genuineintel | EM64T) - echo x86_64-unknown-interix${UNAME_RELEASE} - exit ;; - IA64) - echo ia64-unknown-interix${UNAME_RELEASE} - exit ;; - esac ;; - [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) - echo i${UNAME_MACHINE}-pc-mks - exit ;; - 8664:Windows_NT:*) - echo x86_64-pc-mks - exit ;; - i*:Windows_NT*:* | Pentium*:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we - # UNAME_MACHINE based on the output of uname instead of i386? - echo i586-pc-interix - exit ;; - i*:UWIN*:*) - echo ${UNAME_MACHINE}-pc-uwin - exit ;; - amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) - echo x86_64-unknown-cygwin - exit ;; - p*:CYGWIN*:*) - echo powerpcle-unknown-cygwin - exit ;; - prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` - exit ;; - *:GNU:*:*) - # the GNU system - echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` - exit ;; - *:GNU/*:*:*) - # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu - exit ;; - i*86:Minix:*:*) - echo ${UNAME_MACHINE}-pc-minix - exit ;; - aarch64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - aarch64_be:Linux:*:*) - UNAME_MACHINE=aarch64_be - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - alpha:Linux:*:*) - case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in - EV5) UNAME_MACHINE=alphaev5 ;; - EV56) UNAME_MACHINE=alphaev56 ;; - PCA56) UNAME_MACHINE=alphapca56 ;; - PCA57) UNAME_MACHINE=alphapca56 ;; - EV6) UNAME_MACHINE=alphaev6 ;; - EV67) UNAME_MACHINE=alphaev67 ;; - EV68*) UNAME_MACHINE=alphaev68 ;; - esac - objdump --private-headers /bin/sh | grep -q ld.so.1 - if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi - echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} - exit ;; - arm*:Linux:*:*) - eval $set_cc_for_build - if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ARM_EABI__ - then - echo ${UNAME_MACHINE}-unknown-linux-gnu - else - if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ - | grep -q __ARM_PCS_VFP - then - echo ${UNAME_MACHINE}-unknown-linux-gnueabi - else - echo ${UNAME_MACHINE}-unknown-linux-gnueabihf - fi - fi - exit ;; - avr32*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - cris:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-gnu - exit ;; - crisv32:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-gnu - exit ;; - frv:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - hexagon:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - i*86:Linux:*:*) - LIBC=gnu - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #ifdef __dietlibc__ - LIBC=dietlibc - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` - echo "${UNAME_MACHINE}-pc-linux-${LIBC}" - exit ;; - ia64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - m32r*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - m68*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - mips:Linux:*:* | mips64:Linux:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #undef CPU - #undef ${UNAME_MACHINE} - #undef ${UNAME_MACHINE}el - #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) - CPU=${UNAME_MACHINE}el - #else - #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) - CPU=${UNAME_MACHINE} - #else - CPU= - #endif - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } - ;; - or32:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - padre:Linux:*:*) - echo sparc-unknown-linux-gnu - exit ;; - parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-gnu - exit ;; - parisc:Linux:*:* | hppa:Linux:*:*) - # Look for CPU level - case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in - PA7*) echo hppa1.1-unknown-linux-gnu ;; - PA8*) echo hppa2.0-unknown-linux-gnu ;; - *) echo hppa-unknown-linux-gnu ;; - esac - exit ;; - ppc64:Linux:*:*) - echo powerpc64-unknown-linux-gnu - exit ;; - ppc:Linux:*:*) - echo powerpc-unknown-linux-gnu - exit ;; - s390:Linux:*:* | s390x:Linux:*:*) - echo ${UNAME_MACHINE}-ibm-linux - exit ;; - sh64*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - sh*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - sparc:Linux:*:* | sparc64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - tile*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - vax:Linux:*:*) - echo ${UNAME_MACHINE}-dec-linux-gnu - exit ;; - x86_64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - xtensa*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu - exit ;; - i*86:DYNIX/ptx:4*:*) - # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. - # earlier versions are messed up and put the nodename in both - # sysname and nodename. - echo i386-sequent-sysv4 - exit ;; - i*86:UNIX_SV:4.2MP:2.*) - # Unixware is an offshoot of SVR4, but it has its own version - # number series starting with 2... - # I am not positive that other SVR4 systems won't match this, - # I just have to hope. -- rms. - # Use sysv4.2uw... 
so that sysv4* matches it. - echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} - exit ;; - i*86:OS/2:*:*) - # If we were able to find `uname', then EMX Unix compatibility - # is probably installed. - echo ${UNAME_MACHINE}-pc-os2-emx - exit ;; - i*86:XTS-300:*:STOP) - echo ${UNAME_MACHINE}-unknown-stop - exit ;; - i*86:atheos:*:*) - echo ${UNAME_MACHINE}-unknown-atheos - exit ;; - i*86:syllable:*:*) - echo ${UNAME_MACHINE}-pc-syllable - exit ;; - i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) - echo i386-unknown-lynxos${UNAME_RELEASE} - exit ;; - i*86:*DOS:*:*) - echo ${UNAME_MACHINE}-pc-msdosdjgpp - exit ;; - i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) - UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` - if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} - else - echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} - fi - exit ;; - i*86:*:5:[678]*) - # UnixWare 7.x, OpenUNIX and OpenServer 6. - case `/bin/uname -X | grep "^Machine"` in - *486*) UNAME_MACHINE=i486 ;; - *Pentium) UNAME_MACHINE=i586 ;; - *Pent*|*Celeron) UNAME_MACHINE=i686 ;; - esac - echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} - exit ;; - i*86:*:3.2:*) - if test -f /usr/options/cb.name; then - UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name` - echo ${UNAME_MACHINE}-pc-isc$UNAME_REL - elif /bin/uname -X 2>/dev/null >/dev/null ; then - UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` - (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 - (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ - && UNAME_MACHINE=i586 - (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ - && UNAME_MACHINE=i686 - (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ - && UNAME_MACHINE=i686 - echo ${UNAME_MACHINE}-pc-sco$UNAME_REL - else - echo ${UNAME_MACHINE}-pc-sysv32 - fi - exit ;; - pc:*:*:*) - # Left here for compatibility: - # uname -m prints for DJGPP always 'pc', but it prints nothing about - # the processor, so we play safe by assuming i586. - # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that - # this is a cross-build. - echo i586-pc-msdosdjgpp - exit ;; - Intel:Mach:3*:*) - echo i386-pc-mach3 - exit ;; - paragon:*:*:*) - echo i860-intel-osf1 - exit ;; - i860:*:4.*:*) # i860-SVR4 - if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 - else # Add other i860-SVR4 vendors below as they are discovered. 
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 - fi - exit ;; - mini*:CTIX:SYS*5:*) - # "miniframe" - echo m68010-convergent-sysv - exit ;; - mc68k:UNIX:SYSTEM5:3.51m) - echo m68k-convergent-sysv - exit ;; - M680?0:D-NIX:5.3:*) - echo m68k-diab-dnix - exit ;; - M68*:*:R3V[5678]*:*) - test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; - 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) - OS_REL='' - test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; - 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4; exit; } ;; - NCR*:*:4.2:* | MPRAS*:*:4.2:*) - OS_REL='.3' - test -r /etc/.relid \ - && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` - /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } - /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; - m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos${UNAME_RELEASE} - exit ;; - mc68030:UNIX_System_V:4.*:*) - echo m68k-atari-sysv4 - exit ;; - TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos${UNAME_RELEASE} - exit ;; - rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos${UNAME_RELEASE} - exit ;; - PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) - echo powerpc-unknown-lynxos${UNAME_RELEASE} - exit ;; - SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv${UNAME_RELEASE} - exit ;; - RM*:ReliantUNIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - RM*:SINIX-*:*:*) - echo mips-sni-sysv4 - exit ;; - *:SINIX-*:*:*) - if uname -p 2>/dev/null >/dev/null ; then - UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo ${UNAME_MACHINE}-sni-sysv4 - else - echo ns32k-sni-sysv - fi - exit ;; - PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort - # says <Richard.M.Bartel@ccMail.Census.GOV> - echo i586-unisys-sysv4 - exit ;; - *:UNIX_System_V:4*:FTX*) - # From Gerald Hewes <hewes@openmarket.com>. - # How about differentiating between stratus architectures? -djm - echo hppa1.1-stratus-sysv4 - exit ;; - *:*:*:FTX*) - # From seanf@swdc.stratus.com. - echo i860-stratus-sysv4 - exit ;; - i*86:VOS:*:*) - # From Paul.Green@stratus.com. - echo ${UNAME_MACHINE}-stratus-vos - exit ;; - *:VOS:*:*) - # From Paul.Green@stratus.com. - echo hppa1.1-stratus-vos - exit ;; - mc68*:A/UX:*:*) - echo m68k-apple-aux${UNAME_RELEASE} - exit ;; - news*:NEWS-OS:6*:*) - echo mips-sony-newsos6 - exit ;; - R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) - if [ -d /usr/nec ]; then - echo mips-nec-sysv${UNAME_RELEASE} - else - echo mips-unknown-sysv${UNAME_RELEASE} - fi - exit ;; - BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. - echo powerpc-be-beos - exit ;; - BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. - echo powerpc-apple-beos - exit ;; - BePC:BeOS:*:*) # BeOS running on Intel PC compatible. - echo i586-pc-beos - exit ;; - BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
- echo i586-pc-haiku - exit ;; - SX-4:SUPER-UX:*:*) - echo sx4-nec-superux${UNAME_RELEASE} - exit ;; - SX-5:SUPER-UX:*:*) - echo sx5-nec-superux${UNAME_RELEASE} - exit ;; - SX-6:SUPER-UX:*:*) - echo sx6-nec-superux${UNAME_RELEASE} - exit ;; - SX-7:SUPER-UX:*:*) - echo sx7-nec-superux${UNAME_RELEASE} - exit ;; - SX-8:SUPER-UX:*:*) - echo sx8-nec-superux${UNAME_RELEASE} - exit ;; - SX-8R:SUPER-UX:*:*) - echo sx8r-nec-superux${UNAME_RELEASE} - exit ;; - Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Rhapsody:*:*) - echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} - exit ;; - *:Darwin:*:*) - UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown - case $UNAME_PROCESSOR in - i386) - eval $set_cc_for_build - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - UNAME_PROCESSOR="x86_64" - fi - fi ;; - unknown) UNAME_PROCESSOR=powerpc ;; - esac - echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} - exit ;; - *:procnto*:*:* | *:QNX:[0123456789]*:*) - UNAME_PROCESSOR=`uname -p` - if test "$UNAME_PROCESSOR" = "x86"; then - UNAME_PROCESSOR=i386 - UNAME_MACHINE=pc - fi - echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} - exit ;; - *:QNX:*:4*) - echo i386-pc-qnx - exit ;; - NEO-?:NONSTOP_KERNEL:*:*) - echo neo-tandem-nsk${UNAME_RELEASE} - exit ;; - NSE-?:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk${UNAME_RELEASE} - exit ;; - NSR-?:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk${UNAME_RELEASE} - exit ;; - *:NonStop-UX:*:*) - echo mips-compaq-nonstopux - exit ;; - BS2000:POSIX*:*:*) - echo bs2000-siemens-sysv - exit ;; - DS/*:UNIX_System_V:*:*) - echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} - exit ;; - *:Plan9:*:*) - # "uname -m" is not consistent, so use $cputype instead. 386 - # is converted to i386 for consistency with other x86 - # operating systems. 
- if test "$cputype" = "386"; then - UNAME_MACHINE=i386 - else - UNAME_MACHINE="$cputype" - fi - echo ${UNAME_MACHINE}-unknown-plan9 - exit ;; - *:TOPS-10:*:*) - echo pdp10-unknown-tops10 - exit ;; - *:TENEX:*:*) - echo pdp10-unknown-tenex - exit ;; - KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) - echo pdp10-dec-tops20 - exit ;; - XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) - echo pdp10-xkl-tops20 - exit ;; - *:TOPS-20:*:*) - echo pdp10-unknown-tops20 - exit ;; - *:ITS:*:*) - echo pdp10-unknown-its - exit ;; - SEI:*:*:SEIUX) - echo mips-sei-seiux${UNAME_RELEASE} - exit ;; - *:DragonFly:*:*) - echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` - exit ;; - *:*VMS:*:*) - UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "${UNAME_MACHINE}" in - A*) echo alpha-dec-vms ; exit ;; - I*) echo ia64-dec-vms ; exit ;; - V*) echo vax-dec-vms ; exit ;; - esac ;; - *:XENIX:*:SysV) - echo i386-pc-xenix - exit ;; - i*86:skyos:*:*) - echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' - exit ;; - i*86:rdos:*:*) - echo ${UNAME_MACHINE}-pc-rdos - exit ;; - i*86:AROS:*:*) - echo ${UNAME_MACHINE}-pc-aros - exit ;; - x86_64:VMkernel:*:*) - echo ${UNAME_MACHINE}-unknown-esx - exit ;; -esac - -#echo '(No uname command or uname output not recognized.)' 1>&2 -#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 - -eval $set_cc_for_build -cat >$dummy.c <<EOF -#ifdef _SEQUENT_ -# include <sys/types.h> -# include <sys/utsname.h> -#endif -main () -{ -#if defined (sony) -#if defined (MIPSEB) - /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, - I don't know.... */ - printf ("mips-sony-bsd\n"); exit (0); -#else -#include <sys/param.h> - printf ("m68k-sony-newsos%s\n", -#ifdef NEWSOS4 - "4" -#else - "" -#endif - ); exit (0); -#endif -#endif - -#if defined (__arm) && defined (__acorn) && defined (__unix) - printf ("arm-acorn-riscix\n"); exit (0); -#endif - -#if defined (hp300) && !defined (hpux) - printf ("m68k-hp-bsd\n"); exit (0); -#endif - -#if defined (NeXT) -#if !defined (__ARCHITECTURE__) -#define __ARCHITECTURE__ "m68k" -#endif - int version; - version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; - if (version < 4) - printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); - else - printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); - exit (0); -#endif - -#if defined (MULTIMAX) || defined (n16) -#if defined (UMAXV) - printf ("ns32k-encore-sysv\n"); exit (0); -#else -#if defined (CMU) - printf ("ns32k-encore-mach\n"); exit (0); -#else - printf ("ns32k-encore-bsd\n"); exit (0); -#endif -#endif -#endif - -#if defined (__386BSD__) - printf ("i386-pc-bsd\n"); exit (0); -#endif - -#if defined (sequent) -#if defined (i386) - printf ("i386-sequent-dynix\n"); exit (0); -#endif -#if defined (ns32000) - printf ("ns32k-sequent-dynix\n"); exit (0); -#endif -#endif - -#if defined (_SEQUENT_) - struct utsname un; - - uname(&un); - - if (strncmp(un.version, "V2", 2) == 0) { - printf ("i386-sequent-ptx2\n"); exit (0); - } - if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? 
*/ - printf ("i386-sequent-ptx1\n"); exit (0); - } - printf ("i386-sequent-ptx\n"); exit (0); - -#endif - -#if defined (vax) -# if !defined (ultrix) -# include <sys/param.h> -# if defined (BSD) -# if BSD == 43 - printf ("vax-dec-bsd4.3\n"); exit (0); -# else -# if BSD == 199006 - printf ("vax-dec-bsd4.3reno\n"); exit (0); -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# endif -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# else - printf ("vax-dec-ultrix\n"); exit (0); -# endif -#endif - -#if defined (alliant) && defined (i860) - printf ("i860-alliant-bsd\n"); exit (0); -#endif - - exit (1); -} -EOF - -$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - -# Apollos put the system type in the environment. - -test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } - -# Convex versions that predate uname can use getsysinfo(1) - -if [ -x /usr/convex/getsysinfo ] -then - case `getsysinfo -f cpu_type` in - c1*) - echo c1-convex-bsd - exit ;; - c2*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - c34*) - echo c34-convex-bsd - exit ;; - c38*) - echo c38-convex-bsd - exit ;; - c4*) - echo c4-convex-bsd - exit ;; - esac -fi - -cat >&2 <<EOF -$0: unable to guess system type - -This script, last modified $timestamp, has failed to recognize -the operating system you are using. It is advised that you -download the most up to date version of the config scripts from - - http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD -and - http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD - -If the version you run ($0) is already up to date, please -send the following data and any information you think might be -pertinent to <config-patches@gnu.org> in order to provide the needed -information to handle your system. - -config.guess timestamp = $timestamp - -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null` - -hostinfo = `(hostinfo) 2>/dev/null` -/bin/universe = `(/bin/universe) 2>/dev/null` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` -/bin/arch = `(/bin/arch) 2>/dev/null` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` - -UNAME_MACHINE = ${UNAME_MACHINE} -UNAME_RELEASE = ${UNAME_RELEASE} -UNAME_SYSTEM = ${UNAME_SYSTEM} -UNAME_VERSION = ${UNAME_VERSION} -EOF - -exit 1 - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff --git a/extra/jemalloc/config.stamp.in b/extra/jemalloc/config.stamp.in deleted file mode 100644 index e69de29bb2d..00000000000 --- a/extra/jemalloc/config.stamp.in +++ /dev/null diff --git a/extra/jemalloc/config.sub b/extra/jemalloc/config.sub deleted file mode 100755 index c894da45500..00000000000 --- a/extra/jemalloc/config.sub +++ /dev/null @@ -1,1773 +0,0 @@ -#! /bin/sh -# Configuration validation subroutine script. -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012 Free Software Foundation, Inc. 
- -timestamp='2012-02-10' - -# This file is (in principle) common to ALL GNU software. -# The presence of a machine in this file suggests that SOME GNU software -# can handle that machine. It does not imply ALL GNU software can. -# -# This file is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see <http://www.gnu.org/licenses/>. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - - -# Please send patches to <config-patches@gnu.org>. Submit a context -# diff and a properly formatted GNU ChangeLog entry. -# -# Configuration subroutine to validate and canonicalize a configuration type. -# Supply the specified configuration type as an argument. -# If it is invalid, we print an error message on stderr and exit with code 1. -# Otherwise, we print the canonical config type on stdout and succeed. - -# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD - -# This file is supposed to be the same for all GNU packages -# and recognize all the CPU types, system types and aliases -# that are meaningful with *any* GNU software. -# Each package is responsible for reporting which valid configurations -# it does not support. The user should be able to distinguish -# a failure to support a valid configuration from a meaningless -# configuration. - -# The goal of this file is to map all the various variations of a given -# machine specification into a single specification in the form: -# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM -# or in some cases, the newer four-part form: -# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM -# It is wrong to echo any other type of specification. - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS - $0 [OPTION] ALIAS - -Canonicalize a configuration name. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to <config-patches@gnu.org>." - -version="\ -GNU config.sub ($timestamp) - -Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, -2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 -Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. 
- break ;; - -* ) - echo "$me: invalid option $1$help" - exit 1 ;; - - *local*) - # First pass through any local machine types. - echo $1 - exit ;; - - * ) - break ;; - esac -done - -case $# in - 0) echo "$me: missing argument$help" >&2 - exit 1;; - 1) ;; - *) echo "$me: too many arguments$help" >&2 - exit 1;; -esac - -# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). -# Here we must recognize all the valid KERNEL-OS combinations. -maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` -case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ - linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ - knetbsd*-gnu* | netbsd*-gnu* | \ - kopensolaris*-gnu* | \ - storm-chaos* | os2-emx* | rtmk-nova*) - os=-$maybe_os - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` - ;; - android-linux) - os=-linux-android - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown - ;; - *) - basic_machine=`echo $1 | sed 's/-[^-]*$//'` - if [ $basic_machine != $1 ] - then os=`echo $1 | sed 's/.*-/-/'` - else os=; fi - ;; -esac - -### Let's recognize common machines as not being operating systems so -### that things like config.sub decstation-3100 work. We also -### recognize some manufacturers as not being operating systems, so we -### can provide default operating systems below. -case $os in - -sun*os*) - # Prevent following clause from handling this invalid input. - ;; - -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ - -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ - -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ - -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ - -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ - -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ - -apple | -axis | -knuth | -cray | -microblaze) - os= - basic_machine=$1 - ;; - -bluegene*) - os=-cnk - ;; - -sim | -cisco | -oki | -wec | -winbond) - os= - basic_machine=$1 - ;; - -scout) - ;; - -wrs) - os=-vxworks - basic_machine=$1 - ;; - -chorusos*) - os=-chorusos - basic_machine=$1 - ;; - -chorusrdb) - os=-chorusrdb - basic_machine=$1 - ;; - -hiux*) - os=-hiuxwe2 - ;; - -sco6) - os=-sco5v6 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5) - os=-sco3.2v5 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco4) - os=-sco3.2v4 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2.[4-9]*) - os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2v[4-9]*) - # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5v6*) - # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco*) - os=-sco3.2v2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -udk*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -isc) - os=-isc2.2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -clix*) - basic_machine=clipper-intergraph - ;; - -isc*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -lynx*) - os=-lynxos - ;; - -ptx*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` - ;; - -windowsnt*) - os=`echo $os | sed -e 's/windowsnt/winnt/'` - ;; - -psos*) - os=-psos - ;; - -mint | -mint[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; -esac - -# Decode aliases for certain CPU-COMPANY combinations. 
-case $basic_machine in - # Recognize the basic CPU types without company name. - # Some are omitted here because they have special meanings below. - 1750a | 580 \ - | a29k \ - | aarch64 | aarch64_be \ - | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ - | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ - | am33_2.0 \ - | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ - | be32 | be64 \ - | bfin \ - | c4x | clipper \ - | d10v | d30v | dlx | dsp16xx \ - | epiphany \ - | fido | fr30 | frv \ - | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ - | hexagon \ - | i370 | i860 | i960 | ia64 \ - | ip2k | iq2000 \ - | le32 | le64 \ - | lm32 \ - | m32c | m32r | m32rle | m68000 | m68k | m88k \ - | maxq | mb | microblaze | mcore | mep | metag \ - | mips | mipsbe | mipseb | mipsel | mipsle \ - | mips16 \ - | mips64 | mips64el \ - | mips64octeon | mips64octeonel \ - | mips64orion | mips64orionel \ - | mips64r5900 | mips64r5900el \ - | mips64vr | mips64vrel \ - | mips64vr4100 | mips64vr4100el \ - | mips64vr4300 | mips64vr4300el \ - | mips64vr5000 | mips64vr5000el \ - | mips64vr5900 | mips64vr5900el \ - | mipsisa32 | mipsisa32el \ - | mipsisa32r2 | mipsisa32r2el \ - | mipsisa64 | mipsisa64el \ - | mipsisa64r2 | mipsisa64r2el \ - | mipsisa64sb1 | mipsisa64sb1el \ - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipstx39 | mipstx39el \ - | mn10200 | mn10300 \ - | moxie \ - | mt \ - | msp430 \ - | nds32 | nds32le | nds32be \ - | nios | nios2 \ - | ns16k | ns32k \ - | open8 \ - | or32 \ - | pdp10 | pdp11 | pj | pjl \ - | powerpc | powerpc64 | powerpc64le | powerpcle \ - | pyramid \ - | rl78 | rx \ - | score \ - | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ - | sh64 | sh64le \ - | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ - | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ - | spu \ - | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ - | ubicom32 \ - | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ - | we32k \ - | x86 | xc16x | xstormy16 | xtensa \ - | z8k | z80) - basic_machine=$basic_machine-unknown - ;; - c54x) - basic_machine=tic54x-unknown - ;; - c55x) - basic_machine=tic55x-unknown - ;; - c6x) - basic_machine=tic6x-unknown - ;; - m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip) - basic_machine=$basic_machine-unknown - os=-none - ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) - ;; - ms1) - basic_machine=mt-unknown - ;; - - strongarm | thumb | xscale) - basic_machine=arm-unknown - ;; - xgate) - basic_machine=$basic_machine-unknown - os=-none - ;; - xscaleeb) - basic_machine=armeb-unknown - ;; - - xscaleel) - basic_machine=armel-unknown - ;; - - # We use `pc' rather than `unknown' - # because (1) that's what they normally are, and - # (2) the word "unknown" tends to confuse beginning users. - i*86 | x86_64) - basic_machine=$basic_machine-pc - ;; - # Object if more than one company name word. - *-*-*) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; - # Recognize the basic CPU types with company name. 
- 580-* \ - | a29k-* \ - | aarch64-* | aarch64_be-* \ - | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ - | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ - | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ - | avr-* | avr32-* \ - | be32-* | be64-* \ - | bfin-* | bs2000-* \ - | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | clipper-* | craynv-* | cydra-* \ - | d10v-* | d30v-* | dlx-* \ - | elxsi-* \ - | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ - | h8300-* | h8500-* \ - | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ - | hexagon-* \ - | i*86-* | i860-* | i960-* | ia64-* \ - | ip2k-* | iq2000-* \ - | le32-* | le64-* \ - | lm32-* \ - | m32c-* | m32r-* | m32rle-* \ - | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ - | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \ - | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ - | mips16-* \ - | mips64-* | mips64el-* \ - | mips64octeon-* | mips64octeonel-* \ - | mips64orion-* | mips64orionel-* \ - | mips64r5900-* | mips64r5900el-* \ - | mips64vr-* | mips64vrel-* \ - | mips64vr4100-* | mips64vr4100el-* \ - | mips64vr4300-* | mips64vr4300el-* \ - | mips64vr5000-* | mips64vr5000el-* \ - | mips64vr5900-* | mips64vr5900el-* \ - | mipsisa32-* | mipsisa32el-* \ - | mipsisa32r2-* | mipsisa32r2el-* \ - | mipsisa64-* | mipsisa64el-* \ - | mipsisa64r2-* | mipsisa64r2el-* \ - | mipsisa64sb1-* | mipsisa64sb1el-* \ - | mipsisa64sr71k-* | mipsisa64sr71kel-* \ - | mipstx39-* | mipstx39el-* \ - | mmix-* \ - | mt-* \ - | msp430-* \ - | nds32-* | nds32le-* | nds32be-* \ - | nios-* | nios2-* \ - | none-* | np1-* | ns16k-* | ns32k-* \ - | open8-* \ - | orion-* \ - | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ - | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ - | pyramid-* \ - | rl78-* | romp-* | rs6000-* | rx-* \ - | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ - | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ - | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ - | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ - | tahoe-* \ - | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ - | tile*-* \ - | tron-* \ - | ubicom32-* \ - | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ - | vax-* \ - | we32k-* \ - | x86-* | x86_64-* | xc16x-* | xps100-* \ - | xstormy16-* | xtensa*-* \ - | ymp-* \ - | z8k-* | z80-*) - ;; - # Recognize the basic CPU types without company name, with glob match. - xtensa*) - basic_machine=$basic_machine-unknown - ;; - # Recognize the various machine names and aliases which stand - # for a CPU type and a company and sometimes even an OS. 
- 386bsd) - basic_machine=i386-unknown - os=-bsd - ;; - 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) - basic_machine=m68000-att - ;; - 3b*) - basic_machine=we32k-att - ;; - a29khif) - basic_machine=a29k-amd - os=-udi - ;; - abacus) - basic_machine=abacus-unknown - ;; - adobe68k) - basic_machine=m68010-adobe - os=-scout - ;; - alliant | fx80) - basic_machine=fx80-alliant - ;; - altos | altos3068) - basic_machine=m68k-altos - ;; - am29k) - basic_machine=a29k-none - os=-bsd - ;; - amd64) - basic_machine=x86_64-pc - ;; - amd64-*) - basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - amdahl) - basic_machine=580-amdahl - os=-sysv - ;; - amiga | amiga-*) - basic_machine=m68k-unknown - ;; - amigaos | amigados) - basic_machine=m68k-unknown - os=-amigaos - ;; - amigaunix | amix) - basic_machine=m68k-unknown - os=-sysv4 - ;; - apollo68) - basic_machine=m68k-apollo - os=-sysv - ;; - apollo68bsd) - basic_machine=m68k-apollo - os=-bsd - ;; - aros) - basic_machine=i386-pc - os=-aros - ;; - aux) - basic_machine=m68k-apple - os=-aux - ;; - balance) - basic_machine=ns32k-sequent - os=-dynix - ;; - blackfin) - basic_machine=bfin-unknown - os=-linux - ;; - blackfin-*) - basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - bluegene*) - basic_machine=powerpc-ibm - os=-cnk - ;; - c54x-*) - basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c55x-*) - basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c6x-*) - basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c90) - basic_machine=c90-cray - os=-unicos - ;; - cegcc) - basic_machine=arm-unknown - os=-cegcc - ;; - convex-c1) - basic_machine=c1-convex - os=-bsd - ;; - convex-c2) - basic_machine=c2-convex - os=-bsd - ;; - convex-c32) - basic_machine=c32-convex - os=-bsd - ;; - convex-c34) - basic_machine=c34-convex - os=-bsd - ;; - convex-c38) - basic_machine=c38-convex - os=-bsd - ;; - cray | j90) - basic_machine=j90-cray - os=-unicos - ;; - craynv) - basic_machine=craynv-cray - os=-unicosmp - ;; - cr16 | cr16-*) - basic_machine=cr16-unknown - os=-elf - ;; - crds | unos) - basic_machine=m68k-crds - ;; - crisv32 | crisv32-* | etraxfs*) - basic_machine=crisv32-axis - ;; - cris | cris-* | etrax*) - basic_machine=cris-axis - ;; - crx) - basic_machine=crx-unknown - os=-elf - ;; - da30 | da30-*) - basic_machine=m68k-da30 - ;; - decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) - basic_machine=mips-dec - ;; - decsystem10* | dec10*) - basic_machine=pdp10-dec - os=-tops10 - ;; - decsystem20* | dec20*) - basic_machine=pdp10-dec - os=-tops20 - ;; - delta | 3300 | motorola-3300 | motorola-delta \ - | 3300-motorola | delta-motorola) - basic_machine=m68k-motorola - ;; - delta88) - basic_machine=m88k-motorola - os=-sysv3 - ;; - dicos) - basic_machine=i686-pc - os=-dicos - ;; - djgpp) - basic_machine=i586-pc - os=-msdosdjgpp - ;; - dpx20 | dpx20-*) - basic_machine=rs6000-bull - os=-bosx - ;; - dpx2* | dpx2*-bull) - basic_machine=m68k-bull - os=-sysv3 - ;; - ebmon29k) - basic_machine=a29k-amd - os=-ebmon - ;; - elxsi) - basic_machine=elxsi-elxsi - os=-bsd - ;; - encore | umax | mmax) - basic_machine=ns32k-encore - ;; - es1800 | OSE68k | ose68k | ose | OSE) - basic_machine=m68k-ericsson - os=-ose - ;; - fx2800) - basic_machine=i860-alliant - ;; - genix) - basic_machine=ns32k-ns - ;; - gmicro) - basic_machine=tron-gmicro - os=-sysv - ;; - go32) - basic_machine=i386-pc - os=-go32 - ;; - h3050r* | hiux*) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 
- ;; - h8300hms) - basic_machine=h8300-hitachi - os=-hms - ;; - h8300xray) - basic_machine=h8300-hitachi - os=-xray - ;; - h8500hms) - basic_machine=h8500-hitachi - os=-hms - ;; - harris) - basic_machine=m88k-harris - os=-sysv3 - ;; - hp300-*) - basic_machine=m68k-hp - ;; - hp300bsd) - basic_machine=m68k-hp - os=-bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=-hpux - ;; - hp3k9[0-9][0-9] | hp9[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k2[0-9][0-9] | hp9k31[0-9]) - basic_machine=m68000-hp - ;; - hp9k3[2-9][0-9]) - basic_machine=m68k-hp - ;; - hp9k6[0-9][0-9] | hp6[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k7[0-79][0-9] | hp7[0-79][0-9]) - basic_machine=hppa1.1-hp - ;; - hp9k78[0-9] | hp78[0-9]) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][13679] | hp8[0-9][13679]) - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][0-9] | hp8[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hppa-next) - os=-nextstep3 - ;; - hppaosf) - basic_machine=hppa1.1-hp - os=-osf - ;; - hppro) - basic_machine=hppa1.1-hp - os=-proelf - ;; - i370-ibm* | ibm*) - basic_machine=i370-ibm - ;; - i*86v32) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv32 - ;; - i*86v4*) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv4 - ;; - i*86v) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv - ;; - i*86sol2) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-solaris2 - ;; - i386mach) - basic_machine=i386-mach - os=-mach - ;; - i386-vsta | vsta) - basic_machine=i386-unknown - os=-vsta - ;; - iris | iris4d) - basic_machine=mips-sgi - case $os in - -irix*) - ;; - *) - os=-irix4 - ;; - esac - ;; - isi68 | isi) - basic_machine=m68k-isi - os=-sysv - ;; - m68knommu) - basic_machine=m68k-unknown - os=-linux - ;; - m68knommu-*) - basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - m88k-omron*) - basic_machine=m88k-omron - ;; - magnum | m3230) - basic_machine=mips-mips - os=-sysv - ;; - merlin) - basic_machine=ns32k-utek - os=-sysv - ;; - microblaze) - basic_machine=microblaze-xilinx - ;; - mingw32) - basic_machine=i386-pc - os=-mingw32 - ;; - mingw32ce) - basic_machine=arm-unknown - os=-mingw32ce - ;; - miniframe) - basic_machine=m68000-convergent - ;; - *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; - mips3*-*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` - ;; - mips3*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown - ;; - monitor) - basic_machine=m68k-rom68k - os=-coff - ;; - morphos) - basic_machine=powerpc-unknown - os=-morphos - ;; - msdos) - basic_machine=i386-pc - os=-msdos - ;; - ms1-*) - basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` - ;; - msys) - basic_machine=i386-pc - os=-msys - ;; - mvs) - basic_machine=i370-ibm - os=-mvs - ;; - nacl) - basic_machine=le32-unknown - os=-nacl - ;; - ncr3000) - basic_machine=i486-ncr - os=-sysv4 - ;; - netbsd386) - basic_machine=i386-unknown - os=-netbsd - ;; - netwinder) - basic_machine=armv4l-rebel - os=-linux - ;; - news | news700 | news800 | news900) - basic_machine=m68k-sony - os=-newsos - ;; - news1000) - basic_machine=m68030-sony - os=-newsos - ;; - news-3600 | risc-news) - basic_machine=mips-sony - os=-newsos - ;; - necv70) - basic_machine=v70-nec - os=-sysv - ;; - next | m*-next ) - basic_machine=m68k-next - case $os in - -nextstep* ) - ;; - 
-ns2*) - os=-nextstep2 - ;; - *) - os=-nextstep3 - ;; - esac - ;; - nh3000) - basic_machine=m68k-harris - os=-cxux - ;; - nh[45]000) - basic_machine=m88k-harris - os=-cxux - ;; - nindy960) - basic_machine=i960-intel - os=-nindy - ;; - mon960) - basic_machine=i960-intel - os=-mon960 - ;; - nonstopux) - basic_machine=mips-compaq - os=-nonstopux - ;; - np1) - basic_machine=np1-gould - ;; - neo-tandem) - basic_machine=neo-tandem - ;; - nse-tandem) - basic_machine=nse-tandem - ;; - nsr-tandem) - basic_machine=nsr-tandem - ;; - op50n-* | op60c-*) - basic_machine=hppa1.1-oki - os=-proelf - ;; - openrisc | openrisc-*) - basic_machine=or32-unknown - ;; - os400) - basic_machine=powerpc-ibm - os=-os400 - ;; - OSE68000 | ose68000) - basic_machine=m68000-ericsson - os=-ose - ;; - os68k) - basic_machine=m68k-none - os=-os68k - ;; - pa-hitachi) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - paragon) - basic_machine=i860-intel - os=-osf - ;; - parisc) - basic_machine=hppa-unknown - os=-linux - ;; - parisc-*) - basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - pbd) - basic_machine=sparc-tti - ;; - pbb) - basic_machine=m68k-tti - ;; - pc532 | pc532-*) - basic_machine=ns32k-pc532 - ;; - pc98) - basic_machine=i386-pc - ;; - pc98-*) - basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium | p5 | k5 | k6 | nexgen | viac3) - basic_machine=i586-pc - ;; - pentiumpro | p6 | 6x86 | athlon | athlon_*) - basic_machine=i686-pc - ;; - pentiumii | pentium2 | pentiumiii | pentium3) - basic_machine=i686-pc - ;; - pentium4) - basic_machine=i786-pc - ;; - pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium4-*) - basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pn) - basic_machine=pn-gould - ;; - power) basic_machine=power-ibm - ;; - ppc | ppcbe) basic_machine=powerpc-unknown - ;; - ppc-* | ppcbe-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppcle | powerpclittle | ppc-le | powerpc-little) - basic_machine=powerpcle-unknown - ;; - ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64) basic_machine=powerpc64-unknown - ;; - ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64le | powerpc64little | ppc64-le | powerpc64-little) - basic_machine=powerpc64le-unknown - ;; - ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ps2) - basic_machine=i386-ibm - ;; - pw32) - basic_machine=i586-unknown - os=-pw32 - ;; - rdos) - basic_machine=i386-pc - os=-rdos - ;; - rom68k) - basic_machine=m68k-rom68k - os=-coff - ;; - rm[46]00) - basic_machine=mips-siemens - ;; - rtpc | rtpc-*) - basic_machine=romp-ibm - ;; - s390 | s390-*) - basic_machine=s390-ibm - ;; - s390x | s390x-*) - basic_machine=s390x-ibm - ;; - sa29200) - basic_machine=a29k-amd - os=-udi - ;; - sb1) - basic_machine=mipsisa64sb1-unknown - ;; - sb1el) - basic_machine=mipsisa64sb1el-unknown - ;; - sde) - basic_machine=mipsisa32-sde - os=-elf - ;; - sei) - basic_machine=mips-sei - os=-seiux - ;; - sequent) - basic_machine=i386-sequent - ;; - sh) - basic_machine=sh-hitachi - os=-hms - ;; - sh5el) - basic_machine=sh5le-unknown - 
;; - sh64) - basic_machine=sh64-unknown - ;; - sparclite-wrs | simso-wrs) - basic_machine=sparclite-wrs - os=-vxworks - ;; - sps7) - basic_machine=m68k-bull - os=-sysv2 - ;; - spur) - basic_machine=spur-unknown - ;; - st2000) - basic_machine=m68k-tandem - ;; - stratus) - basic_machine=i860-stratus - os=-sysv4 - ;; - strongarm-* | thumb-*) - basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - sun2) - basic_machine=m68000-sun - ;; - sun2os3) - basic_machine=m68000-sun - os=-sunos3 - ;; - sun2os4) - basic_machine=m68000-sun - os=-sunos4 - ;; - sun3os3) - basic_machine=m68k-sun - os=-sunos3 - ;; - sun3os4) - basic_machine=m68k-sun - os=-sunos4 - ;; - sun4os3) - basic_machine=sparc-sun - os=-sunos3 - ;; - sun4os4) - basic_machine=sparc-sun - os=-sunos4 - ;; - sun4sol2) - basic_machine=sparc-sun - os=-solaris2 - ;; - sun3 | sun3-*) - basic_machine=m68k-sun - ;; - sun4) - basic_machine=sparc-sun - ;; - sun386 | sun386i | roadrunner) - basic_machine=i386-sun - ;; - sv1) - basic_machine=sv1-cray - os=-unicos - ;; - symmetry) - basic_machine=i386-sequent - os=-dynix - ;; - t3e) - basic_machine=alphaev5-cray - os=-unicos - ;; - t90) - basic_machine=t90-cray - os=-unicos - ;; - tile*) - basic_machine=$basic_machine-unknown - os=-linux-gnu - ;; - tx39) - basic_machine=mipstx39-unknown - ;; - tx39el) - basic_machine=mipstx39el-unknown - ;; - toad1) - basic_machine=pdp10-xkl - os=-tops20 - ;; - tower | tower-32) - basic_machine=m68k-ncr - ;; - tpf) - basic_machine=s390x-ibm - os=-tpf - ;; - udi29k) - basic_machine=a29k-amd - os=-udi - ;; - ultra3) - basic_machine=a29k-nyu - os=-sym1 - ;; - v810 | necv810) - basic_machine=v810-nec - os=-none - ;; - vaxv) - basic_machine=vax-dec - os=-sysv - ;; - vms) - basic_machine=vax-dec - os=-vms - ;; - vpp*|vx|vx-*) - basic_machine=f301-fujitsu - ;; - vxworks960) - basic_machine=i960-wrs - os=-vxworks - ;; - vxworks68) - basic_machine=m68k-wrs - os=-vxworks - ;; - vxworks29k) - basic_machine=a29k-wrs - os=-vxworks - ;; - w65*) - basic_machine=w65-wdc - os=-none - ;; - w89k-*) - basic_machine=hppa1.1-winbond - os=-proelf - ;; - xbox) - basic_machine=i686-pc - os=-mingw32 - ;; - xps | xps100) - basic_machine=xps100-honeywell - ;; - xscale-* | xscalee[bl]-*) - basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` - ;; - ymp) - basic_machine=ymp-cray - os=-unicos - ;; - z8k-*-coff) - basic_machine=z8k-unknown - os=-sim - ;; - z80-*-coff) - basic_machine=z80-unknown - os=-sim - ;; - none) - basic_machine=none-none - os=-none - ;; - -# Here we handle the default manufacturer of certain CPU types. It is in -# some cases the only manufacturer, in others, it is the most popular. 
- w89k) - basic_machine=hppa1.1-winbond - ;; - op50n) - basic_machine=hppa1.1-oki - ;; - op60c) - basic_machine=hppa1.1-oki - ;; - romp) - basic_machine=romp-ibm - ;; - mmix) - basic_machine=mmix-knuth - ;; - rs6000) - basic_machine=rs6000-ibm - ;; - vax) - basic_machine=vax-dec - ;; - pdp10) - # there are many clones, so DEC is not a safe bet - basic_machine=pdp10-unknown - ;; - pdp11) - basic_machine=pdp11-dec - ;; - we32k) - basic_machine=we32k-att - ;; - sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) - basic_machine=sh-unknown - ;; - sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) - basic_machine=sparc-sun - ;; - cydra) - basic_machine=cydra-cydrome - ;; - orion) - basic_machine=orion-highlevel - ;; - orion105) - basic_machine=clipper-highlevel - ;; - mac | mpw | mac-mpw) - basic_machine=m68k-apple - ;; - pmac | pmac-mpw) - basic_machine=powerpc-apple - ;; - *-unknown) - # Make sure to match an already-canonicalized machine name. - ;; - *) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; -esac - -# Here we canonicalize certain aliases for manufacturers. -case $basic_machine in - *-digital*) - basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` - ;; - *-commodore*) - basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` - ;; - *) - ;; -esac - -# Decode manufacturer-specific aliases for certain operating systems. - -if [ x"$os" != x"" ] -then -case $os in - # First match some system type aliases - # that might get confused with valid system types. - # -solaris* is a basic system type, with this one exception. - -auroraux) - os=-auroraux - ;; - -solaris1 | -solaris1.*) - os=`echo $os | sed -e 's|solaris1|sunos4|'` - ;; - -solaris) - os=-solaris2 - ;; - -svr4*) - os=-sysv4 - ;; - -unixware*) - os=-sysv4.2uw - ;; - -gnu/linux*) - os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` - ;; - # First accept the basic system types. - # The portable systems comes first. - # Each alternative MUST END IN A *, to match a version number. - # -sysv* is not here because it comes later, after sysvr4. 
- -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ - | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ - | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ - | -sym* | -kopensolaris* \ - | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ - | -aos* | -aros* \ - | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ - | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ - | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ - | -openbsd* | -solidbsd* \ - | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ - | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ - | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ - | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ - | -chorusos* | -chorusrdb* | -cegcc* \ - | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -linux-gnu* | -linux-android* \ - | -linux-newlib* | -linux-uclibc* \ - | -uxpv* | -beos* | -mpeix* | -udk* \ - | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ - | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ - | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ - | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ - | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ - | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) - # Remember, each alternative MUST END IN *, to match a version number. - ;; - -qnx*) - case $basic_machine in - x86-* | i*86-*) - ;; - *) - os=-nto$os - ;; - esac - ;; - -nto-qnx*) - ;; - -nto*) - os=`echo $os | sed -e 's|nto|nto-qnx|'` - ;; - -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ - | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ - | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) - ;; - -mac*) - os=`echo $os | sed -e 's|mac|macos|'` - ;; - -linux-dietlibc) - os=-linux-dietlibc - ;; - -linux*) - os=`echo $os | sed -e 's|linux|linux-gnu|'` - ;; - -sunos5*) - os=`echo $os | sed -e 's|sunos5|solaris2|'` - ;; - -sunos6*) - os=`echo $os | sed -e 's|sunos6|solaris3|'` - ;; - -opened*) - os=-openedition - ;; - -os400*) - os=-os400 - ;; - -wince*) - os=-wince - ;; - -osfrose*) - os=-osfrose - ;; - -osf*) - os=-osf - ;; - -utek*) - os=-bsd - ;; - -dynix*) - os=-bsd - ;; - -acis*) - os=-aos - ;; - -atheos*) - os=-atheos - ;; - -syllable*) - os=-syllable - ;; - -386bsd) - os=-bsd - ;; - -ctix* | -uts*) - os=-sysv - ;; - -nova*) - os=-rtmk-nova - ;; - -ns2 ) - os=-nextstep2 - ;; - -nsk*) - os=-nsk - ;; - # Preserve the version number of sinix5. - -sinix5.*) - os=`echo $os | sed -e 's|sinix|sysv|'` - ;; - -sinix*) - os=-sysv4 - ;; - -tpf*) - os=-tpf - ;; - -triton*) - os=-sysv3 - ;; - -oss*) - os=-sysv3 - ;; - -svr4) - os=-sysv4 - ;; - -svr3) - os=-sysv3 - ;; - -sysvr4) - os=-sysv4 - ;; - # This must come after -sysvr4. - -sysv*) - ;; - -ose*) - os=-ose - ;; - -es1800*) - os=-ose - ;; - -xenix) - os=-xenix - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - os=-mint - ;; - -aros*) - os=-aros - ;; - -kaos*) - os=-kaos - ;; - -zvmoe) - os=-zvmoe - ;; - -dicos*) - os=-dicos - ;; - -nacl*) - ;; - -none) - ;; - *) - # Get rid of the `-' at the beginning of $os. - os=`echo $os | sed 's/[^-]*-//'` - echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 - exit 1 - ;; -esac -else - -# Here we handle the default operating systems that come with various machines. 
-# The value should be what the vendor currently ships out the door with their -# machine or put another way, the most popular os provided with the machine. - -# Note that if you're going to try to match "-MANUFACTURER" here (say, -# "-sun"), then you have to tell the case statement up towards the top -# that MANUFACTURER isn't an operating system. Otherwise, code above -# will signal an error saying that MANUFACTURER isn't an operating -# system, and we'll never get to this point. - -case $basic_machine in - score-*) - os=-elf - ;; - spu-*) - os=-elf - ;; - *-acorn) - os=-riscix1.2 - ;; - arm*-rebel) - os=-linux - ;; - arm*-semi) - os=-aout - ;; - c4x-* | tic4x-*) - os=-coff - ;; - tic54x-*) - os=-coff - ;; - tic55x-*) - os=-coff - ;; - tic6x-*) - os=-coff - ;; - # This must come before the *-dec entry. - pdp10-*) - os=-tops20 - ;; - pdp11-*) - os=-none - ;; - *-dec | vax-*) - os=-ultrix4.2 - ;; - m68*-apollo) - os=-domain - ;; - i386-sun) - os=-sunos4.0.2 - ;; - m68000-sun) - os=-sunos3 - ;; - m68*-cisco) - os=-aout - ;; - mep-*) - os=-elf - ;; - mips*-cisco) - os=-elf - ;; - mips*-*) - os=-elf - ;; - or32-*) - os=-coff - ;; - *-tti) # must be before sparc entry or we get the wrong os. - os=-sysv3 - ;; - sparc-* | *-sun) - os=-sunos4.1.1 - ;; - *-be) - os=-beos - ;; - *-haiku) - os=-haiku - ;; - *-ibm) - os=-aix - ;; - *-knuth) - os=-mmixware - ;; - *-wec) - os=-proelf - ;; - *-winbond) - os=-proelf - ;; - *-oki) - os=-proelf - ;; - *-hp) - os=-hpux - ;; - *-hitachi) - os=-hiux - ;; - i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) - os=-sysv - ;; - *-cbm) - os=-amigaos - ;; - *-dg) - os=-dgux - ;; - *-dolphin) - os=-sysv3 - ;; - m68k-ccur) - os=-rtu - ;; - m88k-omron*) - os=-luna - ;; - *-next ) - os=-nextstep - ;; - *-sequent) - os=-ptx - ;; - *-crds) - os=-unos - ;; - *-ns) - os=-genix - ;; - i370-*) - os=-mvs - ;; - *-next) - os=-nextstep3 - ;; - *-gould) - os=-sysv - ;; - *-highlevel) - os=-bsd - ;; - *-encore) - os=-bsd - ;; - *-sgi) - os=-irix - ;; - *-siemens) - os=-sysv4 - ;; - *-masscomp) - os=-rtu - ;; - f30[01]-fujitsu | f700-fujitsu) - os=-uxpv - ;; - *-rom68k) - os=-coff - ;; - *-*bug) - os=-coff - ;; - *-apple) - os=-macos - ;; - *-atari*) - os=-mint - ;; - *) - os=-none - ;; -esac -fi - -# Here we handle the case where we know the os, and the CPU type, but not the -# manufacturer. We pick the logical manufacturer. 
-vendor=unknown -case $basic_machine in - *-unknown) - case $os in - -riscix*) - vendor=acorn - ;; - -sunos*) - vendor=sun - ;; - -cnk*|-aix*) - vendor=ibm - ;; - -beos*) - vendor=be - ;; - -hpux*) - vendor=hp - ;; - -mpeix*) - vendor=hp - ;; - -hiux*) - vendor=hitachi - ;; - -unos*) - vendor=crds - ;; - -dgux*) - vendor=dg - ;; - -luna*) - vendor=omron - ;; - -genix*) - vendor=ns - ;; - -mvs* | -opened*) - vendor=ibm - ;; - -os400*) - vendor=ibm - ;; - -ptx*) - vendor=sequent - ;; - -tpf*) - vendor=ibm - ;; - -vxsim* | -vxworks* | -windiss*) - vendor=wrs - ;; - -aux*) - vendor=apple - ;; - -hms*) - vendor=hitachi - ;; - -mpw* | -macos*) - vendor=apple - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - vendor=atari - ;; - -vos*) - vendor=stratus - ;; - esac - basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` - ;; -esac - -echo $basic_machine$os -exit - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff --git a/extra/jemalloc/configure b/extra/jemalloc/configure deleted file mode 100755 index c112a47d45d..00000000000 --- a/extra/jemalloc/configure +++ /dev/null @@ -1,8339 +0,0 @@ -#! /bin/sh -# Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.68. -# -# -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, -# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software -# Foundation, Inc. -# -# -# This configure script is free software; the Free Software Foundation -# gives unlimited permission to copy, distribute and modify it. -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. 
-if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. - alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi -" - as_required="as_fn_return () { (exit \$1); } -as_fn_success () { as_fn_return 0; } -as_fn_failure () { as_fn_return 1; } -as_fn_ret_success () { return 0; } -as_fn_ret_failure () { return 1; } - -exitcode=0 -as_fn_success || { exitcode=1; echo as_fn_success failed.; } -as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } -as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } -as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : - -else - exitcode=1; echo positional parameters were not saved. 
-fi -test x\$exitcode = x0 || exit 1" - as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO - as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO - eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && - test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 -test \$(( 1 + 1 )) = 2 || exit 1" - if (eval "$as_required") 2>/dev/null; then : - as_have_required=yes -else - as_have_required=no -fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : - -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -as_found=false -for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - as_found=: - case $as_dir in #( - /*) - for as_base in sh bash ksh sh5; do - # Try only shells that exist, to save several forks. - as_shell=$as_dir/$as_base - if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : - CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : - break 2 -fi -fi - done;; - esac - as_found=false -done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } -IFS=$as_save_IFS - - - if test "x$CONFIG_SHELL" != x; then : - # We cannot yet assume a decent shell, so we have to provide a - # neutralization value for shells without unset; and this also - # works around shells that cannot unset nonexistent variables. - # Preserve -v and -x to the replacement shell. - BASH_ENV=/dev/null - ENV=/dev/null - (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV - export CONFIG_SHELL - case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; - esac - exec "$CONFIG_SHELL" $as_opts "$as_myself" ${1+"$@"} -fi - - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." - else - $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, -$0: including any error possibly output before this -$0: message. Then install a modern shell, or manually run -$0: the script under such a shell if you do have one." - fi - exit 1 -fi -fi -fi -SHELL=${CONFIG_SHELL-/bin/sh} -export SHELL -# Unset more variables known to interfere with behavior of common tools. -CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS - -## --------------------- ## -## M4sh Shell Functions. ## -## --------------------- ## -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. 
-as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. 
-as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - - - as_lineno_1=$LINENO as_lineno_1a=$LINENO - as_lineno_2=$LINENO as_lineno_2a=$LINENO - eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && - test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { - # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) - sed -n ' - p - /[$]LINENO/= - ' <$as_myself | - sed ' - s/[$]LINENO.*/&-/ - t lineno - b - :lineno - N - :loop - s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ - t loop - s/-\n.*// - ' >$as_me.lineno && - chmod +x "$as_me.lineno" || - { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } - - # Don't try to exec as it changes $[0], causing all sort of problems - # (the dirname of $[0] is not the place where we might find the - # original and so on. Autoconf is especially sensitive to this). - . "./$as_me.lineno" - # Exit status is that of the last command. - exit -} - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -p' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -p' - fi -else - as_ln_s='cp -p' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' -else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in #( - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' -fi -as_executable_p=$as_test_x - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -test -n "$DJDIR" || exec 7<&0 </dev/null -exec 6>&1 - -# Name of the host. -# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, -# so uname gets run too. -ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` - -# -# Initializations. -# -ac_default_prefix=/usr/local -ac_clean_files= -ac_config_libobj_dir=. -LIBOBJS= -cross_compiling=no -subdirs= -MFLAGS= -MAKEFLAGS= - -# Identity of this package. 
-PACKAGE_NAME= -PACKAGE_TARNAME= -PACKAGE_VERSION= -PACKAGE_STRING= -PACKAGE_BUGREPORT= -PACKAGE_URL= - -ac_unique_file="Makefile.in" -# Factoring default headers for most tests. -ac_includes_default="\ -#include <stdio.h> -#ifdef HAVE_SYS_TYPES_H -# include <sys/types.h> -#endif -#ifdef HAVE_SYS_STAT_H -# include <sys/stat.h> -#endif -#ifdef STDC_HEADERS -# include <stdlib.h> -# include <stddef.h> -#else -# ifdef HAVE_STDLIB_H -# include <stdlib.h> -# endif -#endif -#ifdef HAVE_STRING_H -# if !defined STDC_HEADERS && defined HAVE_MEMORY_H -# include <memory.h> -# endif -# include <string.h> -#endif -#ifdef HAVE_STRINGS_H -# include <strings.h> -#endif -#ifdef HAVE_INTTYPES_H -# include <inttypes.h> -#endif -#ifdef HAVE_STDINT_H -# include <stdint.h> -#endif -#ifdef HAVE_UNISTD_H -# include <unistd.h> -#endif" - -ac_subst_vars='LTLIBOBJS -LIBOBJS -cfgoutputs_out -cfgoutputs_in -cfghdrs_out -cfghdrs_in -enable_zone_allocator -enable_tls -enable_lazy_lock -jemalloc_version_gid -jemalloc_version_nrev -jemalloc_version_bugfix -jemalloc_version_minor -jemalloc_version_major -jemalloc_version -enable_xmalloc -enable_valgrind -enable_utrace -enable_fill -enable_dss -enable_munmap -enable_mremap -enable_tcache -enable_prof -enable_stats -enable_debug -install_suffix -enable_experimental -AUTOCONF -LD -AR -RANLIB -INSTALL_DATA -INSTALL_SCRIPT -INSTALL_PROGRAM -enable_autogen -RPATH_EXTRA -CC_MM -MKLIB -LDTARGET -CTARGET -PIC_CFLAGS -SOREV -EXTRA_LDFLAGS -DSO_LDFLAGS -libprefix -exe -a -o -importlib -so -LD_PRELOAD_VAR -RPATH -abi -host_os -host_vendor -host_cpu -host -build_os -build_vendor -build_cpu -build -EGREP -GREP -CPP -OBJEXT -EXEEXT -ac_ct_CC -CPPFLAGS -LDFLAGS -CFLAGS -CC -XSLROOT -XSLTPROC -MANDIR -DATADIR -LIBDIR -INCLUDEDIR -BINDIR -PREFIX -abs_objroot -objroot -abs_srcroot -srcroot -rev -target_alias -host_alias -build_alias -LIBS -ECHO_T -ECHO_N -ECHO_C -DEFS -mandir -localedir -libdir -psdir -pdfdir -dvidir -htmldir -infodir -docdir -oldincludedir -includedir -localstatedir -sharedstatedir -sysconfdir -datadir -datarootdir -libexecdir -sbindir -bindir -program_transform_name -prefix -exec_prefix -PACKAGE_URL -PACKAGE_BUGREPORT -PACKAGE_STRING -PACKAGE_VERSION -PACKAGE_TARNAME -PACKAGE_NAME -PATH_SEPARATOR -SHELL' -ac_subst_files='' -ac_user_opts=' -enable_option_checking -with_xslroot -with_rpath -enable_autogen -enable_experimental -with_mangling -with_jemalloc_prefix -with_export -with_private_namespace -with_install_suffix -enable_cc_silence -enable_debug -enable_ivsalloc -enable_stats -enable_prof -enable_prof_libunwind -with_static_libunwind -enable_prof_libgcc -enable_prof_gcc -enable_tcache -enable_mremap -enable_munmap -enable_dss -enable_fill -enable_utrace -enable_valgrind -enable_xmalloc -enable_lazy_lock -enable_tls -enable_zone_allocator -' - ac_precious_vars='build_alias -host_alias -target_alias -CC -CFLAGS -LDFLAGS -LIBS -CPPFLAGS -CPP' - - -# Initialize some variables set by options. -ac_init_help= -ac_init_version=false -ac_unrecognized_opts= -ac_unrecognized_sep= -# The variables have the same names as the options, with -# dashes changed to underlines. -cache_file=/dev/null -exec_prefix=NONE -no_create= -no_recursion= -prefix=NONE -program_prefix=NONE -program_suffix=NONE -program_transform_name=s,x,x, -silent= -site= -srcdir= -verbose= -x_includes=NONE -x_libraries=NONE - -# Installation directory options. 
-# These are left unexpanded so users can "make install exec_prefix=/foo" -# and all the variables that are supposed to be based on exec_prefix -# by default will actually change. -# Use braces instead of parens because sh, perl, etc. also accept them. -# (The list follows the same order as the GNU Coding Standards.) -bindir='${exec_prefix}/bin' -sbindir='${exec_prefix}/sbin' -libexecdir='${exec_prefix}/libexec' -datarootdir='${prefix}/share' -datadir='${datarootdir}' -sysconfdir='${prefix}/etc' -sharedstatedir='${prefix}/com' -localstatedir='${prefix}/var' -includedir='${prefix}/include' -oldincludedir='/usr/include' -docdir='${datarootdir}/doc/${PACKAGE}' -infodir='${datarootdir}/info' -htmldir='${docdir}' -dvidir='${docdir}' -pdfdir='${docdir}' -psdir='${docdir}' -libdir='${exec_prefix}/lib' -localedir='${datarootdir}/locale' -mandir='${datarootdir}/man' - -ac_prev= -ac_dashdash= -for ac_option -do - # If the previous option needs an argument, assign it. - if test -n "$ac_prev"; then - eval $ac_prev=\$ac_option - ac_prev= - continue - fi - - case $ac_option in - *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; - *=) ac_optarg= ;; - *) ac_optarg=yes ;; - esac - - # Accept the important Cygnus configure options, so we can diagnose typos. - - case $ac_dashdash$ac_option in - --) - ac_dashdash=yes ;; - - -bindir | --bindir | --bindi | --bind | --bin | --bi) - ac_prev=bindir ;; - -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) - bindir=$ac_optarg ;; - - -build | --build | --buil | --bui | --bu) - ac_prev=build_alias ;; - -build=* | --build=* | --buil=* | --bui=* | --bu=*) - build_alias=$ac_optarg ;; - - -cache-file | --cache-file | --cache-fil | --cache-fi \ - | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) - ac_prev=cache_file ;; - -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ - | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) - cache_file=$ac_optarg ;; - - --config-cache | -C) - cache_file=config.cache ;; - - -datadir | --datadir | --datadi | --datad) - ac_prev=datadir ;; - -datadir=* | --datadir=* | --datadi=* | --datad=*) - datadir=$ac_optarg ;; - - -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ - | --dataroo | --dataro | --datar) - ac_prev=datarootdir ;; - -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ - | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) - datarootdir=$ac_optarg ;; - - -disable-* | --disable-*) - ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=no ;; - - -docdir | --docdir | --docdi | --doc | --do) - ac_prev=docdir ;; - -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) - docdir=$ac_optarg ;; - - -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) - ac_prev=dvidir ;; - -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) - dvidir=$ac_optarg ;; - - -enable-* | --enable-*) - ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` - # Reject names that are not valid shell variable names. 
- expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=\$ac_optarg ;; - - -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ - | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ - | --exec | --exe | --ex) - ac_prev=exec_prefix ;; - -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ - | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ - | --exec=* | --exe=* | --ex=*) - exec_prefix=$ac_optarg ;; - - -gas | --gas | --ga | --g) - # Obsolete; use --with-gas. - with_gas=yes ;; - - -help | --help | --hel | --he | -h) - ac_init_help=long ;; - -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) - ac_init_help=recursive ;; - -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) - ac_init_help=short ;; - - -host | --host | --hos | --ho) - ac_prev=host_alias ;; - -host=* | --host=* | --hos=* | --ho=*) - host_alias=$ac_optarg ;; - - -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) - ac_prev=htmldir ;; - -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ - | --ht=*) - htmldir=$ac_optarg ;; - - -includedir | --includedir | --includedi | --included | --include \ - | --includ | --inclu | --incl | --inc) - ac_prev=includedir ;; - -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ - | --includ=* | --inclu=* | --incl=* | --inc=*) - includedir=$ac_optarg ;; - - -infodir | --infodir | --infodi | --infod | --info | --inf) - ac_prev=infodir ;; - -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) - infodir=$ac_optarg ;; - - -libdir | --libdir | --libdi | --libd) - ac_prev=libdir ;; - -libdir=* | --libdir=* | --libdi=* | --libd=*) - libdir=$ac_optarg ;; - - -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ - | --libexe | --libex | --libe) - ac_prev=libexecdir ;; - -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ - | --libexe=* | --libex=* | --libe=*) - libexecdir=$ac_optarg ;; - - -localedir | --localedir | --localedi | --localed | --locale) - ac_prev=localedir ;; - -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) - localedir=$ac_optarg ;; - - -localstatedir | --localstatedir | --localstatedi | --localstated \ - | --localstate | --localstat | --localsta | --localst | --locals) - ac_prev=localstatedir ;; - -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ - | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) - localstatedir=$ac_optarg ;; - - -mandir | --mandir | --mandi | --mand | --man | --ma | --m) - ac_prev=mandir ;; - -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) - mandir=$ac_optarg ;; - - -nfp | --nfp | --nf) - # Obsolete; use --without-fp. 
- with_fp=no ;; - - -no-create | --no-create | --no-creat | --no-crea | --no-cre \ - | --no-cr | --no-c | -n) - no_create=yes ;; - - -no-recursion | --no-recursion | --no-recursio | --no-recursi \ - | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) - no_recursion=yes ;; - - -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ - | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ - | --oldin | --oldi | --old | --ol | --o) - ac_prev=oldincludedir ;; - -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ - | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ - | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) - oldincludedir=$ac_optarg ;; - - -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) - ac_prev=prefix ;; - -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) - prefix=$ac_optarg ;; - - -program-prefix | --program-prefix | --program-prefi | --program-pref \ - | --program-pre | --program-pr | --program-p) - ac_prev=program_prefix ;; - -program-prefix=* | --program-prefix=* | --program-prefi=* \ - | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) - program_prefix=$ac_optarg ;; - - -program-suffix | --program-suffix | --program-suffi | --program-suff \ - | --program-suf | --program-su | --program-s) - ac_prev=program_suffix ;; - -program-suffix=* | --program-suffix=* | --program-suffi=* \ - | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) - program_suffix=$ac_optarg ;; - - -program-transform-name | --program-transform-name \ - | --program-transform-nam | --program-transform-na \ - | --program-transform-n | --program-transform- \ - | --program-transform | --program-transfor \ - | --program-transfo | --program-transf \ - | --program-trans | --program-tran \ - | --progr-tra | --program-tr | --program-t) - ac_prev=program_transform_name ;; - -program-transform-name=* | --program-transform-name=* \ - | --program-transform-nam=* | --program-transform-na=* \ - | --program-transform-n=* | --program-transform-=* \ - | --program-transform=* | --program-transfor=* \ - | --program-transfo=* | --program-transf=* \ - | --program-trans=* | --program-tran=* \ - | --progr-tra=* | --program-tr=* | --program-t=*) - program_transform_name=$ac_optarg ;; - - -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) - ac_prev=pdfdir ;; - -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) - pdfdir=$ac_optarg ;; - - -psdir | --psdir | --psdi | --psd | --ps) - ac_prev=psdir ;; - -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) - psdir=$ac_optarg ;; - - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - silent=yes ;; - - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) - ac_prev=sbindir ;; - -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ - | --sbi=* | --sb=*) - sbindir=$ac_optarg ;; - - -sharedstatedir | --sharedstatedir | --sharedstatedi \ - | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ - | --sharedst | --shareds | --shared | --share | --shar \ - | --sha | --sh) - ac_prev=sharedstatedir ;; - -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ - | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ - | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ - | --sha=* | --sh=*) - sharedstatedir=$ac_optarg ;; - - -site | --site | --sit) - ac_prev=site ;; - -site=* | --site=* | --sit=*) - 
site=$ac_optarg ;; - - -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) - ac_prev=srcdir ;; - -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) - srcdir=$ac_optarg ;; - - -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ - | --syscon | --sysco | --sysc | --sys | --sy) - ac_prev=sysconfdir ;; - -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ - | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) - sysconfdir=$ac_optarg ;; - - -target | --target | --targe | --targ | --tar | --ta | --t) - ac_prev=target_alias ;; - -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) - target_alias=$ac_optarg ;; - - -v | -verbose | --verbose | --verbos | --verbo | --verb) - verbose=yes ;; - - -version | --version | --versio | --versi | --vers | -V) - ac_init_version=: ;; - - -with-* | --with-*) - ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=\$ac_optarg ;; - - -without-* | --without-*) - ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=no ;; - - --x) - # Obsolete; use --with-x. - with_x=yes ;; - - -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ - | --x-incl | --x-inc | --x-in | --x-i) - ac_prev=x_includes ;; - -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ - | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) - x_includes=$ac_optarg ;; - - -x-libraries | --x-libraries | --x-librarie | --x-librari \ - | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) - ac_prev=x_libraries ;; - -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ - | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) - x_libraries=$ac_optarg ;; - - -*) as_fn_error $? "unrecognized option: \`$ac_option' -Try \`$0 --help' for more information" - ;; - - *=*) - ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` - # Reject names that are not valid shell variable names. - case $ac_envvar in #( - '' | [0-9]* | *[!_$as_cr_alnum]* ) - as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; - esac - eval $ac_envvar=\$ac_optarg - export $ac_envvar ;; - - *) - # FIXME: should be removed in autoconf 3.0. - $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 - expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 - : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" - ;; - - esac -done - -if test -n "$ac_prev"; then - ac_option=--`echo $ac_prev | sed 's/_/-/g'` - as_fn_error $? 
"missing argument to $ac_option" -fi - -if test -n "$ac_unrecognized_opts"; then - case $enable_option_checking in - no) ;; - fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; - *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; - esac -fi - -# Check all directory arguments for consistency. -for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ - datadir sysconfdir sharedstatedir localstatedir includedir \ - oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir -do - eval ac_val=\$$ac_var - # Remove trailing slashes. - case $ac_val in - */ ) - ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` - eval $ac_var=\$ac_val;; - esac - # Be sure to have absolute directory names. - case $ac_val in - [\\/$]* | ?:[\\/]* ) continue;; - NONE | '' ) case $ac_var in *prefix ) continue;; esac;; - esac - as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" -done - -# There might be people who depend on the old broken behavior: `$host' -# used to hold the argument of --host etc. -# FIXME: To remove some day. -build=$build_alias -host=$host_alias -target=$target_alias - -# FIXME: To remove some day. -if test "x$host_alias" != x; then - if test "x$build_alias" = x; then - cross_compiling=maybe - $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host. - If a cross compiler is detected then cross compile mode will be used" >&2 - elif test "x$build_alias" != "x$host_alias"; then - cross_compiling=yes - fi -fi - -ac_tool_prefix= -test -n "$host_alias" && ac_tool_prefix=$host_alias- - -test "$silent" = yes && exec 6>/dev/null - - -ac_pwd=`pwd` && test -n "$ac_pwd" && -ac_ls_di=`ls -di .` && -ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - as_fn_error $? "working directory cannot be determined" -test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - as_fn_error $? "pwd does not report name of working directory" - - -# Find the source files, if location was not specified. -if test -z "$srcdir"; then - ac_srcdir_defaulted=yes - # Try the directory containing this script, then the parent directory. - ac_confdir=`$as_dirname -- "$as_myself" || -$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_myself" : 'X\(//\)[^/]' \| \ - X"$as_myself" : 'X\(//\)$' \| \ - X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_myself" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - srcdir=$ac_confdir - if test ! -r "$srcdir/$ac_unique_file"; then - srcdir=.. - fi -else - ac_srcdir_defaulted=no -fi -if test ! -r "$srcdir/$ac_unique_file"; then - test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" -fi -ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" -ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" - pwd)` -# When building in place, set srcdir=. -if test "$ac_abs_confdir" = "$ac_pwd"; then - srcdir=. -fi -# Remove unnecessary trailing slashes from srcdir. -# Double slashes in file names in object file debugging info -# mess up M-x gdb in Emacs. 
-case $srcdir in -*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; -esac -for ac_var in $ac_precious_vars; do - eval ac_env_${ac_var}_set=\${${ac_var}+set} - eval ac_env_${ac_var}_value=\$${ac_var} - eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} - eval ac_cv_env_${ac_var}_value=\$${ac_var} -done - -# -# Report the --help message. -# -if test "$ac_init_help" = "long"; then - # Omit some internal or obsolete options to make the list less imposing. - # This message is too long to be a string in the A/UX 3.1 sh. - cat <<_ACEOF -\`configure' configures this package to adapt to many kinds of systems. - -Usage: $0 [OPTION]... [VAR=VALUE]... - -To assign environment variables (e.g., CC, CFLAGS...), specify them as -VAR=VALUE. See below for descriptions of some of the useful variables. - -Defaults for the options are specified in brackets. - -Configuration: - -h, --help display this help and exit - --help=short display options specific to this package - --help=recursive display the short help of all the included packages - -V, --version display version information and exit - -q, --quiet, --silent do not print \`checking ...' messages - --cache-file=FILE cache test results in FILE [disabled] - -C, --config-cache alias for \`--cache-file=config.cache' - -n, --no-create do not create output files - --srcdir=DIR find the sources in DIR [configure dir or \`..'] - -Installation directories: - --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] - --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] - -By default, \`make install' will install all the files in -\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify -an installation prefix other than \`$ac_default_prefix' using \`--prefix', -for instance \`--prefix=\$HOME'. - -For better control, use the options below. 
- -Fine tuning of the installation directories: - --bindir=DIR user executables [EPREFIX/bin] - --sbindir=DIR system admin executables [EPREFIX/sbin] - --libexecdir=DIR program executables [EPREFIX/libexec] - --sysconfdir=DIR read-only single-machine data [PREFIX/etc] - --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] - --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --libdir=DIR object code libraries [EPREFIX/lib] - --includedir=DIR C header files [PREFIX/include] - --oldincludedir=DIR C header files for non-gcc [/usr/include] - --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] - --datadir=DIR read-only architecture-independent data [DATAROOTDIR] - --infodir=DIR info documentation [DATAROOTDIR/info] - --localedir=DIR locale-dependent data [DATAROOTDIR/locale] - --mandir=DIR man documentation [DATAROOTDIR/man] - --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] - --htmldir=DIR html documentation [DOCDIR] - --dvidir=DIR dvi documentation [DOCDIR] - --pdfdir=DIR pdf documentation [DOCDIR] - --psdir=DIR ps documentation [DOCDIR] -_ACEOF - - cat <<\_ACEOF - -System types: - --build=BUILD configure for building on BUILD [guessed] - --host=HOST cross-compile to build programs to run on HOST [BUILD] -_ACEOF -fi - -if test -n "$ac_init_help"; then - - cat <<\_ACEOF - -Optional Features: - --disable-option-checking ignore unrecognized --enable/--with options - --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) - --enable-FEATURE[=ARG] include FEATURE [ARG=yes] - --enable-autogen Automatically regenerate configure output - --disable-experimental Disable support for the experimental API - --enable-cc-silence Silence irrelevant compiler warnings - --enable-debug Build debugging code (implies --enable-ivsalloc) - --enable-ivsalloc Validate pointers passed through the public API - --disable-stats Disable statistics calculation/reporting - --enable-prof Enable allocation profiling - --enable-prof-libunwind Use libunwind for backtracing - --disable-prof-libgcc Do not use libgcc for backtracing - --disable-prof-gcc Do not use gcc intrinsics for backtracing - --disable-tcache Disable per thread caches - --enable-mremap Enable mremap(2) for huge realloc() - --disable-munmap Disable VM deallocation via munmap(2) - --enable-dss Enable allocation from DSS - --disable-fill Disable support for junk/zero filling, quarantine, - and redzones - --enable-utrace Enable utrace(2)-based tracing - --disable-valgrind Disable support for Valgrind - --enable-xmalloc Support xmalloc option - --enable-lazy-lock Enable lazy locking (only lock when multi-threaded) - --disable-tls Disable thread-local storage (__thread keyword) - --disable-zone-allocator - Disable zone allocator for Darwin - -Optional Packages: - --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] - --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) - --with-xslroot=<path> XSL stylesheet root path - --with-rpath=<rpath> Colon-separated rpath (ELF systems only) - --with-mangling=<map> Mangle symbols in <map> - --with-jemalloc-prefix=<prefix> - Prefix to prepend to all public APIs - --without-export disable exporting jemalloc public APIs - --with-private-namespace=<prefix> - Prefix to prepend to all library-private APIs - --with-install-suffix=<suffix> - Suffix to append to all installed files - --with-static-libunwind=<libunwind.a> - Path to static libunwind library; use rather than - dynamically linking - -Some influential environment variables: - CC C compiler 
command - CFLAGS C compiler flags - LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a - nonstandard directory <lib dir> - LIBS libraries to pass to the linker, e.g. -l<library> - CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if - you have headers in a nonstandard directory <include dir> - CPP C preprocessor - -Use these variables to override the choices made by `configure' or to help -it to find libraries and programs with nonstandard names/locations. - -Report bugs to the package provider. -_ACEOF -ac_status=$? -fi - -if test "$ac_init_help" = "recursive"; then - # If there are subdirs, report their specific --help. - for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue - test -d "$ac_dir" || - { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || - continue - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - cd "$ac_dir" || { ac_status=$?; continue; } - # Check for guested configure. - if test -f "$ac_srcdir/configure.gnu"; then - echo && - $SHELL "$ac_srcdir/configure.gnu" --help=recursive - elif test -f "$ac_srcdir/configure"; then - echo && - $SHELL "$ac_srcdir/configure" --help=recursive - else - $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 - fi || ac_status=$? - cd "$ac_pwd" || { ac_status=$?; break; } - done -fi - -test -n "$ac_init_help" && exit $ac_status -if $ac_init_version; then - cat <<\_ACEOF -configure -generated by GNU Autoconf 2.68 - -Copyright (C) 2010 Free Software Foundation, Inc. -This configure script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it. -_ACEOF - exit -fi - -## ------------------------ ## -## Autoconf initialization. ## -## ------------------------ ## - -# ac_fn_c_try_compile LINENO -# -------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_compile - -# ac_fn_c_try_cpp LINENO -# ---------------------- -# Try to preprocess conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_cpp () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } > conftest.i && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_cpp - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. -ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_run - -# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES -# -------------------------------------------- -# Tries to find the compile-time value of EXPR in a program that includes -# INCLUDES, setting VAR accordingly. Returns whether the value could be -# computed -ac_fn_c_compute_int () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if test "$cross_compiling" = yes; then - # Depending upon the size, compute the lo and hi bounds. -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) >= 0)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_lo=0 ac_mid=0 - while :; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) <= $ac_mid)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=$ac_mid; break -else - as_fn_arith $ac_mid + 1 && ac_lo=$as_val - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - done -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) < 0)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=-1 ac_mid=-1 - while :; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) >= $ac_mid)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_lo=$ac_mid; break -else - as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - done -else - ac_lo= ac_hi= -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -# Binary search between lo and hi bounds. -while test "x$ac_lo" != "x$ac_hi"; do - as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) <= $ac_mid)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=$ac_mid -else - as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -done -case $ac_lo in #(( -?*) eval "$3=\$ac_lo"; ac_retval=0 ;; -'') ac_retval=1 ;; -esac - else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -static long int longval () { return $2; } -static unsigned long int ulongval () { return $2; } -#include <stdio.h> -#include <stdlib.h> -int -main () -{ - - FILE *f = fopen ("conftest.val", "w"); - if (! f) - return 1; - if (($2) < 0) - { - long int i = longval (); - if (i != ($2)) - return 1; - fprintf (f, "%ld", i); - } - else - { - unsigned long int i = ulongval (); - if (i != ($2)) - return 1; - fprintf (f, "%lu", i); - } - /* Do not output a trailing newline, as this causes \r\n confusion - on some platforms. */ - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - echo >>conftest.val; read $3 <conftest.val; ac_retval=0 -else - ac_retval=1 -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -rm -f conftest.val - - fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_compute_int - -# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists and can be compiled using the include files in -# INCLUDES, setting the cache variable VAR accordingly. 
-ac_fn_c_check_header_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_compile - -# ac_fn_c_try_link LINENO -# ----------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_link () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - $as_test_x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_link - -# ac_fn_c_check_func LINENO FUNC VAR -# ---------------------------------- -# Tests whether FUNC exists, setting the cache variable VAR accordingly -ac_fn_c_check_func () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Define $2 to an innocuous variant, in case <limits.h> declares $2. - For example, HP-UX 11i <limits.h> declares gettimeofday. */ -#define $2 innocuous_$2 - -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $2 (); below. - Prefer <limits.h> to <assert.h> if __STDC__ is defined, since - <limits.h> exists even on freestanding compilers. */ - -#ifdef __STDC__ -# include <limits.h> -#else -# include <assert.h> -#endif - -#undef $2 - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char $2 (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined __stub_$2 || defined __stub___$2 -choke me -#endif - -int -main () -{ -return $2 (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_func - -# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists, giving a warning if it cannot be compiled using -# the include files in INCLUDES and setting the cache variable VAR -# accordingly. -ac_fn_c_check_header_mongrel () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if eval \${$3+:} false; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -else - # Is the header compilable? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 -$as_echo_n "checking $2 usability... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_header_compiler=yes -else - ac_header_compiler=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 -$as_echo "$ac_header_compiler" >&6; } - -# Is the header present? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 -$as_echo_n "checking $2 presence... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <$2> -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - ac_header_preproc=yes -else - ac_header_preproc=no -fi -rm -f conftest.err conftest.i conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 -$as_echo "$ac_header_preproc" >&6; } - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( - yes:no: ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 -$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; - no:yes:* ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 -$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 -$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" 
>&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 -$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 -$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; -esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=\$ac_header_compiler" -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_mongrel - -# ac_fn_c_check_type LINENO TYPE VAR INCLUDES -# ------------------------------------------- -# Tests whether TYPE exists after having included INCLUDES, setting cache -# variable VAR accordingly. -ac_fn_c_check_type () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=no" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -if (sizeof ($2)) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -if (sizeof (($2))) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - eval "$3=yes" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_type -cat >config.log <<_ACEOF -This file contains any messages produced by compilers while -running configure, to aid debugging if configure makes a mistake. - -It was created by $as_me, which was -generated by GNU Autoconf 2.68. Invocation command line was - - $ $0 $@ - -_ACEOF -exec 5>>config.log -{ -cat <<_ASUNAME -## --------- ## -## Platform. 
## -## --------- ## - -hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` - -/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` -/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` -/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` -/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` - -_ASUNAME - -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - $as_echo "PATH: $as_dir" - done -IFS=$as_save_IFS - -} >&5 - -cat >&5 <<_ACEOF - - -## ----------- ## -## Core tests. ## -## ----------- ## - -_ACEOF - - -# Keep a trace of the command line. -# Strip out --no-create and --no-recursion so they do not pile up. -# Strip out --silent because we don't want to record it for future runs. -# Also quote any args containing shell meta-characters. -# Make two passes to allow for proper duplicate-argument suppression. -ac_configure_args= -ac_configure_args0= -ac_configure_args1= -ac_must_keep_next=false -for ac_pass in 1 2 -do - for ac_arg - do - case $ac_arg in - -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - continue ;; - *\'*) - ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - case $ac_pass in - 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; - 2) - as_fn_append ac_configure_args1 " '$ac_arg'" - if test $ac_must_keep_next = true; then - ac_must_keep_next=false # Got value, back to normal. - else - case $ac_arg in - *=* | --config-cache | -C | -disable-* | --disable-* \ - | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ - | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ - | -with-* | --with-* | -without-* | --without-* | --x) - case "$ac_configure_args0 " in - "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; - esac - ;; - -* ) ac_must_keep_next=true ;; - esac - fi - as_fn_append ac_configure_args " '$ac_arg'" - ;; - esac - done -done -{ ac_configure_args0=; unset ac_configure_args0;} -{ ac_configure_args1=; unset ac_configure_args1;} - -# When interrupted or exit'd, cleanup temporary files, and complete -# config.log. We remove comments because anyway the quotes in there -# would cause problems or look ugly. -# WARNING: Use '\'' to represent an apostrophe within the trap. -# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. -trap 'exit_status=$? - # Save into config.log some information that might help in debugging. - { - echo - - $as_echo "## ---------------- ## -## Cache variables. 
## -## ---------------- ##" - echo - # The following way of writing the cache mishandles newlines in values, -( - for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - (set) 2>&1 | - case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - sed -n \ - "s/'\''/'\''\\\\'\'''\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" - ;; #( - *) - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) - echo - - $as_echo "## ----------------- ## -## Output variables. ## -## ----------------- ##" - echo - for ac_var in $ac_subst_vars - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - - if test -n "$ac_subst_files"; then - $as_echo "## ------------------- ## -## File substitutions. ## -## ------------------- ##" - echo - for ac_var in $ac_subst_files - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - fi - - if test -s confdefs.h; then - $as_echo "## ----------- ## -## confdefs.h. ## -## ----------- ##" - echo - cat confdefs.h - echo - fi - test "$ac_signal" != 0 && - $as_echo "$as_me: caught signal $ac_signal" - $as_echo "$as_me: exit $exit_status" - } >&5 - rm -f core *.core core.conftest.* && - rm -f -r conftest* confdefs* conf$$* $ac_clean_files && - exit $exit_status -' 0 -for ac_signal in 1 2 13 15; do - trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal -done -ac_signal=0 - -# confdefs.h avoids OS command line length limits that DEFS can exceed. -rm -f -r conftest* confdefs.h - -$as_echo "/* confdefs.h */" > confdefs.h - -# Predefined preprocessor variables. - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_NAME "$PACKAGE_NAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_TARNAME "$PACKAGE_TARNAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_VERSION "$PACKAGE_VERSION" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_STRING "$PACKAGE_STRING" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" -_ACEOF - - -# Let the site file select an alternate cache file if it wants to. -# Prefer an explicitly selected file to automatically selected ones. -ac_site_file1=NONE -ac_site_file2=NONE -if test -n "$CONFIG_SITE"; then - # We do not want a PATH search for config.site. 
- case $CONFIG_SITE in #(( - -*) ac_site_file1=./$CONFIG_SITE;; - */*) ac_site_file1=$CONFIG_SITE;; - *) ac_site_file1=./$CONFIG_SITE;; - esac -elif test "x$prefix" != xNONE; then - ac_site_file1=$prefix/share/config.site - ac_site_file2=$prefix/etc/config.site -else - ac_site_file1=$ac_default_prefix/share/config.site - ac_site_file2=$ac_default_prefix/etc/config.site -fi -for ac_site_file in "$ac_site_file1" "$ac_site_file2" -do - test "x$ac_site_file" = xNONE && continue - if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 -$as_echo "$as_me: loading site script $ac_site_file" >&6;} - sed 's/^/| /' "$ac_site_file" >&5 - . "$ac_site_file" \ - || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "failed to load site script $ac_site_file -See \`config.log' for more details" "$LINENO" 5; } - fi -done - -if test -r "$cache_file"; then - # Some versions of bash will fail to source /dev/null (special files - # actually), so we avoid doing that. DJGPP emulates it as a regular file. - if test /dev/null != "$cache_file" && test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 -$as_echo "$as_me: loading cache $cache_file" >&6;} - case $cache_file in - [\\/]* | ?:[\\/]* ) . "$cache_file";; - *) . "./$cache_file";; - esac - fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 -$as_echo "$as_me: creating cache $cache_file" >&6;} - >$cache_file -fi - -# Check that the precious variables saved in the cache have kept the same -# value. -ac_cache_corrupted=false -for ac_var in $ac_precious_vars; do - eval ac_old_set=\$ac_cv_env_${ac_var}_set - eval ac_new_set=\$ac_env_${ac_var}_set - eval ac_old_val=\$ac_cv_env_${ac_var}_value - eval ac_new_val=\$ac_env_${ac_var}_value - case $ac_old_set,$ac_new_set in - set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,);; - *) - if test "x$ac_old_val" != "x$ac_new_val"; then - # differences in whitespace do not lead to failure. - ac_old_val_w=`echo x $ac_old_val` - ac_new_val_w=`echo x $ac_new_val` - if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 -$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - ac_cache_corrupted=: - else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 -$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} - eval $ac_var=\$ac_old_val - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 -$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 -$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} - fi;; - esac - # Pass precious variables to config.status. 
- if test "$ac_new_set" = set; then - case $ac_new_val in - *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; - *) ac_arg=$ac_var=$ac_new_val ;; - esac - case " $ac_configure_args " in - *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; - esac - fi -done -if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 -$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 -fi -## -------------------- ## -## Main body of script. ## -## -------------------- ## - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - - - - - - - -rev=1 - - -srcroot=$srcdir -if test "x${srcroot}" = "x." ; then - srcroot="" -else - srcroot="${srcroot}/" -fi - -abs_srcroot="`cd \"${srcdir}\"; pwd`/" - - -objroot="" - -abs_objroot="`pwd`/" - - -if test "x$prefix" = "xNONE" ; then - prefix="/usr/local" -fi -if test "x$exec_prefix" = "xNONE" ; then - exec_prefix=$prefix -fi -PREFIX=$prefix - -BINDIR=`eval echo $bindir` -BINDIR=`eval echo $BINDIR` - -INCLUDEDIR=`eval echo $includedir` -INCLUDEDIR=`eval echo $INCLUDEDIR` - -LIBDIR=`eval echo $libdir` -LIBDIR=`eval echo $LIBDIR` - -DATADIR=`eval echo $datadir` -DATADIR=`eval echo $DATADIR` - -MANDIR=`eval echo $mandir` -MANDIR=`eval echo $MANDIR` - - -# Extract the first word of "xsltproc", so it can be a program name with args. -set dummy xsltproc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_path_XSLTPROC+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $XSLTPROC in - [\\/]* | ?:[\\/]*) - ac_cv_path_XSLTPROC="$XSLTPROC" # Let the user override the test with a path. - ;; - *) - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_path_XSLTPROC="$as_dir/$ac_word$ac_exec_ext" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - - test -z "$ac_cv_path_XSLTPROC" && ac_cv_path_XSLTPROC="false" - ;; -esac -fi -XSLTPROC=$ac_cv_path_XSLTPROC -if test -n "$XSLTPROC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XSLTPROC" >&5 -$as_echo "$XSLTPROC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then - DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl" -elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then - DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets" -else - DEFAULT_XSLROOT="" -fi - -# Check whether --with-xslroot was given. 
-if test "${with_xslroot+set}" = set; then : - withval=$with_xslroot; -if test "x$with_xslroot" = "xno" ; then - XSLROOT="${DEFAULT_XSLROOT}" -else - XSLROOT="${with_xslroot}" -fi - -else - XSLROOT="${DEFAULT_XSLROOT}" - -fi - - - -CFLAGS=$CFLAGS -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. -set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_CC"; then - ac_ct_CC=$CC - # Extract the first word of "gcc", so it can be a program name with args. -set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -else - CC="$ac_cv_prog_CC" -fi - -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. -set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - fi -fi -if test -z "$CC"; then - # Extract the first word of "cc", so it can be a program name with args. -set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else - ac_prog_rejected=no -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then - ac_prog_rejected=yes - continue - fi - ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -if test $ac_prog_rejected = yes; then - # We found a bogon in the path, so make sure we never use it. - set dummy $ac_cv_prog_CC - shift - if test $# != 0; then - # We chose a different compiler from the bogus one. - # However, it has the same basename, so the bogon will be chosen - # first if we set CC to just the basename; use the full file name. - shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" - fi -fi -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - for ac_prog in cl.exe - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$CC" && break - done -fi -if test -z "$CC"; then - ac_ct_CC=$CC - for ac_prog in cl.exe -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_CC" && break -done - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -fi - -fi - - -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "no acceptable C compiler found in \$PATH -See \`config.log' for more details" "$LINENO" 5; } - -# Provide some information about the compiler. -$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... - 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done - -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" -# Try to create an executable without -o first, disregard a.out. 
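# Illustration only: a minimal sketch of the $PATH walk the script uses to
# locate a C compiler (gcc, then cc, then cl.exe above), assuming a POSIX
# shell; "find_prog" is a placeholder helper, and the real code also honors
# $ac_executable_extensions and a host-triplet tool prefix.
find_prog () {
  save_IFS=$IFS; IFS=${PATH_SEPARATOR:-:}
  for dir in $PATH; do
    IFS=$save_IFS
    test -z "$dir" && dir=.
    if test -f "$dir/$1" && test -x "$dir/$1"; then
      echo "$dir/$1"; return 0    # first match on $PATH wins
    fi
  done
  IFS=$save_IFS; return 1
}
CC_FOUND=`find_prog gcc` || CC_FOUND=`find_prog cc`
echo "C compiler: ${CC_FOUND:-not found}"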
-# It will help us diagnose broken compilers, and finding out an intuition -# of exeext. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } -ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` - -# The possible output files: -ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" - -ac_rmfiles= -for ac_file in $ac_files -do - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - * ) ac_rmfiles="$ac_rmfiles $ac_file";; - esac -done -rm -f $ac_rmfiles - -if { { ac_try="$ac_link_default" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link_default") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. -# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' -# in a Makefile. We should not override ac_cv_exeext if it was cached, -# so that the user can short-circuit this test for compilers unknown to -# Autoconf. -for ac_file in $ac_files '' -do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) - ;; - [ab].out ) - # We found the default executable, but exeext='' is most - # certainly right. - break;; - *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; - then :; else - ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - fi - # We set ac_cv_exeext here because the later test for it is not - # safe: cross compilers may not add the suffix if given an `-o' - # argument, so we may need to know it at that point already. - # Even if this section looks crufty: it has the advantage of - # actually working. - break;; - * ) - break;; - esac -done -test "$ac_cv_exeext" = no && ac_cv_exeext= - -else - ac_file='' -fi -if test -z "$ac_file"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -$as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "C compiler cannot create executables -See \`config.log' for more details" "$LINENO" 5; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } -ac_exeext=$ac_cv_exeext - -rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 -$as_echo_n "checking for suffix of executables... " >&6; } -if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; }; then : - # If both `conftest.exe' and `conftest' are `present' (well, observable) -# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will -# work properly (i.e., refer to `conftest.exe'), while it won't with -# `rm'. -for ac_file in conftest.exe conftest conftest.*; do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - break;; - * ) break;; - esac -done -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 -$as_echo "$ac_cv_exeext" >&6; } - -rm -f conftest.$ac_ext -EXEEXT=$ac_cv_exeext -ac_exeext=$EXEEXT -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <stdio.h> -int -main () -{ -FILE *f = fopen ("conftest.out", "w"); - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -ac_clean_files="$ac_clean_files conftest.out" -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... " >&6; } -if test "$cross_compiling" != yes; then - { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - if { ac_try='./conftest$ac_cv_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then - cross_compiling=no - else - if test "$cross_compiling" = maybe; then - cross_compiling=yes - else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot run C compiled programs. -If you meant to cross compile, use \`--host'. -See \`config.log' for more details" "$LINENO" 5; } - fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } - -rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 -$as_echo_n "checking for suffix of object files... " >&6; } -if ${ac_cv_objext+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.o conftest.obj -if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - for ac_file in conftest.o conftest.obj conftest.*; do - test -f "$ac_file" || continue; - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; - *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` - break;; - esac -done -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of object files: cannot compile -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest.$ac_cv_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 -$as_echo "$ac_cv_objext" >&6; } -OBJEXT=$ac_cv_objext -ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if ${ac_cv_c_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_c_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } -if test $ac_compiler_gnu = yes; then - GCC=yes -else - GCC= -fi -ac_test_CFLAGS=${CFLAGS+set} -ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... " >&6; } -if ${ac_cv_prog_cc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_save_c_werror_flag=$ac_c_werror_flag - ac_c_werror_flag=yes - ac_cv_prog_cc_g=no - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - ac_c_werror_flag=$ac_save_c_werror_flag - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_c_werror_flag=$ac_save_c_werror_flag -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then - CFLAGS=$ac_save_CFLAGS -elif test $ac_cv_prog_cc_g = yes; then - if test "$GCC" = yes; then - CFLAGS="-g -O2" - else - CFLAGS="-g" - fi -else - if test "$GCC" = yes; then - CFLAGS="-O2" - else - CFLAGS= - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if ${ac_cv_prog_cc_c89+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <stdarg.h> -#include <stdio.h> -#include <sys/types.h> -#include <sys/stat.h> -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 1 : -1]; - -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; - -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} -_ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c89=$ac_arg -fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c89" != "xno" && break -done -rm -f conftest.$ac_ext -CC=$ac_save_CC - -fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : - -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -if test "x$GCC" != "xyes" ; then - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is MSVC" >&5 -$as_echo_n "checking whether compiler is MSVC... " >&6; } -if ${je_cv_msvc+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - -#ifndef _MSC_VER - int fail-1; -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - je_cv_msvc=yes -else - je_cv_msvc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_msvc" >&5 -$as_echo "$je_cv_msvc" >&6; } -fi - -if test "x$CFLAGS" = "x" ; then - no_CFLAGS="yes" - if test "x$GCC" = "xyes" ; then - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu99" >&5 -$as_echo_n "checking whether compiler supports -std=gnu99... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-std=gnu99" -else - CFLAGS="${CFLAGS} -std=gnu99" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5 -$as_echo_n "checking whether compiler supports -Wall... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-Wall" -else - CFLAGS="${CFLAGS} -Wall" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -pipe" >&5 -$as_echo_n "checking whether compiler supports -pipe... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-pipe" -else - CFLAGS="${CFLAGS} -pipe" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -g3" >&5 -$as_echo_n "checking whether compiler supports -g3... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-g3" -else - CFLAGS="${CFLAGS} -g3" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - elif test "x$je_cv_msvc" = "xyes" ; then - CC="$CC -nologo" - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Zi" >&5 -$as_echo_n "checking whether compiler supports -Zi... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-Zi" -else - CFLAGS="${CFLAGS} -Zi" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -MT" >&5 -$as_echo_n "checking whether compiler supports -MT... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-MT" -else - CFLAGS="${CFLAGS} -MT" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -W3" >&5 -$as_echo_n "checking whether compiler supports -W3... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-W3" -else - CFLAGS="${CFLAGS} -W3" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - CPPFLAGS="$CPPFLAGS -I${srcroot}/include/msvc_compat" - fi -fi -if test "x$EXTRA_CFLAGS" != "x" ; then - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports $EXTRA_CFLAGS" >&5 -$as_echo_n "checking whether compiler supports $EXTRA_CFLAGS... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="$EXTRA_CFLAGS" -else - CFLAGS="${CFLAGS} $EXTRA_CFLAGS" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 -$as_echo_n "checking how to run the C preprocessor... " >&6; } -# On Suns, sometimes $CPP names a directory. -if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if ${ac_cv_prog_CPP+:} false; then : - $as_echo_n "(cached) " >&6 -else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since - # <limits.h> exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include <limits.h> -#else -# include <assert.h> -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <ac_nonexistent.h> -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - break -fi - - done - ac_cv_prog_CPP=$CPP - -fi - CPP=$ac_cv_prog_CPP -else - ac_cv_prog_CPP=$CPP -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 -$as_echo "$CPP" >&6; } -ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. 
- # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since - # <limits.h> exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include <limits.h> -#else -# include <assert.h> -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <ac_nonexistent.h> -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details" "$LINENO" 5; } -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 -$as_echo_n "checking for grep that handles long lines and -e... " >&6; } -if ${ac_cv_path_GREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$GREP"; then - ac_path_GREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in grep ggrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue -# Check for GNU ac_path_GREP and select it if it is found. - # Check for GNU $ac_path_GREP -case `"$ac_path_GREP" --version 2>&1` in -*GNU*) - ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'GREP' >> "conftest.nl" - "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_GREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_GREP="$ac_path_GREP" - ac_path_GREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_GREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_GREP"; then - as_fn_error $? 
"no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_GREP=$GREP -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 -$as_echo "$ac_cv_path_GREP" >&6; } - GREP="$ac_cv_path_GREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 -$as_echo_n "checking for egrep... " >&6; } -if ${ac_cv_path_EGREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 - then ac_cv_path_EGREP="$GREP -E" - else - if test -z "$EGREP"; then - ac_path_EGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in egrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue -# Check for GNU ac_path_EGREP and select it if it is found. - # Check for GNU $ac_path_EGREP -case `"$ac_path_EGREP" --version 2>&1` in -*GNU*) - ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'EGREP' >> "conftest.nl" - "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_EGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_EGREP="$ac_path_EGREP" - ac_path_EGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_EGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_EGREP"; then - as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_EGREP=$EGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 -$as_echo "$ac_cv_path_EGREP" >&6; } - EGREP="$ac_cv_path_EGREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... " >&6; } -if ${ac_cv_header_stdc+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <stdlib.h> -#include <stdarg.h> -#include <string.h> -#include <float.h> - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes -else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <string.h> - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include <stdlib.h> - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <ctype.h> -#include <stdlib.h> -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) -#endif - -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) -int -main () -{ - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - -else - ac_cv_header_stdc=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then - -$as_echo "#define STDC_HEADERS 1" >>confdefs.h - -fi - -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - -# The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. -# This bug is HP SR number 8606223364. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5 -$as_echo_n "checking size of void *... " >&6; } -if ${ac_cv_sizeof_void_p+:} false; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p" "$ac_includes_default"; then : - -else - if test "$ac_cv_type_void_p" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "cannot compute sizeof (void *) -See \`config.log' for more details" "$LINENO" 5; } - else - ac_cv_sizeof_void_p=0 - fi -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_void_p" >&5 -$as_echo "$ac_cv_sizeof_void_p" >&6; } - - - -cat >>confdefs.h <<_ACEOF -#define SIZEOF_VOID_P $ac_cv_sizeof_void_p -_ACEOF - - -if test "x${ac_cv_sizeof_void_p}" = "x8" ; then - LG_SIZEOF_PTR=3 -elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then - LG_SIZEOF_PTR=2 -else - as_fn_error $? "Unsupported pointer size: ${ac_cv_sizeof_void_p}" "$LINENO" 5 -fi -cat >>confdefs.h <<_ACEOF -#define LG_SIZEOF_PTR $LG_SIZEOF_PTR -_ACEOF - - -# The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. -# This bug is HP SR number 8606223364. 
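# Illustration only: a minimal sketch of the size-to-log2 mapping computed
# above for LG_SIZEOF_PTR and below for int, long and intmax_t, assuming the
# probed size is a power of two; "lg_sizeof" is a placeholder helper.
lg_sizeof () {
  case "$1" in
    16) echo 4 ;;
    8)  echo 3 ;;
    4)  echo 2 ;;
    *)  echo "unsupported size: $1" >&2; return 1 ;;
  esac
}
LG_SIZEOF_PTR=`lg_sizeof "${ac_cv_sizeof_void_p:-8}"` &&
  echo "#define LG_SIZEOF_PTR $LG_SIZEOF_PTR"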
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5 -$as_echo_n "checking size of int... " >&6; } -if ${ac_cv_sizeof_int+:} false; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int" "$ac_includes_default"; then : - -else - if test "$ac_cv_type_int" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "cannot compute sizeof (int) -See \`config.log' for more details" "$LINENO" 5; } - else - ac_cv_sizeof_int=0 - fi -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int" >&5 -$as_echo "$ac_cv_sizeof_int" >&6; } - - - -cat >>confdefs.h <<_ACEOF -#define SIZEOF_INT $ac_cv_sizeof_int -_ACEOF - - -if test "x${ac_cv_sizeof_int}" = "x8" ; then - LG_SIZEOF_INT=3 -elif test "x${ac_cv_sizeof_int}" = "x4" ; then - LG_SIZEOF_INT=2 -else - as_fn_error $? "Unsupported int size: ${ac_cv_sizeof_int}" "$LINENO" 5 -fi -cat >>confdefs.h <<_ACEOF -#define LG_SIZEOF_INT $LG_SIZEOF_INT -_ACEOF - - -# The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. -# This bug is HP SR number 8606223364. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5 -$as_echo_n "checking size of long... " >&6; } -if ${ac_cv_sizeof_long+:} false; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long" "$ac_includes_default"; then : - -else - if test "$ac_cv_type_long" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "cannot compute sizeof (long) -See \`config.log' for more details" "$LINENO" 5; } - else - ac_cv_sizeof_long=0 - fi -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long" >&5 -$as_echo "$ac_cv_sizeof_long" >&6; } - - - -cat >>confdefs.h <<_ACEOF -#define SIZEOF_LONG $ac_cv_sizeof_long -_ACEOF - - -if test "x${ac_cv_sizeof_long}" = "x8" ; then - LG_SIZEOF_LONG=3 -elif test "x${ac_cv_sizeof_long}" = "x4" ; then - LG_SIZEOF_LONG=2 -else - as_fn_error $? "Unsupported long size: ${ac_cv_sizeof_long}" "$LINENO" 5 -fi -cat >>confdefs.h <<_ACEOF -#define LG_SIZEOF_LONG $LG_SIZEOF_LONG -_ACEOF - - -# The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. -# This bug is HP SR number 8606223364. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of intmax_t" >&5 -$as_echo_n "checking size of intmax_t... 
" >&6; } -if ${ac_cv_sizeof_intmax_t+:} false; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (intmax_t))" "ac_cv_sizeof_intmax_t" "$ac_includes_default"; then : - -else - if test "$ac_cv_type_intmax_t" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "cannot compute sizeof (intmax_t) -See \`config.log' for more details" "$LINENO" 5; } - else - ac_cv_sizeof_intmax_t=0 - fi -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_intmax_t" >&5 -$as_echo "$ac_cv_sizeof_intmax_t" >&6; } - - - -cat >>confdefs.h <<_ACEOF -#define SIZEOF_INTMAX_T $ac_cv_sizeof_intmax_t -_ACEOF - - -if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then - LG_SIZEOF_INTMAX_T=4 -elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then - LG_SIZEOF_INTMAX_T=3 -elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then - LG_SIZEOF_INTMAX_T=2 -else - as_fn_error $? "Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}" "$LINENO" 5 -fi -cat >>confdefs.h <<_ACEOF -#define LG_SIZEOF_INTMAX_T $LG_SIZEOF_INTMAX_T -_ACEOF - - -ac_aux_dir= -for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do - if test -f "$ac_dir/install-sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install-sh -c" - break - elif test -f "$ac_dir/install.sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install.sh -c" - break - elif test -f "$ac_dir/shtool"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/shtool install -c" - break - fi -done -if test -z "$ac_aux_dir"; then - as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 -fi - -# These three variables are undocumented and unsupported, -# and are intended to be withdrawn in a future Autoconf release. -# They can cause serious problems if a builder's source tree is in a directory -# whose full name contains unusual characters. -ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. -ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. -ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. - - -# Make sure we can run config.sub. -$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || - as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 -$as_echo_n "checking build system type... " >&6; } -if ${ac_cv_build+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_build_alias=$build_alias -test "x$ac_build_alias" = x && - ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` -test "x$ac_build_alias" = x && - as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 -ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || - as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 -$as_echo "$ac_cv_build" >&6; } -case $ac_cv_build in -*-*-*) ;; -*) as_fn_error $? 
"invalid value of canonical build" "$LINENO" 5;; -esac -build=$ac_cv_build -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_build -shift -build_cpu=$1 -build_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -build_os=$* -IFS=$ac_save_IFS -case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 -$as_echo_n "checking host system type... " >&6; } -if ${ac_cv_host+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test "x$host_alias" = x; then - ac_cv_host=$ac_cv_build -else - ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || - as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 -$as_echo "$ac_cv_host" >&6; } -case $ac_cv_host in -*-*-*) ;; -*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; -esac -host=$ac_cv_host -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_host -shift -host_cpu=$1 -host_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -host_os=$* -IFS=$ac_save_IFS -case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac - - -CPU_SPINWAIT="" -case "${host_cpu}" in - i[345]86) - ;; - i686) - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __asm__ is compilable" >&5 -$as_echo_n "checking whether __asm__ is compilable... " >&6; } -if ${je_cv_asm+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -__asm__ volatile("pause"); return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_asm=yes -else - je_cv_asm=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_asm" >&5 -$as_echo "$je_cv_asm" >&6; } - - if test "x${je_cv_asm}" = "xyes" ; then - CPU_SPINWAIT='__asm__ volatile("pause")' - fi - ;; - x86_64) - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __asm__ syntax is compilable" >&5 -$as_echo_n "checking whether __asm__ syntax is compilable... " >&6; } -if ${je_cv_asm+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ -__asm__ volatile("pause"); return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_asm=yes -else - je_cv_asm=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_asm" >&5 -$as_echo "$je_cv_asm" >&6; } - - if test "x${je_cv_asm}" = "xyes" ; then - CPU_SPINWAIT='__asm__ volatile("pause")' - fi - ;; - *) - ;; -esac -cat >>confdefs.h <<_ACEOF -#define CPU_SPINWAIT $CPU_SPINWAIT -_ACEOF - - -LD_PRELOAD_VAR="LD_PRELOAD" -so="so" -importlib="${so}" -o="$ac_objext" -a="a" -exe="$ac_exeext" -libprefix="lib" -DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' -RPATH='-Wl,-rpath,$(1)' -SOREV="${so}.${rev}" -PIC_CFLAGS='-fPIC -DPIC' -CTARGET='-o $@' -LDTARGET='-o $@' -EXTRA_LDFLAGS= -MKLIB='ar crus $@' -CC_MM=1 - -default_munmap="1" -JEMALLOC_USABLE_SIZE_CONST="const" -case "${host}" in - *-*-darwin*) - CFLAGS="$CFLAGS" - abi="macho" - $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h - - RPATH="" - LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" - so="dylib" - importlib="${so}" - force_tls="0" - DSO_LDFLAGS='-shared -Wl,-dylib_install_name,$(@F)' - SOREV="${rev}.${so}" - ;; - *-*-freebsd*) - CFLAGS="$CFLAGS" - abi="elf" - $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h - - force_lazy_lock="1" - ;; - *-*-linux*) - CFLAGS="$CFLAGS" - CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" - abi="elf" - $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h - - $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h - - $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h - - JEMALLOC_USABLE_SIZE_CONST="" - default_munmap="0" - ;; - *-*-netbsd*) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking ABI" >&5 -$as_echo_n "checking ABI... " >&6; } - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __ELF__ -/* ELF */ -#else -#error aout -#endif - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - CFLAGS="$CFLAGS"; abi="elf" -else - abi="aout" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $abi" >&5 -$as_echo "$abi" >&6; } - $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h - - ;; - *-*-solaris2*) - CFLAGS="$CFLAGS" - abi="elf" - RPATH='-Wl,-R,$(1)' - CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS" - LIBS="$LIBS -lposix4 -lsocket -lnsl" - ;; - *-ibm-aix*) - if "$LG_SIZEOF_PTR" = "8"; then - LD_PRELOAD_VAR="LDR_PRELOAD64" - else - LD_PRELOAD_VAR="LDR_PRELOAD" - fi - abi="xcoff" - ;; - *-*-mingw*) - abi="pecoff" - force_tls="0" - RPATH="" - so="dll" - if test "x$je_cv_msvc" = "xyes" ; then - importlib="lib" - DSO_LDFLAGS="-LD" - EXTRA_LDFLAGS="-link -DEBUG" - CTARGET='-Fo$@' - LDTARGET='-Fe$@' - MKLIB='lib -nologo -out:$@' - CC_MM= - else - importlib="${so}" - DSO_LDFLAGS="-shared" - fi - a="lib" - libprefix="" - SOREV="${so}" - PIC_CFLAGS="" - ;; - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: Unsupported operating system: ${host}" >&5 -$as_echo "Unsupported operating system: ${host}" >&6; } - abi="elf" - ;; -esac -cat >>confdefs.h <<_ACEOF -#define JEMALLOC_USABLE_SIZE_CONST $JEMALLOC_USABLE_SIZE_CONST -_ACEOF - - - - - - - - - - - - - - - - - - - -if test "x$abi" != "xpecoff"; then - LIBS="$LIBS -lm" -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5 -$as_echo_n "checking whether __attribute__ syntax is compilable... 
" >&6; } -if ${je_cv_attribute+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -static __attribute__((unused)) void foo(void){} -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_attribute=yes -else - je_cv_attribute=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_attribute" >&5 -$as_echo "$je_cv_attribute" >&6; } - -if test "x${je_cv_attribute}" = "xyes" ; then - $as_echo "#define JEMALLOC_HAVE_ATTR " >>confdefs.h - - if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5 -$as_echo_n "checking whether compiler supports -fvisibility=hidden... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-fvisibility=hidden" -else - CFLAGS="${CFLAGS} -fvisibility=hidden" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - fi -fi -SAVED_CFLAGS="${CFLAGS}" - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 -$as_echo_n "checking whether compiler supports -Werror... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-Werror" -else - CFLAGS="${CFLAGS} -Werror" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tls_model attribute is compilable" >&5 -$as_echo_n "checking whether tls_model attribute is compilable... " >&6; } -if ${je_cv_tls_model+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -static __thread int - __attribute__((tls_model("initial-exec"))) foo; - foo = 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_tls_model=yes -else - je_cv_tls_model=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_tls_model" >&5 -$as_echo "$je_cv_tls_model" >&6; } - -CFLAGS="${SAVED_CFLAGS}" -if test "x${je_cv_tls_model}" = "xyes" ; then - $as_echo "#define JEMALLOC_TLS_MODEL __attribute__((tls_model(\"initial-exec\")))" >>confdefs.h - -else - $as_echo "#define JEMALLOC_TLS_MODEL " >>confdefs.h - -fi - - -# Check whether --with-rpath was given. -if test "${with_rpath+set}" = set; then : - withval=$with_rpath; if test "x$with_rpath" = "xno" ; then - RPATH_EXTRA= -else - RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`" -fi -else - RPATH_EXTRA= - -fi - - - -# Check whether --enable-autogen was given. 
-if test "${enable_autogen+set}" = set; then : - enableval=$enable_autogen; if test "x$enable_autogen" = "xno" ; then - enable_autogen="0" -else - enable_autogen="1" -fi - -else - enable_autogen="0" - -fi - - - -# Find a good install program. We prefer a C program (faster), -# so one script is as good as another. But avoid the broken or -# incompatible versions: -# SysV /etc/install, /usr/sbin/install -# SunOS /usr/etc/install -# IRIX /sbin/install -# AIX /bin/install -# AmigaOS /C/install, which installs bootblocks on floppy discs -# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag -# AFS /usr/afsws/bin/install, which mishandles nonexistent args -# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" -# OS/2's system install, which has a completely different semantic -# ./install, which can be erroneously created by make from ./install.sh. -# Reject install programs that cannot install multiple files. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 -$as_echo_n "checking for a BSD-compatible install... " >&6; } -if test -z "$INSTALL"; then -if ${ac_cv_path_install+:} false; then : - $as_echo_n "(cached) " >&6 -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - # Account for people who put trailing slashes in PATH elements. -case $as_dir/ in #(( - ./ | .// | /[cC]/* | \ - /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ - ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ - /usr/ucb/* ) ;; - *) - # OSF1 and SCO ODT 3.0 have their own names for install. - # Don't use installbsd from OSF since it installs stuff as root - # by default. - for ac_prog in ginstall scoinst install; do - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then - if test $ac_prog = install && - grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # AIX install. It has an incompatible calling convention. - : - elif test $ac_prog = install && - grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # program-specific install script used by HP pwplus--don't use. - : - else - rm -rf conftest.one conftest.two conftest.dir - echo one > conftest.one - echo two > conftest.two - mkdir conftest.dir - if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && - test -s conftest.one && test -s conftest.two && - test -s conftest.dir/conftest.one && - test -s conftest.dir/conftest.two - then - ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" - break 3 - fi - fi - fi - done - done - ;; -esac - - done -IFS=$as_save_IFS - -rm -rf conftest.one conftest.two conftest.dir - -fi - if test "${ac_cv_path_install+set}" = set; then - INSTALL=$ac_cv_path_install - else - # As a last resort, use the slow shell script. Don't cache a - # value for INSTALL within a source directory, because that will - # break other packages using the cache if that directory is - # removed, or if the value is a relative name. - INSTALL=$ac_install_sh - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 -$as_echo "$INSTALL" >&6; } - -# Use test -z because SunOS4 sh mishandles braces in ${var-val}. -# It thinks the first close brace ends the variable substitution. 
-test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' - -test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' - -test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. -set dummy ${ac_tool_prefix}ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_RANLIB+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$RANLIB"; then - ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -RANLIB=$ac_cv_prog_RANLIB -if test -n "$RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 -$as_echo "$RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_RANLIB"; then - ac_ct_RANLIB=$RANLIB - # Extract the first word of "ranlib", so it can be a program name with args. -set dummy ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_RANLIB"; then - ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_RANLIB="ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB -if test -n "$ac_ct_RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 -$as_echo "$ac_ct_RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_RANLIB" = x; then - RANLIB=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - RANLIB=$ac_ct_RANLIB - fi -else - RANLIB="$ac_cv_prog_RANLIB" -fi - -# Extract the first word of "ar", so it can be a program name with args. -set dummy ar; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_path_AR+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $AR in - [\\/]* | ?:[\\/]*) - ac_cv_path_AR="$AR" # Let the user override the test with a path. - ;; - *) - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_path_AR="$as_dir/$ac_word$ac_exec_ext" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - - test -z "$ac_cv_path_AR" && ac_cv_path_AR="false" - ;; -esac -fi -AR=$ac_cv_path_AR -if test -n "$AR"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 -$as_echo "$AR" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -# Extract the first word of "ld", so it can be a program name with args. -set dummy ld; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_path_LD+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $LD in - [\\/]* | ?:[\\/]*) - ac_cv_path_LD="$LD" # Let the user override the test with a path. - ;; - *) - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_path_LD="$as_dir/$ac_word$ac_exec_ext" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - - test -z "$ac_cv_path_LD" && ac_cv_path_LD="false" - ;; -esac -fi -LD=$ac_cv_path_LD -if test -n "$LD"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 -$as_echo "$LD" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -# Extract the first word of "autoconf", so it can be a program name with args. -set dummy autoconf; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_path_AUTOCONF+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $AUTOCONF in - [\\/]* | ?:[\\/]*) - ac_cv_path_AUTOCONF="$AUTOCONF" # Let the user override the test with a path. - ;; - *) - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_path_AUTOCONF="$as_dir/$ac_word$ac_exec_ext" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - - test -z "$ac_cv_path_AUTOCONF" && ac_cv_path_AUTOCONF="false" - ;; -esac -fi -AUTOCONF=$ac_cv_path_AUTOCONF -if test -n "$AUTOCONF"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AUTOCONF" >&5 -$as_echo "$AUTOCONF" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - -public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free malloc_usable_size malloc_stats_print mallctl mallctlnametomib mallctlbymib" - -ac_fn_c_check_func "$LINENO" "memalign" "ac_cv_func_memalign" -if test "x$ac_cv_func_memalign" = xyes; then : - $as_echo "#define JEMALLOC_OVERRIDE_MEMALIGN " >>confdefs.h - - public_syms="${public_syms} memalign" -fi - -ac_fn_c_check_func "$LINENO" "valloc" "ac_cv_func_valloc" -if test "x$ac_cv_func_valloc" = xyes; then : - $as_echo "#define JEMALLOC_OVERRIDE_VALLOC " >>confdefs.h - - public_syms="${public_syms} valloc" -fi - - -# Check whether --enable-experimental was given. -if test "${enable_experimental+set}" = set; then : - enableval=$enable_experimental; if test "x$enable_experimental" = "xno" ; then - enable_experimental="0" -else - enable_experimental="1" -fi - -else - enable_experimental="1" - -fi - -if test "x$enable_experimental" = "x1" ; then - $as_echo "#define JEMALLOC_EXPERIMENTAL " >>confdefs.h - - public_syms="${public_syms} allocm dallocm nallocm rallocm sallocm" -fi - - - -# Check whether --with-mangling was given. -if test "${with_mangling+set}" = set; then : - withval=$with_mangling; mangling_map="$with_mangling" -else - mangling_map="" -fi - -for nm in `echo ${mangling_map} |tr ',' ' '` ; do - k="`echo ${nm} |tr ':' ' ' |awk '{print $1}'`" - n="je_${k}" - m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'` - cat >>confdefs.h <<_ACEOF -#define ${n} ${m} -_ACEOF - - public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${k}\$" |tr '\n' ' '` -done - - -# Check whether --with-jemalloc_prefix was given. -if test "${with_jemalloc_prefix+set}" = set; then : - withval=$with_jemalloc_prefix; JEMALLOC_PREFIX="$with_jemalloc_prefix" -else - if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then - JEMALLOC_PREFIX="" -else - JEMALLOC_PREFIX="je_" -fi - -fi - -if test "x$JEMALLOC_PREFIX" != "x" ; then - JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` - cat >>confdefs.h <<_ACEOF -#define JEMALLOC_PREFIX "$JEMALLOC_PREFIX" -_ACEOF - - cat >>confdefs.h <<_ACEOF -#define JEMALLOC_CPREFIX "$JEMALLOC_CPREFIX" -_ACEOF - -fi -for stem in ${public_syms}; do - n="je_${stem}" - m="${JEMALLOC_PREFIX}${stem}" - cat >>confdefs.h <<_ACEOF -#define ${n} ${m} -_ACEOF - -done - - -# Check whether --with-export was given. -if test "${with_export+set}" = set; then : - withval=$with_export; if test "x$with_export" = "xno"; then - $as_echo "#define JEMALLOC_EXPORT /**/" >>confdefs.h - -fi - -fi - - - -# Check whether --with-private_namespace was given. 
-if test "${with_private_namespace+set}" = set; then : - withval=$with_private_namespace; JEMALLOC_PRIVATE_NAMESPACE="$with_private_namespace" -else - JEMALLOC_PRIVATE_NAMESPACE="" - -fi - -cat >>confdefs.h <<_ACEOF -#define JEMALLOC_PRIVATE_NAMESPACE "$JEMALLOC_PRIVATE_NAMESPACE" -_ACEOF - -if test "x$JEMALLOC_PRIVATE_NAMESPACE" != "x" ; then - cat >>confdefs.h <<_ACEOF -#define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) ${JEMALLOC_PRIVATE_NAMESPACE}##string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix -_ACEOF - -else - cat >>confdefs.h <<_ACEOF -#define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix -_ACEOF - -fi - - -# Check whether --with-install_suffix was given. -if test "${with_install_suffix+set}" = set; then : - withval=$with_install_suffix; INSTALL_SUFFIX="$with_install_suffix" -else - INSTALL_SUFFIX= - -fi - -install_suffix="$INSTALL_SUFFIX" - - -cfgoutputs_in="${srcroot}Makefile.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/html.xsl.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/manpages.xsl.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/jemalloc.xml.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc.h.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal.h.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/jemalloc_test.h.in" - -cfgoutputs_out="Makefile" -cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" -cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" -cfgoutputs_out="${cfgoutputs_out} doc/jemalloc${install_suffix}.xml" -cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc${install_suffix}.h" -cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h" -cfgoutputs_out="${cfgoutputs_out} test/jemalloc_test.h" - -cfgoutputs_tup="Makefile" -cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" -cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" -cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc${install_suffix}.xml:doc/jemalloc.xml.in" -cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc${install_suffix}.h:include/jemalloc/jemalloc.h.in" -cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h" -cfgoutputs_tup="${cfgoutputs_tup} test/jemalloc_test.h:test/jemalloc_test.h.in" - -cfghdrs_in="${srcroot}include/jemalloc/jemalloc_defs.h.in" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/size_classes.sh" - -cfghdrs_out="include/jemalloc/jemalloc_defs${install_suffix}.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h" - -cfghdrs_tup="include/jemalloc/jemalloc_defs${install_suffix}.h:include/jemalloc/jemalloc_defs.h.in" - -# Check whether --enable-cc-silence was given. -if test "${enable_cc_silence+set}" = set; then : - enableval=$enable_cc_silence; if test "x$enable_cc_silence" = "xno" ; then - enable_cc_silence="0" -else - enable_cc_silence="1" -fi - -else - enable_cc_silence="0" - -fi - -if test "x$enable_cc_silence" = "x1" ; then - $as_echo "#define JEMALLOC_CC_SILENCE " >>confdefs.h - -fi - -# Check whether --enable-debug was given. 
-if test "${enable_debug+set}" = set; then : - enableval=$enable_debug; if test "x$enable_debug" = "xno" ; then - enable_debug="0" -else - enable_debug="1" -fi - -else - enable_debug="0" - -fi - -if test "x$enable_debug" = "x1" ; then - $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h - - enable_ivsalloc="1" -fi - - -# Check whether --enable-ivsalloc was given. -if test "${enable_ivsalloc+set}" = set; then : - enableval=$enable_ivsalloc; if test "x$enable_ivsalloc" = "xno" ; then - enable_ivsalloc="0" -else - enable_ivsalloc="1" -fi - -else - enable_ivsalloc="0" - -fi - -if test "x$enable_ivsalloc" = "x1" ; then - $as_echo "#define JEMALLOC_IVSALLOC " >>confdefs.h - -fi - -if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then - optimize="no" - echo "$EXTRA_CFLAGS" | grep "\-O" >/dev/null || optimize="yes" - if test "x${optimize}" = "xyes" ; then - if test "x$GCC" = "xyes" ; then - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O3" >&5 -$as_echo_n "checking whether compiler supports -O3... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-O3" -else - CFLAGS="${CFLAGS} -O3" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -funroll-loops" >&5 -$as_echo_n "checking whether compiler supports -funroll-loops... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-funroll-loops" -else - CFLAGS="${CFLAGS} -funroll-loops" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - elif test "x$je_cv_msvc" = "xyes" ; then - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O2" >&5 -$as_echo_n "checking whether compiler supports -O2... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-O2" -else - CFLAGS="${CFLAGS} -O2" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - else - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O" >&5 -$as_echo_n "checking whether compiler supports -O... " >&6; } -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="-O" -else - CFLAGS="${CFLAGS} -O" -fi -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - - -int -main () -{ - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - CFLAGS="${TCFLAGS}" - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - - fi - fi -fi - -# Check whether --enable-stats was given. -if test "${enable_stats+set}" = set; then : - enableval=$enable_stats; if test "x$enable_stats" = "xno" ; then - enable_stats="0" -else - enable_stats="1" -fi - -else - enable_stats="1" - -fi - -if test "x$enable_stats" = "x1" ; then - $as_echo "#define JEMALLOC_STATS " >>confdefs.h - -fi - - -# Check whether --enable-prof was given. -if test "${enable_prof+set}" = set; then : - enableval=$enable_prof; if test "x$enable_prof" = "xno" ; then - enable_prof="0" -else - enable_prof="1" -fi - -else - enable_prof="0" - -fi - -if test "x$enable_prof" = "x1" ; then - backtrace_method="" -else - backtrace_method="N/A" -fi - -# Check whether --enable-prof-libunwind was given. -if test "${enable_prof_libunwind+set}" = set; then : - enableval=$enable_prof_libunwind; if test "x$enable_prof_libunwind" = "xno" ; then - enable_prof_libunwind="0" -else - enable_prof_libunwind="1" -fi - -else - enable_prof_libunwind="0" - -fi - - -# Check whether --with-static_libunwind was given. -if test "${with_static_libunwind+set}" = set; then : - withval=$with_static_libunwind; if test "x$with_static_libunwind" = "xno" ; then - LUNWIND="-lunwind" -else - if test ! -f "$with_static_libunwind" ; then - as_fn_error $? "Static libunwind not found: $with_static_libunwind" "$LINENO" 5 - fi - LUNWIND="$with_static_libunwind" -fi -else - LUNWIND="-lunwind" - -fi - -if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then - for ac_header in libunwind.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "libunwind.h" "ac_cv_header_libunwind_h" "$ac_includes_default" -if test "x$ac_cv_header_libunwind_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBUNWIND_H 1 -_ACEOF - -else - enable_prof_libunwind="0" -fi - -done - - if test "x$LUNWIND" = "x-lunwind" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for backtrace in -lunwind" >&5 -$as_echo_n "checking for backtrace in -lunwind... " >&6; } -if ${ac_cv_lib_unwind_backtrace+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lunwind $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char backtrace (); -int -main () -{ -return backtrace (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_unwind_backtrace=yes -else - ac_cv_lib_unwind_backtrace=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_unwind_backtrace" >&5 -$as_echo "$ac_cv_lib_unwind_backtrace" >&6; } -if test "x$ac_cv_lib_unwind_backtrace" = xyes; then : - LIBS="$LIBS $LUNWIND" -else - enable_prof_libunwind="0" -fi - - else - LIBS="$LIBS $LUNWIND" - fi - if test "x${enable_prof_libunwind}" = "x1" ; then - backtrace_method="libunwind" - $as_echo "#define JEMALLOC_PROF_LIBUNWIND " >>confdefs.h - - fi -fi - -# Check whether --enable-prof-libgcc was given. -if test "${enable_prof_libgcc+set}" = set; then : - enableval=$enable_prof_libgcc; if test "x$enable_prof_libgcc" = "xno" ; then - enable_prof_libgcc="0" -else - enable_prof_libgcc="1" -fi - -else - enable_prof_libgcc="1" - -fi - -if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ - -a "x$GCC" = "xyes" ; then - for ac_header in unwind.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "unwind.h" "ac_cv_header_unwind_h" "$ac_includes_default" -if test "x$ac_cv_header_unwind_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_UNWIND_H 1 -_ACEOF - -else - enable_prof_libgcc="0" -fi - -done - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Unwind_Backtrace in -lgcc" >&5 -$as_echo_n "checking for _Unwind_Backtrace in -lgcc... " >&6; } -if ${ac_cv_lib_gcc__Unwind_Backtrace+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lgcc $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char _Unwind_Backtrace (); -int -main () -{ -return _Unwind_Backtrace (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_gcc__Unwind_Backtrace=yes -else - ac_cv_lib_gcc__Unwind_Backtrace=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gcc__Unwind_Backtrace" >&5 -$as_echo "$ac_cv_lib_gcc__Unwind_Backtrace" >&6; } -if test "x$ac_cv_lib_gcc__Unwind_Backtrace" = xyes; then : - LIBS="$LIBS -lgcc" -else - enable_prof_libgcc="0" -fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking libgcc-based backtracing reliability on ${host_cpu}" >&5 -$as_echo_n "checking libgcc-based backtracing reliability on ${host_cpu}... " >&6; } - case "${host_cpu}" in - i[3456]86) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unreliable" >&5 -$as_echo "unreliable" >&6; } - enable_prof_libgcc="0"; - ;; - x86_64) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: reliable" >&5 -$as_echo "reliable" >&6; } - ;; - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unreliable" >&5 -$as_echo "unreliable" >&6; } - enable_prof_libgcc="0"; - ;; - esac - if test "x${enable_prof_libgcc}" = "x1" ; then - backtrace_method="libgcc" - $as_echo "#define JEMALLOC_PROF_LIBGCC " >>confdefs.h - - fi -else - enable_prof_libgcc="0" -fi - -# Check whether --enable-prof-gcc was given. 
-if test "${enable_prof_gcc+set}" = set; then : - enableval=$enable_prof_gcc; if test "x$enable_prof_gcc" = "xno" ; then - enable_prof_gcc="0" -else - enable_prof_gcc="1" -fi - -else - enable_prof_gcc="1" - -fi - -if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ - -a "x$GCC" = "xyes" ; then - backtrace_method="gcc intrinsics" - $as_echo "#define JEMALLOC_PROF_GCC " >>confdefs.h - -else - enable_prof_gcc="0" -fi - -if test "x$backtrace_method" = "x" ; then - backtrace_method="none (disabling profiling)" - enable_prof="0" -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking configured backtracing method" >&5 -$as_echo_n "checking configured backtracing method... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $backtrace_method" >&5 -$as_echo "$backtrace_method" >&6; } -if test "x$enable_prof" = "x1" ; then - if test "x${force_tls}" = "x0" ; then - as_fn_error $? "Heap profiling requires TLS" "$LINENO" 5; - fi - force_tls="1" - $as_echo "#define JEMALLOC_PROF " >>confdefs.h - -fi - - -# Check whether --enable-tcache was given. -if test "${enable_tcache+set}" = set; then : - enableval=$enable_tcache; if test "x$enable_tcache" = "xno" ; then - enable_tcache="0" -else - enable_tcache="1" -fi - -else - enable_tcache="1" - -fi - -if test "x$enable_tcache" = "x1" ; then - $as_echo "#define JEMALLOC_TCACHE " >>confdefs.h - -fi - - -# Check whether --enable-mremap was given. -if test "${enable_mremap+set}" = set; then : - enableval=$enable_mremap; if test "x$enable_mremap" = "xno" ; then - enable_mremap="0" -else - enable_mremap="1" -fi - -else - enable_mremap="0" - -fi - -if test "x$enable_mremap" = "x1" ; then - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mremap(...MREMAP_FIXED...) is compilable" >&5 -$as_echo_n "checking whether mremap(...MREMAP_FIXED...) is compilable... " >&6; } -if ${je_cv_mremap_fixed+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#define _GNU_SOURCE -#include <sys/mman.h> - -int -main () -{ - -void *p = mremap((void *)0, 0, 0, MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0); - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_mremap_fixed=yes -else - je_cv_mremap_fixed=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_mremap_fixed" >&5 -$as_echo "$je_cv_mremap_fixed" >&6; } - - if test "x${je_cv_mremap_fixed}" = "xno" ; then - enable_mremap="0" - fi -fi -if test "x$enable_mremap" = "x1" ; then - $as_echo "#define JEMALLOC_MREMAP " >>confdefs.h - -fi - - -# Check whether --enable-munmap was given. -if test "${enable_munmap+set}" = set; then : - enableval=$enable_munmap; if test "x$enable_munmap" = "xno" ; then - enable_munmap="0" -else - enable_munmap="1" -fi - -else - enable_munmap="${default_munmap}" - -fi - -if test "x$enable_munmap" = "x1" ; then - $as_echo "#define JEMALLOC_MUNMAP " >>confdefs.h - -fi - - -# Check whether --enable-dss was given. 
-if test "${enable_dss+set}" = set; then : - enableval=$enable_dss; if test "x$enable_dss" = "xno" ; then - enable_dss="0" -else - enable_dss="1" -fi - -else - enable_dss="0" - -fi - -ac_fn_c_check_func "$LINENO" "sbrk" "ac_cv_func_sbrk" -if test "x$ac_cv_func_sbrk" = xyes; then : - have_sbrk="1" -else - have_sbrk="0" -fi - -if test "x$have_sbrk" = "x1" ; then - $as_echo "#define JEMALLOC_HAVE_SBRK " >>confdefs.h - -else - enable_dss="0" -fi - -if test "x$enable_dss" = "x1" ; then - $as_echo "#define JEMALLOC_DSS " >>confdefs.h - -fi - - -# Check whether --enable-fill was given. -if test "${enable_fill+set}" = set; then : - enableval=$enable_fill; if test "x$enable_fill" = "xno" ; then - enable_fill="0" -else - enable_fill="1" -fi - -else - enable_fill="1" - -fi - -if test "x$enable_fill" = "x1" ; then - $as_echo "#define JEMALLOC_FILL " >>confdefs.h - -fi - - -# Check whether --enable-utrace was given. -if test "${enable_utrace+set}" = set; then : - enableval=$enable_utrace; if test "x$enable_utrace" = "xno" ; then - enable_utrace="0" -else - enable_utrace="1" -fi - -else - enable_utrace="0" - -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether utrace(2) is compilable" >&5 -$as_echo_n "checking whether utrace(2) is compilable... " >&6; } -if ${je_cv_utrace+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#include <sys/types.h> -#include <sys/param.h> -#include <sys/time.h> -#include <sys/uio.h> -#include <sys/ktrace.h> - -int -main () -{ - - utrace((void *)0, 0); - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_utrace=yes -else - je_cv_utrace=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_utrace" >&5 -$as_echo "$je_cv_utrace" >&6; } - -if test "x${je_cv_utrace}" = "xno" ; then - enable_utrace="0" -fi -if test "x$enable_utrace" = "x1" ; then - $as_echo "#define JEMALLOC_UTRACE " >>confdefs.h - -fi - - -# Check whether --enable-valgrind was given. -if test "${enable_valgrind+set}" = set; then : - enableval=$enable_valgrind; if test "x$enable_valgrind" = "xno" ; then - enable_valgrind="0" -else - enable_valgrind="1" -fi - -else - enable_valgrind="1" - -fi - -if test "x$enable_valgrind" = "x1" ; then - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether valgrind is compilable" >&5 -$as_echo_n "checking whether valgrind is compilable... " >&6; } -if ${je_cv_valgrind+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#include <valgrind/valgrind.h> -#include <valgrind/memcheck.h> - -#if !defined(VALGRIND_RESIZEINPLACE_BLOCK) -# error "Incompatible Valgrind version" -#endif - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_valgrind=yes -else - je_cv_valgrind=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_valgrind" >&5 -$as_echo "$je_cv_valgrind" >&6; } - - if test "x${je_cv_valgrind}" = "xno" ; then - enable_valgrind="0" - fi - if test "x$enable_valgrind" = "x1" ; then - $as_echo "#define JEMALLOC_VALGRIND " >>confdefs.h - - fi -fi - - -# Check whether --enable-xmalloc was given. 
-if test "${enable_xmalloc+set}" = set; then : - enableval=$enable_xmalloc; if test "x$enable_xmalloc" = "xno" ; then - enable_xmalloc="0" -else - enable_xmalloc="1" -fi - -else - enable_xmalloc="0" - -fi - -if test "x$enable_xmalloc" = "x1" ; then - $as_echo "#define JEMALLOC_XMALLOC " >>confdefs.h - -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking STATIC_PAGE_SHIFT" >&5 -$as_echo_n "checking STATIC_PAGE_SHIFT... " >&6; } -if ${je_cv_static_page_shift+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test "$cross_compiling" = yes; then : - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot run test program while cross compiling -See \`config.log' for more details" "$LINENO" 5; } -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#include <strings.h> -#ifdef _WIN32 -#include <windows.h> -#else -#include <unistd.h> -#endif -#include <stdio.h> - -int -main () -{ - - int result; - FILE *f; - -#ifdef _WIN32 - SYSTEM_INFO si; - GetSystemInfo(&si); - result = si.dwPageSize; -#else - result = sysconf(_SC_PAGESIZE); -#endif - if (result == -1) { - return 1; - } - result = ffsl(result) - 1; - - f = fopen("conftest.out", "w"); - if (f == NULL) { - return 1; - } - fprintf(f, "%d\n", result); - fclose(f); - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - je_cv_static_page_shift=`cat conftest.out` -else - je_cv_static_page_shift=undefined -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_static_page_shift" >&5 -$as_echo "$je_cv_static_page_shift" >&6; } - -if test "x$je_cv_static_page_shift" != "xundefined"; then - cat >>confdefs.h <<_ACEOF -#define STATIC_PAGE_SHIFT $je_cv_static_page_shift -_ACEOF - -else - as_fn_error $? "cannot determine value for STATIC_PAGE_SHIFT" "$LINENO" 5 -fi - - -if test -d "${srcroot}.git" ; then - git describe --long --abbrev=40 > ${srcroot}VERSION -fi -jemalloc_version=`cat ${srcroot}VERSION` -jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'` -jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'` -jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $3}'` -jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $4}'` -jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $5}'` - - - - - - - - -if test "x$abi" != "xpecoff" ; then - for ac_header in pthread.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default" -if test "x$ac_cv_header_pthread_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_PTHREAD_H 1 -_ACEOF - -else - as_fn_error $? "pthread.h is missing" "$LINENO" 5 -fi - -done - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 -$as_echo_n "checking for pthread_create in -lpthread... " >&6; } -if ${ac_cv_lib_pthread_pthread_create+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lpthread $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char pthread_create (); -int -main () -{ -return pthread_create (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_pthread_pthread_create=yes -else - ac_cv_lib_pthread_pthread_create=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 -$as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } -if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : - LIBS="$LIBS -lpthread" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pthread_create" >&5 -$as_echo_n "checking for library containing pthread_create... " >&6; } -if ${ac_cv_search_pthread_create+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char pthread_create (); -int -main () -{ -return pthread_create (); - ; - return 0; -} -_ACEOF -for ac_lib in '' ; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_pthread_create=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_pthread_create+:} false; then : - break -fi -done -if ${ac_cv_search_pthread_create+:} false; then : - -else - ac_cv_search_pthread_create=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pthread_create" >&5 -$as_echo "$ac_cv_search_pthread_create" >&6; } -ac_res=$ac_cv_search_pthread_create -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -else - as_fn_error $? "libpthread is missing" "$LINENO" 5 -fi - -fi - -fi - -CPPFLAGS="$CPPFLAGS -D_REENTRANT" - -ac_fn_c_check_func "$LINENO" "_malloc_thread_cleanup" "ac_cv_func__malloc_thread_cleanup" -if test "x$ac_cv_func__malloc_thread_cleanup" = xyes; then : - have__malloc_thread_cleanup="1" -else - have__malloc_thread_cleanup="0" - -fi - -if test "x$have__malloc_thread_cleanup" = "x1" ; then - $as_echo "#define JEMALLOC_MALLOC_THREAD_CLEANUP " >>confdefs.h - - force_tls="1" -fi - -ac_fn_c_check_func "$LINENO" "_pthread_mutex_init_calloc_cb" "ac_cv_func__pthread_mutex_init_calloc_cb" -if test "x$ac_cv_func__pthread_mutex_init_calloc_cb" = xyes; then : - have__pthread_mutex_init_calloc_cb="1" -else - have__pthread_mutex_init_calloc_cb="0" - -fi - -if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then - $as_echo "#define JEMALLOC_MUTEX_INIT_CB 1" >>confdefs.h - -fi - -# Check whether --enable-lazy_lock was given. 
-if test "${enable_lazy_lock+set}" = set; then : - enableval=$enable_lazy_lock; if test "x$enable_lazy_lock" = "xno" ; then - enable_lazy_lock="0" -else - enable_lazy_lock="1" -fi - -else - enable_lazy_lock="0" - -fi - -if test "x$enable_lazy_lock" = "x0" -a "x${force_lazy_lock}" = "x1" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5 -$as_echo "Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&6; } - enable_lazy_lock="1" -fi -if test "x$enable_lazy_lock" = "x1" ; then - if test "x$abi" != "xpecoff" ; then - for ac_header in dlfcn.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default" -if test "x$ac_cv_header_dlfcn_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_DLFCN_H 1 -_ACEOF - -else - as_fn_error $? "dlfcn.h is missing" "$LINENO" 5 -fi - -done - - ac_fn_c_check_func "$LINENO" "dlsym" "ac_cv_func_dlsym" -if test "x$ac_cv_func_dlsym" = xyes; then : - -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlsym in -ldl" >&5 -$as_echo_n "checking for dlsym in -ldl... " >&6; } -if ${ac_cv_lib_dl_dlsym+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldl $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlsym (); -int -main () -{ -return dlsym (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dl_dlsym=yes -else - ac_cv_lib_dl_dlsym=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlsym" >&5 -$as_echo "$ac_cv_lib_dl_dlsym" >&6; } -if test "x$ac_cv_lib_dl_dlsym" = xyes; then : - LIBS="$LIBS -ldl" -else - as_fn_error $? "libdl is missing" "$LINENO" 5 -fi - - -fi - - fi - $as_echo "#define JEMALLOC_LAZY_LOCK " >>confdefs.h - -fi - - -# Check whether --enable-tls was given. -if test "${enable_tls+set}" = set; then : - enableval=$enable_tls; if test "x$enable_tls" = "xno" ; then - enable_tls="0" -else - enable_tls="1" -fi - -else - enable_tls="1" - -fi - -if test "x${enable_tls}" = "x0" -a "x${force_tls}" = "x1" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5 -$as_echo "Forcing TLS to avoid allocator/threading bootstrap issues" >&6; } - enable_tls="1" -fi -if test "x${enable_tls}" = "x1" -a "x${force_tls}" = "x0" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5 -$as_echo "Forcing no TLS to avoid allocator/threading bootstrap issues" >&6; } - enable_tls="0" -fi -if test "x${enable_tls}" = "x1" ; then -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5 -$as_echo_n "checking for TLS... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - - __thread int x; - -int -main () -{ - - x = 42; - - return 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - enable_tls="0" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi - -if test "x${enable_tls}" = "x1" ; then - cat >>confdefs.h <<_ACEOF -#define JEMALLOC_TLS -_ACEOF - -elif test "x${force_tls}" = "x1" ; then - as_fn_error $? "Failed to configure TLS, which is mandatory for correct function" "$LINENO" 5 -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using ffsl is compilable" >&5 -$as_echo_n "checking whether a program using ffsl is compilable... " >&6; } -if ${je_cv_function_ffsl+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#include <stdio.h> -#include <strings.h> -#include <string.h> - -int -main () -{ - - { - int rv = ffsl(0x08); - printf("%d\n", rv); - } - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_function_ffsl=yes -else - je_cv_function_ffsl=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_function_ffsl" >&5 -$as_echo "$je_cv_function_ffsl" >&6; } - -if test "x${je_cv_function_ffsl}" != "xyes" ; then - as_fn_error $? "Cannot build without ffsl(3)" "$LINENO" 5 -fi - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether atomic(9) is compilable" >&5 -$as_echo_n "checking whether atomic(9) is compilable... " >&6; } -if ${je_cv_atomic9+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#include <sys/types.h> -#include <machine/atomic.h> -#include <inttypes.h> - -int -main () -{ - - { - uint32_t x32 = 0; - volatile uint32_t *x32p = &x32; - atomic_fetchadd_32(x32p, 1); - } - { - unsigned long xlong = 0; - volatile unsigned long *xlongp = &xlong; - atomic_fetchadd_long(xlongp, 1); - } - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_atomic9=yes -else - je_cv_atomic9=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_atomic9" >&5 -$as_echo "$je_cv_atomic9" >&6; } - -if test "x${je_cv_atomic9}" = "xyes" ; then - $as_echo "#define JEMALLOC_ATOMIC9 1" >>confdefs.h - -fi - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSAtomic*() is compilable" >&5 -$as_echo_n "checking whether Darwin OSAtomic*() is compilable... " >&6; } -if ${je_cv_osatomic+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -#include <libkern/OSAtomic.h> -#include <inttypes.h> - -int -main () -{ - - { - int32_t x32 = 0; - volatile int32_t *x32p = &x32; - OSAtomicAdd32(1, x32p); - } - { - int64_t x64 = 0; - volatile int64_t *x64p = &x64; - OSAtomicAdd64(1, x64p); - } - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_osatomic=yes -else - je_cv_osatomic=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_osatomic" >&5 -$as_echo "$je_cv_osatomic" >&6; } - -if test "x${je_cv_osatomic}" = "xyes" ; then - $as_echo "#define JEMALLOC_OSATOMIC " >>confdefs.h - -fi - - - - -if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to force 32-bit __sync_{add,sub}_and_fetch()" >&5 -$as_echo_n "checking whether to force 32-bit __sync_{add,sub}_and_fetch()... " >&6; } -if ${je_cv_sync_compare_and_swap_4+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - #include <stdint.h> - -int -main () -{ - - #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 - { - uint32_t x32 = 0; - __sync_add_and_fetch(&x32, 42); - __sync_sub_and_fetch(&x32, 1); - } - #else - #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 is defined, no need to force - #endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_sync_compare_and_swap_4=yes -else - je_cv_sync_compare_and_swap_4=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_sync_compare_and_swap_4" >&5 -$as_echo "$je_cv_sync_compare_and_swap_4" >&6; } - - if test "x${je_cv_sync_compare_and_swap_4}" = "xyes" ; then - $as_echo "#define JE_FORCE_SYNC_COMPARE_AND_SWAP_4 " >>confdefs.h - - fi - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to force 64-bit __sync_{add,sub}_and_fetch()" >&5 -$as_echo_n "checking whether to force 64-bit __sync_{add,sub}_and_fetch()... " >&6; } -if ${je_cv_sync_compare_and_swap_8+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - #include <stdint.h> - -int -main () -{ - - #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 - { - uint64_t x64 = 0; - __sync_add_and_fetch(&x64, 42); - __sync_sub_and_fetch(&x64, 1); - } - #else - #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 is defined, no need to force - #endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_sync_compare_and_swap_8=yes -else - je_cv_sync_compare_and_swap_8=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_sync_compare_and_swap_8" >&5 -$as_echo "$je_cv_sync_compare_and_swap_8" >&6; } - - if test "x${je_cv_sync_compare_and_swap_8}" = "xyes" ; then - $as_echo "#define JE_FORCE_SYNC_COMPARE_AND_SWAP_8 " >>confdefs.h - - fi - -fi - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSSpin*() is compilable" >&5 -$as_echo_n "checking whether Darwin OSSpin*() is compilable... " >&6; } -if ${je_cv_osspin+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -#include <libkern/OSAtomic.h> -#include <inttypes.h> - -int -main () -{ - - OSSpinLock lock = 0; - OSSpinLockLock(&lock); - OSSpinLockUnlock(&lock); - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - je_cv_osspin=yes -else - je_cv_osspin=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_osspin" >&5 -$as_echo "$je_cv_osspin" >&6; } - -if test "x${je_cv_osspin}" = "xyes" ; then - $as_echo "#define JEMALLOC_OSSPIN " >>confdefs.h - -fi - - -# Check whether --enable-zone-allocator was given. -if test "${enable_zone_allocator+set}" = set; then : - enableval=$enable_zone_allocator; if test "x$enable_zone_allocator" = "xno" ; then - enable_zone_allocator="0" -else - enable_zone_allocator="1" -fi - -else - if test "x${abi}" = "xmacho"; then - enable_zone_allocator="1" -fi - - -fi - - - -if test "x${enable_zone_allocator}" = "x1" ; then - if test "x${abi}" != "xmacho"; then - as_fn_error $? "--enable-zone-allocator is only supported on Darwin" "$LINENO" 5 - fi - $as_echo "#define JEMALLOC_IVSALLOC " >>confdefs.h - - $as_echo "#define JEMALLOC_ZONE " >>confdefs.h - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking malloc zone version" >&5 -$as_echo_n "checking malloc zone version... " >&6; } - - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <malloc/malloc.h> -int -main () -{ -static foo[sizeof(malloc_zone_t) == sizeof(void *) * 14 ? 1 : -1] - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - JEMALLOC_ZONE_VERSION=3 -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <malloc/malloc.h> -int -main () -{ -static foo[sizeof(malloc_zone_t) == sizeof(void *) * 15 ? 1 : -1] - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - JEMALLOC_ZONE_VERSION=5 -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <malloc/malloc.h> -int -main () -{ -static foo[sizeof(malloc_zone_t) == sizeof(void *) * 16 ? 1 : -1] - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <malloc/malloc.h> -int -main () -{ -static foo[sizeof(malloc_introspection_t) == sizeof(void *) * 9 ? 1 : -1] - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - JEMALLOC_ZONE_VERSION=6 -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <malloc/malloc.h> -int -main () -{ -static foo[sizeof(malloc_introspection_t) == sizeof(void *) * 13 ? 1 : -1] - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - JEMALLOC_ZONE_VERSION=7 -else - JEMALLOC_ZONE_VERSION= - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <malloc/malloc.h> -int -main () -{ -static foo[sizeof(malloc_zone_t) == sizeof(void *) * 17 ? 1 : -1] - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - JEMALLOC_ZONE_VERSION=8 -else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <malloc/malloc.h> -int -main () -{ -static foo[sizeof(malloc_zone_t) > sizeof(void *) * 17 ? 
1 : -1] - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - JEMALLOC_ZONE_VERSION=9 -else - JEMALLOC_ZONE_VERSION= - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - if test "x${JEMALLOC_ZONE_VERSION}" = "x"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } - as_fn_error $? "Unsupported malloc zone version" "$LINENO" 5 - fi - if test "${JEMALLOC_ZONE_VERSION}" = 9; then - JEMALLOC_ZONE_VERSION=8 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: > 8" >&5 -$as_echo "> 8" >&6; } - else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $JEMALLOC_ZONE_VERSION" >&5 -$as_echo "$JEMALLOC_ZONE_VERSION" >&6; } - fi - cat >>confdefs.h <<_ACEOF -#define JEMALLOC_ZONE_VERSION $JEMALLOC_ZONE_VERSION -_ACEOF - -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5 -$as_echo_n "checking for stdbool.h that conforms to C99... " >&6; } -if ${ac_cv_header_stdbool_h+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#include <stdbool.h> -#ifndef bool - "error: bool is not defined" -#endif -#ifndef false - "error: false is not defined" -#endif -#if false - "error: false is not 0" -#endif -#ifndef true - "error: true is not defined" -#endif -#if true != 1 - "error: true is not 1" -#endif -#ifndef __bool_true_false_are_defined - "error: __bool_true_false_are_defined is not defined" -#endif - - struct s { _Bool s: 1; _Bool t; } s; - - char a[true == 1 ? 1 : -1]; - char b[false == 0 ? 1 : -1]; - char c[__bool_true_false_are_defined == 1 ? 1 : -1]; - char d[(bool) 0.5 == true ? 1 : -1]; - /* See body of main program for 'e'. */ - char f[(_Bool) 0.0 == false ? 1 : -1]; - char g[true]; - char h[sizeof (_Bool)]; - char i[sizeof s.t]; - enum { j = false, k = true, l = false * true, m = true * 256 }; - /* The following fails for - HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ - _Bool n[m]; - char o[sizeof n == m * sizeof n[0] ? 1 : -1]; - char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; - /* Catch a bug in an HP-UX C compiler. See - http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html - http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html - */ - _Bool q = true; - _Bool *pq = &q; - -int -main () -{ - - bool e = &s; - *pq |= q; - *pq |= ! q; - /* Refer to every declared value, to avoid compiler optimizations. 
*/ - return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l - + !m + !n + !o + !p + !q + !pq); - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdbool_h=yes -else - ac_cv_header_stdbool_h=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 -$as_echo "$ac_cv_header_stdbool_h" >&6; } -ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" -if test "x$ac_cv_type__Bool" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE__BOOL 1 -_ACEOF - - -fi - -if test $ac_cv_header_stdbool_h = yes; then - -$as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h - -fi - - -ac_config_commands="$ac_config_commands include/jemalloc/internal/size_classes.h" - - - - -ac_config_headers="$ac_config_headers $cfghdrs_tup" - - -ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc.sh" - - - -cat >confcache <<\_ACEOF -# This file is a shell script that caches the results of configure -# tests run on this system so they can be shared between configure -# scripts and configure runs, see configure's option --config-cache. -# It is not useful on other systems. If it contains results you don't -# want to keep, you may remove or edit it. -# -# config.status only pays attention to the cache file if you give it -# the --recheck option to rerun configure. -# -# `ac_cv_env_foo' variables (set or unset) will be overridden when -# loading this file, other *unset* `ac_cv_foo' will be assigned the -# following values. - -_ACEOF - -# The following way of writing the cache mishandles newlines in values, -# but we know of no workaround that is simple, portable, and efficient. -# So, we kill variables containing newlines. -# Ultrix sh set writes to stderr and can't be redirected directly, -# and sets the high bit in the cache file unless we assign to the vars. -( - for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - - (set) 2>&1 | - case $as_nl`(ac_space=' '; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes: double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \. - sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" - ;; #( - *) - # `set' quotes correctly as required by POSIX, so do not add quotes. - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) | - sed ' - /^ac_cv_env_/b end - t clear - :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ - t end - s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ - :end' >>confcache -if diff "$cache_file" confcache >/dev/null 2>&1; then :; else - if test -w "$cache_file"; then - if test "x$cache_file" != "x/dev/null"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 -$as_echo "$as_me: updating cache $cache_file" >&6;} - if test ! 
-f "$cache_file" || test -h "$cache_file"; then - cat confcache >"$cache_file" - else - case $cache_file in #( - */* | ?:*) - mv -f confcache "$cache_file"$$ && - mv -f "$cache_file"$$ "$cache_file" ;; #( - *) - mv -f confcache "$cache_file" ;; - esac - fi - fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 -$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} - fi -fi -rm -f confcache - -test "x$prefix" = xNONE && prefix=$ac_default_prefix -# Let make expand exec_prefix. -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' - -DEFS=-DHAVE_CONFIG_H - -ac_libobjs= -ac_ltlibobjs= -U= -for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue - # 1. Remove the extension, and $U if already installed. - ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`$as_echo "$ac_i" | sed "$ac_script"` - # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR - # will be set to the directory where LIBOBJS objects are built. - as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" - as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' -done -LIBOBJS=$ac_libobjs - -LTLIBOBJS=$ac_ltlibobjs - - - -: "${CONFIG_STATUS=./config.status}" -ac_write_fail=0 -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 -$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} -as_write_fail=0 -cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 -#! $SHELL -# Generated by $as_me. -# Run this file to recreate the current configuration. -# Compiler output produced by configure, useful for debugging -# configure, is in config.log if it exists. - -debug=false -ac_cs_recheck=false -ac_cs_silent=false - -SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. 
-if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. 
-as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -p' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -p' - fi -else - as_ln_s='cp -p' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. 
-as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' -else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in #( - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' -fi -as_executable_p=$as_test_x - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -exec 6>&1 -## ----------------------------------- ## -## Main body of $CONFIG_STATUS script. ## -## ----------------------------------- ## -_ASEOF -test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# Save the log message, to keep $0 and so on meaningful, and to -# report actual input values of CONFIG_FILES etc. instead of their -# values after options handling. -ac_log=" -This file was extended by $as_me, which was -generated by GNU Autoconf 2.68. Invocation command line was - - CONFIG_FILES = $CONFIG_FILES - CONFIG_HEADERS = $CONFIG_HEADERS - CONFIG_LINKS = $CONFIG_LINKS - CONFIG_COMMANDS = $CONFIG_COMMANDS - $ $0 $@ - -on `(hostname || uname -n) 2>/dev/null | sed 1q` -" - -_ACEOF - -case $ac_config_files in *" -"*) set x $ac_config_files; shift; ac_config_files=$*;; -esac - -case $ac_config_headers in *" -"*) set x $ac_config_headers; shift; ac_config_headers=$*;; -esac - - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# Files that config.status was made for. -config_files="$ac_config_files" -config_headers="$ac_config_headers" -config_commands="$ac_config_commands" - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -ac_cs_usage="\ -\`$as_me' instantiates files and other configuration actions -from templates according to the current configuration. Unless the files -and actions are specified as TAGs, all are instantiated by default. - -Usage: $0 [OPTION]... [TAG]... 
- - -h, --help print this help, then exit - -V, --version print version number and configuration settings, then exit - --config print configuration, then exit - -q, --quiet, --silent - do not print progress messages - -d, --debug don't remove temporary files - --recheck update $as_me by reconfiguring in the same conditions - --file=FILE[:TEMPLATE] - instantiate the configuration file FILE - --header=FILE[:TEMPLATE] - instantiate the configuration header FILE - -Configuration files: -$config_files - -Configuration headers: -$config_headers - -Configuration commands: -$config_commands - -Report bugs to the package provider." - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" -ac_cs_version="\\ -config.status -configured by $0, generated by GNU Autoconf 2.68, - with options \\"\$ac_cs_config\\" - -Copyright (C) 2010 Free Software Foundation, Inc. -This config.status script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it." - -ac_pwd='$ac_pwd' -srcdir='$srcdir' -INSTALL='$INSTALL' -test -n "\$AWK" || AWK=awk -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# The default lists apply if the user does not specify any file. -ac_need_defaults=: -while test $# != 0 -do - case $1 in - --*=?*) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` - ac_shift=: - ;; - --*=) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg= - ac_shift=: - ;; - *) - ac_option=$1 - ac_optarg=$2 - ac_shift=shift - ;; - esac - - case $ac_option in - # Handling of the options. - -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) - ac_cs_recheck=: ;; - --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - $as_echo "$ac_cs_version"; exit ;; - --config | --confi | --conf | --con | --co | --c ) - $as_echo "$ac_cs_config"; exit ;; - --debug | --debu | --deb | --de | --d | -d ) - debug=: ;; - --file | --fil | --fi | --f ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - '') as_fn_error $? "missing file argument" ;; - esac - as_fn_append CONFIG_FILES " '$ac_optarg'" - ac_need_defaults=false;; - --header | --heade | --head | --hea ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append CONFIG_HEADERS " '$ac_optarg'" - ac_need_defaults=false;; - --he | --h) - # Conflict between --help and --header - as_fn_error $? "ambiguous option: \`$1' -Try \`$0 --help' for more information.";; - --help | --hel | -h ) - $as_echo "$ac_cs_usage"; exit ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil | --si | --s) - ac_cs_silent=: ;; - - # This is an error. - -*) as_fn_error $? "unrecognized option: \`$1' -Try \`$0 --help' for more information." 
;; - - *) as_fn_append ac_config_targets " $1" - ac_need_defaults=false ;; - - esac - shift -done - -ac_configure_extra_args= - -if $ac_cs_silent; then - exec 6>/dev/null - ac_configure_extra_args="$ac_configure_extra_args --silent" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -if \$ac_cs_recheck; then - set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion - shift - \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 - CONFIG_SHELL='$SHELL' - export CONFIG_SHELL - exec "\$@" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -exec 5>>config.log -{ - echo - sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX -## Running $as_me. ## -_ASBOX - $as_echo "$ac_log" -} >&5 - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - -# Handling of arguments. -for ac_config_target in $ac_config_targets -do - case $ac_config_target in - "include/jemalloc/internal/size_classes.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/size_classes.h" ;; - "$cfghdrs_tup") CONFIG_HEADERS="$CONFIG_HEADERS $cfghdrs_tup" ;; - "$cfgoutputs_tup") CONFIG_FILES="$CONFIG_FILES $cfgoutputs_tup" ;; - "config.stamp") CONFIG_FILES="$CONFIG_FILES config.stamp" ;; - "bin/jemalloc.sh") CONFIG_FILES="$CONFIG_FILES bin/jemalloc.sh" ;; - - *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; - esac -done - - -# If the user did not use the arguments to specify the items to instantiate, -# then the envvar interface is used. Set only those that are not. -# We use the long form for the default assignment because of an extremely -# bizarre bug on SunOS 4.1.3. -if $ac_need_defaults; then - test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files - test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers - test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands -fi - -# Have a temporary directory for convenience. Make it in the build tree -# simply because there is no reason against having it here, and in addition, -# creating and moving files from /tmp can sometimes cause problems. -# Hook for its removal unless debugging. -# Note that there is a small window in which the directory will not be cleaned: -# after its creation but before its name has been assigned to `$tmp'. -$debug || -{ - tmp= ac_tmp= - trap 'exit_status=$? - : "${ac_tmp:=$tmp}" - { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status -' 0 - trap 'as_fn_exit 1' 1 2 13 15 -} -# Create a (secure) tmp directory for tmp files. - -{ - tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -d "$tmp" -} || -{ - tmp=./conf$$-$RANDOM - (umask 077 && mkdir "$tmp") -} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 -ac_tmp=$tmp - -# Set up the scripts for CONFIG_FILES section. -# No need to generate them if there are no CONFIG_FILES. -# This happens for instance with `./config.status config.h'. -if test -n "$CONFIG_FILES"; then - - -ac_cr=`echo X | tr X '\015'` -# On cygwin, bash can eat \r inside `` if the user requested igncr. -# But we know of no other shell where ac_cr would be empty at this -# point, so we can use a bashism as a fallback. 
-if test "x$ac_cr" = x; then - eval ac_cr=\$\'\\r\' -fi -ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null` -if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then - ac_cs_awk_cr='\\r' -else - ac_cs_awk_cr=$ac_cr -fi - -echo 'BEGIN {' >"$ac_tmp/subs1.awk" && -_ACEOF - - -{ - echo "cat >conf$$subs.awk <<_ACEOF" && - echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && - echo "_ACEOF" -} >conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 -ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` -ac_delim='%!_!# ' -for ac_last_try in false false false false false :; do - . ./conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - - ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` - if test $ac_delim_n = $ac_delim_num; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done -rm -f conf$$subs.sh - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && -_ACEOF -sed -n ' -h -s/^/S["/; s/!.*/"]=/ -p -g -s/^[^!]*!// -:repl -t repl -s/'"$ac_delim"'$// -t delim -:nl -h -s/\(.\{148\}\)..*/\1/ -t more1 -s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ -p -n -b repl -:more1 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t nl -:delim -h -s/\(.\{148\}\)..*/\1/ -t more2 -s/["\\]/\\&/g; s/^/"/; s/$/"/ -p -b -:more2 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t delim -' <conf$$subs.awk | sed ' -/^[^""]/{ - N - s/\n// -} -' >>$CONFIG_STATUS || ac_write_fail=1 -rm -f conf$$subs.awk -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACAWK -cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && - for (key in S) S_is_set[key] = 1 - FS = "" - -} -{ - line = $ 0 - nfields = split(line, field, "@") - substed = 0 - len = length(field[1]) - for (i = 2; i < nfields; i++) { - key = field[i] - keylen = length(key) - if (S_is_set[key]) { - value = S[key] - line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) - len += length(value) + length(field[++i]) - substed = 1 - } else - len += 1 + keylen - } - - print line -} - -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then - sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" -else - cat -fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ - || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 -_ACEOF - -# VPATH may cause trouble with some makes, so we remove sole $(srcdir), -# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and -# trailing colons and then remove the whole line if VPATH becomes empty -# (actually we leave an empty line to preserve line numbers). -if test "x$srcdir" = x.; then - ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ -h -s/// -s/^/:/ -s/[ ]*$/:/ -s/:\$(srcdir):/:/g -s/:\${srcdir}:/:/g -s/:@srcdir@:/:/g -s/^:*// -s/:*$// -x -s/\(=[ ]*\).*/\1/ -G -s/\n// -s/^[^=]*=[ ]*$// -}' -fi - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -fi # test -n "$CONFIG_FILES" - -# Set up the scripts for CONFIG_HEADERS section. -# No need to generate them if there are no CONFIG_HEADERS. -# This happens for instance with `./config.status Makefile'. -if test -n "$CONFIG_HEADERS"; then -cat >"$ac_tmp/defines.awk" <<\_ACAWK || -BEGIN { -_ACEOF - -# Transform confdefs.h into an awk script `defines.awk', embedded as -# here-document in config.status, that substitutes the proper values into -# config.h.in to produce config.h. 
- -# Create a delimiter string that does not exist in confdefs.h, to ease -# handling of long lines. -ac_delim='%!_!# ' -for ac_last_try in false false :; do - ac_tt=`sed -n "/$ac_delim/p" confdefs.h` - if test -z "$ac_tt"; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done - -# For the awk script, D is an array of macro values keyed by name, -# likewise P contains macro parameters if any. Preserve backslash -# newline sequences. - -ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* -sed -n ' -s/.\{148\}/&'"$ac_delim"'/g -t rset -:rset -s/^[ ]*#[ ]*define[ ][ ]*/ / -t def -d -:def -s/\\$// -t bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3"/p -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p -d -:bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3\\\\\\n"\\/p -t cont -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p -t cont -d -:cont -n -s/.\{148\}/&'"$ac_delim"'/g -t clear -:clear -s/\\$// -t bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/"/p -d -:bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p -b cont -' <confdefs.h | sed ' -s/'"$ac_delim"'/"\\\ -"/g' >>$CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - for (key in D) D_is_set[key] = 1 - FS = "" -} -/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { - line = \$ 0 - split(line, arg, " ") - if (arg[1] == "#") { - defundef = arg[2] - mac1 = arg[3] - } else { - defundef = substr(arg[1], 2) - mac1 = arg[2] - } - split(mac1, mac2, "(") #) - macro = mac2[1] - prefix = substr(line, 1, index(line, defundef) - 1) - if (D_is_set[macro]) { - # Preserve the white space surrounding the "#". - print prefix "define", macro P[macro] D[macro] - next - } else { - # Replace #undef with comments. This is necessary, for example, - # in the case of _POSIX_SOURCE, which is predefined and required - # on some systems where configure will not decide to define it. - if (defundef == "undef") { - print "/*", prefix defundef, macro, "*/" - next - } - } -} -{ print } -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 -fi # test -n "$CONFIG_HEADERS" - - -eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" -shift -for ac_tag -do - case $ac_tag in - :[FHLC]) ac_mode=$ac_tag; continue;; - esac - case $ac_mode$ac_tag in - :[FHL]*:*);; - :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; - :[FH]-) ac_tag=-:-;; - :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; - esac - ac_save_IFS=$IFS - IFS=: - set x $ac_tag - IFS=$ac_save_IFS - shift - ac_file=$1 - shift - - case $ac_mode in - :L) ac_source=$1;; - :[FH]) - ac_file_inputs= - for ac_f - do - case $ac_f in - -) ac_f="$ac_tmp/stdin";; - *) # Look for the file first in the build tree, then in the source tree - # (if the path is not absolute). The absolute path cannot be DOS-style, - # because $ac_f cannot contain `:'. - test -f "$ac_f" || - case $ac_f in - [\\/$]*) false;; - *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; - esac || - as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; - esac - case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac - as_fn_append ac_file_inputs " '$ac_f'" - done - - # Let's still pretend it is `configure' which instantiates (i.e., don't - # use $as_me), people would be surprised to read: - # /* config.h. 
Generated by config.status. */ - configure_input='Generated from '` - $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' - `' by configure.' - if test x"$ac_file" != x-; then - configure_input="$ac_file. $configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 -$as_echo "$as_me: creating $ac_file" >&6;} - fi - # Neutralize special characters interpreted by sed in replacement strings. - case $configure_input in #( - *\&* | *\|* | *\\* ) - ac_sed_conf_input=`$as_echo "$configure_input" | - sed 's/[\\\\&|]/\\\\&/g'`;; #( - *) ac_sed_conf_input=$configure_input;; - esac - - case $ac_tag in - *:-:* | *:-) cat >"$ac_tmp/stdin" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; - esac - ;; - esac - - ac_dir=`$as_dirname -- "$ac_file" || -$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$ac_file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - as_dir="$ac_dir"; as_fn_mkdir_p - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - - case $ac_mode in - :F) - # - # CONFIG_FILE - # - - case $INSTALL in - [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; - *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; - esac -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# If the template does not know about datarootdir, expand it. -# FIXME: This hack should be removed a few years after 2.60. -ac_datarootdir_hack=; ac_datarootdir_seen= -ac_sed_dataroot=' -/datarootdir/ { - p - q -} -/@datadir@/p -/@docdir@/p -/@infodir@/p -/@localedir@/p -/@mandir@/p' -case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in -*datarootdir*) ac_datarootdir_seen=yes;; -*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 -$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - ac_datarootdir_hack=' - s&@datadir@&$datadir&g - s&@docdir@&$docdir&g - s&@infodir@&$infodir&g - s&@localedir@&$localedir&g - s&@mandir@&$mandir&g - s&\\\${datarootdir}&$datarootdir&g' ;; -esac -_ACEOF - -# Neutralize VPATH when `$srcdir' = `.'. -# Shell code in configure.ac might set extrasub. -# FIXME: do we really want to maintain this feature? 
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_sed_extra="$ac_vpsub -$extrasub -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -:t -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b -s|@configure_input@|$ac_sed_conf_input|;t t -s&@top_builddir@&$ac_top_builddir_sub&;t t -s&@top_build_prefix@&$ac_top_build_prefix&;t t -s&@srcdir@&$ac_srcdir&;t t -s&@abs_srcdir@&$ac_abs_srcdir&;t t -s&@top_srcdir@&$ac_top_srcdir&;t t -s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t -s&@builddir@&$ac_builddir&;t t -s&@abs_builddir@&$ac_abs_builddir&;t t -s&@abs_top_builddir@&$ac_abs_top_builddir&;t t -s&@INSTALL@&$ac_INSTALL&;t t -$ac_datarootdir_hack -" -eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ - >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - -test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && - { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && - { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ - "$ac_tmp/out"`; test -z "$ac_out"; } && - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&5 -$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&2;} - - rm -f "$ac_tmp/stdin" - case $ac_file in - -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; - *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; - esac \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - ;; - :H) - # - # CONFIG_HEADER - # - if test x"$ac_file" != x-; then - { - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" - } >"$ac_tmp/config.h" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then - { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 -$as_echo "$as_me: $ac_file is unchanged" >&6;} - else - rm -f "$ac_file" - mv "$ac_tmp/config.h" "$ac_file" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - fi - else - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ - || as_fn_error $? "could not create -" "$LINENO" 5 - fi - ;; - - :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 -$as_echo "$as_me: executing $ac_file commands" >&6;} - ;; - esac - - - case $ac_file$ac_mode in - "include/jemalloc/internal/size_classes.h":C) - mkdir -p "include/jemalloc/internal" - "${srcdir}/include/jemalloc/internal/size_classes.sh" > "${objroot}include/jemalloc/internal/size_classes.h" - ;; - - esac -done # for ac_tag - - -as_fn_exit 0 -_ACEOF -ac_clean_files=$ac_clean_files_save - -test $ac_write_fail = 0 || - as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 - - -# configure is writing to config.log, and then calls config.status. -# config.status does its own redirection, appending to config.log. -# Unfortunately, on DOS this fails, as config.log is still kept open -# by configure, so config.status won't be able to write to it; its -# output is simply discarded. So we exec the FD to /dev/null, -# effectively closing config.log, so it can be properly (re)opened and -# appended to by config.status. When coming back to configure, we -# need to make the FD available again. 
-if test "$no_create" != yes; then - ac_cs_success=: - ac_config_status_args= - test "$silent" = yes && - ac_config_status_args="$ac_config_status_args --quiet" - exec 5>/dev/null - $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false - exec 5>>config.log - # Use ||, not &&, to avoid exiting from the if with $? = 1, which - # would make configure fail if this is the last instruction. - $ac_cs_success || as_fn_exit 1 -fi -if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 -$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5 -$as_echo "===============================================================================" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: jemalloc version : ${jemalloc_version}" >&5 -$as_echo "jemalloc version : ${jemalloc_version}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: library revision : ${rev}" >&5 -$as_echo "library revision : ${rev}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 -$as_echo "" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CC : ${CC}" >&5 -$as_echo "CC : ${CC}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS : ${CPPFLAGS}" >&5 -$as_echo "CPPFLAGS : ${CPPFLAGS}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CFLAGS : ${CFLAGS}" >&5 -$as_echo "CFLAGS : ${CFLAGS}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS : ${LDFLAGS}" >&5 -$as_echo "LDFLAGS : ${LDFLAGS}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS : ${LIBS}" >&5 -$as_echo "LIBS : ${LIBS}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: RPATH_EXTRA : ${RPATH_EXTRA}" >&5 -$as_echo "RPATH_EXTRA : ${RPATH_EXTRA}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 -$as_echo "" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLTPROC : ${XSLTPROC}" >&5 -$as_echo "XSLTPROC : ${XSLTPROC}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLROOT : ${XSLROOT}" >&5 -$as_echo "XSLROOT : ${XSLROOT}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 -$as_echo "" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: PREFIX : ${PREFIX}" >&5 -$as_echo "PREFIX : ${PREFIX}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: BINDIR : ${BINDIR}" >&5 -$as_echo "BINDIR : ${BINDIR}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: INCLUDEDIR : ${INCLUDEDIR}" >&5 -$as_echo "INCLUDEDIR : ${INCLUDEDIR}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBDIR : ${LIBDIR}" >&5 -$as_echo "LIBDIR : ${LIBDIR}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: DATADIR : ${DATADIR}" >&5 -$as_echo "DATADIR : ${DATADIR}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: MANDIR : ${MANDIR}" >&5 -$as_echo "MANDIR : ${MANDIR}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 -$as_echo "" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: srcroot : ${srcroot}" >&5 -$as_echo "srcroot : ${srcroot}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_srcroot : ${abs_srcroot}" >&5 -$as_echo "abs_srcroot : ${abs_srcroot}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: objroot : ${objroot}" >&5 -$as_echo "objroot : ${objroot}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: 
abs_objroot : ${abs_objroot}" >&5 -$as_echo "abs_objroot : ${abs_objroot}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 -$as_echo "" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&5 -$as_echo "JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PRIVATE_NAMESPACE" >&5 -$as_echo "JEMALLOC_PRIVATE_NAMESPACE" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: : ${JEMALLOC_PRIVATE_NAMESPACE}" >&5 -$as_echo " : ${JEMALLOC_PRIVATE_NAMESPACE}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: install_suffix : ${install_suffix}" >&5 -$as_echo "install_suffix : ${install_suffix}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: autogen : ${enable_autogen}" >&5 -$as_echo "autogen : ${enable_autogen}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: experimental : ${enable_experimental}" >&5 -$as_echo "experimental : ${enable_experimental}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: cc-silence : ${enable_cc_silence}" >&5 -$as_echo "cc-silence : ${enable_cc_silence}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: debug : ${enable_debug}" >&5 -$as_echo "debug : ${enable_debug}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: stats : ${enable_stats}" >&5 -$as_echo "stats : ${enable_stats}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof : ${enable_prof}" >&5 -$as_echo "prof : ${enable_prof}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libunwind : ${enable_prof_libunwind}" >&5 -$as_echo "prof-libunwind : ${enable_prof_libunwind}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libgcc : ${enable_prof_libgcc}" >&5 -$as_echo "prof-libgcc : ${enable_prof_libgcc}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-gcc : ${enable_prof_gcc}" >&5 -$as_echo "prof-gcc : ${enable_prof_gcc}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: tcache : ${enable_tcache}" >&5 -$as_echo "tcache : ${enable_tcache}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: fill : ${enable_fill}" >&5 -$as_echo "fill : ${enable_fill}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: utrace : ${enable_utrace}" >&5 -$as_echo "utrace : ${enable_utrace}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: valgrind : ${enable_valgrind}" >&5 -$as_echo "valgrind : ${enable_valgrind}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: xmalloc : ${enable_xmalloc}" >&5 -$as_echo "xmalloc : ${enable_xmalloc}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: mremap : ${enable_mremap}" >&5 -$as_echo "mremap : ${enable_mremap}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: munmap : ${enable_munmap}" >&5 -$as_echo "munmap : ${enable_munmap}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: dss : ${enable_dss}" >&5 -$as_echo "dss : ${enable_dss}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: lazy_lock : ${enable_lazy_lock}" >&5 -$as_echo "lazy_lock : ${enable_lazy_lock}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: tls : ${enable_tls}" >&5 -$as_echo "tls : ${enable_tls}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5 -$as_echo "===============================================================================" >&6; } diff --git a/extra/jemalloc/configure.ac b/extra/jemalloc/configure.ac deleted file mode 100644 index c270662b6d4..00000000000 
--- a/extra/jemalloc/configure.ac +++ /dev/null @@ -1,1333 +0,0 @@ -dnl Process this file with autoconf to produce a configure script. -AC_INIT([Makefile.in]) - -dnl ============================================================================ -dnl Custom macro definitions. - -dnl JE_CFLAGS_APPEND(cflag) -AC_DEFUN([JE_CFLAGS_APPEND], -[ -AC_MSG_CHECKING([whether compiler supports $1]) -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="$1" -else - CFLAGS="${CFLAGS} $1" -fi -AC_COMPILE_IFELSE([AC_LANG_PROGRAM( -[[ -]], [[ - return 0; -]])], - AC_MSG_RESULT([yes]), - AC_MSG_RESULT([no]) - [CFLAGS="${TCFLAGS}"] -) -]) - -dnl JE_COMPILABLE(label, hcode, mcode, rvar) -dnl -dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors -dnl cause failure. -AC_DEFUN([JE_COMPILABLE], -[ -AC_CACHE_CHECK([whether $1 is compilable], - [$4], - [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2], - [$3])], - [$4=yes], - [$4=no])]) -]) - -dnl ============================================================================ - -dnl Library revision. -rev=1 -AC_SUBST([rev]) - -srcroot=$srcdir -if test "x${srcroot}" = "x." ; then - srcroot="" -else - srcroot="${srcroot}/" -fi -AC_SUBST([srcroot]) -abs_srcroot="`cd \"${srcdir}\"; pwd`/" -AC_SUBST([abs_srcroot]) - -objroot="" -AC_SUBST([objroot]) -abs_objroot="`pwd`/" -AC_SUBST([abs_objroot]) - -dnl Munge install path variables. -if test "x$prefix" = "xNONE" ; then - prefix="/usr/local" -fi -if test "x$exec_prefix" = "xNONE" ; then - exec_prefix=$prefix -fi -PREFIX=$prefix -AC_SUBST([PREFIX]) -BINDIR=`eval echo $bindir` -BINDIR=`eval echo $BINDIR` -AC_SUBST([BINDIR]) -INCLUDEDIR=`eval echo $includedir` -INCLUDEDIR=`eval echo $INCLUDEDIR` -AC_SUBST([INCLUDEDIR]) -LIBDIR=`eval echo $libdir` -LIBDIR=`eval echo $LIBDIR` -AC_SUBST([LIBDIR]) -DATADIR=`eval echo $datadir` -DATADIR=`eval echo $DATADIR` -AC_SUBST([DATADIR]) -MANDIR=`eval echo $mandir` -MANDIR=`eval echo $MANDIR` -AC_SUBST([MANDIR]) - -dnl Support for building documentation. -AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH]) -if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then - DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl" -elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then - DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets" -else - dnl Documentation building will fail if this default gets used. - DEFAULT_XSLROOT="" -fi -AC_ARG_WITH([xslroot], - [AS_HELP_STRING([--with-xslroot=<path>], [XSL stylesheet root path])], [ -if test "x$with_xslroot" = "xno" ; then - XSLROOT="${DEFAULT_XSLROOT}" -else - XSLROOT="${with_xslroot}" -fi -], - XSLROOT="${DEFAULT_XSLROOT}" -) -AC_SUBST([XSLROOT]) - -dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise, -dnl just prevent autoconf from molesting CFLAGS. -CFLAGS=$CFLAGS -AC_PROG_CC -if test "x$GCC" != "xyes" ; then - AC_CACHE_CHECK([whether compiler is MSVC], - [je_cv_msvc], - [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], - [ -#ifndef _MSC_VER - int fail[-1]; -#endif -])], - [je_cv_msvc=yes], - [je_cv_msvc=no])]) -fi - -if test "x$CFLAGS" = "x" ; then - no_CFLAGS="yes" - if test "x$GCC" = "xyes" ; then - JE_CFLAGS_APPEND([-std=gnu99]) - JE_CFLAGS_APPEND([-Wall]) - JE_CFLAGS_APPEND([-pipe]) - JE_CFLAGS_APPEND([-g3]) - elif test "x$je_cv_msvc" = "xyes" ; then - CC="$CC -nologo" - JE_CFLAGS_APPEND([-Zi]) - JE_CFLAGS_APPEND([-MT]) - JE_CFLAGS_APPEND([-W3]) - CPPFLAGS="$CPPFLAGS -I${srcroot}/include/msvc_compat" - fi -fi -dnl Append EXTRA_CFLAGS to CFLAGS, if defined. 
-if test "x$EXTRA_CFLAGS" != "x" ; then - JE_CFLAGS_APPEND([$EXTRA_CFLAGS]) -fi -AC_PROG_CPP - -AC_CHECK_SIZEOF([void *]) -if test "x${ac_cv_sizeof_void_p}" = "x8" ; then - LG_SIZEOF_PTR=3 -elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then - LG_SIZEOF_PTR=2 -else - AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}]) -fi -AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR]) - -AC_CHECK_SIZEOF([int]) -if test "x${ac_cv_sizeof_int}" = "x8" ; then - LG_SIZEOF_INT=3 -elif test "x${ac_cv_sizeof_int}" = "x4" ; then - LG_SIZEOF_INT=2 -else - AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}]) -fi -AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT]) - -AC_CHECK_SIZEOF([long]) -if test "x${ac_cv_sizeof_long}" = "x8" ; then - LG_SIZEOF_LONG=3 -elif test "x${ac_cv_sizeof_long}" = "x4" ; then - LG_SIZEOF_LONG=2 -else - AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}]) -fi -AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG]) - -AC_CHECK_SIZEOF([intmax_t]) -if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then - LG_SIZEOF_INTMAX_T=4 -elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then - LG_SIZEOF_INTMAX_T=3 -elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then - LG_SIZEOF_INTMAX_T=2 -else - AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}]) -fi -AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T]) - -AC_CANONICAL_HOST -dnl CPU-specific settings. -CPU_SPINWAIT="" -case "${host_cpu}" in - i[[345]]86) - ;; - i686) - JE_COMPILABLE([__asm__], [], [[__asm__ volatile("pause"); return 0;]], - [je_cv_asm]) - if test "x${je_cv_asm}" = "xyes" ; then - CPU_SPINWAIT='__asm__ volatile("pause")' - fi - ;; - x86_64) - JE_COMPILABLE([__asm__ syntax], [], - [[__asm__ volatile("pause"); return 0;]], [je_cv_asm]) - if test "x${je_cv_asm}" = "xyes" ; then - CPU_SPINWAIT='__asm__ volatile("pause")' - fi - ;; - *) - ;; -esac -AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT]) - -LD_PRELOAD_VAR="LD_PRELOAD" -so="so" -importlib="${so}" -o="$ac_objext" -a="a" -exe="$ac_exeext" -libprefix="lib" -DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' -RPATH='-Wl,-rpath,$(1)' -SOREV="${so}.${rev}" -PIC_CFLAGS='-fPIC -DPIC' -CTARGET='-o $@' -LDTARGET='-o $@' -EXTRA_LDFLAGS= -MKLIB='ar crus $@' -CC_MM=1 - -dnl Platform-specific settings. abi and RPATH can probably be determined -dnl programmatically, but doing so is error-prone, which makes it generally -dnl not worth the trouble. -dnl -dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the -dnl definitions need to be seen before any headers are included, which is a pain -dnl to make happen otherwise. 
-default_munmap="1" -JEMALLOC_USABLE_SIZE_CONST="const" -case "${host}" in - *-*-darwin*) - CFLAGS="$CFLAGS" - abi="macho" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) - RPATH="" - LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" - so="dylib" - importlib="${so}" - force_tls="0" - DSO_LDFLAGS='-shared -Wl,-dylib_install_name,$(@F)' - SOREV="${rev}.${so}" - ;; - *-*-freebsd*) - CFLAGS="$CFLAGS" - abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) - force_lazy_lock="1" - ;; - *-*-linux*) - CFLAGS="$CFLAGS" - CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" - abi="elf" - AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) - AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) - AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) - JEMALLOC_USABLE_SIZE_CONST="" - default_munmap="0" - ;; - *-*-netbsd*) - AC_MSG_CHECKING([ABI]) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM( -[[#ifdef __ELF__ -/* ELF */ -#else -#error aout -#endif -]])], - [CFLAGS="$CFLAGS"; abi="elf"], - [abi="aout"]) - AC_MSG_RESULT([$abi]) - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) - ;; - *-*-solaris2*) - CFLAGS="$CFLAGS" - abi="elf" - RPATH='-Wl,-R,$(1)' - dnl Solaris needs this for sigwait(). - CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS" - LIBS="$LIBS -lposix4 -lsocket -lnsl" - ;; - *-ibm-aix*) - if "$LG_SIZEOF_PTR" = "8"; then - dnl 64bit AIX - LD_PRELOAD_VAR="LDR_PRELOAD64" - else - dnl 32bit AIX - LD_PRELOAD_VAR="LDR_PRELOAD" - fi - abi="xcoff" - ;; - *-*-mingw*) - abi="pecoff" - force_tls="0" - RPATH="" - so="dll" - if test "x$je_cv_msvc" = "xyes" ; then - importlib="lib" - DSO_LDFLAGS="-LD" - EXTRA_LDFLAGS="-link -DEBUG" - CTARGET='-Fo$@' - LDTARGET='-Fe$@' - MKLIB='lib -nologo -out:$@' - CC_MM= - else - importlib="${so}" - DSO_LDFLAGS="-shared" - fi - a="lib" - libprefix="" - SOREV="${so}" - PIC_CFLAGS="" - ;; - *) - AC_MSG_RESULT([Unsupported operating system: ${host}]) - abi="elf" - ;; -esac -AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST]) -AC_SUBST([abi]) -AC_SUBST([RPATH]) -AC_SUBST([LD_PRELOAD_VAR]) -AC_SUBST([so]) -AC_SUBST([importlib]) -AC_SUBST([o]) -AC_SUBST([a]) -AC_SUBST([exe]) -AC_SUBST([libprefix]) -AC_SUBST([DSO_LDFLAGS]) -AC_SUBST([EXTRA_LDFLAGS]) -AC_SUBST([SOREV]) -AC_SUBST([PIC_CFLAGS]) -AC_SUBST([CTARGET]) -AC_SUBST([LDTARGET]) -AC_SUBST([MKLIB]) -AC_SUBST([CC_MM]) - -if test "x$abi" != "xpecoff"; then - dnl Heap profiling uses the log(3) function. - LIBS="$LIBS -lm" -fi - -JE_COMPILABLE([__attribute__ syntax], - [static __attribute__((unused)) void foo(void){}], - [], - [je_cv_attribute]) -if test "x${je_cv_attribute}" = "xyes" ; then - AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ]) - if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then - JE_CFLAGS_APPEND([-fvisibility=hidden]) - fi -fi -dnl Check for tls_model attribute support (clang 3.0 still lacks support). -SAVED_CFLAGS="${CFLAGS}" -JE_CFLAGS_APPEND([-Werror]) -JE_COMPILABLE([tls_model attribute], [], - [static __thread int - __attribute__((tls_model("initial-exec"))) foo; - foo = 0;], - [je_cv_tls_model]) -CFLAGS="${SAVED_CFLAGS}" -if test "x${je_cv_tls_model}" = "xyes" ; then - AC_DEFINE([JEMALLOC_TLS_MODEL], - [__attribute__((tls_model("initial-exec")))]) -else - AC_DEFINE([JEMALLOC_TLS_MODEL], [ ]) -fi - -dnl Support optional additions to rpath. 
-AC_ARG_WITH([rpath], - [AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])], -if test "x$with_rpath" = "xno" ; then - RPATH_EXTRA= -else - RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`" -fi, - RPATH_EXTRA= -) -AC_SUBST([RPATH_EXTRA]) - -dnl Disable rules that do automatic regeneration of configure output by default. -AC_ARG_ENABLE([autogen], - [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])], -if test "x$enable_autogen" = "xno" ; then - enable_autogen="0" -else - enable_autogen="1" -fi -, -enable_autogen="0" -) -AC_SUBST([enable_autogen]) - -AC_PROG_INSTALL -AC_PROG_RANLIB -AC_PATH_PROG([AR], [ar], [false], [$PATH]) -AC_PATH_PROG([LD], [ld], [false], [$PATH]) -AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH]) - -public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free malloc_usable_size malloc_stats_print mallctl mallctlnametomib mallctlbymib" - -dnl Check for allocator-related functions that should be wrapped. -AC_CHECK_FUNC([memalign], - [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ]) - public_syms="${public_syms} memalign"]) -AC_CHECK_FUNC([valloc], - [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ]) - public_syms="${public_syms} valloc"]) - -dnl Support the experimental API by default. -AC_ARG_ENABLE([experimental], - [AS_HELP_STRING([--disable-experimental], - [Disable support for the experimental API])], -[if test "x$enable_experimental" = "xno" ; then - enable_experimental="0" -else - enable_experimental="1" -fi -], -[enable_experimental="1"] -) -if test "x$enable_experimental" = "x1" ; then - AC_DEFINE([JEMALLOC_EXPERIMENTAL], [ ]) - public_syms="${public_syms} allocm dallocm nallocm rallocm sallocm" -fi -AC_SUBST([enable_experimental]) - -dnl Perform no name mangling by default. -AC_ARG_WITH([mangling], - [AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])], - [mangling_map="$with_mangling"], [mangling_map=""]) -for nm in `echo ${mangling_map} |tr ',' ' '` ; do - k="`echo ${nm} |tr ':' ' ' |awk '{print $1}'`" - n="je_${k}" - m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'` - AC_DEFINE_UNQUOTED([${n}], [${m}]) - dnl Remove key from public_syms so that it isn't redefined later. - public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${k}\$" |tr '\n' ' '` -done - -dnl Do not prefix public APIs by default. -AC_ARG_WITH([jemalloc_prefix], - [AS_HELP_STRING([--with-jemalloc-prefix=<prefix>], [Prefix to prepend to all public APIs])], - [JEMALLOC_PREFIX="$with_jemalloc_prefix"], - [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then - JEMALLOC_PREFIX="" -else - JEMALLOC_PREFIX="je_" -fi] -) -if test "x$JEMALLOC_PREFIX" != "x" ; then - JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` - AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"]) - AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"]) -fi -dnl Generate macros to rename public symbols. All public symbols are prefixed -dnl with je_ in the source code, so these macro definitions are needed even if -dnl --with-jemalloc-prefix wasn't specified. -for stem in ${public_syms}; do - n="je_${stem}" - m="${JEMALLOC_PREFIX}${stem}" - AC_DEFINE_UNQUOTED([${n}], [${m}]) -done - -AC_ARG_WITH([export], - [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])], - [if test "x$with_export" = "xno"; then - AC_DEFINE([JEMALLOC_EXPORT],[]) -fi] -) - -dnl Do not mangle library-private APIs by default. 
-AC_ARG_WITH([private_namespace], - [AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])], - [JEMALLOC_PRIVATE_NAMESPACE="$with_private_namespace"], - [JEMALLOC_PRIVATE_NAMESPACE=""] -) -AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], ["$JEMALLOC_PRIVATE_NAMESPACE"]) -if test "x$JEMALLOC_PRIVATE_NAMESPACE" != "x" ; then - AC_DEFINE_UNQUOTED([JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix)], [${JEMALLOC_PRIVATE_NAMESPACE}##string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix]) -else - AC_DEFINE_UNQUOTED([JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix)], [string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix]) -fi - -dnl Do not add suffix to installed files by default. -AC_ARG_WITH([install_suffix], - [AS_HELP_STRING([--with-install-suffix=<suffix>], [Suffix to append to all installed files])], - [INSTALL_SUFFIX="$with_install_suffix"], - [INSTALL_SUFFIX=] -) -install_suffix="$INSTALL_SUFFIX" -AC_SUBST([install_suffix]) - -cfgoutputs_in="${srcroot}Makefile.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/html.xsl.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/manpages.xsl.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/jemalloc.xml.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc.h.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal.h.in" -cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/jemalloc_test.h.in" - -cfgoutputs_out="Makefile" -cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" -cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" -cfgoutputs_out="${cfgoutputs_out} doc/jemalloc${install_suffix}.xml" -cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc${install_suffix}.h" -cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h" -cfgoutputs_out="${cfgoutputs_out} test/jemalloc_test.h" - -cfgoutputs_tup="Makefile" -cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" -cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" -cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc${install_suffix}.xml:doc/jemalloc.xml.in" -cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc${install_suffix}.h:include/jemalloc/jemalloc.h.in" -cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h" -cfgoutputs_tup="${cfgoutputs_tup} test/jemalloc_test.h:test/jemalloc_test.h.in" - -cfghdrs_in="${srcroot}include/jemalloc/jemalloc_defs.h.in" -cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/size_classes.sh" - -cfghdrs_out="include/jemalloc/jemalloc_defs${install_suffix}.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h" - -cfghdrs_tup="include/jemalloc/jemalloc_defs${install_suffix}.h:include/jemalloc/jemalloc_defs.h.in" - -dnl Do not silence irrelevant compiler warnings by default, since enabling this -dnl option incurs a performance penalty. -AC_ARG_ENABLE([cc-silence], - [AS_HELP_STRING([--enable-cc-silence], - [Silence irrelevant compiler warnings])], -[if test "x$enable_cc_silence" = "xno" ; then - enable_cc_silence="0" -else - enable_cc_silence="1" -fi -], -[enable_cc_silence="0"] -) -if test "x$enable_cc_silence" = "x1" ; then - AC_DEFINE([JEMALLOC_CC_SILENCE], [ ]) -fi - -dnl Do not compile with debugging by default. 
-AC_ARG_ENABLE([debug], - [AS_HELP_STRING([--enable-debug], [Build debugging code (implies --enable-ivsalloc)])], -[if test "x$enable_debug" = "xno" ; then - enable_debug="0" -else - enable_debug="1" -fi -], -[enable_debug="0"] -) -if test "x$enable_debug" = "x1" ; then - AC_DEFINE([JEMALLOC_DEBUG], [ ]) - enable_ivsalloc="1" -fi -AC_SUBST([enable_debug]) - -dnl Do not validate pointers by default. -AC_ARG_ENABLE([ivsalloc], - [AS_HELP_STRING([--enable-ivsalloc], [Validate pointers passed through the public API])], -[if test "x$enable_ivsalloc" = "xno" ; then - enable_ivsalloc="0" -else - enable_ivsalloc="1" -fi -], -[enable_ivsalloc="0"] -) -if test "x$enable_ivsalloc" = "x1" ; then - AC_DEFINE([JEMALLOC_IVSALLOC], [ ]) -fi - -dnl Only optimize if not debugging. -if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then - dnl Make sure that an optimization flag was not specified in EXTRA_CFLAGS. - optimize="no" - echo "$EXTRA_CFLAGS" | grep "\-O" >/dev/null || optimize="yes" - if test "x${optimize}" = "xyes" ; then - if test "x$GCC" = "xyes" ; then - JE_CFLAGS_APPEND([-O3]) - JE_CFLAGS_APPEND([-funroll-loops]) - elif test "x$je_cv_msvc" = "xyes" ; then - JE_CFLAGS_APPEND([-O2]) - else - JE_CFLAGS_APPEND([-O]) - fi - fi -fi - -dnl Enable statistics calculation by default. -AC_ARG_ENABLE([stats], - [AS_HELP_STRING([--disable-stats], - [Disable statistics calculation/reporting])], -[if test "x$enable_stats" = "xno" ; then - enable_stats="0" -else - enable_stats="1" -fi -], -[enable_stats="1"] -) -if test "x$enable_stats" = "x1" ; then - AC_DEFINE([JEMALLOC_STATS], [ ]) -fi -AC_SUBST([enable_stats]) - -dnl Do not enable profiling by default. -AC_ARG_ENABLE([prof], - [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])], -[if test "x$enable_prof" = "xno" ; then - enable_prof="0" -else - enable_prof="1" -fi -], -[enable_prof="0"] -) -if test "x$enable_prof" = "x1" ; then - backtrace_method="" -else - backtrace_method="N/A" -fi - -AC_ARG_ENABLE([prof-libunwind], - [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])], -[if test "x$enable_prof_libunwind" = "xno" ; then - enable_prof_libunwind="0" -else - enable_prof_libunwind="1" -fi -], -[enable_prof_libunwind="0"] -) -AC_ARG_WITH([static_libunwind], - [AS_HELP_STRING([--with-static-libunwind=<libunwind.a>], - [Path to static libunwind library; use rather than dynamically linking])], -if test "x$with_static_libunwind" = "xno" ; then - LUNWIND="-lunwind" -else - if test ! 
-f "$with_static_libunwind" ; then - AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind]) - fi - LUNWIND="$with_static_libunwind" -fi, - LUNWIND="-lunwind" -) -if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then - AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"]) - if test "x$LUNWIND" = "x-lunwind" ; then - AC_CHECK_LIB([unwind], [backtrace], [LIBS="$LIBS $LUNWIND"], - [enable_prof_libunwind="0"]) - else - LIBS="$LIBS $LUNWIND" - fi - if test "x${enable_prof_libunwind}" = "x1" ; then - backtrace_method="libunwind" - AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ]) - fi -fi - -AC_ARG_ENABLE([prof-libgcc], - [AS_HELP_STRING([--disable-prof-libgcc], - [Do not use libgcc for backtracing])], -[if test "x$enable_prof_libgcc" = "xno" ; then - enable_prof_libgcc="0" -else - enable_prof_libgcc="1" -fi -], -[enable_prof_libgcc="1"] -) -if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ - -a "x$GCC" = "xyes" ; then - AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"]) - AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [LIBS="$LIBS -lgcc"], [enable_prof_libgcc="0"]) - dnl The following is conservative, in that it only has entries for CPUs on - dnl which jemalloc has been tested. - AC_MSG_CHECKING([libgcc-based backtracing reliability on ${host_cpu}]) - case "${host_cpu}" in - i[[3456]]86) - AC_MSG_RESULT([unreliable]) - enable_prof_libgcc="0"; - ;; - x86_64) - AC_MSG_RESULT([reliable]) - ;; - *) - AC_MSG_RESULT([unreliable]) - enable_prof_libgcc="0"; - ;; - esac - if test "x${enable_prof_libgcc}" = "x1" ; then - backtrace_method="libgcc" - AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ]) - fi -else - enable_prof_libgcc="0" -fi - -AC_ARG_ENABLE([prof-gcc], - [AS_HELP_STRING([--disable-prof-gcc], - [Do not use gcc intrinsics for backtracing])], -[if test "x$enable_prof_gcc" = "xno" ; then - enable_prof_gcc="0" -else - enable_prof_gcc="1" -fi -], -[enable_prof_gcc="1"] -) -if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ - -a "x$GCC" = "xyes" ; then - backtrace_method="gcc intrinsics" - AC_DEFINE([JEMALLOC_PROF_GCC], [ ]) -else - enable_prof_gcc="0" -fi - -if test "x$backtrace_method" = "x" ; then - backtrace_method="none (disabling profiling)" - enable_prof="0" -fi -AC_MSG_CHECKING([configured backtracing method]) -AC_MSG_RESULT([$backtrace_method]) -if test "x$enable_prof" = "x1" ; then - if test "x${force_tls}" = "x0" ; then - AC_MSG_ERROR([Heap profiling requires TLS]); - fi - force_tls="1" - AC_DEFINE([JEMALLOC_PROF], [ ]) -fi -AC_SUBST([enable_prof]) - -dnl Enable thread-specific caching by default. -AC_ARG_ENABLE([tcache], - [AS_HELP_STRING([--disable-tcache], [Disable per thread caches])], -[if test "x$enable_tcache" = "xno" ; then - enable_tcache="0" -else - enable_tcache="1" -fi -], -[enable_tcache="1"] -) -if test "x$enable_tcache" = "x1" ; then - AC_DEFINE([JEMALLOC_TCACHE], [ ]) -fi -AC_SUBST([enable_tcache]) - -dnl Disable mremap() for huge realloc() by default. 
-AC_ARG_ENABLE([mremap], - [AS_HELP_STRING([--enable-mremap], [Enable mremap(2) for huge realloc()])], -[if test "x$enable_mremap" = "xno" ; then - enable_mremap="0" -else - enable_mremap="1" -fi -], -[enable_mremap="0"] -) -if test "x$enable_mremap" = "x1" ; then - JE_COMPILABLE([mremap(...MREMAP_FIXED...)], [ -#define _GNU_SOURCE -#include <sys/mman.h> -], [ -void *p = mremap((void *)0, 0, 0, MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0); -], [je_cv_mremap_fixed]) - if test "x${je_cv_mremap_fixed}" = "xno" ; then - enable_mremap="0" - fi -fi -if test "x$enable_mremap" = "x1" ; then - AC_DEFINE([JEMALLOC_MREMAP], [ ]) -fi -AC_SUBST([enable_mremap]) - -dnl Enable VM deallocation via munmap() by default. -AC_ARG_ENABLE([munmap], - [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])], -[if test "x$enable_munmap" = "xno" ; then - enable_munmap="0" -else - enable_munmap="1" -fi -], -[enable_munmap="${default_munmap}"] -) -if test "x$enable_munmap" = "x1" ; then - AC_DEFINE([JEMALLOC_MUNMAP], [ ]) -fi -AC_SUBST([enable_munmap]) - -dnl Do not enable allocation from DSS by default. -AC_ARG_ENABLE([dss], - [AS_HELP_STRING([--enable-dss], [Enable allocation from DSS])], -[if test "x$enable_dss" = "xno" ; then - enable_dss="0" -else - enable_dss="1" -fi -], -[enable_dss="0"] -) -dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support. -AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"]) -if test "x$have_sbrk" = "x1" ; then - AC_DEFINE([JEMALLOC_HAVE_SBRK], [ ]) -else - enable_dss="0" -fi - -if test "x$enable_dss" = "x1" ; then - AC_DEFINE([JEMALLOC_DSS], [ ]) -fi -AC_SUBST([enable_dss]) - -dnl Support the junk/zero filling option by default. -AC_ARG_ENABLE([fill], - [AS_HELP_STRING([--disable-fill], - [Disable support for junk/zero filling, quarantine, and redzones])], -[if test "x$enable_fill" = "xno" ; then - enable_fill="0" -else - enable_fill="1" -fi -], -[enable_fill="1"] -) -if test "x$enable_fill" = "x1" ; then - AC_DEFINE([JEMALLOC_FILL], [ ]) -fi -AC_SUBST([enable_fill]) - -dnl Disable utrace(2)-based tracing by default. -AC_ARG_ENABLE([utrace], - [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])], -[if test "x$enable_utrace" = "xno" ; then - enable_utrace="0" -else - enable_utrace="1" -fi -], -[enable_utrace="0"] -) -JE_COMPILABLE([utrace(2)], [ -#include <sys/types.h> -#include <sys/param.h> -#include <sys/time.h> -#include <sys/uio.h> -#include <sys/ktrace.h> -], [ - utrace((void *)0, 0); -], [je_cv_utrace]) -if test "x${je_cv_utrace}" = "xno" ; then - enable_utrace="0" -fi -if test "x$enable_utrace" = "x1" ; then - AC_DEFINE([JEMALLOC_UTRACE], [ ]) -fi -AC_SUBST([enable_utrace]) - -dnl Support Valgrind by default. -AC_ARG_ENABLE([valgrind], - [AS_HELP_STRING([--disable-valgrind], [Disable support for Valgrind])], -[if test "x$enable_valgrind" = "xno" ; then - enable_valgrind="0" -else - enable_valgrind="1" -fi -], -[enable_valgrind="1"] -) -if test "x$enable_valgrind" = "x1" ; then - JE_COMPILABLE([valgrind], [ -#include <valgrind/valgrind.h> -#include <valgrind/memcheck.h> - -#if !defined(VALGRIND_RESIZEINPLACE_BLOCK) -# error "Incompatible Valgrind version" -#endif -], [], [je_cv_valgrind]) - if test "x${je_cv_valgrind}" = "xno" ; then - enable_valgrind="0" - fi - if test "x$enable_valgrind" = "x1" ; then - AC_DEFINE([JEMALLOC_VALGRIND], [ ]) - fi -fi -AC_SUBST([enable_valgrind]) - -dnl Do not support the xmalloc option by default. 
-AC_ARG_ENABLE([xmalloc], - [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])], -[if test "x$enable_xmalloc" = "xno" ; then - enable_xmalloc="0" -else - enable_xmalloc="1" -fi -], -[enable_xmalloc="0"] -) -if test "x$enable_xmalloc" = "x1" ; then - AC_DEFINE([JEMALLOC_XMALLOC], [ ]) -fi -AC_SUBST([enable_xmalloc]) - -AC_CACHE_CHECK([STATIC_PAGE_SHIFT], - [je_cv_static_page_shift], - AC_RUN_IFELSE([AC_LANG_PROGRAM( -[[ -#include <strings.h> -#ifdef _WIN32 -#include <windows.h> -#else -#include <unistd.h> -#endif -#include <stdio.h> -]], -[[ - int result; - FILE *f; - -#ifdef _WIN32 - SYSTEM_INFO si; - GetSystemInfo(&si); - result = si.dwPageSize; -#else - result = sysconf(_SC_PAGESIZE); -#endif - if (result == -1) { - return 1; - } - result = ffsl(result) - 1; - - f = fopen("conftest.out", "w"); - if (f == NULL) { - return 1; - } - fprintf(f, "%d\n", result); - fclose(f); - - return 0; -]])], - [je_cv_static_page_shift=`cat conftest.out`], - [je_cv_static_page_shift=undefined])) - -if test "x$je_cv_static_page_shift" != "xundefined"; then - AC_DEFINE_UNQUOTED([STATIC_PAGE_SHIFT], [$je_cv_static_page_shift]) -else - AC_MSG_ERROR([cannot determine value for STATIC_PAGE_SHIFT]) -fi - -dnl ============================================================================ -dnl jemalloc configuration. -dnl - -dnl Set VERSION if source directory has an embedded git repository. -if test -d "${srcroot}.git" ; then - git describe --long --abbrev=40 > ${srcroot}VERSION -fi -jemalloc_version=`cat ${srcroot}VERSION` -jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'` -jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'` -jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'` -jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'` -jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'` -AC_SUBST([jemalloc_version]) -AC_SUBST([jemalloc_version_major]) -AC_SUBST([jemalloc_version_minor]) -AC_SUBST([jemalloc_version_bugfix]) -AC_SUBST([jemalloc_version_nrev]) -AC_SUBST([jemalloc_version_gid]) - -dnl ============================================================================ -dnl Configure pthreads. - -if test "x$abi" != "xpecoff" ; then - AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])]) - dnl Some systems may embed pthreads functionality in libc; check for libpthread - dnl first, but try libc too before failing. - AC_CHECK_LIB([pthread], [pthread_create], [LIBS="$LIBS -lpthread"], - [AC_SEARCH_LIBS([pthread_create], , , - AC_MSG_ERROR([libpthread is missing]))]) -fi - -CPPFLAGS="$CPPFLAGS -D_REENTRANT" - -dnl Check whether the BSD-specific _malloc_thread_cleanup() exists. If so, use -dnl it rather than pthreads TSD cleanup functions to support cleanup during -dnl thread exit, in order to avoid pthreads library recursion during -dnl bootstrapping. -AC_CHECK_FUNC([_malloc_thread_cleanup], - [have__malloc_thread_cleanup="1"], - [have__malloc_thread_cleanup="0"] - ) -if test "x$have__malloc_thread_cleanup" = "x1" ; then - AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ]) - force_tls="1" -fi - -dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists. If -dnl so, mutex initialization causes allocation, and we need to implement this -dnl callback function in order to prevent recursive allocation. 
-AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb], - [have__pthread_mutex_init_calloc_cb="1"], - [have__pthread_mutex_init_calloc_cb="0"] - ) -if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then - AC_DEFINE([JEMALLOC_MUTEX_INIT_CB]) -fi - -dnl Disable lazy locking by default. -AC_ARG_ENABLE([lazy_lock], - [AS_HELP_STRING([--enable-lazy-lock], - [Enable lazy locking (only lock when multi-threaded)])], -[if test "x$enable_lazy_lock" = "xno" ; then - enable_lazy_lock="0" -else - enable_lazy_lock="1" -fi -], -[enable_lazy_lock="0"] -) -if test "x$enable_lazy_lock" = "x0" -a "x${force_lazy_lock}" = "x1" ; then - AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues]) - enable_lazy_lock="1" -fi -if test "x$enable_lazy_lock" = "x1" ; then - if test "x$abi" != "xpecoff" ; then - AC_CHECK_HEADERS([dlfcn.h], , [AC_MSG_ERROR([dlfcn.h is missing])]) - AC_CHECK_FUNC([dlsym], [], - [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], - [AC_MSG_ERROR([libdl is missing])]) - ]) - fi - AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ]) -fi -AC_SUBST([enable_lazy_lock]) - -AC_ARG_ENABLE([tls], - [AS_HELP_STRING([--disable-tls], [Disable thread-local storage (__thread keyword)])], -if test "x$enable_tls" = "xno" ; then - enable_tls="0" -else - enable_tls="1" -fi -, -enable_tls="1" -) -if test "x${enable_tls}" = "x0" -a "x${force_tls}" = "x1" ; then - AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues]) - enable_tls="1" -fi -if test "x${enable_tls}" = "x1" -a "x${force_tls}" = "x0" ; then - AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues]) - enable_tls="0" -fi -if test "x${enable_tls}" = "x1" ; then -AC_MSG_CHECKING([for TLS]) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM( -[[ - __thread int x; -]], [[ - x = 42; - - return 0; -]])], - AC_MSG_RESULT([yes]), - AC_MSG_RESULT([no]) - enable_tls="0") -fi -AC_SUBST([enable_tls]) -if test "x${enable_tls}" = "x1" ; then - AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ]) -elif test "x${force_tls}" = "x1" ; then - AC_MSG_ERROR([Failed to configure TLS, which is mandatory for correct function]) -fi - -dnl ============================================================================ -dnl Check for ffsl(3), and fail if not found. This function exists on all -dnl platforms that jemalloc currently has a chance of functioning on without -dnl modification. -JE_COMPILABLE([a program using ffsl], [ -#include <stdio.h> -#include <strings.h> -#include <string.h> -], [ - { - int rv = ffsl(0x08); - printf("%d\n", rv); - } -], [je_cv_function_ffsl]) -if test "x${je_cv_function_ffsl}" != "xyes" ; then - AC_MSG_ERROR([Cannot build without ffsl(3)]) -fi - -dnl ============================================================================ -dnl Check for atomic(9) operations as provided on FreeBSD. - -JE_COMPILABLE([atomic(9)], [ -#include <sys/types.h> -#include <machine/atomic.h> -#include <inttypes.h> -], [ - { - uint32_t x32 = 0; - volatile uint32_t *x32p = &x32; - atomic_fetchadd_32(x32p, 1); - } - { - unsigned long xlong = 0; - volatile unsigned long *xlongp = &xlong; - atomic_fetchadd_long(xlongp, 1); - } -], [je_cv_atomic9]) -if test "x${je_cv_atomic9}" = "xyes" ; then - AC_DEFINE([JEMALLOC_ATOMIC9]) -fi - -dnl ============================================================================ -dnl Check for atomic(3) operations as provided on Darwin. 
- -JE_COMPILABLE([Darwin OSAtomic*()], [ -#include <libkern/OSAtomic.h> -#include <inttypes.h> -], [ - { - int32_t x32 = 0; - volatile int32_t *x32p = &x32; - OSAtomicAdd32(1, x32p); - } - { - int64_t x64 = 0; - volatile int64_t *x64p = &x64; - OSAtomicAdd64(1, x64p); - } -], [je_cv_osatomic]) -if test "x${je_cv_osatomic}" = "xyes" ; then - AC_DEFINE([JEMALLOC_OSATOMIC], [ ]) -fi - -dnl ============================================================================ -dnl Check whether __sync_{add,sub}_and_fetch() are available despite -dnl __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros being undefined. - -AC_DEFUN([JE_SYNC_COMPARE_AND_SWAP_CHECK],[ - AC_CACHE_CHECK([whether to force $1-bit __sync_{add,sub}_and_fetch()], - [je_cv_sync_compare_and_swap_$2], - [AC_LINK_IFELSE([AC_LANG_PROGRAM([ - #include <stdint.h> - ], - [ - #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 - { - uint$1_t x$1 = 0; - __sync_add_and_fetch(&x$1, 42); - __sync_sub_and_fetch(&x$1, 1); - } - #else - #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 is defined, no need to force - #endif - ])], - [je_cv_sync_compare_and_swap_$2=yes], - [je_cv_sync_compare_and_swap_$2=no])]) - - if test "x${je_cv_sync_compare_and_swap_$2}" = "xyes" ; then - AC_DEFINE([JE_FORCE_SYNC_COMPARE_AND_SWAP_$2], [ ]) - fi -]) - -if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then - JE_SYNC_COMPARE_AND_SWAP_CHECK(32, 4) - JE_SYNC_COMPARE_AND_SWAP_CHECK(64, 8) -fi - -dnl ============================================================================ -dnl Check for spinlock(3) operations as provided on Darwin. - -JE_COMPILABLE([Darwin OSSpin*()], [ -#include <libkern/OSAtomic.h> -#include <inttypes.h> -], [ - OSSpinLock lock = 0; - OSSpinLockLock(&lock); - OSSpinLockUnlock(&lock); -], [je_cv_osspin]) -if test "x${je_cv_osspin}" = "xyes" ; then - AC_DEFINE([JEMALLOC_OSSPIN], [ ]) -fi - -dnl ============================================================================ -dnl Darwin-related configuration. - -AC_ARG_ENABLE([zone-allocator], - [AS_HELP_STRING([--disable-zone-allocator], - [Disable zone allocator for Darwin])], -[if test "x$enable_zone_allocator" = "xno" ; then - enable_zone_allocator="0" -else - enable_zone_allocator="1" -fi -], -[if test "x${abi}" = "xmacho"; then - enable_zone_allocator="1" -fi -] -) -AC_SUBST([enable_zone_allocator]) - -if test "x${enable_zone_allocator}" = "x1" ; then - if test "x${abi}" != "xmacho"; then - AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin]) - fi - AC_DEFINE([JEMALLOC_IVSALLOC], [ ]) - AC_DEFINE([JEMALLOC_ZONE], [ ]) - - dnl The szone version jumped from 3 to 6 between the OS X 10.5.x and 10.6 - dnl releases. malloc_zone_t and malloc_introspection_t have new fields in - dnl 10.6, which is the only source-level indication of the change. - AC_MSG_CHECKING([malloc zone version]) - AC_DEFUN([JE_ZONE_PROGRAM], - [AC_LANG_PROGRAM( - [#include <malloc/malloc.h>], - [static foo[[sizeof($1) $2 sizeof(void *) * $3 ? 
1 : -1]]] - )]) - - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,14)],[JEMALLOC_ZONE_VERSION=3],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,15)],[JEMALLOC_ZONE_VERSION=5],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,16)],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,9)],[JEMALLOC_ZONE_VERSION=6],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,13)],[JEMALLOC_ZONE_VERSION=7],[JEMALLOC_ZONE_VERSION=] - )])],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,17)],[JEMALLOC_ZONE_VERSION=8],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,>,17)],[JEMALLOC_ZONE_VERSION=9],[JEMALLOC_ZONE_VERSION=] - )])])])]) - if test "x${JEMALLOC_ZONE_VERSION}" = "x"; then - AC_MSG_RESULT([unsupported]) - AC_MSG_ERROR([Unsupported malloc zone version]) - fi - if test "${JEMALLOC_ZONE_VERSION}" = 9; then - JEMALLOC_ZONE_VERSION=8 - AC_MSG_RESULT([> 8]) - else - AC_MSG_RESULT([$JEMALLOC_ZONE_VERSION]) - fi - AC_DEFINE_UNQUOTED(JEMALLOC_ZONE_VERSION, [$JEMALLOC_ZONE_VERSION]) -fi - -dnl ============================================================================ -dnl Check for typedefs, structures, and compiler characteristics. -AC_HEADER_STDBOOL - -AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [ - mkdir -p "include/jemalloc/internal" - "${srcdir}/include/jemalloc/internal/size_classes.sh" > "${objroot}include/jemalloc/internal/size_classes.h" -]) - -dnl Process .in files. -AC_SUBST([cfghdrs_in]) -AC_SUBST([cfghdrs_out]) -AC_CONFIG_HEADERS([$cfghdrs_tup]) - -dnl ============================================================================ -dnl Generate outputs. -AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc.sh]) -AC_SUBST([cfgoutputs_in]) -AC_SUBST([cfgoutputs_out]) -AC_OUTPUT - -dnl ============================================================================ -dnl Print out the results of configuration. 
-AC_MSG_RESULT([===============================================================================]) -AC_MSG_RESULT([jemalloc version : ${jemalloc_version}]) -AC_MSG_RESULT([library revision : ${rev}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([CC : ${CC}]) -AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}]) -AC_MSG_RESULT([CFLAGS : ${CFLAGS}]) -AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}]) -AC_MSG_RESULT([LIBS : ${LIBS}]) -AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}]) -AC_MSG_RESULT([XSLROOT : ${XSLROOT}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([PREFIX : ${PREFIX}]) -AC_MSG_RESULT([BINDIR : ${BINDIR}]) -AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}]) -AC_MSG_RESULT([LIBDIR : ${LIBDIR}]) -AC_MSG_RESULT([DATADIR : ${DATADIR}]) -AC_MSG_RESULT([MANDIR : ${MANDIR}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([srcroot : ${srcroot}]) -AC_MSG_RESULT([abs_srcroot : ${abs_srcroot}]) -AC_MSG_RESULT([objroot : ${objroot}]) -AC_MSG_RESULT([abs_objroot : ${abs_objroot}]) -AC_MSG_RESULT([]) -AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}]) -AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE]) -AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}]) -AC_MSG_RESULT([install_suffix : ${install_suffix}]) -AC_MSG_RESULT([autogen : ${enable_autogen}]) -AC_MSG_RESULT([experimental : ${enable_experimental}]) -AC_MSG_RESULT([cc-silence : ${enable_cc_silence}]) -AC_MSG_RESULT([debug : ${enable_debug}]) -AC_MSG_RESULT([stats : ${enable_stats}]) -AC_MSG_RESULT([prof : ${enable_prof}]) -AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}]) -AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}]) -AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}]) -AC_MSG_RESULT([tcache : ${enable_tcache}]) -AC_MSG_RESULT([fill : ${enable_fill}]) -AC_MSG_RESULT([utrace : ${enable_utrace}]) -AC_MSG_RESULT([valgrind : ${enable_valgrind}]) -AC_MSG_RESULT([xmalloc : ${enable_xmalloc}]) -AC_MSG_RESULT([mremap : ${enable_mremap}]) -AC_MSG_RESULT([munmap : ${enable_munmap}]) -AC_MSG_RESULT([dss : ${enable_dss}]) -AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}]) -AC_MSG_RESULT([tls : ${enable_tls}]) -AC_MSG_RESULT([===============================================================================]) diff --git a/extra/jemalloc/doc/html.xsl.in b/extra/jemalloc/doc/html.xsl.in deleted file mode 100644 index a91d9746f62..00000000000 --- a/extra/jemalloc/doc/html.xsl.in +++ /dev/null @@ -1,4 +0,0 @@ -<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> - <xsl:import href="@XSLROOT@/html/docbook.xsl"/> - <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/> -</xsl:stylesheet> diff --git a/extra/jemalloc/doc/jemalloc.3 b/extra/jemalloc/doc/jemalloc.3 deleted file mode 100644 index 1462e2c2b34..00000000000 --- a/extra/jemalloc/doc/jemalloc.3 +++ /dev/null @@ -1,1482 +0,0 @@ -'\" t -.\" Title: JEMALLOC -.\" Author: Jason Evans -.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/> -.\" Date: 03/06/2013 -.\" Manual: User Manual -.\" Source: jemalloc 3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784 -.\" Language: English -.\" -.TH "JEMALLOC" "3" "03/06/2013" "jemalloc 3.3.1-0-g9ef9d9e8c271" "User Manual" -.\" ----------------------------------------------------------------- -.\" * Define some portability stuff -.\" ----------------------------------------------------------------- -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.\" http://bugs.debian.org/507673 -.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html -.\" 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.ie \n(.g .ds Aq \(aq -.el .ds Aq ' -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.SH "NAME" -jemalloc \- general purpose memory allocation functions -.SH "LIBRARY" -.PP -This manual describes jemalloc 3\&.3\&.1\-0\-g9ef9d9e8c271cdf14f664b871a8f98c827714784\&. More information can be found at the -\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&. -.SH "SYNOPSIS" -.sp -.ft B -.nf -#include <stdlib\&.h> -#include <jemalloc/jemalloc\&.h> -.fi -.ft -.SS "Standard API" -.HP \w'void\ *malloc('u -.BI "void *malloc(size_t\ " "size" ");" -.HP \w'void\ *calloc('u -.BI "void *calloc(size_t\ " "number" ", size_t\ " "size" ");" -.HP \w'int\ posix_memalign('u -.BI "int posix_memalign(void\ **" "ptr" ", size_t\ " "alignment" ", size_t\ " "size" ");" -.HP \w'void\ *aligned_alloc('u -.BI "void *aligned_alloc(size_t\ " "alignment" ", size_t\ " "size" ");" -.HP \w'void\ *realloc('u -.BI "void *realloc(void\ *" "ptr" ", size_t\ " "size" ");" -.HP \w'void\ free('u -.BI "void free(void\ *" "ptr" ");" -.SS "Non\-standard API" -.HP \w'size_t\ malloc_usable_size('u -.BI "size_t malloc_usable_size(const\ void\ *" "ptr" ");" -.HP \w'void\ malloc_stats_print('u -.BI "void malloc_stats_print(void\ " "(*write_cb)" "\ (void\ *,\ const\ char\ *), void\ *" "cbopaque" ", const\ char\ *" "opts" ");" -.HP \w'int\ mallctl('u -.BI "int mallctl(const\ char\ *" "name" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");" -.HP \w'int\ mallctlnametomib('u -.BI "int mallctlnametomib(const\ char\ *" "name" ", size_t\ *" "mibp" ", size_t\ *" "miblenp" ");" -.HP \w'int\ mallctlbymib('u -.BI "int mallctlbymib(const\ size_t\ *" "mib" ", size_t\ " "miblen" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");" -.HP \w'void\ (*malloc_message)('u -.BI "void (*malloc_message)(void\ *" "cbopaque" ", const\ char\ *" "s" ");" -.PP -const char *\fImalloc_conf\fR; -.SS "Experimental API" -.HP \w'int\ allocm('u -.BI "int allocm(void\ **" "ptr" ", size_t\ *" "rsize" ", size_t\ " "size" ", int\ " "flags" ");" -.HP \w'int\ rallocm('u -.BI "int rallocm(void\ **" "ptr" ", size_t\ *" "rsize" ", size_t\ " "size" ", size_t\ " "extra" ", int\ " "flags" ");" -.HP \w'int\ sallocm('u -.BI "int sallocm(const\ void\ *" "ptr" ", size_t\ *" "rsize" ", int\ " "flags" ");" -.HP \w'int\ dallocm('u -.BI "int dallocm(void\ *" "ptr" ", int\ " "flags" ");" -.HP \w'int\ nallocm('u -.BI "int nallocm(size_t\ *" "rsize" ", size_t\ " "size" ", int\ " "flags" ");" -.SH "DESCRIPTION" -.SS "Standard API" -.PP -The -\fBmalloc\fR\fB\fR -function allocates -\fIsize\fR -bytes of uninitialized memory\&. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object\&. -.PP -The -\fBcalloc\fR\fB\fR -function allocates space for -\fInumber\fR -objects, each -\fIsize\fR -bytes in length\&. The result is identical to calling -\fBmalloc\fR\fB\fR -with an argument of -\fInumber\fR -* -\fIsize\fR, with the exception that the allocated memory is explicitly initialized to zero bytes\&. 
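As a quick illustration of the allocation entry points listed in the synopsis above, a minimal sketch follows; it assumes a jemalloc build configured without a symbol prefix, so the functions are reachable under their standard names:

#include <stdlib.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* 100 bytes of uninitialized memory, suitably aligned for any object type. */
	void *p = malloc(100);
	/* 10 objects of 8 bytes each; calloc() zero-initializes the memory. */
	void *q = calloc(10, 8);

	if (p == NULL || q == NULL)
		return (1);

	/* The usable size may be larger than the requested size; introspection only. */
	printf("usable size of p: %zu\n", malloc_usable_size(p));

	free(p);
	free(q);
	return (0);
}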
-.PP -The -\fBposix_memalign\fR\fB\fR -function allocates -\fIsize\fR -bytes of memory such that the allocation\*(Aqs base address is an even multiple of -\fIalignment\fR, and returns the allocation in the value pointed to by -\fIptr\fR\&. The requested -\fIalignment\fR -must be a power of 2 at least as large as -sizeof(\fBvoid *\fR)\&. -.PP -The -\fBaligned_alloc\fR\fB\fR -function allocates -\fIsize\fR -bytes of memory such that the allocation\*(Aqs base address is an even multiple of -\fIalignment\fR\&. The requested -\fIalignment\fR -must be a power of 2\&. Behavior is undefined if -\fIsize\fR -is not an integral multiple of -\fIalignment\fR\&. -.PP -The -\fBrealloc\fR\fB\fR -function changes the size of the previously allocated memory referenced by -\fIptr\fR -to -\fIsize\fR -bytes\&. The contents of the memory are unchanged up to the lesser of the new and old sizes\&. If the new size is larger, the contents of the newly allocated portion of the memory are undefined\&. Upon success, the memory referenced by -\fIptr\fR -is freed and a pointer to the newly allocated memory is returned\&. Note that -\fBrealloc\fR\fB\fR -may move the memory allocation, resulting in a different return value than -\fIptr\fR\&. If -\fIptr\fR -is -\fBNULL\fR, the -\fBrealloc\fR\fB\fR -function behaves identically to -\fBmalloc\fR\fB\fR -for the specified size\&. -.PP -The -\fBfree\fR\fB\fR -function causes the allocated memory referenced by -\fIptr\fR -to be made available for future allocations\&. If -\fIptr\fR -is -\fBNULL\fR, no action occurs\&. -.SS "Non\-standard API" -.PP -The -\fBmalloc_usable_size\fR\fB\fR -function returns the usable size of the allocation pointed to by -\fIptr\fR\&. The return value may be larger than the size that was requested during allocation\&. The -\fBmalloc_usable_size\fR\fB\fR -function is not a mechanism for in\-place -\fBrealloc\fR\fB\fR; rather it is provided solely as a tool for introspection purposes\&. Any discrepancy between the requested allocation size and the size reported by -\fBmalloc_usable_size\fR\fB\fR -should not be depended on, since such behavior is entirely implementation\-dependent\&. -.PP -The -\fBmalloc_stats_print\fR\fB\fR -function writes human\-readable summary statistics via the -\fIwrite_cb\fR -callback function pointer and -\fIcbopaque\fR -data passed to -\fIwrite_cb\fR, or -\fBmalloc_message\fR\fB\fR -if -\fIwrite_cb\fR -is -\fBNULL\fR\&. This function can be called repeatedly\&. General information that never changes during execution can be omitted by specifying "g" as a character within the -\fIopts\fR -string\&. Note that -\fBmalloc_message\fR\fB\fR -uses the -\fBmallctl*\fR\fB\fR -functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously\&. If -\fB\-\-enable\-stats\fR -is specified during configuration, \(lqm\(rq and \(lqa\(rq can be specified to omit merged arena and per arena statistics, respectively; \(lqb\(rq and \(lql\(rq can be specified to omit per size class statistics for bins and large objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&. -.PP -The -\fBmallctl\fR\fB\fR -function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions\&. 
The period\-separated -\fIname\fR -argument specifies a location in a tree\-structured namespace; see the -MALLCTL NAMESPACE -section for documentation on the tree contents\&. To read a value, pass a pointer via -\fIoldp\fR -to adequate space to contain the value, and a pointer to its length via -\fIoldlenp\fR; otherwise pass -\fBNULL\fR -and -\fBNULL\fR\&. Similarly, to write a value, pass a pointer to the value via -\fInewp\fR, and its length via -\fInewlen\fR; otherwise pass -\fBNULL\fR -and -\fB0\fR\&. -.PP -The -\fBmallctlnametomib\fR\fB\fR -function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a \(lqManagement Information Base\(rq (MIB) that can be passed repeatedly to -\fBmallctlbymib\fR\fB\fR\&. Upon successful return from -\fBmallctlnametomib\fR\fB\fR, -\fImibp\fR -contains an array of -\fI*miblenp\fR -integers, where -\fI*miblenp\fR -is the lesser of the number of components in -\fIname\fR -and the input value of -\fI*miblenp\fR\&. Thus it is possible to pass a -\fI*miblenp\fR -that is smaller than the number of period\-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB\&. For name components that are integers (e\&.g\&. the 2 in -"arenas\&.bin\&.2\&.size"), the corresponding MIB component will always be that integer\&. Therefore, it is legitimate to construct code like the following: -.sp -.if n \{\ -.RS 4 -.\} -.nf -unsigned nbins, i; - -int mib[4]; -size_t len, miblen; - -len = sizeof(nbins); -mallctl("arenas\&.nbins", &nbins, &len, NULL, 0); - -miblen = 4; -mallnametomib("arenas\&.bin\&.0\&.size", mib, &miblen); -for (i = 0; i < nbins; i++) { - size_t bin_size; - - mib[2] = i; - len = sizeof(bin_size); - mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); - /* Do something with bin_size\&.\&.\&. */ -} -.fi -.if n \{\ -.RE -.\} -.SS "Experimental API" -.PP -The experimental API is subject to change or removal without regard for backward compatibility\&. If -\fB\-\-disable\-experimental\fR -is specified during configuration, the experimental API is omitted\&. -.PP -The -\fBallocm\fR\fB\fR, -\fBrallocm\fR\fB\fR, -\fBsallocm\fR\fB\fR, -\fBdallocm\fR\fB\fR, and -\fBnallocm\fR\fB\fR -functions all have a -\fIflags\fR -argument that can be used to specify options\&. The functions only check the options that are contextually relevant\&. Use bitwise or (|) operations to specify one or more of the following: -.PP -\fBALLOCM_LG_ALIGN(\fR\fB\fIla\fR\fR\fB) \fR -.RS 4 -Align the memory allocation to start at an address that is a multiple of -(1 << \fIla\fR)\&. This macro does not validate that -\fIla\fR -is within the valid range\&. -.RE -.PP -\fBALLOCM_ALIGN(\fR\fB\fIa\fR\fR\fB) \fR -.RS 4 -Align the memory allocation to start at an address that is a multiple of -\fIa\fR, where -\fIa\fR -is a power of two\&. This macro does not validate that -\fIa\fR -is a power of 2\&. -.RE -.PP -\fBALLOCM_ZERO\fR -.RS 4 -Initialize newly allocated memory to contain zero bytes\&. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes\&. If this option is absent, newly allocated memory is uninitialized\&. -.RE -.PP -\fBALLOCM_NO_MOVE\fR -.RS 4 -For reallocation, fail rather than moving the object\&. This constraint can apply to both growth and shrinkage\&. 
-.RE -.PP -\fBALLOCM_ARENA(\fR\fB\fIa\fR\fR\fB) \fR -.RS 4 -Use the arena specified by the index -\fIa\fR\&. This macro does not validate that -\fIa\fR -specifies an arena in the valid range\&. -.RE -.PP -The -\fBallocm\fR\fB\fR -function allocates at least -\fIsize\fR -bytes of memory, sets -\fI*ptr\fR -to the base address of the allocation, and sets -\fI*rsize\fR -to the real size of the allocation if -\fIrsize\fR -is not -\fBNULL\fR\&. Behavior is undefined if -\fIsize\fR -is -\fB0\fR\&. -.PP -The -\fBrallocm\fR\fB\fR -function resizes the allocation at -\fI*ptr\fR -to be at least -\fIsize\fR -bytes, sets -\fI*ptr\fR -to the base address of the allocation if it moved, and sets -\fI*rsize\fR -to the real size of the allocation if -\fIrsize\fR -is not -\fBNULL\fR\&. If -\fIextra\fR -is non\-zero, an attempt is made to resize the allocation to be at least -\fIsize\fR + \fIextra\fR) -bytes, though inability to allocate the extra byte(s) will not by itself result in failure\&. Behavior is undefined if -\fIsize\fR -is -\fB0\fR, or if -(\fIsize\fR + \fIextra\fR > \fBSIZE_T_MAX\fR)\&. -.PP -The -\fBsallocm\fR\fB\fR -function sets -\fI*rsize\fR -to the real size of the allocation\&. -.PP -The -\fBdallocm\fR\fB\fR -function causes the memory referenced by -\fIptr\fR -to be made available for future allocations\&. -.PP -The -\fBnallocm\fR\fB\fR -function allocates no memory, but it performs the same size computation as the -\fBallocm\fR\fB\fR -function, and if -\fIrsize\fR -is not -\fBNULL\fR -it sets -\fI*rsize\fR -to the real size of the allocation that would result from the equivalent -\fBallocm\fR\fB\fR -function call\&. Behavior is undefined if -\fIsize\fR -is -\fB0\fR\&. -.SH "TUNING" -.PP -Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile\- or run\-time\&. -.PP -The string pointed to by the global variable -\fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named -/etc/malloc\&.conf, and the value of the environment variable -\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&. -.PP -An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each -"opt\&.*" -mallctl (see the -MALLCTL NAMESPACE -section for options documentation)\&. For example, -abort:true,narenas:1 -sets the -"opt\&.abort" -and -"opt\&.narenas" -options\&. Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values\&. -.SH "IMPLEMENTATION NOTES" -.PP -Traditionally, allocators have used -\fBsbrk\fR(2) -to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory\&. If -\fB\-\-enable\-dss\fR -is specified during configuration, this allocator uses both -\fBmmap\fR(2) -and -\fBsbrk\fR(2), in that order of preference; otherwise only -\fBmmap\fR(2) -is used\&. -.PP -This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi\-processor systems\&. This works well with regard to threading scalability, but incurs some costs\&. There is a small fixed per\-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation\&. 
These overheads are not generally an issue, given the number of arenas normally used\&. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance\&. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions\&. -.PP -In addition to multiple arenas, unless -\fB\-\-disable\-tcache\fR -is specified during configuration, this allocator supports thread\-specific caching for small and large objects, in order to make it possible to completely avoid synchronization for most allocation requests\&. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache\&. -.PP -Memory is conceptually broken into equal\-sized chunks, where the chunk size is a power of two that is greater than the page size\&. Chunks are always aligned to multiples of the chunk size\&. This alignment makes it possible to find metadata for user objects very quickly\&. -.PP -User objects are broken into three categories according to size: small, large, and huge\&. Small objects are smaller than one page\&. Large objects are smaller than the chunk size\&. Huge objects are a multiple of the chunk size\&. Small and large objects are managed by arenas; huge objects are managed separately in a single data structure that is shared by all threads\&. Huge objects are used by applications infrequently enough that this single data structure is not a scalability issue\&. -.PP -Each chunk that is managed by an arena tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object)\&. The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time\&. -.PP -Small objects are managed in groups by page runs\&. Each run maintains a frontier and free list to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least -sizeof(\fBdouble\fR)\&. All other small object size classes are multiples of the quantum, spaced such that internal fragmentation is limited to approximately 25% for all but the smallest size classes\&. Allocation requests that are larger than the maximum small size class, but small enough to fit in an arena\-managed chunk (see the -"opt\&.lg_chunk" -option), are rounded up to the nearest run size\&. Allocation requests that are too large to fit in an arena\-managed chunk are rounded up to the nearest multiple of the chunk size\&. -.PP -Allocations are packed tightly together, which can be an issue for multi\-threaded applications\&. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating\&. -.PP -Assuming 4 MiB chunks, 4 KiB pages, and a 16\-byte quantum on a 64\-bit system, the size classes in each category are as shown in -Table 1\&. -.sp -.it 1 an-trap -.nr an-no-space-flag 1 -.nr an-break-flag 1 -.br -.B Table\ \&1.\ \&Size classes -.TS -allbox tab(:); -lB rB lB. -T{ -Category -T}:T{ -Spacing -T}:T{ -Size -T} -.T& -l r l -^ r l -^ r l -^ r l -^ r l -^ r l -^ r l -l r l -l r l. 
-T{ -Small -T}:T{ -lg -T}:T{ -[8] -T} -:T{ -16 -T}:T{ -[16, 32, 48, \&.\&.\&., 128] -T} -:T{ -32 -T}:T{ -[160, 192, 224, 256] -T} -:T{ -64 -T}:T{ -[320, 384, 448, 512] -T} -:T{ -128 -T}:T{ -[640, 768, 896, 1024] -T} -:T{ -256 -T}:T{ -[1280, 1536, 1792, 2048] -T} -:T{ -512 -T}:T{ -[2560, 3072, 3584] -T} -T{ -Large -T}:T{ -4 KiB -T}:T{ -[4 KiB, 8 KiB, 12 KiB, \&.\&.\&., 4072 KiB] -T} -T{ -Huge -T}:T{ -4 MiB -T}:T{ -[4 MiB, 8 MiB, 12 MiB, \&.\&.\&.] -T} -.TE -.sp 1 -.SH "MALLCTL NAMESPACE" -.PP -The following names are defined in the namespace accessible via the -\fBmallctl*\fR\fB\fR -functions\&. Value types are specified in parentheses, their readable/writable statuses are encoded as -rw, -r\-, -\-w, or -\-\-, and required build configuration flags follow, if any\&. A name element encoded as -<i> -or -<j> -indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection\&. In the case of -"stats\&.arenas\&.<i>\&.*", -<i> -equal to -"arenas\&.narenas" -can be used to access the summation of statistics from all arenas\&. Take special note of the -"epoch" -mallctl, which controls refreshing of cached dynamic statistics\&. -.PP -"version" (\fBconst char *\fR) r\- -.RS 4 -Return the jemalloc version string\&. -.RE -.PP -"epoch" (\fBuint64_t\fR) rw -.RS 4 -If a value is passed in, refresh the data from which the -\fBmallctl*\fR\fB\fR -functions report values, and increment the epoch\&. Return the current epoch\&. This is useful for detecting whether another thread caused a refresh\&. -.RE -.PP -"config\&.debug" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-debug\fR -was specified during build configuration\&. -.RE -.PP -"config\&.dss" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-dss\fR -was specified during build configuration\&. -.RE -.PP -"config\&.fill" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-fill\fR -was specified during build configuration\&. -.RE -.PP -"config\&.lazy_lock" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-lazy\-lock\fR -was specified during build configuration\&. -.RE -.PP -"config\&.mremap" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-mremap\fR -was specified during build configuration\&. -.RE -.PP -"config\&.munmap" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-munmap\fR -was specified during build configuration\&. -.RE -.PP -"config\&.prof" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-prof\fR -was specified during build configuration\&. -.RE -.PP -"config\&.prof_libgcc" (\fBbool\fR) r\- -.RS 4 -\fB\-\-disable\-prof\-libgcc\fR -was not specified during build configuration\&. -.RE -.PP -"config\&.prof_libunwind" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-prof\-libunwind\fR -was specified during build configuration\&. -.RE -.PP -"config\&.stats" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-stats\fR -was specified during build configuration\&. -.RE -.PP -"config\&.tcache" (\fBbool\fR) r\- -.RS 4 -\fB\-\-disable\-tcache\fR -was not specified during build configuration\&. -.RE -.PP -"config\&.tls" (\fBbool\fR) r\- -.RS 4 -\fB\-\-disable\-tls\fR -was not specified during build configuration\&. -.RE -.PP -"config\&.utrace" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-utrace\fR -was specified during build configuration\&. -.RE -.PP -"config\&.valgrind" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-valgrind\fR -was specified during build configuration\&. -.RE -.PP -"config\&.xmalloc" (\fBbool\fR) r\- -.RS 4 -\fB\-\-enable\-xmalloc\fR -was specified during build configuration\&. -.RE -.PP -"opt\&.abort" (\fBbool\fR) r\- -.RS 4 -Abort\-on\-warning enabled/disabled\&. 
If true, most warnings are fatal\&. The process will call -\fBabort\fR(3) -in these cases\&. This option is disabled by default unless -\fB\-\-enable\-debug\fR -is specified during configuration, in which case it is enabled by default\&. -.RE -.PP -"opt\&.lg_chunk" (\fBsize_t\fR) r\- -.RS 4 -Virtual memory chunk size (log base 2)\&. If a chunk size outside the supported size range is specified, the size is silently clipped to the minimum/maximum supported size\&. The default chunk size is 4 MiB (2^22)\&. -.RE -.PP -"opt\&.dss" (\fBconst char *\fR) r\- -.RS 4 -dss (\fBsbrk\fR(2)) allocation precedence as related to -\fBmmap\fR(2) -allocation\&. The following settings are supported: \(lqdisabled\(rq, \(lqprimary\(rq, and \(lqsecondary\(rq (default)\&. -.RE -.PP -"opt\&.narenas" (\fBsize_t\fR) r\- -.RS 4 -Maximum number of arenas to use for automatic multiplexing of threads and arenas\&. The default is four times the number of CPUs, or one if there is a single CPU\&. -.RE -.PP -"opt\&.lg_dirty_mult" (\fBssize_t\fR) r\- -.RS 4 -Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via -\fBmadvise\fR(2) -or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 8:1 (2^3:1); an option value of \-1 will disable dirty page purging\&. -.RE -.PP -"opt\&.stats_print" (\fBbool\fR) r\- -.RS 4 -Enable/disable statistics printing at exit\&. If enabled, the -\fBmalloc_stats_print\fR\fB\fR -function is called at program exit via an -\fBatexit\fR(3) -function\&. If -\fB\-\-enable\-stats\fR -is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&. -.RE -.PP -"opt\&.junk" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR] -.RS 4 -Junk filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to -0xa5\&. All deallocated memory will be initialized to -0x5a\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default unless -\fB\-\-enable\-debug\fR -is specified during configuration, in which case it is enabled by default unless running inside -\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&. -.RE -.PP -"opt\&.quarantine" (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR] -.RS 4 -Per thread quarantine size in bytes\&. If non\-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory\&. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk\-filled if the -"opt\&.junk" -option is enabled\&. This feature is of particular use in combination with -\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0 unless running inside Valgrind, in which case the default is 16 MiB\&. 
-.RE -.PP -"opt\&.redzone" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR] -.RS 4 -Redzones enabled/disabled\&. If enabled, small allocations have redzones before and after them\&. Furthermore, if the -"opt\&.junk" -option is enabled, the redzones are checked for corruption during deallocation\&. However, the primary intended purpose of this feature is to be used in combination with -\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default unless running inside Valgrind\&. -.RE -.PP -"opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR] -.RS 4 -Zero filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to 0\&. Note that this initialization only happens once for each byte, so -\fBrealloc\fR\fB\fR -and -\fBrallocm\fR\fB\fR -calls do not zero memory that was previously allocated\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default\&. -.RE -.PP -"opt\&.utrace" (\fBbool\fR) r\- [\fB\-\-enable\-utrace\fR] -.RS 4 -Allocation tracing based on -\fButrace\fR(2) -enabled/disabled\&. This option is disabled by default\&. -.RE -.PP -"opt\&.valgrind" (\fBbool\fR) r\- [\fB\-\-enable\-valgrind\fR] -.RS 4 -\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2 -support enabled/disabled\&. This option is vestigal because jemalloc auto\-detects whether it is running inside Valgrind\&. This option is disabled by default, unless running inside Valgrind\&. -.RE -.PP -"opt\&.xmalloc" (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR] -.RS 4 -Abort\-on\-out\-of\-memory enabled/disabled\&. If enabled, rather than returning failure for any allocation function, display a diagnostic message on -\fBSTDERR_FILENO\fR -and cause the program to drop core (using -\fBabort\fR(3))\&. If an application is designed to depend on this behavior, set the option at compile time by including the following in the source code: -.sp -.if n \{\ -.RS 4 -.\} -.nf -malloc_conf = "xmalloc:true"; -.fi -.if n \{\ -.RE -.\} -.sp -This option is disabled by default\&. -.RE -.PP -"opt\&.tcache" (\fBbool\fR) r\- [\fB\-\-enable\-tcache\fR] -.RS 4 -Thread\-specific caching enabled/disabled\&. When there are multiple threads, each thread uses a thread\-specific cache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the -"opt\&.lg_tcache_max" -option for related tuning information\&. This option is enabled by default unless running inside -\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&. -.RE -.PP -"opt\&.lg_tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR] -.RS 4 -Maximum size class (log base 2) to cache in the thread\-specific cache\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&. -.RE -.PP -"opt\&.prof" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] -.RS 4 -Memory profiling enabled/disabled\&. If enabled, profile memory allocation activity\&. See the -"opt\&.prof_active" -option for on\-the\-fly activation/deactivation\&. See the -"opt\&.lg_prof_sample" -option for probabilistic sampling control\&. See the -"opt\&.prof_accum" -option for control of cumulative sample reporting\&. 
See the -"opt\&.lg_prof_interval" -option for information on interval\-triggered profile dumping, the -"opt\&.prof_gdump" -option for information on high\-water\-triggered profile dumping, and the -"opt\&.prof_final" -option for final profile dumping\&. Profile output is compatible with the included -\fBpprof\fR -Perl script, which originates from the -\m[blue]\fBgperftools package\fR\m[]\&\s-2\u[3]\d\s+2\&. -.RE -.PP -"opt\&.prof_prefix" (\fBconst char *\fR) r\- [\fB\-\-enable\-prof\fR] -.RS 4 -Filename prefix for profile dumps\&. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled)\&. The default prefix is -jeprof\&. -.RE -.PP -"opt\&.prof_active" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] -.RS 4 -Profiling activated/deactivated\&. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the -"opt\&.prof" -option) but inactive, then toggle profiling at any time during program execution with the -"prof\&.active" -mallctl\&. This option is enabled by default\&. -.RE -.PP -"opt\&.lg_prof_sample" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR] -.RS 4 -Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity\&. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead\&. The default sample interval is 512 KiB (2^19 B)\&. -.RE -.PP -"opt\&.prof_accum" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] -.RS 4 -Reporting of cumulative object/byte counts in profile dumps enabled/disabled\&. If this option is enabled, every unique backtrace must be stored for the duration of execution\&. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest\&. This option is disabled by default\&. -.RE -.PP -"opt\&.lg_prof_interval" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR] -.RS 4 -Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity\&. The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks\&. Profiles are dumped to files named according to the pattern -<prefix>\&.<pid>\&.<seq>\&.i<iseq>\&.heap, where -<prefix> -is controlled by the -"opt\&.prof_prefix" -option\&. By default, interval\-triggered profile dumping is disabled (encoded as \-1)\&. -.RE -.PP -"opt\&.prof_gdump" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] -.RS 4 -Trigger a memory profile dump every time the total virtual memory exceeds the previous maximum\&. Profiles are dumped to files named according to the pattern -<prefix>\&.<pid>\&.<seq>\&.u<useq>\&.heap, where -<prefix> -is controlled by the -"opt\&.prof_prefix" -option\&. This option is disabled by default\&. -.RE -.PP -"opt\&.prof_final" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] -.RS 4 -Use an -\fBatexit\fR(3) -function to dump final memory usage to a file named according to the pattern -<prefix>\&.<pid>\&.<seq>\&.f\&.heap, where -<prefix> -is controlled by the -"opt\&.prof_prefix" -option\&. This option is enabled by default\&. -.RE -.PP -"opt\&.prof_leak" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] -.RS 4 -Leak reporting enabled/disabled\&. If enabled, use an -\fBatexit\fR(3) -function to report memory leaks detected by allocation sampling\&. See the -"opt\&.prof" -option for information on analyzing heap profile output\&. 
This option is disabled by default\&. -.RE -.PP -"thread\&.arena" (\fBunsigned\fR) rw -.RS 4 -Get or set the arena associated with the calling thread\&. If the specified arena was not initialized beforehand (see the -"arenas\&.initialized" -mallctl), it will be automatically initialized as a side effect of calling this interface\&. -.RE -.PP -"thread\&.allocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Get the total number of bytes ever allocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&. -.RE -.PP -"thread\&.allocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Get a pointer to the the value that is returned by the -"thread\&.allocated" -mallctl\&. This is useful for avoiding the overhead of repeated -\fBmallctl*\fR\fB\fR -calls\&. -.RE -.PP -"thread\&.deallocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Get the total number of bytes ever deallocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&. -.RE -.PP -"thread\&.deallocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Get a pointer to the the value that is returned by the -"thread\&.deallocated" -mallctl\&. This is useful for avoiding the overhead of repeated -\fBmallctl*\fR\fB\fR -calls\&. -.RE -.PP -"thread\&.tcache\&.enabled" (\fBbool\fR) rw [\fB\-\-enable\-tcache\fR] -.RS 4 -Enable/disable calling thread\*(Aqs tcache\&. The tcache is implicitly flushed as a side effect of becoming disabled (see -"thread\&.tcache\&.flush")\&. -.RE -.PP -"thread\&.tcache\&.flush" (\fBvoid\fR) \-\- [\fB\-\-enable\-tcache\fR] -.RS 4 -Flush calling thread\*(Aqs tcache\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs thread\-specific cache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&. -.RE -.PP -"arena\&.<i>\&.purge" (\fBunsigned\fR) \-\- -.RS 4 -Purge unused dirty pages for arena <i>, or for all arenas if <i> equals -"arenas\&.narenas"\&. -.RE -.PP -"arena\&.<i>\&.dss" (\fBconst char *\fR) rw -.RS 4 -Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals -"arenas\&.narenas"\&. See -"opt\&.dss" -for supported settings\&. -.RE -.PP -"arenas\&.narenas" (\fBunsigned\fR) r\- -.RS 4 -Current limit on number of arenas\&. -.RE -.PP -"arenas\&.initialized" (\fBbool *\fR) r\- -.RS 4 -An array of -"arenas\&.narenas" -booleans\&. Each boolean indicates whether the corresponding arena is initialized\&. -.RE -.PP -"arenas\&.quantum" (\fBsize_t\fR) r\- -.RS 4 -Quantum size\&. -.RE -.PP -"arenas\&.page" (\fBsize_t\fR) r\- -.RS 4 -Page size\&. -.RE -.PP -"arenas\&.tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR] -.RS 4 -Maximum thread\-cached size class\&. -.RE -.PP -"arenas\&.nbins" (\fBunsigned\fR) r\- -.RS 4 -Number of bin size classes\&. -.RE -.PP -"arenas\&.nhbins" (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR] -.RS 4 -Total number of thread cache bin size classes\&. 
-.RE -.PP -"arenas\&.bin\&.<i>\&.size" (\fBsize_t\fR) r\- -.RS 4 -Maximum size supported by size class\&. -.RE -.PP -"arenas\&.bin\&.<i>\&.nregs" (\fBuint32_t\fR) r\- -.RS 4 -Number of regions per page run\&. -.RE -.PP -"arenas\&.bin\&.<i>\&.run_size" (\fBsize_t\fR) r\- -.RS 4 -Number of bytes per page run\&. -.RE -.PP -"arenas\&.nlruns" (\fBsize_t\fR) r\- -.RS 4 -Total number of large size classes\&. -.RE -.PP -"arenas\&.lrun\&.<i>\&.size" (\fBsize_t\fR) r\- -.RS 4 -Maximum size supported by this large size class\&. -.RE -.PP -"arenas\&.purge" (\fBunsigned\fR) \-w -.RS 4 -Purge unused dirty pages for the specified arena, or for all arenas if none is specified\&. -.RE -.PP -"arenas\&.extend" (\fBunsigned\fR) r\- -.RS 4 -Extend the array of arenas by appending a new arena, and returning the new arena index\&. -.RE -.PP -"prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR] -.RS 4 -Control whether sampling is currently active\&. See the -"opt\&.prof_active" -option for additional information\&. -.RE -.PP -"prof\&.dump" (\fBconst char *\fR) \-w [\fB\-\-enable\-prof\fR] -.RS 4 -Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern -<prefix>\&.<pid>\&.<seq>\&.m<mseq>\&.heap, where -<prefix> -is controlled by the -"opt\&.prof_prefix" -option\&. -.RE -.PP -"prof\&.interval" (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR] -.RS 4 -Average number of bytes allocated between inverval\-based profile dumps\&. See the -"opt\&.lg_prof_interval" -option for additional information\&. -.RE -.PP -"stats\&.cactive" (\fBsize_t *\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Pointer to a counter that contains an approximate count of the current number of bytes in active pages\&. The estimate may be high, but never low, because each arena rounds up to the nearest multiple of the chunk size when computing its contribution to the counter\&. Note that the -"epoch" -mallctl has no bearing on this counter\&. Furthermore, counter consistency is maintained via atomic operations, so it is necessary to use an atomic operation in order to guarantee a consistent read when dereferencing the pointer\&. -.RE -.PP -"stats\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Total number of bytes allocated by the application\&. -.RE -.PP -"stats\&.active" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to -"stats\&.allocated"\&. This does not include -"stats\&.arenas\&.<i>\&.pdirty" -and pages entirely devoted to allocator metadata\&. -.RE -.PP -"stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Total number of bytes in chunks mapped on behalf of the application\&. This is a multiple of the chunk size, and is at least as large as -"stats\&.active"\&. This does not include inactive chunks\&. -.RE -.PP -"stats\&.chunks\&.current" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Total number of chunks actively mapped on behalf of the application\&. This does not include inactive chunks\&. -.RE -.PP -"stats\&.chunks\&.total" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of chunks allocated\&. -.RE -.PP -"stats\&.chunks\&.high" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Maximum number of active chunks at any time thus far\&. -.RE -.PP -"stats\&.huge\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Number of bytes currently allocated by huge objects\&. 
-.RE -.PP -"stats\&.huge\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of huge allocation requests\&. -.RE -.PP -"stats\&.huge\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of huge deallocation requests\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.dss" (\fBconst char *\fR) r\- -.RS 4 -dss (\fBsbrk\fR(2)) allocation precedence as related to -\fBmmap\fR(2) -allocation\&. See -"opt\&.dss" -for details\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.nthreads" (\fBunsigned\fR) r\- -.RS 4 -Number of threads currently assigned to arena\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.pactive" (\fBsize_t\fR) r\- -.RS 4 -Number of pages in active runs\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.pdirty" (\fBsize_t\fR) r\- -.RS 4 -Number of pages within unused runs that are potentially dirty, and for which -\fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR -or similar has not been called\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Number of mapped bytes\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.npurge" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Number of dirty page purge sweeps performed\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.nmadvise" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Number of -\fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR -or similar calls made to purge dirty pages\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.npurged" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Number of pages purged\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.small\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Number of bytes currently allocated by small objects\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.small\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of allocation requests served by small bins\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.small\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of small objects returned to bins\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.small\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of small allocation requests\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.large\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Number of bytes currently allocated by large objects\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.large\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of large allocation requests served directly by the arena\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.large\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of large deallocation requests served directly by the arena\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.large\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of large allocation requests\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.bins\&.<j>\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Current number of bytes allocated by bin\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of allocations served by bin\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.bins\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of allocations returned to bin\&. 
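The per-arena counters above embed the arena index in the mallctl name. As a small illustrative sketch (again assuming --enable-stats, and an "epoch" refresh beforehand), the page counts for arena 0 could be read by building the name at run time:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	char name[64];
	unsigned arena = 0;
	size_t pactive = 0, pdirty = 0, sz = sizeof(size_t);

	snprintf(name, sizeof(name), "stats.arenas.%u.pactive", arena);
	mallctl(name, &pactive, &sz, NULL, 0);
	snprintf(name, sizeof(name), "stats.arenas.%u.pdirty", arena);
	mallctl(name, &pdirty, &sz, NULL, 0);

	printf("arena %u: %zu active pages, %zu dirty pages\n",
	    arena, pactive, pdirty);
	return 0;
}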
-.RE -.PP -"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of allocation requests\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nfills" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR] -.RS 4 -Cumulative number of tcache fills\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nflushes" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR] -.RS 4 -Cumulative number of tcache flushes\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of runs created\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nreruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of times the current run from which to allocate changed\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.bins\&.<j>\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Current number of runs\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of allocation requests for this size class served directly by the arena\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of deallocation requests for this size class served directly by the arena\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Cumulative number of allocation requests for this size class\&. -.RE -.PP -"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] -.RS 4 -Current number of runs for this size class\&. -.RE -.SH "DEBUGGING MALLOC PROBLEMS" -.PP -When debugging, it is a good idea to configure/build jemalloc with the -\fB\-\-enable\-debug\fR -and -\fB\-\-enable\-fill\fR -options, and recompile the program with suitable options and symbols for debugger support\&. When so configured, jemalloc incorporates a wide variety of run\-time assertions that catch application errors such as double\-free, write\-after\-free, etc\&. -.PP -Programs often accidentally depend on \(lquninitialized\(rq memory actually being filled with zero bytes\&. Junk filling (see the -"opt\&.junk" -option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps\&. Conversely, zero filling (see the -"opt\&.zero" -option) eliminates the symptoms of such bugs\&. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs\&. -.PP -This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive\&. However, jemalloc does integrate with the most excellent -\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2 -tool if the -\fB\-\-enable\-valgrind\fR -configuration option is enabled\&. -.SH "DIAGNOSTIC MESSAGES" -.PP -If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor -\fBSTDERR_FILENO\fR\&. Errors will result in the process dumping core\&. If the -"opt\&.abort" -option is set, most warnings are treated as errors\&. -.PP -The -\fImalloc_message\fR -variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the -\fBSTDERR_FILENO\fR -file descriptor is not suitable for this\&. 
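For instance, a replacement writer that redirects allocator diagnostics to an already-open log descriptor, and that allocates no memory itself, could be installed roughly as follows. This is only a sketch: the log descriptor is an assumption, and the exact symbol name depends on how jemalloc was configured (an unprefixed build is assumed here).

#include <string.h>
#include <unistd.h>
#include <jemalloc/jemalloc.h>

/* Assumed log descriptor for illustration; any writable, already-open fd. */
#define LOG_FD 3

static void
write_to_log(void *cbopaque, const char *s)
{
	(void)cbopaque;
	/* Must not allocate: emit the message with write(2) as-is. */
	write(LOG_FD, s, strlen(s));
}

int
main(void)
{
	malloc_message = write_to_log;
	/* ... allocator warnings and errors now go to LOG_FD ... */
	return 0;
}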
-\fBmalloc_message\fR\fB\fR -takes the -\fIcbopaque\fR -pointer argument that is -\fBNULL\fR -unless overridden by the arguments in a call to -\fBmalloc_stats_print\fR\fB\fR, followed by a string pointer\&. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock\&. -.PP -All messages are prefixed by \(lq<jemalloc>:\(rq\&. -.SH "RETURN VALUES" -.SS "Standard API" -.PP -The -\fBmalloc\fR\fB\fR -and -\fBcalloc\fR\fB\fR -functions return a pointer to the allocated memory if successful; otherwise a -\fBNULL\fR -pointer is returned and -\fIerrno\fR -is set to -ENOMEM\&. -.PP -The -\fBposix_memalign\fR\fB\fR -function returns the value 0 if successful; otherwise it returns an error value\&. The -\fBposix_memalign\fR\fB\fR -function will fail if: -.PP -EINVAL -.RS 4 -The -\fIalignment\fR -parameter is not a power of 2 at least as large as -sizeof(\fBvoid *\fR)\&. -.RE -.PP -ENOMEM -.RS 4 -Memory allocation error\&. -.RE -.PP -The -\fBaligned_alloc\fR\fB\fR -function returns a pointer to the allocated memory if successful; otherwise a -\fBNULL\fR -pointer is returned and -\fIerrno\fR -is set\&. The -\fBaligned_alloc\fR\fB\fR -function will fail if: -.PP -EINVAL -.RS 4 -The -\fIalignment\fR -parameter is not a power of 2\&. -.RE -.PP -ENOMEM -.RS 4 -Memory allocation error\&. -.RE -.PP -The -\fBrealloc\fR\fB\fR -function returns a pointer, possibly identical to -\fIptr\fR, to the allocated memory if successful; otherwise a -\fBNULL\fR -pointer is returned, and -\fIerrno\fR -is set to -ENOMEM -if the error was the result of an allocation failure\&. The -\fBrealloc\fR\fB\fR -function always leaves the original buffer intact when an error occurs\&. -.PP -The -\fBfree\fR\fB\fR -function returns no value\&. -.SS "Non\-standard API" -.PP -The -\fBmalloc_usable_size\fR\fB\fR -function returns the usable size of the allocation pointed to by -\fIptr\fR\&. -.PP -The -\fBmallctl\fR\fB\fR, -\fBmallctlnametomib\fR\fB\fR, and -\fBmallctlbymib\fR\fB\fR -functions return 0 on success; otherwise they return an error value\&. The functions will fail if: -.PP -EINVAL -.RS 4 -\fInewp\fR -is not -\fBNULL\fR, and -\fInewlen\fR -is too large or too small\&. Alternatively, -\fI*oldlenp\fR -is too large or too small; in this case as much data as possible are read despite the error\&. -.RE -.PP -ENOMEM -.RS 4 -\fI*oldlenp\fR -is too short to hold the requested value\&. -.RE -.PP -ENOENT -.RS 4 -\fIname\fR -or -\fImib\fR -specifies an unknown/invalid value\&. -.RE -.PP -EPERM -.RS 4 -Attempt to read or write void value, or attempt to write read\-only value\&. -.RE -.PP -EAGAIN -.RS 4 -A memory allocation failure occurred\&. -.RE -.PP -EFAULT -.RS 4 -An interface with side effects failed in some way not directly related to -\fBmallctl*\fR\fB\fR -read/write processing\&. -.RE -.SS "Experimental API" -.PP -The -\fBallocm\fR\fB\fR, -\fBrallocm\fR\fB\fR, -\fBsallocm\fR\fB\fR, -\fBdallocm\fR\fB\fR, and -\fBnallocm\fR\fB\fR -functions return -\fBALLOCM_SUCCESS\fR -on success; otherwise they return an error value\&. The -\fBallocm\fR\fB\fR, -\fBrallocm\fR\fB\fR, and -\fBnallocm\fR\fB\fR -functions will fail if: -.PP -ALLOCM_ERR_OOM -.RS 4 -Out of memory\&. Insufficient contiguous memory was available to service the allocation request\&. The -\fBallocm\fR\fB\fR -function additionally sets -\fI*ptr\fR -to -\fBNULL\fR, whereas the -\fBrallocm\fR\fB\fR -function leaves -\fB*ptr\fR -unmodified\&. 
-.RE -The -\fBrallocm\fR\fB\fR -function will also fail if: -.PP -ALLOCM_ERR_NOT_MOVED -.RS 4 -\fBALLOCM_NO_MOVE\fR -was specified, but the reallocation request could not be serviced without moving the object\&. -.RE -.SH "ENVIRONMENT" -.PP -The following environment variable affects the execution of the allocation functions: -.PP -\fBMALLOC_CONF\fR -.RS 4 -If the environment variable -\fBMALLOC_CONF\fR -is set, the characters it contains will be interpreted as options\&. -.RE -.SH "EXAMPLES" -.PP -To dump core whenever a problem occurs: -.sp -.if n \{\ -.RS 4 -.\} -.nf -ln \-s \*(Aqabort:true\*(Aq /etc/malloc\&.conf -.fi -.if n \{\ -.RE -.\} -.PP -To specify in the source a chunk size that is 16 MiB: -.sp -.if n \{\ -.RS 4 -.\} -.nf -malloc_conf = "lg_chunk:24"; -.fi -.if n \{\ -.RE -.\} -.SH "SEE ALSO" -.PP -\fBmadvise\fR(2), -\fBmmap\fR(2), -\fBsbrk\fR(2), -\fButrace\fR(2), -\fBalloca\fR(3), -\fBatexit\fR(3), -\fBgetpagesize\fR(3) -.SH "STANDARDS" -.PP -The -\fBmalloc\fR\fB\fR, -\fBcalloc\fR\fB\fR, -\fBrealloc\fR\fB\fR, and -\fBfree\fR\fB\fR -functions conform to ISO/IEC 9899:1990 (\(lqISO C90\(rq)\&. -.PP -The -\fBposix_memalign\fR\fB\fR -function conforms to IEEE Std 1003\&.1\-2001 (\(lqPOSIX\&.1\(rq)\&. -.SH "AUTHOR" -.PP -\fBJason Evans\fR -.RS 4 -.RE -.SH "NOTES" -.IP " 1." 4 -jemalloc website -.RS 4 -\%http://www.canonware.com/jemalloc/ -.RE -.IP " 2." 4 -Valgrind -.RS 4 -\%http://valgrind.org/ -.RE -.IP " 3." 4 -gperftools package -.RS 4 -\%http://code.google.com/p/gperftools/ -.RE diff --git a/extra/jemalloc/doc/jemalloc.html b/extra/jemalloc/doc/jemalloc.html deleted file mode 100644 index 3e0fe26d1bd..00000000000 --- a/extra/jemalloc/doc/jemalloc.html +++ /dev/null @@ -1,1417 +0,0 @@ -<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>JEMALLOC</title><meta name="generator" content="DocBook XSL Stylesheets V1.76.1"></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry" title="JEMALLOC"><a name="id286949159"></a><div class="titlepage"></div><div class="refnamediv"><h2>Name</h2><p>jemalloc — general purpose memory allocation functions</p></div><div class="refsect1" title="LIBRARY"><a name="library"></a><h2>LIBRARY</h2><p>This manual describes jemalloc 3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784. 
More information - can be found at the <a class="ulink" href="http://www.canonware.com/jemalloc/" target="_top">jemalloc website</a>.</p></div><div class="refsynopsisdiv" title="SYNOPSIS"><h2>SYNOPSIS</h2><div class="funcsynopsis"><pre class="funcsynopsisinfo">#include <<code class="filename">stdlib.h</code>> -#include <<code class="filename">jemalloc/jemalloc.h</code>></pre><div class="refsect2" title="Standard API"><a name="id286901505"></a><h3>Standard API</h3><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void *<b class="fsfunc">malloc</b>(</code></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void *<b class="fsfunc">calloc</b>(</code></td><td>size_t <var class="pdparam">number</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">posix_memalign</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void *<b class="fsfunc">aligned_alloc</b>(</code></td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void *<b class="fsfunc">realloc</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void <b class="fsfunc">free</b>(</code></td><td>void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="refsect2" title="Non-standard API"><a name="id286900549"></a><h3>Non-standard API</h3><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">size_t <b class="fsfunc">malloc_usable_size</b>(</code></td><td>const void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void <b class="fsfunc">malloc_stats_print</b>(</code></td><td>void <var class="pdparam">(*write_cb)</var> - <code>(</code>void *, const char *<code>)</code> - , </td></tr><tr><td> </td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">opts</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> 
</div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">mallctl</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlnametomib</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">mibp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">miblenp</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlbymib</b>(</code></td><td>const size_t *<var class="pdparam">mib</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">miblen</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void <b class="fsfunc">(*malloc_message)</b>(</code></td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">s</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><p><span class="type">const char *</span><code class="varname">malloc_conf</code>;</p></div><div class="refsect2" title="Experimental API"><a name="id286900756"></a><h3>Experimental API</h3><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">allocm</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">rallocm</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">extra</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">sallocm</b>(</code></td><td>const void *<var 
class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">dallocm</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">nallocm</b>(</code></td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div></div></div><div class="refsect1" title="DESCRIPTION"><a name="description"></a><h2>DESCRIPTION</h2><div class="refsect2" title="Standard API"><a name="id286949297"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) function allocates - <em class="parameter"><code>size</code></em> bytes of uninitialized memory. The allocated - space is suitably aligned (after possible pointer coercion) for storage - of any type of object.</p><p>The <code class="function">calloc</code>(<em class="parameter"><code></code></em>) function allocates - space for <em class="parameter"><code>number</code></em> objects, each - <em class="parameter"><code>size</code></em> bytes in length. The result is identical to - calling <code class="function">malloc</code>(<em class="parameter"><code></code></em>) with an argument of - <em class="parameter"><code>number</code></em> * <em class="parameter"><code>size</code></em>, with the - exception that the allocated memory is explicitly initialized to zero - bytes.</p><p>The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function - allocates <em class="parameter"><code>size</code></em> bytes of memory such that the - allocation's base address is an even multiple of - <em class="parameter"><code>alignment</code></em>, and returns the allocation in the value - pointed to by <em class="parameter"><code>ptr</code></em>. The requested - <em class="parameter"><code>alignment</code></em> must be a power of 2 at least as large - as <code class="code">sizeof(<span class="type">void *</span>)</code>.</p><p>The <code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function - allocates <em class="parameter"><code>size</code></em> bytes of memory such that the - allocation's base address is an even multiple of - <em class="parameter"><code>alignment</code></em>. The requested - <em class="parameter"><code>alignment</code></em> must be a power of 2. Behavior is - undefined if <em class="parameter"><code>size</code></em> is not an integral multiple of - <em class="parameter"><code>alignment</code></em>.</p><p>The <code class="function">realloc</code>(<em class="parameter"><code></code></em>) function changes the - size of the previously allocated memory referenced by - <em class="parameter"><code>ptr</code></em> to <em class="parameter"><code>size</code></em> bytes. 
The - contents of the memory are unchanged up to the lesser of the new and old - sizes. If the new size is larger, the contents of the newly allocated - portion of the memory are undefined. Upon success, the memory referenced - by <em class="parameter"><code>ptr</code></em> is freed and a pointer to the newly - allocated memory is returned. Note that - <code class="function">realloc</code>(<em class="parameter"><code></code></em>) may move the memory allocation, - resulting in a different return value than <em class="parameter"><code>ptr</code></em>. - If <em class="parameter"><code>ptr</code></em> is <code class="constant">NULL</code>, the - <code class="function">realloc</code>(<em class="parameter"><code></code></em>) function behaves identically to - <code class="function">malloc</code>(<em class="parameter"><code></code></em>) for the specified size.</p><p>The <code class="function">free</code>(<em class="parameter"><code></code></em>) function causes the - allocated memory referenced by <em class="parameter"><code>ptr</code></em> to be made - available for future allocations. If <em class="parameter"><code>ptr</code></em> is - <code class="constant">NULL</code>, no action occurs.</p></div><div class="refsect2" title="Non-standard API"><a name="id286949561"></a><h3>Non-standard API</h3><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function - returns the usable size of the allocation pointed to by - <em class="parameter"><code>ptr</code></em>. The return value may be larger than the size - that was requested during allocation. The - <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function is not a - mechanism for in-place <code class="function">realloc</code>(<em class="parameter"><code></code></em>); rather - it is provided solely as a tool for introspection purposes. Any - discrepancy between the requested allocation size and the size reported - by <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) should not be - depended on, since such behavior is entirely implementation-dependent. - </p><p>The <code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>) function - writes human-readable summary statistics via the - <em class="parameter"><code>write_cb</code></em> callback function pointer and - <em class="parameter"><code>cbopaque</code></em> data passed to - <em class="parameter"><code>write_cb</code></em>, or - <code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) if - <em class="parameter"><code>write_cb</code></em> is <code class="constant">NULL</code>. This - function can be called repeatedly. General information that never - changes during execution can be omitted by specifying "g" as a character - within the <em class="parameter"><code>opts</code></em> string. Note that - <code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) uses the - <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions internally, so - inconsistent statistics can be reported if multiple threads use these - functions simultaneously. If <code class="option">--enable-stats</code> is - specified during configuration, “m” and “a” can - be specified to omit merged arena and per arena statistics, respectively; - “b” and “l” can be specified to omit per size - class statistics for bins and large objects, respectively. 
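As a hedged illustration of those option characters, the following call prints a condensed report through the default malloc_message writer:

#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* NULL write_cb/cbopaque selects malloc_message; "gbl" omits the
	 * general header plus per-bin and per-large-size-class detail
	 * (the latter two only matter with --enable-stats). */
	malloc_stats_print(NULL, NULL, "gbl");
	return 0;
}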
Unrecognized - characters are silently ignored. Note that thread caching may prevent - some statistics from being completely up to date, since extra locking - would be required to merge counters that track thread cache operations. - </p><p>The <code class="function">mallctl</code>(<em class="parameter"><code></code></em>) function provides a - general interface for introspecting the memory allocator, as well as - setting modifiable parameters and triggering actions. The - period-separated <em class="parameter"><code>name</code></em> argument specifies a - location in a tree-structured namespace; see the <a class="xref" href="#mallctl_namespace" title="MALLCTL NAMESPACE">MALLCTL NAMESPACE</a> section for - documentation on the tree contents. To read a value, pass a pointer via - <em class="parameter"><code>oldp</code></em> to adequate space to contain the value, and a - pointer to its length via <em class="parameter"><code>oldlenp</code></em>; otherwise pass - <code class="constant">NULL</code> and <code class="constant">NULL</code>. Similarly, to - write a value, pass a pointer to the value via - <em class="parameter"><code>newp</code></em>, and its length via - <em class="parameter"><code>newlen</code></em>; otherwise pass <code class="constant">NULL</code> - and <code class="constant">0</code>.</p><p>The <code class="function">mallctlnametomib</code>(<em class="parameter"><code></code></em>) function - provides a way to avoid repeated name lookups for applications that - repeatedly query the same portion of the namespace, by translating a name - to a “Management Information Base” (MIB) that can be passed - repeatedly to <code class="function">mallctlbymib</code>(<em class="parameter"><code></code></em>). Upon - successful return from <code class="function">mallctlnametomib</code>(<em class="parameter"><code></code></em>), - <em class="parameter"><code>mibp</code></em> contains an array of - <em class="parameter"><code>*miblenp</code></em> integers, where - <em class="parameter"><code>*miblenp</code></em> is the lesser of the number of components - in <em class="parameter"><code>name</code></em> and the input value of - <em class="parameter"><code>*miblenp</code></em>. Thus it is possible to pass a - <em class="parameter"><code>*miblenp</code></em> that is smaller than the number of - period-separated name components, which results in a partial MIB that can - be used as the basis for constructing a complete MIB. For name - components that are integers (e.g. the 2 in - <a class="link" href="#arenas.bin.i.size"> - "<code class="mallctl">arenas.bin.2.size</code>" - </a>), - the corresponding MIB component will always be that integer. Therefore, - it is legitimate to construct code like the following: </p><pre class="programlisting"> -unsigned nbins, i; - -int mib[4]; -size_t len, miblen; - -len = sizeof(nbins); -mallctl("arenas.nbins", &nbins, &len, NULL, 0); - -miblen = 4; -mallnametomib("arenas.bin.0.size", mib, &miblen); -for (i = 0; i < nbins; i++) { - size_t bin_size; - - mib[2] = i; - len = sizeof(bin_size); - mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); - /* Do something with bin_size... */ -}</pre></div><div class="refsect2" title="Experimental API"><a name="id286949870"></a><h3>Experimental API</h3><p>The experimental API is subject to change or removal without regard - for backward compatibility. 
If <code class="option">--disable-experimental</code> - is specified during configuration, the experimental API is - omitted.</p><p>The <code class="function">allocm</code>(<em class="parameter"><code></code></em>), - <code class="function">rallocm</code>(<em class="parameter"><code></code></em>), - <code class="function">sallocm</code>(<em class="parameter"><code></code></em>), - <code class="function">dallocm</code>(<em class="parameter"><code></code></em>), and - <code class="function">nallocm</code>(<em class="parameter"><code></code></em>) functions all have a - <em class="parameter"><code>flags</code></em> argument that can be used to specify - options. The functions only check the options that are contextually - relevant. Use bitwise or (<code class="code">|</code>) operations to - specify one or more of the following: - </p><div class="variablelist"><dl><dt><span class="term"><code class="constant">ALLOCM_LG_ALIGN(<em class="parameter"><code>la</code></em>) - </code></span></dt><dd><p>Align the memory allocation to start at an address - that is a multiple of <code class="code">(1 << - <em class="parameter"><code>la</code></em>)</code>. This macro does not validate - that <em class="parameter"><code>la</code></em> is within the valid - range.</p></dd><dt><span class="term"><code class="constant">ALLOCM_ALIGN(<em class="parameter"><code>a</code></em>) - </code></span></dt><dd><p>Align the memory allocation to start at an address - that is a multiple of <em class="parameter"><code>a</code></em>, where - <em class="parameter"><code>a</code></em> is a power of two. This macro does not - validate that <em class="parameter"><code>a</code></em> is a power of 2. - </p></dd><dt><span class="term"><code class="constant">ALLOCM_ZERO</code></span></dt><dd><p>Initialize newly allocated memory to contain zero - bytes. In the growing reallocation case, the real size prior to - reallocation defines the boundary between untouched bytes and those - that are initialized to contain zero bytes. If this option is - absent, newly allocated memory is uninitialized.</p></dd><dt><span class="term"><code class="constant">ALLOCM_NO_MOVE</code></span></dt><dd><p>For reallocation, fail rather than moving the - object. This constraint can apply to both growth and - shrinkage.</p></dd><dt><span class="term"><code class="constant">ALLOCM_ARENA(<em class="parameter"><code>a</code></em>) - </code></span></dt><dd><p>Use the arena specified by the index - <em class="parameter"><code>a</code></em>. This macro does not validate that - <em class="parameter"><code>a</code></em> specifies an arena in the valid - range.</p></dd></dl></div><p> - </p><p>The <code class="function">allocm</code>(<em class="parameter"><code></code></em>) function allocates at - least <em class="parameter"><code>size</code></em> bytes of memory, sets - <em class="parameter"><code>*ptr</code></em> to the base address of the allocation, and - sets <em class="parameter"><code>*rsize</code></em> to the real size of the allocation if - <em class="parameter"><code>rsize</code></em> is not <code class="constant">NULL</code>. 
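As a sketch of how these flags combine (valid only when the experimental API is compiled in; the sizes and alignment below are arbitrary illustration values):

#include <stddef.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p = NULL;
	size_t real = 0;

	/* Request at least 4096 bytes, 64-byte aligned, zero-initialized. */
	if (allocm(&p, &real, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO)
	    == ALLOCM_SUCCESS) {
		/* real now holds the usable size actually provided. */
		dallocm(p, 0);
	}
	return 0;
}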
Behavior - is undefined if <em class="parameter"><code>size</code></em> is - <code class="constant">0</code>.</p><p>The <code class="function">rallocm</code>(<em class="parameter"><code></code></em>) function resizes the - allocation at <em class="parameter"><code>*ptr</code></em> to be at least - <em class="parameter"><code>size</code></em> bytes, sets <em class="parameter"><code>*ptr</code></em> to - the base address of the allocation if it moved, and sets - <em class="parameter"><code>*rsize</code></em> to the real size of the allocation if - <em class="parameter"><code>rsize</code></em> is not <code class="constant">NULL</code>. If - <em class="parameter"><code>extra</code></em> is non-zero, an attempt is made to resize - the allocation to be at least <code class="code"><em class="parameter"><code>size</code></em> + - <em class="parameter"><code>extra</code></em>)</code> bytes, though inability to allocate - the extra byte(s) will not by itself result in failure. Behavior is - undefined if <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or if - <code class="code">(<em class="parameter"><code>size</code></em> + - <em class="parameter"><code>extra</code></em> > - <code class="constant">SIZE_T_MAX</code>)</code>.</p><p>The <code class="function">sallocm</code>(<em class="parameter"><code></code></em>) function sets - <em class="parameter"><code>*rsize</code></em> to the real size of the allocation.</p><p>The <code class="function">dallocm</code>(<em class="parameter"><code></code></em>) function causes the - memory referenced by <em class="parameter"><code>ptr</code></em> to be made available for - future allocations.</p><p>The <code class="function">nallocm</code>(<em class="parameter"><code></code></em>) function allocates no - memory, but it performs the same size computation as the - <code class="function">allocm</code>(<em class="parameter"><code></code></em>) function, and if - <em class="parameter"><code>rsize</code></em> is not <code class="constant">NULL</code> it sets - <em class="parameter"><code>*rsize</code></em> to the real size of the allocation that - would result from the equivalent <code class="function">allocm</code>(<em class="parameter"><code></code></em>) - function call. Behavior is undefined if - <em class="parameter"><code>size</code></em> is <code class="constant">0</code>.</p></div></div><div class="refsect1" title="TUNING"><a name="tuning"></a><h2>TUNING</h2><p>Once, when the first call is made to one of the memory allocation - routines, the allocator initializes its internals based in part on various - options that can be specified at compile- or run-time.</p><p>The string pointed to by the global variable - <code class="varname">malloc_conf</code>, the “name” of the file - referenced by the symbolic link named <code class="filename">/etc/malloc.conf</code>, and the value of the - environment variable <code class="envar">MALLOC_CONF</code>, will be interpreted, in - that order, from left to right as options.</p><p>An options string is a comma-separated list of option:value pairs. - There is one key corresponding to each <a class="link" href="#opt.abort"> - "<code class="mallctl">opt.*</code>" - </a> mallctl (see the <a class="xref" href="#mallctl_namespace" title="MALLCTL NAMESPACE">MALLCTL NAMESPACE</a> section for options - documentation). 
For example, <code class="literal">abort:true,narenas:1</code> sets - the <a class="link" href="#opt.abort"> - "<code class="mallctl">opt.abort</code>" - </a> and <a class="link" href="#opt.narenas"> - "<code class="mallctl">opt.narenas</code>" - </a> options. Some - options have boolean values (true/false), others have integer values (base - 8, 10, or 16, depending on prefix), and yet others have raw string - values.</p></div><div class="refsect1" title="IMPLEMENTATION NOTES"><a name="implementation_notes"></a><h2>IMPLEMENTATION NOTES</h2><p>Traditionally, allocators have used - <span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> to obtain memory, which is - suboptimal for several reasons, including race conditions, increased - fragmentation, and artificial limitations on maximum usable memory. If - <code class="option">--enable-dss</code> is specified during configuration, this - allocator uses both <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> and - <span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>, in that order of preference; - otherwise only <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> is used.</p><p>This allocator uses multiple arenas in order to reduce lock - contention for threaded programs on multi-processor systems. This works - well with regard to threading scalability, but incurs some costs. There is - a small fixed per-arena overhead, and additionally, arenas manage memory - completely independently of each other, which means a small fixed increase - in overall memory fragmentation. These overheads are not generally an - issue, given the number of arenas normally used. Note that using - substantially more arenas than the default is not likely to improve - performance, mainly due to reduced cache performance. However, it may make - sense to reduce the number of arenas if an application does not make much - use of the allocation functions.</p><p>In addition to multiple arenas, unless - <code class="option">--disable-tcache</code> is specified during configuration, this - allocator supports thread-specific caching for small and large objects, in - order to make it possible to completely avoid synchronization for most - allocation requests. Such caching allows very fast allocation in the - common case, but it increases memory usage and fragmentation, since a - bounded number of objects can remain allocated in each thread cache.</p><p>Memory is conceptually broken into equal-sized chunks, where the - chunk size is a power of two that is greater than the page size. Chunks - are always aligned to multiples of the chunk size. This alignment makes it - possible to find metadata for user objects very quickly.</p><p>User objects are broken into three categories according to size: - small, large, and huge. Small objects are smaller than one page. Large - objects are smaller than the chunk size. Huge objects are a multiple of - the chunk size. Small and large objects are managed by arenas; huge - objects are managed separately in a single data structure that is shared by - all threads. Huge objects are used by applications infrequently enough - that this single data structure is not a scalability issue.</p><p>Each chunk that is managed by an arena tracks its contents as runs of - contiguous pages (unused, backing a set of small objects, or backing one - large object). 
The combination of chunk alignment and chunk page maps - makes it possible to determine all metadata regarding small and large - allocations in constant time.</p><p>Small objects are managed in groups by page runs. Each run maintains - a frontier and free list to track which regions are in use. Allocation - requests that are no more than half the quantum (8 or 16, depending on - architecture) are rounded up to the nearest power of two that is at least - <code class="code">sizeof(<span class="type">double</span>)</code>. All other small - object size classes are multiples of the quantum, spaced such that internal - fragmentation is limited to approximately 25% for all but the smallest size - classes. Allocation requests that are larger than the maximum small size - class, but small enough to fit in an arena-managed chunk (see the <a class="link" href="#opt.lg_chunk"> - "<code class="mallctl">opt.lg_chunk</code>" - </a> option), are - rounded up to the nearest run size. Allocation requests that are too large - to fit in an arena-managed chunk are rounded up to the nearest multiple of - the chunk size.</p><p>Allocations are packed tightly together, which can be an issue for - multi-threaded applications. If you need to assure that allocations do not - suffer from cacheline sharing, round your allocation requests up to the - nearest multiple of the cacheline size, or specify cacheline alignment when - allocating.</p><p>Assuming 4 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit - system, the size classes in each category are as shown in <a class="xref" href="#size_classes" title="Table 1. Size classes">Table 1</a>.</p><div class="table"><a name="size_classes"></a><p class="title"><b>Table 1. Size classes</b></p><div class="table-contents"><table summary="Size classes" border="1"><colgroup><col align="left" class="c1"><col align="right" class="c2"><col align="left" class="c3"></colgroup><thead><tr><th align="left">Category</th><th align="right">Spacing</th><th align="left">Size</th></tr></thead><tbody><tr><td rowspan="7" align="left">Small</td><td align="right">lg</td><td align="left">[8]</td></tr><tr><td align="right">16</td><td align="left">[16, 32, 48, ..., 128]</td></tr><tr><td align="right">32</td><td align="left">[160, 192, 224, 256]</td></tr><tr><td align="right">64</td><td align="left">[320, 384, 448, 512]</td></tr><tr><td align="right">128</td><td align="left">[640, 768, 896, 1024]</td></tr><tr><td align="right">256</td><td align="left">[1280, 1536, 1792, 2048]</td></tr><tr><td align="right">512</td><td align="left">[2560, 3072, 3584]</td></tr><tr><td align="left">Large</td><td align="right">4 KiB</td><td align="left">[4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB]</td></tr><tr><td align="left">Huge</td><td align="right">4 MiB</td><td align="left">[4 MiB, 8 MiB, 12 MiB, ...]</td></tr></tbody></table></div></div><br class="table-break"></div><div class="refsect1" title="MALLCTL NAMESPACE"><a name="mallctl_namespace"></a><h2>MALLCTL NAMESPACE</h2><p>The following names are defined in the namespace accessible via the - <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions. Value types are - specified in parentheses, their readable/writable statuses are encoded as - <code class="literal">rw</code>, <code class="literal">r-</code>, <code class="literal">-w</code>, or - <code class="literal">--</code>, and required build configuration flags follow, if - any. 
A name element encoded as <code class="literal"><i></code> or - <code class="literal"><j></code> indicates an integer component, where the - integer varies from 0 to some upper value that must be determined via - introspection. In the case of - "<code class="mallctl">stats.arenas.<i>.*</code>" - , - <code class="literal"><i></code> equal to <a class="link" href="#arenas.narenas"> - "<code class="mallctl">arenas.narenas</code>" - </a> can be - used to access the summation of statistics from all arenas. Take special - note of the <a class="link" href="#epoch"> - "<code class="mallctl">epoch</code>" - </a> mallctl, - which controls refreshing of cached dynamic statistics.</p><div class="variablelist"><dl><dt><span class="term"> - - "<code class="mallctl">version</code>" - - (<span class="type">const char *</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Return the jemalloc version string.</p></dd><dt><a name="epoch"></a><span class="term"> - - "<code class="mallctl">epoch</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">rw</code> - </span></dt><dd><p>If a value is passed in, refresh the data from which - the <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions report values, - and increment the epoch. Return the current epoch. This is useful for - detecting whether another thread caused a refresh.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.debug</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-debug</code> was specified during - build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.dss</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-dss</code> was specified during - build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.fill</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-fill</code> was specified during - build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.lazy_lock</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-lazy-lock</code> was specified - during build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.mremap</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-mremap</code> was specified during - build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.munmap</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-munmap</code> was specified during - build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.prof</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-prof</code> was specified during - build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.prof_libgcc</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--disable-prof-libgcc</code> was not - specified during build configuration.</p></dd><dt><span class="term"> - - "<code 
class="mallctl">config.prof_libunwind</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-prof-libunwind</code> was specified - during build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.stats</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-stats</code> was specified during - build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.tcache</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--disable-tcache</code> was not specified - during build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.tls</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--disable-tls</code> was not specified during - build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.utrace</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-utrace</code> was specified during - build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.valgrind</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-valgrind</code> was specified during - build configuration.</p></dd><dt><span class="term"> - - "<code class="mallctl">config.xmalloc</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p><code class="option">--enable-xmalloc</code> was specified during - build configuration.</p></dd><dt><a name="opt.abort"></a><span class="term"> - - "<code class="mallctl">opt.abort</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Abort-on-warning enabled/disabled. If true, most - warnings are fatal. The process will call - <span class="citerefentry"><span class="refentrytitle">abort</span>(3)</span> in these cases. This option is - disabled by default unless <code class="option">--enable-debug</code> is - specified during configuration, in which case it is enabled by default. - </p></dd><dt><a name="opt.lg_chunk"></a><span class="term"> - - "<code class="mallctl">opt.lg_chunk</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Virtual memory chunk size (log base 2). If a chunk - size outside the supported size range is specified, the size is - silently clipped to the minimum/maximum supported size. The default - chunk size is 4 MiB (2^22). - </p></dd><dt><a name="opt.dss"></a><span class="term"> - - "<code class="mallctl">opt.dss</code>" - - (<span class="type">const char *</span>) - <code class="literal">r-</code> - </span></dt><dd><p>dss (<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>) allocation precedence as - related to <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> allocation. 
The following - settings are supported: “disabled”, “primary”, - and “secondary” (default).</p></dd><dt><a name="opt.narenas"></a><span class="term"> - - "<code class="mallctl">opt.narenas</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Maximum number of arenas to use for automatic - multiplexing of threads and arenas. The default is four times the - number of CPUs, or one if there is a single CPU.</p></dd><dt><a name="opt.lg_dirty_mult"></a><span class="term"> - - "<code class="mallctl">opt.lg_dirty_mult</code>" - - (<span class="type">ssize_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Per-arena minimum ratio (log base 2) of active to dirty - pages. Some dirty unused pages may be allowed to accumulate, within - the limit set by the ratio (or one chunk worth of dirty pages, - whichever is greater), before informing the kernel about some of those - pages via <span class="citerefentry"><span class="refentrytitle">madvise</span>(2)</span> or a similar system call. This - provides the kernel with sufficient information to recycle dirty pages - if physical memory becomes scarce and the pages remain unused. The - default minimum ratio is 8:1 (2^3:1); an option value of -1 will - disable dirty page purging.</p></dd><dt><a name="opt.stats_print"></a><span class="term"> - - "<code class="mallctl">opt.stats_print</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Enable/disable statistics printing at exit. If - enabled, the <code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>) - function is called at program exit via an - <span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span> function. If - <code class="option">--enable-stats</code> is specified during configuration, this - has the potential to cause deadlock for a multi-threaded process that - exits while one or more threads are executing in the memory allocation - functions. Therefore, this option should only be used with care; it is - primarily intended as a performance tuning aid during application - development. This option is disabled by default.</p></dd><dt><a name="opt.junk"></a><span class="term"> - - "<code class="mallctl">opt.junk</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-fill</code>] - </span></dt><dd><p>Junk filling enabled/disabled. If enabled, each byte - of uninitialized allocated memory will be initialized to - <code class="literal">0xa5</code>. All deallocated memory will be initialized to - <code class="literal">0x5a</code>. This is intended for debugging and will - impact performance negatively. This option is disabled by default - unless <code class="option">--enable-debug</code> is specified during - configuration, in which case it is enabled by default unless running - inside <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>.</p></dd><dt><a name="opt.quarantine"></a><span class="term"> - - "<code class="mallctl">opt.quarantine</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-fill</code>] - </span></dt><dd><p>Per thread quarantine size in bytes. If non-zero, each - thread maintains a FIFO object quarantine that stores up to the - specified number of bytes of memory. 
The quarantined memory is not - freed until it is released from quarantine, though it is immediately - junk-filled if the <a class="link" href="#opt.junk"> - "<code class="mallctl">opt.junk</code>" - </a> option is - enabled. This feature is of particular use in combination with <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>, which can detect attempts - to access quarantined objects. This is intended for debugging and will - impact performance negatively. The default quarantine size is 0 unless - running inside Valgrind, in which case the default is 16 - MiB.</p></dd><dt><a name="opt.redzone"></a><span class="term"> - - "<code class="mallctl">opt.redzone</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-fill</code>] - </span></dt><dd><p>Redzones enabled/disabled. If enabled, small - allocations have redzones before and after them. Furthermore, if the - <a class="link" href="#opt.junk"> - "<code class="mallctl">opt.junk</code>" - </a> option is - enabled, the redzones are checked for corruption during deallocation. - However, the primary intended purpose of this feature is to be used in - combination with <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>, - which needs redzones in order to do effective buffer overflow/underflow - detection. This option is intended for debugging and will impact - performance negatively. This option is disabled by - default unless running inside Valgrind.</p></dd><dt><a name="opt.zero"></a><span class="term"> - - "<code class="mallctl">opt.zero</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-fill</code>] - </span></dt><dd><p>Zero filling enabled/disabled. If enabled, each byte - of uninitialized allocated memory will be initialized to 0. Note that - this initialization only happens once for each byte, so - <code class="function">realloc</code>(<em class="parameter"><code></code></em>) and - <code class="function">rallocm</code>(<em class="parameter"><code></code></em>) calls do not zero memory that - was previously allocated. This is intended for debugging and will - impact performance negatively. This option is disabled by default. - </p></dd><dt><a name="opt.utrace"></a><span class="term"> - - "<code class="mallctl">opt.utrace</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-utrace</code>] - </span></dt><dd><p>Allocation tracing based on - <span class="citerefentry"><span class="refentrytitle">utrace</span>(2)</span> enabled/disabled. This option - is disabled by default.</p></dd><dt><a name="opt.valgrind"></a><span class="term"> - - "<code class="mallctl">opt.valgrind</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-valgrind</code>] - </span></dt><dd><p><a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a> - support enabled/disabled. This option is vestigal because jemalloc - auto-detects whether it is running inside Valgrind. This option is - disabled by default, unless running inside Valgrind.</p></dd><dt><a name="opt.xmalloc"></a><span class="term"> - - "<code class="mallctl">opt.xmalloc</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-xmalloc</code>] - </span></dt><dd><p>Abort-on-out-of-memory enabled/disabled. 
If enabled, - rather than returning failure for any allocation function, display a - diagnostic message on <code class="constant">STDERR_FILENO</code> and cause the - program to drop core (using - <span class="citerefentry"><span class="refentrytitle">abort</span>(3)</span>). If an application is - designed to depend on this behavior, set the option at compile time by - including the following in the source code: - </p><pre class="programlisting"> -malloc_conf = "xmalloc:true";</pre><p> - This option is disabled by default.</p></dd><dt><a name="opt.tcache"></a><span class="term"> - - "<code class="mallctl">opt.tcache</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-tcache</code>] - </span></dt><dd><p>Thread-specific caching enabled/disabled. When there - are multiple threads, each thread uses a thread-specific cache for - objects up to a certain size. Thread-specific caching allows many - allocations to be satisfied without performing any thread - synchronization, at the cost of increased memory use. See the - <a class="link" href="#opt.lg_tcache_max"> - "<code class="mallctl">opt.lg_tcache_max</code>" - </a> - option for related tuning information. This option is enabled by - default unless running inside <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>.</p></dd><dt><a name="opt.lg_tcache_max"></a><span class="term"> - - "<code class="mallctl">opt.lg_tcache_max</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-tcache</code>] - </span></dt><dd><p>Maximum size class (log base 2) to cache in the - thread-specific cache. At a minimum, all small size classes are - cached, and at a maximum all large size classes are cached. The - default maximum is 32 KiB (2^15).</p></dd><dt><a name="opt.prof"></a><span class="term"> - - "<code class="mallctl">opt.prof</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Memory profiling enabled/disabled. If enabled, profile - memory allocation activity. See the <a class="link" href="#opt.prof_active"> - "<code class="mallctl">opt.prof_active</code>" - </a> - option for on-the-fly activation/deactivation. See the <a class="link" href="#opt.lg_prof_sample"> - "<code class="mallctl">opt.lg_prof_sample</code>" - </a> - option for probabilistic sampling control. See the <a class="link" href="#opt.prof_accum"> - "<code class="mallctl">opt.prof_accum</code>" - </a> - option for control of cumulative sample reporting. See the <a class="link" href="#opt.lg_prof_interval"> - "<code class="mallctl">opt.lg_prof_interval</code>" - </a> - option for information on interval-triggered profile dumping, the <a class="link" href="#opt.prof_gdump"> - "<code class="mallctl">opt.prof_gdump</code>" - </a> - option for information on high-water-triggered profile dumping, and the - <a class="link" href="#opt.prof_final"> - "<code class="mallctl">opt.prof_final</code>" - </a> - option for final profile dumping. 
Profile output is compatible with - the included <span class="command"><strong>pprof</strong></span> Perl script, which originates - from the <a class="ulink" href="http://code.google.com/p/gperftools/" target="_top">gperftools - package</a>.</p></dd><dt><a name="opt.prof_prefix"></a><span class="term"> - - "<code class="mallctl">opt.prof_prefix</code>" - - (<span class="type">const char *</span>) - <code class="literal">r-</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Filename prefix for profile dumps. If the prefix is - set to the empty string, no automatic dumps will occur; this is - primarily useful for disabling the automatic final heap dump (which - also disables leak reporting, if enabled). The default prefix is - <code class="filename">jeprof</code>.</p></dd><dt><a name="opt.prof_active"></a><span class="term"> - - "<code class="mallctl">opt.prof_active</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Profiling activated/deactivated. This is a secondary - control mechanism that makes it possible to start the application with - profiling enabled (see the <a class="link" href="#opt.prof"> - "<code class="mallctl">opt.prof</code>" - </a> option) but - inactive, then toggle profiling at any time during program execution - with the <a class="link" href="#prof.active"> - "<code class="mallctl">prof.active</code>" - </a> mallctl. - This option is enabled by default.</p></dd><dt><a name="opt.lg_prof_sample"></a><span class="term"> - - "<code class="mallctl">opt.lg_prof_sample</code>" - - (<span class="type">ssize_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Average interval (log base 2) between allocation - samples, as measured in bytes of allocation activity. Increasing the - sampling interval decreases profile fidelity, but also decreases the - computational overhead. The default sample interval is 512 KiB (2^19 - B).</p></dd><dt><a name="opt.prof_accum"></a><span class="term"> - - "<code class="mallctl">opt.prof_accum</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Reporting of cumulative object/byte counts in profile - dumps enabled/disabled. If this option is enabled, every unique - backtrace must be stored for the duration of execution. Depending on - the application, this can impose a large memory overhead, and the - cumulative counts are not always of interest. This option is disabled - by default.</p></dd><dt><a name="opt.lg_prof_interval"></a><span class="term"> - - "<code class="mallctl">opt.lg_prof_interval</code>" - - (<span class="type">ssize_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Average interval (log base 2) between memory profile - dumps, as measured in bytes of allocation activity. The actual - interval between dumps may be sporadic because decentralized allocation - counters are used to avoid synchronization bottlenecks. Profiles are - dumped to files named according to the pattern - <code class="filename"><prefix>.<pid>.<seq>.i<iseq>.heap</code>, - where <code class="literal"><prefix></code> is controlled by the - <a class="link" href="#opt.prof_prefix"> - "<code class="mallctl">opt.prof_prefix</code>" - </a> - option. By default, interval-triggered profile dumping is disabled - (encoded as -1). 
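As a rough illustration of how the profiling options above combine (a sketch, not taken from the manual: it assumes jemalloc was configured with --enable-prof, and the interval value is only an example), an application can enable profiling at compile time through the malloc_conf variable described in the TUNING section:

    /* Enable heap profiling and request an interval-triggered dump roughly
       every 2^30 bytes (1 GiB) of allocation activity; dumps use the default
       "jeprof" prefix unless opt.prof_prefix is also set. */
    const char *malloc_conf = "prof:true,lg_prof_interval:30";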
- </p></dd><dt><a name="opt.prof_gdump"></a><span class="term"> - - "<code class="mallctl">opt.prof_gdump</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Trigger a memory profile dump every time the total - virtual memory exceeds the previous maximum. Profiles are dumped to - files named according to the pattern - <code class="filename"><prefix>.<pid>.<seq>.u<useq>.heap</code>, - where <code class="literal"><prefix></code> is controlled by the <a class="link" href="#opt.prof_prefix"> - "<code class="mallctl">opt.prof_prefix</code>" - </a> - option. This option is disabled by default.</p></dd><dt><a name="opt.prof_final"></a><span class="term"> - - "<code class="mallctl">opt.prof_final</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Use an - <span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span> function to dump final memory - usage to a file named according to the pattern - <code class="filename"><prefix>.<pid>.<seq>.f.heap</code>, - where <code class="literal"><prefix></code> is controlled by the <a class="link" href="#opt.prof_prefix"> - "<code class="mallctl">opt.prof_prefix</code>" - </a> - option. This option is enabled by default.</p></dd><dt><a name="opt.prof_leak"></a><span class="term"> - - "<code class="mallctl">opt.prof_leak</code>" - - (<span class="type">bool</span>) - <code class="literal">r-</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Leak reporting enabled/disabled. If enabled, use an - <span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span> function to report memory leaks - detected by allocation sampling. See the - <a class="link" href="#opt.prof"> - "<code class="mallctl">opt.prof</code>" - </a> option for - information on analyzing heap profile output. This option is disabled - by default.</p></dd><dt><span class="term"> - - "<code class="mallctl">thread.arena</code>" - - (<span class="type">unsigned</span>) - <code class="literal">rw</code> - </span></dt><dd><p>Get or set the arena associated with the calling - thread. If the specified arena was not initialized beforehand (see the - <a class="link" href="#arenas.initialized"> - "<code class="mallctl">arenas.initialized</code>" - </a> - mallctl), it will be automatically initialized as a side effect of - calling this interface.</p></dd><dt><a name="thread.allocated"></a><span class="term"> - - "<code class="mallctl">thread.allocated</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Get the total number of bytes ever allocated by the - calling thread. This counter has the potential to wrap around; it is - up to the application to appropriately interpret the counter in such - cases.</p></dd><dt><span class="term"> - - "<code class="mallctl">thread.allocatedp</code>" - - (<span class="type">uint64_t *</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Get a pointer to the value that is returned by the - <a class="link" href="#thread.allocated"> - "<code class="mallctl">thread.allocated</code>" - </a> - mallctl.
This is useful for avoiding the overhead of repeated - <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) calls.</p></dd><dt><a name="thread.deallocated"></a><span class="term"> - - "<code class="mallctl">thread.deallocated</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Get the total number of bytes ever deallocated by the - calling thread. This counter has the potential to wrap around; it is - up to the application to appropriately interpret the counter in such - cases.</p></dd><dt><span class="term"> - - "<code class="mallctl">thread.deallocatedp</code>" - - (<span class="type">uint64_t *</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Get a pointer to the value that is returned by the - <a class="link" href="#thread.deallocated"> - "<code class="mallctl">thread.deallocated</code>" - </a> - mallctl. This is useful for avoiding the overhead of repeated - <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) calls.</p></dd><dt><span class="term"> - - "<code class="mallctl">thread.tcache.enabled</code>" - - (<span class="type">bool</span>) - <code class="literal">rw</code> - [<code class="option">--enable-tcache</code>] - </span></dt><dd><p>Enable/disable calling thread's tcache. The tcache is - implicitly flushed as a side effect of becoming - disabled (see - "<code class="mallctl">thread.tcache.flush</code>" - ). - </p></dd><dt><span class="term"> - - "<code class="mallctl">thread.tcache.flush</code>" - - (<span class="type">void</span>) - <code class="literal">--</code> - [<code class="option">--enable-tcache</code>] - </span></dt><dd><p>Flush calling thread's tcache. This interface releases - all cached objects and internal data structures associated with the - calling thread's thread-specific cache. Ordinarily, this interface - need not be called, since automatic periodic incremental garbage - collection occurs, and the thread cache is automatically discarded when - a thread exits. However, garbage collection is triggered by allocation - activity, so it is possible for a thread that stops - allocating/deallocating to retain its cache indefinitely, in which case - the developer may find manual flushing useful.</p></dd><dt><a name="arena.i.purge"></a><span class="term"> - - "<code class="mallctl">arena.<i>.purge</code>" - - (<span class="type">unsigned</span>) - <code class="literal">--</code> - </span></dt><dd><p>Purge unused dirty pages for arena <i>, or for - all arenas if <i> equals <a class="link" href="#arenas.narenas"> - "<code class="mallctl">arenas.narenas</code>" - </a>. - </p></dd><dt><a name="arena.i.dss"></a><span class="term"> - - "<code class="mallctl">arena.<i>.dss</code>" - - (<span class="type">const char *</span>) - <code class="literal">rw</code> - </span></dt><dd><p>Set the precedence of dss allocation as related to mmap - allocation for arena <i>, or for all arenas if <i> equals - <a class="link" href="#arenas.narenas"> - "<code class="mallctl">arenas.narenas</code>" - </a>. See - <a class="link" href="#opt.dss"> - "<code class="mallctl">opt.dss</code>" - </a> for supported - settings.
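To make the access types above concrete ("r-" reads, "rw" values, and "--" actions), here is a small sketch that is not taken from the manual: it assumes jemalloc was built with --enable-stats, and the arena index 0 is purely illustrative. It reads the calling thread's allocation counter and then purges arena 0's dirty pages:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        uint64_t allocated;
        size_t len = sizeof(allocated);

        /* "thread.allocated" is read-only ("r-"): pass oldp/oldlenp only. */
        if (mallctl("thread.allocated", &allocated, &len, NULL, 0) == 0)
            printf("thread.allocated: %" PRIu64 "\n", allocated);

        /* "arena.<i>.purge" is an action ("--"): all pointers are NULL. */
        mallctl("arena.0.purge", NULL, NULL, NULL, 0);
        return 0;
    }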
- </p></dd><dt><a name="arenas.narenas"></a><span class="term"> - - "<code class="mallctl">arenas.narenas</code>" - - (<span class="type">unsigned</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Current limit on number of arenas.</p></dd><dt><a name="arenas.initialized"></a><span class="term"> - - "<code class="mallctl">arenas.initialized</code>" - - (<span class="type">bool *</span>) - <code class="literal">r-</code> - </span></dt><dd><p>An array of <a class="link" href="#arenas.narenas"> - "<code class="mallctl">arenas.narenas</code>" - </a> - booleans. Each boolean indicates whether the corresponding arena is - initialized.</p></dd><dt><span class="term"> - - "<code class="mallctl">arenas.quantum</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Quantum size.</p></dd><dt><span class="term"> - - "<code class="mallctl">arenas.page</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Page size.</p></dd><dt><span class="term"> - - "<code class="mallctl">arenas.tcache_max</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-tcache</code>] - </span></dt><dd><p>Maximum thread-cached size class.</p></dd><dt><span class="term"> - - "<code class="mallctl">arenas.nbins</code>" - - (<span class="type">unsigned</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Number of bin size classes.</p></dd><dt><span class="term"> - - "<code class="mallctl">arenas.nhbins</code>" - - (<span class="type">unsigned</span>) - <code class="literal">r-</code> - [<code class="option">--enable-tcache</code>] - </span></dt><dd><p>Total number of thread cache bin size - classes.</p></dd><dt><a name="arenas.bin.i.size"></a><span class="term"> - - "<code class="mallctl">arenas.bin.<i>.size</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Maximum size supported by size class.</p></dd><dt><span class="term"> - - "<code class="mallctl">arenas.bin.<i>.nregs</code>" - - (<span class="type">uint32_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Number of regions per page run.</p></dd><dt><span class="term"> - - "<code class="mallctl">arenas.bin.<i>.run_size</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Number of bytes per page run.</p></dd><dt><span class="term"> - - "<code class="mallctl">arenas.nlruns</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Total number of large size classes.</p></dd><dt><span class="term"> - - "<code class="mallctl">arenas.lrun.<i>.size</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Maximum size supported by this large size - class.</p></dd><dt><span class="term"> - - "<code class="mallctl">arenas.purge</code>" - - (<span class="type">unsigned</span>) - <code class="literal">-w</code> - </span></dt><dd><p>Purge unused dirty pages for the specified arena, or - for all arenas if none is specified.</p></dd><dt><span class="term"> - - "<code class="mallctl">arenas.extend</code>" - - (<span class="type">unsigned</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Extend the array of arenas by appending a new arena, - and returning the new arena index.</p></dd><dt><a name="prof.active"></a><span class="term"> - - "<code class="mallctl">prof.active</code>" - - (<span 
class="type">bool</span>) - <code class="literal">rw</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Control whether sampling is currently active. See the - <a class="link" href="#opt.prof_active"> - "<code class="mallctl">opt.prof_active</code>" - </a> - option for additional information. - </p></dd><dt><span class="term"> - - "<code class="mallctl">prof.dump</code>" - - (<span class="type">const char *</span>) - <code class="literal">-w</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Dump a memory profile to the specified file, or if NULL - is specified, to a file according to the pattern - <code class="filename"><prefix>.<pid>.<seq>.m<mseq>.heap</code>, - where <code class="literal"><prefix></code> is controlled by the - <a class="link" href="#opt.prof_prefix"> - "<code class="mallctl">opt.prof_prefix</code>" - </a> - option.</p></dd><dt><span class="term"> - - "<code class="mallctl">prof.interval</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-prof</code>] - </span></dt><dd><p>Average number of bytes allocated between - interval-based profile dumps. See the - <a class="link" href="#opt.lg_prof_interval"> - "<code class="mallctl">opt.lg_prof_interval</code>" - </a> - option for additional information.</p></dd><dt><a name="stats.cactive"></a><span class="term"> - - "<code class="mallctl">stats.cactive</code>" - - (<span class="type">size_t *</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Pointer to a counter that contains an approximate count - of the current number of bytes in active pages. The estimate may be - high, but never low, because each arena rounds up to the nearest - multiple of the chunk size when computing its contribution to the - counter. Note that the <a class="link" href="#epoch"> - "<code class="mallctl">epoch</code>" - </a> mallctl has no bearing - on this counter. Furthermore, counter consistency is maintained via - atomic operations, so it is necessary to use an atomic operation in - order to guarantee a consistent read when dereferencing the pointer. - </p></dd><dt><a name="stats.allocated"></a><span class="term"> - - "<code class="mallctl">stats.allocated</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Total number of bytes allocated by the - application.</p></dd><dt><a name="stats.active"></a><span class="term"> - - "<code class="mallctl">stats.active</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Total number of bytes in active pages allocated by the - application. This is a multiple of the page size, and greater than or - equal to <a class="link" href="#stats.allocated"> - "<code class="mallctl">stats.allocated</code>" - </a>. - This does not include <a class="link" href="#stats.arenas.i.pdirty"> - - "<code class="mallctl">stats.arenas.<i>.pdirty</code>" - </a> and pages - entirely devoted to allocator metadata.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.mapped</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Total number of bytes in chunks mapped on behalf of the - application.
This is a multiple of the chunk size, and is at least as - large as <a class="link" href="#stats.active"> - "<code class="mallctl">stats.active</code>" - </a>. This - does not include inactive chunks.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.chunks.current</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Total number of chunks actively mapped on behalf of the - application. This does not include inactive chunks. - </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.chunks.total</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of chunks allocated.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.chunks.high</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Maximum number of active chunks at any time thus far. - </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.huge.allocated</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Number of bytes currently allocated by huge objects. - </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.huge.nmalloc</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of huge allocation requests. - </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.huge.ndalloc</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of huge deallocation requests. - </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.dss</code>" - - (<span class="type">const char *</span>) - <code class="literal">r-</code> - </span></dt><dd><p>dss (<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>) allocation precedence as - related to <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> allocation. See <a class="link" href="#opt.dss"> - "<code class="mallctl">opt.dss</code>" - </a> for details. 
- </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.nthreads</code>" - - (<span class="type">unsigned</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Number of threads currently assigned to - arena.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.pactive</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Number of pages in active runs.</p></dd><dt><a name="stats.arenas.i.pdirty"></a><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.pdirty</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - </span></dt><dd><p>Number of pages within unused runs that are potentially - dirty, and for which <code class="function">madvise</code>(<em class="parameter"><code>...</code></em>, - <em class="parameter"><code><code class="constant">MADV_DONTNEED</code></code></em>) or - similar has not been called.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.mapped</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Number of mapped bytes.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.npurge</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Number of dirty page purge sweeps performed. - </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.nmadvise</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Number of <code class="function">madvise</code>(<em class="parameter"><code>...</code></em>, - <em class="parameter"><code><code class="constant">MADV_DONTNEED</code></code></em>) or - similar calls made to purge dirty pages.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.npurged</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Number of pages purged.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.small.allocated</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Number of bytes currently allocated by small objects. - </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.small.nmalloc</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of allocation requests served by - small bins.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.small.ndalloc</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of small objects returned to bins. - </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.small.nrequests</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of small allocation requests. 
- </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.large.allocated</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Number of bytes currently allocated by large objects. - </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.large.nmalloc</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of large allocation requests served - directly by the arena.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.large.ndalloc</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of large deallocation requests served - directly by the arena.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.large.nrequests</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of large allocation requests. - </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.bins.<j>.allocated</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Current number of bytes allocated by - bin.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.bins.<j>.nmalloc</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of allocations served by bin. - </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.bins.<j>.ndalloc</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of allocations returned to bin. 
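The per-arena, per-bin statistics above are addressed by index, so a reader typically formats the mallctl name at run time. A minimal sketch, not from the manual (it assumes --enable-stats, and the arena/bin indices passed in are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    /* Return the bytes currently allocated in bin <bin> of arena <arena>,
       or 0 if the statistic cannot be read. */
    size_t
    bin_allocated(unsigned arena, unsigned bin)
    {
        char name[64];
        size_t allocated, len = sizeof(allocated);

        snprintf(name, sizeof(name), "stats.arenas.%u.bins.%u.allocated",
            arena, bin);
        if (mallctl(name, &allocated, &len, NULL, 0) != 0)
            return 0;
        return allocated;
    }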
- </p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.bins.<j>.nrequests</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of allocation - requests.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.bins.<j>.nfills</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code> <code class="option">--enable-tcache</code>] - </span></dt><dd><p>Cumulative number of tcache fills.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.bins.<j>.nflushes</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code> <code class="option">--enable-tcache</code>] - </span></dt><dd><p>Cumulative number of tcache flushes.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.bins.<j>.nruns</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of runs created.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.bins.<j>.nreruns</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of times the current run from which - to allocate changed.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.bins.<j>.curruns</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Current number of runs.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.lruns.<j>.nmalloc</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of allocation requests for this size - class served directly by the arena.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.lruns.<j>.ndalloc</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of deallocation requests for this - size class served directly by the arena.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.lruns.<j>.nrequests</code>" - - (<span class="type">uint64_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Cumulative number of allocation requests for this size - class.</p></dd><dt><span class="term"> - - "<code class="mallctl">stats.arenas.<i>.lruns.<j>.curruns</code>" - - (<span class="type">size_t</span>) - <code class="literal">r-</code> - [<code class="option">--enable-stats</code>] - </span></dt><dd><p>Current number of runs for this size class. - </p></dd></dl></div></div><div class="refsect1" title="DEBUGGING MALLOC PROBLEMS"><a name="debugging_malloc_problems"></a><h2>DEBUGGING MALLOC PROBLEMS</h2><p>When debugging, it is a good idea to configure/build jemalloc with - the <code class="option">--enable-debug</code> and <code class="option">--enable-fill</code> - options, and recompile the program with suitable options and symbols for - debugger support. 
When so configured, jemalloc incorporates a wide variety - of run-time assertions that catch application errors such as double-free, - write-after-free, etc.</p><p>Programs often accidentally depend on “uninitialized” - memory actually being filled with zero bytes. Junk filling - (see the <a class="link" href="#opt.junk"> - "<code class="mallctl">opt.junk</code>" - </a> - option) tends to expose such bugs in the form of obviously incorrect - results and/or coredumps. Conversely, zero - filling (see the <a class="link" href="#opt.zero"> - "<code class="mallctl">opt.zero</code>" - </a> option) eliminates - the symptoms of such bugs. Between these two options, it is usually - possible to quickly detect, diagnose, and eliminate such bugs.</p><p>This implementation does not provide much detail about the problems - it detects, because the performance impact for storing such information - would be prohibitive. However, jemalloc does integrate with the most - excellent <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a> tool if the - <code class="option">--enable-valgrind</code> configuration option is enabled.</p></div><div class="refsect1" title="DIAGNOSTIC MESSAGES"><a name="diagnostic_messages"></a><h2>DIAGNOSTIC MESSAGES</h2><p>If any of the memory allocation/deallocation functions detect an - error or warning condition, a message will be printed to file descriptor - <code class="constant">STDERR_FILENO</code>. Errors will result in the process - dumping core. If the <a class="link" href="#opt.abort"> - "<code class="mallctl">opt.abort</code>" - </a> option is set, most - warnings are treated as errors.</p><p>The <code class="varname">malloc_message</code> variable allows the programmer - to override the function which emits the text strings forming the errors - and warnings if for some reason the <code class="constant">STDERR_FILENO</code> file - descriptor is not suitable for this. - <code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) takes the - <em class="parameter"><code>cbopaque</code></em> pointer argument that is - <code class="constant">NULL</code> unless overridden by the arguments in a call to - <code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>), followed by a string - pointer. Please note that doing anything which tries to allocate memory in - this function is likely to result in a crash or deadlock.</p><p>All messages are prefixed by - “<code class="computeroutput"><jemalloc>: </code>”.</p></div><div class="refsect1" title="RETURN VALUES"><a name="return_values"></a><h2>RETURN VALUES</h2><div class="refsect2" title="Standard API"><a name="id286954473"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) and - <code class="function">calloc</code>(<em class="parameter"><code></code></em>) functions return a pointer to the - allocated memory if successful; otherwise a <code class="constant">NULL</code> - pointer is returned and <code class="varname">errno</code> is set to - <span class="errorname">ENOMEM</span>.</p><p>The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function - returns the value 0 if successful; otherwise it returns an error value. 
- The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function will fail - if: - </p><div class="variablelist"><dl><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p>The <em class="parameter"><code>alignment</code></em> parameter is - not a power of 2 at least as large as - <code class="code">sizeof(<span class="type">void *</span>)</code>. - </p></dd><dt><span class="term"><span class="errorname">ENOMEM</span></span></dt><dd><p>Memory allocation error.</p></dd></dl></div><p> - </p><p>The <code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function returns - a pointer to the allocated memory if successful; otherwise a - <code class="constant">NULL</code> pointer is returned and - <code class="varname">errno</code> is set. The - <code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function will fail if: - </p><div class="variablelist"><dl><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p>The <em class="parameter"><code>alignment</code></em> parameter is - not a power of 2. - </p></dd><dt><span class="term"><span class="errorname">ENOMEM</span></span></dt><dd><p>Memory allocation error.</p></dd></dl></div><p> - </p><p>The <code class="function">realloc</code>(<em class="parameter"><code></code></em>) function returns a - pointer, possibly identical to <em class="parameter"><code>ptr</code></em>, to the - allocated memory if successful; otherwise a <code class="constant">NULL</code> - pointer is returned, and <code class="varname">errno</code> is set to - <span class="errorname">ENOMEM</span> if the error was the result of an - allocation failure. The <code class="function">realloc</code>(<em class="parameter"><code></code></em>) - function always leaves the original buffer intact when an error occurs. - </p><p>The <code class="function">free</code>(<em class="parameter"><code></code></em>) function returns no - value.</p></div><div class="refsect2" title="Non-standard API"><a name="id286954690"></a><h3>Non-standard API</h3><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function - returns the usable size of the allocation pointed to by - <em class="parameter"><code>ptr</code></em>. </p><p>The <code class="function">mallctl</code>(<em class="parameter"><code></code></em>), - <code class="function">mallctlnametomib</code>(<em class="parameter"><code></code></em>), and - <code class="function">mallctlbymib</code>(<em class="parameter"><code></code></em>) functions return 0 on - success; otherwise they return an error value. The functions will fail - if: - </p><div class="variablelist"><dl><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p><em class="parameter"><code>newp</code></em> is not - <code class="constant">NULL</code>, and <em class="parameter"><code>newlen</code></em> is too - large or too small. 
Alternatively, <em class="parameter"><code>*oldlenp</code></em> - is too large or too small; in this case as much data as possible - are read despite the error.</p></dd><dt><span class="term"><span class="errorname">ENOMEM</span></span></dt><dd><p><em class="parameter"><code>*oldlenp</code></em> is too short to - hold the requested value.</p></dd><dt><span class="term"><span class="errorname">ENOENT</span></span></dt><dd><p><em class="parameter"><code>name</code></em> or - <em class="parameter"><code>mib</code></em> specifies an unknown/invalid - value.</p></dd><dt><span class="term"><span class="errorname">EPERM</span></span></dt><dd><p>Attempt to read or write void value, or attempt to - write read-only value.</p></dd><dt><span class="term"><span class="errorname">EAGAIN</span></span></dt><dd><p>A memory allocation failure - occurred.</p></dd><dt><span class="term"><span class="errorname">EFAULT</span></span></dt><dd><p>An interface with side effects failed in some way - not directly related to <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) - read/write processing.</p></dd></dl></div><p> - </p></div><div class="refsect2" title="Experimental API"><a name="id286954842"></a><h3>Experimental API</h3><p>The <code class="function">allocm</code>(<em class="parameter"><code></code></em>), - <code class="function">rallocm</code>(<em class="parameter"><code></code></em>), - <code class="function">sallocm</code>(<em class="parameter"><code></code></em>), - <code class="function">dallocm</code>(<em class="parameter"><code></code></em>), and - <code class="function">nallocm</code>(<em class="parameter"><code></code></em>) functions return - <code class="constant">ALLOCM_SUCCESS</code> on success; otherwise they return an - error value. The <code class="function">allocm</code>(<em class="parameter"><code></code></em>), - <code class="function">rallocm</code>(<em class="parameter"><code></code></em>), and - <code class="function">nallocm</code>(<em class="parameter"><code></code></em>) functions will fail if: - </p><div class="variablelist"><dl><dt><span class="term"><span class="errorname">ALLOCM_ERR_OOM</span></span></dt><dd><p>Out of memory. Insufficient contiguous memory was - available to service the allocation request. 
The - <code class="function">allocm</code>(<em class="parameter"><code></code></em>) function additionally sets - <em class="parameter"><code>*ptr</code></em> to <code class="constant">NULL</code>, whereas - the <code class="function">rallocm</code>(<em class="parameter"><code></code></em>) function leaves - <code class="constant">*ptr</code> unmodified.</p></dd></dl></div><p> - The <code class="function">rallocm</code>(<em class="parameter"><code></code></em>) function will also - fail if: - </p><div class="variablelist"><dl><dt><span class="term"><span class="errorname">ALLOCM_ERR_NOT_MOVED</span></span></dt><dd><p><code class="constant">ALLOCM_NO_MOVE</code> was specified, - but the reallocation request could not be serviced without moving - the object.</p></dd></dl></div><p> - </p></div></div><div class="refsect1" title="ENVIRONMENT"><a name="environment"></a><h2>ENVIRONMENT</h2><p>The following environment variable affects the execution of the - allocation functions: - </p><div class="variablelist"><dl><dt><span class="term"><code class="envar">MALLOC_CONF</code></span></dt><dd><p>If the environment variable - <code class="envar">MALLOC_CONF</code> is set, the characters it contains - will be interpreted as options.</p></dd></dl></div><p> - </p></div><div class="refsect1" title="EXAMPLES"><a name="examples"></a><h2>EXAMPLES</h2><p>To dump core whenever a problem occurs: - </p><pre class="screen">ln -s 'abort:true' /etc/malloc.conf</pre><p> - </p><p>To specify in the source a chunk size that is 16 MiB: - </p><pre class="programlisting"> -malloc_conf = "lg_chunk:24";</pre></div><div class="refsect1" title="SEE ALSO"><a name="see_also"></a><h2>SEE ALSO</h2><p><span class="citerefentry"><span class="refentrytitle">madvise</span>(2)</span>, - <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span>, - <span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>, - <span class="citerefentry"><span class="refentrytitle">utrace</span>(2)</span>, - <span class="citerefentry"><span class="refentrytitle">alloca</span>(3)</span>, - <span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span>, - <span class="citerefentry"><span class="refentrytitle">getpagesize</span>(3)</span></p></div><div class="refsect1" title="STANDARDS"><a name="standards"></a><h2>STANDARDS</h2><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>), - <code class="function">calloc</code>(<em class="parameter"><code></code></em>), - <code class="function">realloc</code>(<em class="parameter"><code></code></em>), and - <code class="function">free</code>(<em class="parameter"><code></code></em>) functions conform to ISO/IEC - 9899:1990 (“ISO C90”).</p><p>The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function conforms - to IEEE Std 1003.1-2001 (“POSIX.1”).</p></div></div></body></html> diff --git a/extra/jemalloc/doc/jemalloc.xml.in b/extra/jemalloc/doc/jemalloc.xml.in deleted file mode 100644 index 09305801bab..00000000000 --- a/extra/jemalloc/doc/jemalloc.xml.in +++ /dev/null @@ -1,2176 +0,0 @@ -<?xml version='1.0' encoding='UTF-8'?> -<?xml-stylesheet type="text/xsl" - href="http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl"?> -<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.4//EN" - "http://www.oasis-open.org/docbook/xml/4.4/docbookx.dtd" [ -]> - -<refentry> - <refentryinfo> - <title>User Manual</title> - <productname>jemalloc</productname> - <releaseinfo 
role="version">@jemalloc_version@</releaseinfo> - <authorgroup> - <author> - <firstname>Jason</firstname> - <surname>Evans</surname> - <personblurb>Author</personblurb> - </author> - </authorgroup> - </refentryinfo> - <refmeta> - <refentrytitle>JEMALLOC</refentrytitle> - <manvolnum>3</manvolnum> - </refmeta> - <refnamediv> - <refdescriptor>jemalloc</refdescriptor> - <refname>jemalloc</refname> - <!-- Each refname causes a man page file to be created. Only if this were - the system malloc(3) implementation would these files be appropriate. - <refname>malloc</refname> - <refname>calloc</refname> - <refname>posix_memalign</refname> - <refname>aligned_alloc</refname> - <refname>realloc</refname> - <refname>free</refname> - <refname>malloc_usable_size</refname> - <refname>malloc_stats_print</refname> - <refname>mallctl</refname> - <refname>mallctlnametomib</refname> - <refname>mallctlbymib</refname> - <refname>allocm</refname> - <refname>rallocm</refname> - <refname>sallocm</refname> - <refname>dallocm</refname> - <refname>nallocm</refname> - --> - <refpurpose>general purpose memory allocation functions</refpurpose> - </refnamediv> - <refsect1 id="library"> - <title>LIBRARY</title> - <para>This manual describes jemalloc @jemalloc_version@. More information - can be found at the <ulink - url="http://www.canonware.com/jemalloc/">jemalloc website</ulink>.</para> - </refsect1> - <refsynopsisdiv> - <title>SYNOPSIS</title> - <funcsynopsis> - <funcsynopsisinfo>#include <<filename class="headerfile">stdlib.h</filename>> -#include <<filename class="headerfile">jemalloc/jemalloc.h</filename>></funcsynopsisinfo> - <refsect2> - <title>Standard API</title> - <funcprototype> - <funcdef>void *<function>malloc</function></funcdef> - <paramdef>size_t <parameter>size</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>void *<function>calloc</function></funcdef> - <paramdef>size_t <parameter>number</parameter></paramdef> - <paramdef>size_t <parameter>size</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>int <function>posix_memalign</function></funcdef> - <paramdef>void **<parameter>ptr</parameter></paramdef> - <paramdef>size_t <parameter>alignment</parameter></paramdef> - <paramdef>size_t <parameter>size</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>void *<function>aligned_alloc</function></funcdef> - <paramdef>size_t <parameter>alignment</parameter></paramdef> - <paramdef>size_t <parameter>size</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>void *<function>realloc</function></funcdef> - <paramdef>void *<parameter>ptr</parameter></paramdef> - <paramdef>size_t <parameter>size</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>void <function>free</function></funcdef> - <paramdef>void *<parameter>ptr</parameter></paramdef> - </funcprototype> - </refsect2> - <refsect2> - <title>Non-standard API</title> - <funcprototype> - <funcdef>size_t <function>malloc_usable_size</function></funcdef> - <paramdef>const void *<parameter>ptr</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>void <function>malloc_stats_print</function></funcdef> - <paramdef>void <parameter>(*write_cb)</parameter> - <funcparams>void *, const char *</funcparams> - </paramdef> - <paramdef>void *<parameter>cbopaque</parameter></paramdef> - <paramdef>const char *<parameter>opts</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>int <function>mallctl</function></funcdef> - <paramdef>const char 
*<parameter>name</parameter></paramdef> - <paramdef>void *<parameter>oldp</parameter></paramdef> - <paramdef>size_t *<parameter>oldlenp</parameter></paramdef> - <paramdef>void *<parameter>newp</parameter></paramdef> - <paramdef>size_t <parameter>newlen</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>int <function>mallctlnametomib</function></funcdef> - <paramdef>const char *<parameter>name</parameter></paramdef> - <paramdef>size_t *<parameter>mibp</parameter></paramdef> - <paramdef>size_t *<parameter>miblenp</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>int <function>mallctlbymib</function></funcdef> - <paramdef>const size_t *<parameter>mib</parameter></paramdef> - <paramdef>size_t <parameter>miblen</parameter></paramdef> - <paramdef>void *<parameter>oldp</parameter></paramdef> - <paramdef>size_t *<parameter>oldlenp</parameter></paramdef> - <paramdef>void *<parameter>newp</parameter></paramdef> - <paramdef>size_t <parameter>newlen</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>void <function>(*malloc_message)</function></funcdef> - <paramdef>void *<parameter>cbopaque</parameter></paramdef> - <paramdef>const char *<parameter>s</parameter></paramdef> - </funcprototype> - <para><type>const char *</type><varname>malloc_conf</varname>;</para> - </refsect2> - <refsect2> - <title>Experimental API</title> - <funcprototype> - <funcdef>int <function>allocm</function></funcdef> - <paramdef>void **<parameter>ptr</parameter></paramdef> - <paramdef>size_t *<parameter>rsize</parameter></paramdef> - <paramdef>size_t <parameter>size</parameter></paramdef> - <paramdef>int <parameter>flags</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>int <function>rallocm</function></funcdef> - <paramdef>void **<parameter>ptr</parameter></paramdef> - <paramdef>size_t *<parameter>rsize</parameter></paramdef> - <paramdef>size_t <parameter>size</parameter></paramdef> - <paramdef>size_t <parameter>extra</parameter></paramdef> - <paramdef>int <parameter>flags</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>int <function>sallocm</function></funcdef> - <paramdef>const void *<parameter>ptr</parameter></paramdef> - <paramdef>size_t *<parameter>rsize</parameter></paramdef> - <paramdef>int <parameter>flags</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>int <function>dallocm</function></funcdef> - <paramdef>void *<parameter>ptr</parameter></paramdef> - <paramdef>int <parameter>flags</parameter></paramdef> - </funcprototype> - <funcprototype> - <funcdef>int <function>nallocm</function></funcdef> - <paramdef>size_t *<parameter>rsize</parameter></paramdef> - <paramdef>size_t <parameter>size</parameter></paramdef> - <paramdef>int <parameter>flags</parameter></paramdef> - </funcprototype> - </refsect2> - </funcsynopsis> - </refsynopsisdiv> - <refsect1 id="description"> - <title>DESCRIPTION</title> - <refsect2> - <title>Standard API</title> - - <para>The <function>malloc<parameter/></function> function allocates - <parameter>size</parameter> bytes of uninitialized memory. The allocated - space is suitably aligned (after possible pointer coercion) for storage - of any type of object.</para> - - <para>The <function>calloc<parameter/></function> function allocates - space for <parameter>number</parameter> objects, each - <parameter>size</parameter> bytes in length. 
The result is identical to - calling <function>malloc<parameter/></function> with an argument of - <parameter>number</parameter> * <parameter>size</parameter>, with the - exception that the allocated memory is explicitly initialized to zero - bytes.</para> - - <para>The <function>posix_memalign<parameter/></function> function - allocates <parameter>size</parameter> bytes of memory such that the - allocation's base address is an even multiple of - <parameter>alignment</parameter>, and returns the allocation in the value - pointed to by <parameter>ptr</parameter>. The requested - <parameter>alignment</parameter> must be a power of 2 at least as large - as <code language="C">sizeof(<type>void *</type>)</code>.</para> - - <para>The <function>aligned_alloc<parameter/></function> function - allocates <parameter>size</parameter> bytes of memory such that the - allocation's base address is an even multiple of - <parameter>alignment</parameter>. The requested - <parameter>alignment</parameter> must be a power of 2. Behavior is - undefined if <parameter>size</parameter> is not an integral multiple of - <parameter>alignment</parameter>.</para> - - <para>The <function>realloc<parameter/></function> function changes the - size of the previously allocated memory referenced by - <parameter>ptr</parameter> to <parameter>size</parameter> bytes. The - contents of the memory are unchanged up to the lesser of the new and old - sizes. If the new size is larger, the contents of the newly allocated - portion of the memory are undefined. Upon success, the memory referenced - by <parameter>ptr</parameter> is freed and a pointer to the newly - allocated memory is returned. Note that - <function>realloc<parameter/></function> may move the memory allocation, - resulting in a different return value than <parameter>ptr</parameter>. - If <parameter>ptr</parameter> is <constant>NULL</constant>, the - <function>realloc<parameter/></function> function behaves identically to - <function>malloc<parameter/></function> for the specified size.</para> - - <para>The <function>free<parameter/></function> function causes the - allocated memory referenced by <parameter>ptr</parameter> to be made - available for future allocations. If <parameter>ptr</parameter> is - <constant>NULL</constant>, no action occurs.</para> - </refsect2> - <refsect2> - <title>Non-standard API</title> - - <para>The <function>malloc_usable_size<parameter/></function> function - returns the usable size of the allocation pointed to by - <parameter>ptr</parameter>. The return value may be larger than the size - that was requested during allocation. The - <function>malloc_usable_size<parameter/></function> function is not a - mechanism for in-place <function>realloc<parameter/></function>; rather - it is provided solely as a tool for introspection purposes. Any - discrepancy between the requested allocation size and the size reported - by <function>malloc_usable_size<parameter/></function> should not be - depended on, since such behavior is entirely implementation-dependent. - </para> - - <para>The <function>malloc_stats_print<parameter/></function> function - writes human-readable summary statistics via the - <parameter>write_cb</parameter> callback function pointer and - <parameter>cbopaque</parameter> data passed to - <parameter>write_cb</parameter>, or - <function>malloc_message<parameter/></function> if - <parameter>write_cb</parameter> is <constant>NULL</constant>. This - function can be called repeatedly. 
General information that never - changes during execution can be omitted by specifying "g" as a character - within the <parameter>opts</parameter> string. Note that - <function>malloc_message<parameter/></function> uses the - <function>mallctl*<parameter/></function> functions internally, so - inconsistent statistics can be reported if multiple threads use these - functions simultaneously. If <option>--enable-stats</option> is - specified during configuration, “m” and “a” can - be specified to omit merged arena and per arena statistics, respectively; - “b” and “l” can be specified to omit per size - class statistics for bins and large objects, respectively. Unrecognized - characters are silently ignored. Note that thread caching may prevent - some statistics from being completely up to date, since extra locking - would be required to merge counters that track thread cache operations. - </para> - - <para>The <function>mallctl<parameter/></function> function provides a - general interface for introspecting the memory allocator, as well as - setting modifiable parameters and triggering actions. The - period-separated <parameter>name</parameter> argument specifies a - location in a tree-structured namespace; see the <xref - linkend="mallctl_namespace" xrefstyle="template:%t"/> section for - documentation on the tree contents. To read a value, pass a pointer via - <parameter>oldp</parameter> to adequate space to contain the value, and a - pointer to its length via <parameter>oldlenp</parameter>; otherwise pass - <constant>NULL</constant> and <constant>NULL</constant>. Similarly, to - write a value, pass a pointer to the value via - <parameter>newp</parameter>, and its length via - <parameter>newlen</parameter>; otherwise pass <constant>NULL</constant> - and <constant>0</constant>.</para> - - <para>The <function>mallctlnametomib<parameter/></function> function - provides a way to avoid repeated name lookups for applications that - repeatedly query the same portion of the namespace, by translating a name - to a “Management Information Base” (MIB) that can be passed - repeatedly to <function>mallctlbymib<parameter/></function>. Upon - successful return from <function>mallctlnametomib<parameter/></function>, - <parameter>mibp</parameter> contains an array of - <parameter>*miblenp</parameter> integers, where - <parameter>*miblenp</parameter> is the lesser of the number of components - in <parameter>name</parameter> and the input value of - <parameter>*miblenp</parameter>. Thus it is possible to pass a - <parameter>*miblenp</parameter> that is smaller than the number of - period-separated name components, which results in a partial MIB that can - be used as the basis for constructing a complete MIB. For name - components that are integers (e.g. the 2 in - <link - linkend="arenas.bin.i.size"><mallctl>arenas.bin.2.size</mallctl></link>), - the corresponding MIB component will always be that integer. Therefore, - it is legitimate to construct code like the following: <programlisting - language="C"><![CDATA[ -unsigned nbins, i; - -size_t mib[4]; -size_t len, miblen; - -len = sizeof(nbins); -mallctl("arenas.nbins", &nbins, &len, NULL, 0); - -miblen = 4; -mallctlnametomib("arenas.bin.0.size", mib, &miblen); -for (i = 0; i < nbins; i++) { - size_t bin_size; - - mib[2] = i; - len = sizeof(bin_size); - mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); - /* Do something with bin_size... 
*/ -}]]></programlisting></para> - </refsect2> - <refsect2> - <title>Experimental API</title> - <para>The experimental API is subject to change or removal without regard - for backward compatibility. If <option>--disable-experimental</option> - is specified during configuration, the experimental API is - omitted.</para> - - <para>The <function>allocm<parameter/></function>, - <function>rallocm<parameter/></function>, - <function>sallocm<parameter/></function>, - <function>dallocm<parameter/></function>, and - <function>nallocm<parameter/></function> functions all have a - <parameter>flags</parameter> argument that can be used to specify - options. The functions only check the options that are contextually - relevant. Use bitwise or (<code language="C">|</code>) operations to - specify one or more of the following: - <variablelist> - <varlistentry> - <term><constant>ALLOCM_LG_ALIGN(<parameter>la</parameter>) - </constant></term> - - <listitem><para>Align the memory allocation to start at an address - that is a multiple of <code language="C">(1 << - <parameter>la</parameter>)</code>. This macro does not validate - that <parameter>la</parameter> is within the valid - range.</para></listitem> - </varlistentry> - <varlistentry> - <term><constant>ALLOCM_ALIGN(<parameter>a</parameter>) - </constant></term> - - <listitem><para>Align the memory allocation to start at an address - that is a multiple of <parameter>a</parameter>, where - <parameter>a</parameter> is a power of two. This macro does not - validate that <parameter>a</parameter> is a power of 2. - </para></listitem> - </varlistentry> - <varlistentry> - <term><constant>ALLOCM_ZERO</constant></term> - - <listitem><para>Initialize newly allocated memory to contain zero - bytes. In the growing reallocation case, the real size prior to - reallocation defines the boundary between untouched bytes and those - that are initialized to contain zero bytes. If this option is - absent, newly allocated memory is uninitialized.</para></listitem> - </varlistentry> - <varlistentry> - <term><constant>ALLOCM_NO_MOVE</constant></term> - - <listitem><para>For reallocation, fail rather than moving the - object. This constraint can apply to both growth and - shrinkage.</para></listitem> - </varlistentry> - <varlistentry> - <term><constant>ALLOCM_ARENA(<parameter>a</parameter>) - </constant></term> - - <listitem><para>Use the arena specified by the index - <parameter>a</parameter>. This macro does not validate that - <parameter>a</parameter> specifies an arena in the valid - range.</para></listitem> - </varlistentry> - </variablelist> - </para> - - <para>The <function>allocm<parameter/></function> function allocates at - least <parameter>size</parameter> bytes of memory, sets - <parameter>*ptr</parameter> to the base address of the allocation, and - sets <parameter>*rsize</parameter> to the real size of the allocation if - <parameter>rsize</parameter> is not <constant>NULL</constant>. Behavior - is undefined if <parameter>size</parameter> is - <constant>0</constant>.</para> - - <para>The <function>rallocm<parameter/></function> function resizes the - allocation at <parameter>*ptr</parameter> to be at least - <parameter>size</parameter> bytes, sets <parameter>*ptr</parameter> to - the base address of the allocation if it moved, and sets - <parameter>*rsize</parameter> to the real size of the allocation if - <parameter>rsize</parameter> is not <constant>NULL</constant>. 
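As an illustrative sketch of how these functions and flags combine (error handling condensed; the sizes shown are arbitrary): <programlisting language="C"><![CDATA[
void *p;
size_t rsize;

/* At least 1000 bytes, 64-byte aligned and zero-filled. */
if (allocm(&p, &rsize, 1000, ALLOCM_ALIGN(64) | ALLOCM_ZERO)
    == ALLOCM_SUCCESS) {
	/* Try to grow in place; ALLOCM_NO_MOVE forbids relocation. */
	if (rallocm(&p, &rsize, 4096, 0, ALLOCM_NO_MOVE) != ALLOCM_SUCCESS) {
		/* Could not grow without moving; p is unchanged. */
	}
	dallocm(p, 0);
}]]></programlisting>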
If - <parameter>extra</parameter> is non-zero, an attempt is made to resize - the allocation to be at least <code - language="C">(<parameter>size</parameter> + - <parameter>extra</parameter>)</code> bytes, though inability to allocate - the extra byte(s) will not by itself result in failure. Behavior is - undefined if <parameter>size</parameter> is <constant>0</constant>, or if - <code language="C">(<parameter>size</parameter> + - <parameter>extra</parameter> > - <constant>SIZE_T_MAX</constant>)</code>.</para> - - <para>The <function>sallocm<parameter/></function> function sets - <parameter>*rsize</parameter> to the real size of the allocation.</para> - - <para>The <function>dallocm<parameter/></function> function causes the - memory referenced by <parameter>ptr</parameter> to be made available for - future allocations.</para> - - <para>The <function>nallocm<parameter/></function> function allocates no - memory, but it performs the same size computation as the - <function>allocm<parameter/></function> function, and if - <parameter>rsize</parameter> is not <constant>NULL</constant> it sets - <parameter>*rsize</parameter> to the real size of the allocation that - would result from the equivalent <function>allocm<parameter/></function> - function call. Behavior is undefined if - <parameter>size</parameter> is <constant>0</constant>.</para> - </refsect2> - </refsect1> - <refsect1 id="tuning"> - <title>TUNING</title> - <para>Once, when the first call is made to one of the memory allocation - routines, the allocator initializes its internals based in part on various - options that can be specified at compile- or run-time.</para> - - <para>The string pointed to by the global variable - <varname>malloc_conf</varname>, the “name” of the file - referenced by the symbolic link named <filename - class="symlink">/etc/malloc.conf</filename>, and the value of the - environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in - that order, from left to right as options.</para> - - <para>An options string is a comma-separated list of option:value pairs. - There is one key corresponding to each <link - linkend="opt.abort"><mallctl>opt.*</mallctl></link> mallctl (see the <xref - linkend="mallctl_namespace" xrefstyle="template:%t"/> section for options - documentation). For example, <literal>abort:true,narenas:1</literal> sets - the <link linkend="opt.abort"><mallctl>opt.abort</mallctl></link> and <link - linkend="opt.narenas"><mallctl>opt.narenas</mallctl></link> options. Some - options have boolean values (true/false), others have integer values (base - 8, 10, or 16, depending on prefix), and yet others have raw string - values.</para> - </refsect1> - <refsect1 id="implementation_notes"> - <title>IMPLEMENTATION NOTES</title> - <para>Traditionally, allocators have used - <citerefentry><refentrytitle>sbrk</refentrytitle> - <manvolnum>2</manvolnum></citerefentry> to obtain memory, which is - suboptimal for several reasons, including race conditions, increased - fragmentation, and artificial limitations on maximum usable memory.
If - <option>--enable-dss</option> is specified during configuration, this - allocator uses both <citerefentry><refentrytitle>mmap</refentrytitle> - <manvolnum>2</manvolnum></citerefentry> and - <citerefentry><refentrytitle>sbrk</refentrytitle> - <manvolnum>2</manvolnum></citerefentry>, in that order of preference; - otherwise only <citerefentry><refentrytitle>mmap</refentrytitle> - <manvolnum>2</manvolnum></citerefentry> is used.</para> - - <para>This allocator uses multiple arenas in order to reduce lock - contention for threaded programs on multi-processor systems. This works - well with regard to threading scalability, but incurs some costs. There is - a small fixed per-arena overhead, and additionally, arenas manage memory - completely independently of each other, which means a small fixed increase - in overall memory fragmentation. These overheads are not generally an - issue, given the number of arenas normally used. Note that using - substantially more arenas than the default is not likely to improve - performance, mainly due to reduced cache performance. However, it may make - sense to reduce the number of arenas if an application does not make much - use of the allocation functions.</para> - - <para>In addition to multiple arenas, unless - <option>--disable-tcache</option> is specified during configuration, this - allocator supports thread-specific caching for small and large objects, in - order to make it possible to completely avoid synchronization for most - allocation requests. Such caching allows very fast allocation in the - common case, but it increases memory usage and fragmentation, since a - bounded number of objects can remain allocated in each thread cache.</para> - - <para>Memory is conceptually broken into equal-sized chunks, where the - chunk size is a power of two that is greater than the page size. Chunks - are always aligned to multiples of the chunk size. This alignment makes it - possible to find metadata for user objects very quickly.</para> - - <para>User objects are broken into three categories according to size: - small, large, and huge. Small objects are smaller than one page. Large - objects are smaller than the chunk size. Huge objects are a multiple of - the chunk size. Small and large objects are managed by arenas; huge - objects are managed separately in a single data structure that is shared by - all threads. Huge objects are used by applications infrequently enough - that this single data structure is not a scalability issue.</para> - - <para>Each chunk that is managed by an arena tracks its contents as runs of - contiguous pages (unused, backing a set of small objects, or backing one - large object). The combination of chunk alignment and chunk page maps - makes it possible to determine all metadata regarding small and large - allocations in constant time.</para> - - <para>Small objects are managed in groups by page runs. Each run maintains - a frontier and free list to track which regions are in use. Allocation - requests that are no more than half the quantum (8 or 16, depending on - architecture) are rounded up to the nearest power of two that is at least - <code language="C">sizeof(<type>double</type>)</code>. All other small - object size classes are multiples of the quantum, spaced such that internal - fragmentation is limited to approximately 25% for all but the smallest size - classes. 
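For example, under the 16-byte quantum shown in the size class table below, a 100-byte request would presumably be served from the 112-byte size class, leaving 12 of 112 bytes (roughly 11%) as internal fragmentation.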
Allocation requests that are larger than the maximum small size - class, but small enough to fit in an arena-managed chunk (see the <link - linkend="opt.lg_chunk"><mallctl>opt.lg_chunk</mallctl></link> option), are - rounded up to the nearest run size. Allocation requests that are too large - to fit in an arena-managed chunk are rounded up to the nearest multiple of - the chunk size.</para> - - <para>Allocations are packed tightly together, which can be an issue for - multi-threaded applications. If you need to assure that allocations do not - suffer from cacheline sharing, round your allocation requests up to the - nearest multiple of the cacheline size, or specify cacheline alignment when - allocating.</para> - - <para>Assuming 4 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit - system, the size classes in each category are as shown in <xref - linkend="size_classes" xrefstyle="template:Table %n"/>.</para> - - <table xml:id="size_classes" frame="all"> - <title>Size classes</title> - <tgroup cols="3" colsep="1" rowsep="1"> - <colspec colname="c1" align="left"/> - <colspec colname="c2" align="right"/> - <colspec colname="c3" align="left"/> - <thead> - <row> - <entry>Category</entry> - <entry>Spacing</entry> - <entry>Size</entry> - </row> - </thead> - <tbody> - <row> - <entry morerows="6">Small</entry> - <entry>lg</entry> - <entry>[8]</entry> - </row> - <row> - <entry>16</entry> - <entry>[16, 32, 48, ..., 128]</entry> - </row> - <row> - <entry>32</entry> - <entry>[160, 192, 224, 256]</entry> - </row> - <row> - <entry>64</entry> - <entry>[320, 384, 448, 512]</entry> - </row> - <row> - <entry>128</entry> - <entry>[640, 768, 896, 1024]</entry> - </row> - <row> - <entry>256</entry> - <entry>[1280, 1536, 1792, 2048]</entry> - </row> - <row> - <entry>512</entry> - <entry>[2560, 3072, 3584]</entry> - </row> - <row> - <entry>Large</entry> - <entry>4 KiB</entry> - <entry>[4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB]</entry> - </row> - <row> - <entry>Huge</entry> - <entry>4 MiB</entry> - <entry>[4 MiB, 8 MiB, 12 MiB, ...]</entry> - </row> - </tbody> - </tgroup> - </table> - </refsect1> - <refsect1 id="mallctl_namespace"> - <title>MALLCTL NAMESPACE</title> - <para>The following names are defined in the namespace accessible via the - <function>mallctl*<parameter/></function> functions. Value types are - specified in parentheses, their readable/writable statuses are encoded as - <literal>rw</literal>, <literal>r-</literal>, <literal>-w</literal>, or - <literal>--</literal>, and required build configuration flags follow, if - any. A name element encoded as <literal><i></literal> or - <literal><j></literal> indicates an integer component, where the - integer varies from 0 to some upper value that must be determined via - introspection. In the case of <mallctl>stats.arenas.<i>.*</mallctl>, - <literal><i></literal> equal to <link - linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link> can be - used to access the summation of statistics from all arenas. 
Take special - note of the <link linkend="epoch"><mallctl>epoch</mallctl></link> mallctl, - which controls refreshing of cached dynamic statistics.</para> - - <variablelist> - <varlistentry> - <term> - <mallctl>version</mallctl> - (<type>const char *</type>) - <literal>r-</literal> - </term> - <listitem><para>Return the jemalloc version string.</para></listitem> - </varlistentry> - - <varlistentry id="epoch"> - <term> - <mallctl>epoch</mallctl> - (<type>uint64_t</type>) - <literal>rw</literal> - </term> - <listitem><para>If a value is passed in, refresh the data from which - the <function>mallctl*<parameter/></function> functions report values, - and increment the epoch. Return the current epoch. This is useful for - detecting whether another thread caused a refresh.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.debug</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-debug</option> was specified during - build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.dss</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-dss</option> was specified during - build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.fill</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-fill</option> was specified during - build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.lazy_lock</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-lazy-lock</option> was specified - during build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.mremap</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-mremap</option> was specified during - build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.munmap</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-munmap</option> was specified during - build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.prof</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-prof</option> was specified during - build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.prof_libgcc</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--disable-prof-libgcc</option> was not - specified during build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.prof_libunwind</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-prof-libunwind</option> was specified - during build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.stats</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-stats</option> was specified during - build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.tcache</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--disable-tcache</option> was not specified - during build 
configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.tls</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--disable-tls</option> was not specified during - build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.utrace</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-utrace</option> was specified during - build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.valgrind</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-valgrind</option> was specified during - build configuration.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>config.xmalloc</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para><option>--enable-xmalloc</option> was specified during - build configuration.</para></listitem> - </varlistentry> - - <varlistentry id="opt.abort"> - <term> - <mallctl>opt.abort</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para>Abort-on-warning enabled/disabled. If true, most - warnings are fatal. The process will call - <citerefentry><refentrytitle>abort</refentrytitle> - <manvolnum>3</manvolnum></citerefentry> in these cases. This option is - disabled by default unless <option>--enable-debug</option> is - specified during configuration, in which case it is enabled by default. - </para></listitem> - </varlistentry> - - <varlistentry id="opt.lg_chunk"> - <term> - <mallctl>opt.lg_chunk</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Virtual memory chunk size (log base 2). If a chunk - size outside the supported size range is specified, the size is - silently clipped to the minimum/maximum supported size. The default - chunk size is 4 MiB (2^22). - </para></listitem> - </varlistentry> - - <varlistentry id="opt.dss"> - <term> - <mallctl>opt.dss</mallctl> - (<type>const char *</type>) - <literal>r-</literal> - </term> - <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle> - <manvolnum>2</manvolnum></citerefentry>) allocation precedence as - related to <citerefentry><refentrytitle>mmap</refentrytitle> - <manvolnum>2</manvolnum></citerefentry> allocation. The following - settings are supported: “disabled”, “primary”, - and “secondary” (default).</para></listitem> - </varlistentry> - - <varlistentry id="opt.narenas"> - <term> - <mallctl>opt.narenas</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Maximum number of arenas to use for automatic - multiplexing of threads and arenas. The default is four times the - number of CPUs, or one if there is a single CPU.</para></listitem> - </varlistentry> - - <varlistentry id="opt.lg_dirty_mult"> - <term> - <mallctl>opt.lg_dirty_mult</mallctl> - (<type>ssize_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Per-arena minimum ratio (log base 2) of active to dirty - pages. Some dirty unused pages may be allowed to accumulate, within - the limit set by the ratio (or one chunk worth of dirty pages, - whichever is greater), before informing the kernel about some of those - pages via <citerefentry><refentrytitle>madvise</refentrytitle> - <manvolnum>2</manvolnum></citerefentry> or a similar system call. 
This - provides the kernel with sufficient information to recycle dirty pages - if physical memory becomes scarce and the pages remain unused. The - default minimum ratio is 8:1 (2^3:1); an option value of -1 will - disable dirty page purging.</para></listitem> - </varlistentry> - - <varlistentry id="opt.stats_print"> - <term> - <mallctl>opt.stats_print</mallctl> - (<type>bool</type>) - <literal>r-</literal> - </term> - <listitem><para>Enable/disable statistics printing at exit. If - enabled, the <function>malloc_stats_print<parameter/></function> - function is called at program exit via an - <citerefentry><refentrytitle>atexit</refentrytitle> - <manvolnum>3</manvolnum></citerefentry> function. If - <option>--enable-stats</option> is specified during configuration, this - has the potential to cause deadlock for a multi-threaded process that - exits while one or more threads are executing in the memory allocation - functions. Therefore, this option should only be used with care; it is - primarily intended as a performance tuning aid during application - development. This option is disabled by default.</para></listitem> - </varlistentry> - - <varlistentry id="opt.junk"> - <term> - <mallctl>opt.junk</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-fill</option>] - </term> - <listitem><para>Junk filling enabled/disabled. If enabled, each byte - of uninitialized allocated memory will be initialized to - <literal>0xa5</literal>. All deallocated memory will be initialized to - <literal>0x5a</literal>. This is intended for debugging and will - impact performance negatively. This option is disabled by default - unless <option>--enable-debug</option> is specified during - configuration, in which case it is enabled by default unless running - inside <ulink - url="http://valgrind.org/">Valgrind</ulink>.</para></listitem> - </varlistentry> - - <varlistentry id="opt.quarantine"> - <term> - <mallctl>opt.quarantine</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-fill</option>] - </term> - <listitem><para>Per thread quarantine size in bytes. If non-zero, each - thread maintains a FIFO object quarantine that stores up to the - specified number of bytes of memory. The quarantined memory is not - freed until it is released from quarantine, though it is immediately - junk-filled if the <link - linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is - enabled. This feature is of particular use in combination with <ulink - url="http://valgrind.org/">Valgrind</ulink>, which can detect attempts - to access quarantined objects. This is intended for debugging and will - impact performance negatively. The default quarantine size is 0 unless - running inside Valgrind, in which case the default is 16 - MiB.</para></listitem> - </varlistentry> - - <varlistentry id="opt.redzone"> - <term> - <mallctl>opt.redzone</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-fill</option>] - </term> - <listitem><para>Redzones enabled/disabled. If enabled, small - allocations have redzones before and after them. Furthermore, if the - <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is - enabled, the redzones are checked for corruption during deallocation. - However, the primary intended purpose of this feature is to be used in - combination with <ulink url="http://valgrind.org/">Valgrind</ulink>, - which needs redzones in order to do effective buffer overflow/underflow - detection. 
This option is intended for debugging and will impact - performance negatively. This option is disabled by - default unless running inside Valgrind.</para></listitem> - </varlistentry> - - <varlistentry id="opt.zero"> - <term> - <mallctl>opt.zero</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-fill</option>] - </term> - <listitem><para>Zero filling enabled/disabled. If enabled, each byte - of uninitialized allocated memory will be initialized to 0. Note that - this initialization only happens once for each byte, so - <function>realloc<parameter/></function> and - <function>rallocm<parameter/></function> calls do not zero memory that - was previously allocated. This is intended for debugging and will - impact performance negatively. This option is disabled by default. - </para></listitem> - </varlistentry> - - <varlistentry id="opt.utrace"> - <term> - <mallctl>opt.utrace</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-utrace</option>] - </term> - <listitem><para>Allocation tracing based on - <citerefentry><refentrytitle>utrace</refentrytitle> - <manvolnum>2</manvolnum></citerefentry> enabled/disabled. This option - is disabled by default.</para></listitem> - </varlistentry> - - <varlistentry id="opt.valgrind"> - <term> - <mallctl>opt.valgrind</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-valgrind</option>] - </term> - <listitem><para><ulink url="http://valgrind.org/">Valgrind</ulink> - support enabled/disabled. This option is vestigial because jemalloc - auto-detects whether it is running inside Valgrind. This option is - disabled by default, unless running inside Valgrind.</para></listitem> - </varlistentry> - - <varlistentry id="opt.xmalloc"> - <term> - <mallctl>opt.xmalloc</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-xmalloc</option>] - </term> - <listitem><para>Abort-on-out-of-memory enabled/disabled. If enabled, - rather than returning failure for any allocation function, display a - diagnostic message on <constant>STDERR_FILENO</constant> and cause the - program to drop core (using - <citerefentry><refentrytitle>abort</refentrytitle> - <manvolnum>3</manvolnum></citerefentry>). If an application is - designed to depend on this behavior, set the option at compile time by - including the following in the source code: - <programlisting language="C"><![CDATA[ -malloc_conf = "xmalloc:true";]]></programlisting> - This option is disabled by default.</para></listitem> - </varlistentry> - - <varlistentry id="opt.tcache"> - <term> - <mallctl>opt.tcache</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-tcache</option>] - </term> - <listitem><para>Thread-specific caching enabled/disabled. When there - are multiple threads, each thread uses a thread-specific cache for - objects up to a certain size. Thread-specific caching allows many - allocations to be satisfied without performing any thread - synchronization, at the cost of increased memory use. See the - <link - linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link> - option for related tuning information.
This option is enabled by - default unless running inside <ulink - url="http://valgrind.org/">Valgrind</ulink>.</para></listitem> - </varlistentry> - - <varlistentry id="opt.lg_tcache_max"> - <term> - <mallctl>opt.lg_tcache_max</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-tcache</option>] - </term> - <listitem><para>Maximum size class (log base 2) to cache in the - thread-specific cache. At a minimum, all small size classes are - cached, and at a maximum all large size classes are cached. The - default maximum is 32 KiB (2^15).</para></listitem> - </varlistentry> - - <varlistentry id="opt.prof"> - <term> - <mallctl>opt.prof</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Memory profiling enabled/disabled. If enabled, profile - memory allocation activity. See the <link - linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link> - option for on-the-fly activation/deactivation. See the <link - linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link> - option for probabilistic sampling control. See the <link - linkend="opt.prof_accum"><mallctl>opt.prof_accum</mallctl></link> - option for control of cumulative sample reporting. See the <link - linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link> - option for information on interval-triggered profile dumping, the <link - linkend="opt.prof_gdump"><mallctl>opt.prof_gdump</mallctl></link> - option for information on high-water-triggered profile dumping, and the - <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl></link> - option for final profile dumping. Profile output is compatible with - the included <command>pprof</command> Perl script, which originates - from the <ulink url="http://code.google.com/p/gperftools/">gperftools - package</ulink>.</para></listitem> - </varlistentry> - - <varlistentry id="opt.prof_prefix"> - <term> - <mallctl>opt.prof_prefix</mallctl> - (<type>const char *</type>) - <literal>r-</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Filename prefix for profile dumps. If the prefix is - set to the empty string, no automatic dumps will occur; this is - primarily useful for disabling the automatic final heap dump (which - also disables leak reporting, if enabled). The default prefix is - <filename>jeprof</filename>.</para></listitem> - </varlistentry> - - <varlistentry id="opt.prof_active"> - <term> - <mallctl>opt.prof_active</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Profiling activated/deactivated. This is a secondary - control mechanism that makes it possible to start the application with - profiling enabled (see the <link - linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option) but - inactive, then toggle profiling at any time during program execution - with the <link - linkend="prof.active"><mallctl>prof.active</mallctl></link> mallctl. - This option is enabled by default.</para></listitem> - </varlistentry> - - <varlistentry id="opt.lg_prof_sample"> - <term> - <mallctl>opt.lg_prof_sample</mallctl> - (<type>ssize_t</type>) - <literal>r-</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Average interval (log base 2) between allocation - samples, as measured in bytes of allocation activity. Increasing the - sampling interval decreases profile fidelity, but also decreases the - computational overhead. 
The default sample interval is 512 KiB (2^19 - B).</para></listitem> - </varlistentry> - - <varlistentry id="opt.prof_accum"> - <term> - <mallctl>opt.prof_accum</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Reporting of cumulative object/byte counts in profile - dumps enabled/disabled. If this option is enabled, every unique - backtrace must be stored for the duration of execution. Depending on - the application, this can impose a large memory overhead, and the - cumulative counts are not always of interest. This option is disabled - by default.</para></listitem> - </varlistentry> - - <varlistentry id="opt.lg_prof_interval"> - <term> - <mallctl>opt.lg_prof_interval</mallctl> - (<type>ssize_t</type>) - <literal>r-</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Average interval (log base 2) between memory profile - dumps, as measured in bytes of allocation activity. The actual - interval between dumps may be sporadic because decentralized allocation - counters are used to avoid synchronization bottlenecks. Profiles are - dumped to files named according to the pattern - <filename><prefix>.<pid>.<seq>.i<iseq>.heap</filename>, - where <literal><prefix></literal> is controlled by the - <link - linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> - option. By default, interval-triggered profile dumping is disabled - (encoded as -1). - </para></listitem> - </varlistentry> - - <varlistentry id="opt.prof_gdump"> - <term> - <mallctl>opt.prof_gdump</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Trigger a memory profile dump every time the total - virtual memory exceeds the previous maximum. Profiles are dumped to - files named according to the pattern - <filename><prefix>.<pid>.<seq>.u<useq>.heap</filename>, - where <literal><prefix></literal> is controlled by the <link - linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> - option. This option is disabled by default.</para></listitem> - </varlistentry> - - <varlistentry id="opt.prof_final"> - <term> - <mallctl>opt.prof_final</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Use an - <citerefentry><refentrytitle>atexit</refentrytitle> - <manvolnum>3</manvolnum></citerefentry> function to dump final memory - usage to a file named according to the pattern - <filename><prefix>.<pid>.<seq>.f.heap</filename>, - where <literal><prefix></literal> is controlled by the <link - linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> - option. This option is enabled by default.</para></listitem> - </varlistentry> - - <varlistentry id="opt.prof_leak"> - <term> - <mallctl>opt.prof_leak</mallctl> - (<type>bool</type>) - <literal>r-</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Leak reporting enabled/disabled. If enabled, use an - <citerefentry><refentrytitle>atexit</refentrytitle> - <manvolnum>3</manvolnum></citerefentry> function to report memory leaks - detected by allocation sampling. See the - <link linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option for - information on analyzing heap profile output. 
This option is disabled - by default.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>thread.arena</mallctl> - (<type>unsigned</type>) - <literal>rw</literal> - </term> - <listitem><para>Get or set the arena associated with the calling - thread. If the specified arena was not initialized beforehand (see the - <link - linkend="arenas.initialized"><mallctl>arenas.initialized</mallctl></link> - mallctl), it will be automatically initialized as a side effect of - calling this interface.</para></listitem> - </varlistentry> - - <varlistentry id="thread.allocated"> - <term> - <mallctl>thread.allocated</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Get the total number of bytes ever allocated by the - calling thread. This counter has the potential to wrap around; it is - up to the application to appropriately interpret the counter in such - cases.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>thread.allocatedp</mallctl> - (<type>uint64_t *</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Get a pointer to the value that is returned by the - <link - linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link> - mallctl. This is useful for avoiding the overhead of repeated - <function>mallctl*<parameter/></function> calls.</para></listitem> - </varlistentry> - - <varlistentry id="thread.deallocated"> - <term> - <mallctl>thread.deallocated</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Get the total number of bytes ever deallocated by the - calling thread. This counter has the potential to wrap around; it is - up to the application to appropriately interpret the counter in such - cases.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>thread.deallocatedp</mallctl> - (<type>uint64_t *</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Get a pointer to the value that is returned by the - <link - linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link> - mallctl. This is useful for avoiding the overhead of repeated - <function>mallctl*<parameter/></function> calls.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>thread.tcache.enabled</mallctl> - (<type>bool</type>) - <literal>rw</literal> - [<option>--enable-tcache</option>] - </term> - <listitem><para>Enable/disable calling thread's tcache. The tcache is - implicitly flushed as a side effect of becoming - disabled (see <link - linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>). - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>thread.tcache.flush</mallctl> - (<type>void</type>) - <literal>--</literal> - [<option>--enable-tcache</option>] - </term> - <listitem><para>Flush calling thread's tcache. This interface releases - all cached objects and internal data structures associated with the - calling thread's thread-specific cache. Ordinarily, this interface - need not be called, since automatic periodic incremental garbage - collection occurs, and the thread cache is automatically discarded when - a thread exits.
However, garbage collection is triggered by allocation - activity, so it is possible for a thread that stops - allocating/deallocating to retain its cache indefinitely, in which case - the developer may find manual flushing useful.</para></listitem> - </varlistentry> - - <varlistentry id="arena.i.purge"> - <term> - <mallctl>arena.<i>.purge</mallctl> - (<type>unsigned</type>) - <literal>--</literal> - </term> - <listitem><para>Purge unused dirty pages for arena <i>, or for - all arenas if <i> equals <link - linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. - </para></listitem> - </varlistentry> - - <varlistentry id="arena.i.dss"> - <term> - <mallctl>arena.<i>.dss</mallctl> - (<type>const char *</type>) - <literal>rw</literal> - </term> - <listitem><para>Set the precedence of dss allocation as related to mmap - allocation for arena <i>, or for all arenas if <i> equals - <link - linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. See - <link linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported - settings. - </para></listitem> - </varlistentry> - - <varlistentry id="arenas.narenas"> - <term> - <mallctl>arenas.narenas</mallctl> - (<type>unsigned</type>) - <literal>r-</literal> - </term> - <listitem><para>Current limit on number of arenas.</para></listitem> - </varlistentry> - - <varlistentry id="arenas.initialized"> - <term> - <mallctl>arenas.initialized</mallctl> - (<type>bool *</type>) - <literal>r-</literal> - </term> - <listitem><para>An array of <link - linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link> - booleans. Each boolean indicates whether the corresponding arena is - initialized.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>arenas.quantum</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Quantum size.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>arenas.page</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Page size.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>arenas.tcache_max</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-tcache</option>] - </term> - <listitem><para>Maximum thread-cached size class.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>arenas.nbins</mallctl> - (<type>unsigned</type>) - <literal>r-</literal> - </term> - <listitem><para>Number of bin size classes.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>arenas.nhbins</mallctl> - (<type>unsigned</type>) - <literal>r-</literal> - [<option>--enable-tcache</option>] - </term> - <listitem><para>Total number of thread cache bin size - classes.</para></listitem> - </varlistentry> - - <varlistentry id="arenas.bin.i.size"> - <term> - <mallctl>arenas.bin.<i>.size</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Maximum size supported by size class.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>arenas.bin.<i>.nregs</mallctl> - (<type>uint32_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Number of regions per page run.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>arenas.bin.<i>.run_size</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Number of bytes per page run.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>arenas.nlruns</mallctl> - 
(<type>size_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Total number of large size classes.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>arenas.lrun.<i>.size</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Maximum size supported by this large size - class.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>arenas.purge</mallctl> - (<type>unsigned</type>) - <literal>-w</literal> - </term> - <listitem><para>Purge unused dirty pages for the specified arena, or - for all arenas if none is specified.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>arenas.extend</mallctl> - (<type>unsigned</type>) - <literal>r-</literal> - </term> - <listitem><para>Extend the array of arenas by appending a new arena, - and returning the new arena index.</para></listitem> - </varlistentry> - - <varlistentry id="prof.active"> - <term> - <mallctl>prof.active</mallctl> - (<type>bool</type>) - <literal>rw</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Control whether sampling is currently active. See the - <link - linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link> - option for additional information. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>prof.dump</mallctl> - (<type>const char *</type>) - <literal>-w</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Dump a memory profile to the specified file, or if NULL - is specified, to a file according to the pattern - <filename><prefix>.<pid>.<seq>.m<mseq>.heap</filename>, - where <literal><prefix></literal> is controlled by the - <link - linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> - option.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>prof.interval</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-prof</option>] - </term> - <listitem><para>Average number of bytes allocated between - interval-based profile dumps. See the - <link - linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link> - option for additional information.</para></listitem> - </varlistentry> - - <varlistentry id="stats.cactive"> - <term> - <mallctl>stats.cactive</mallctl> - (<type>size_t *</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Pointer to a counter that contains an approximate count - of the current number of bytes in active pages. The estimate may be - high, but never low, because each arena rounds up to the nearest - multiple of the chunk size when computing its contribution to the - counter. Note that the <link - linkend="epoch"><mallctl>epoch</mallctl></link> mallctl has no bearing - on this counter. Furthermore, counter consistency is maintained via - atomic operations, so it is necessary to use an atomic operation in - order to guarantee a consistent read when dereferencing the pointer.
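A minimal sketch of such a read, assuming a GCC/Clang-style atomic builtin for the final load: <programlisting language="C"><![CDATA[
size_t *cactive;
size_t sz = sizeof(cactive);

if (mallctl("stats.cactive", &cactive, &sz, NULL, 0) == 0) {
	/* Atomically load the counter that the returned pointer refers to. */
	size_t active_bytes = __atomic_load_n(cactive, __ATOMIC_RELAXED);
	/* ... use active_bytes ... */
}]]></programlisting>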
- </para></listitem> - </varlistentry> - - <varlistentry id="stats.allocated"> - <term> - <mallctl>stats.allocated</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Total number of bytes allocated by the - application.</para></listitem> - </varlistentry> - - <varlistentry id="stats.active"> - <term> - <mallctl>stats.active</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Total number of bytes in active pages allocated by the - application. This is a multiple of the page size, and greater than or - equal to <link - linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>. - This does not include <link linkend="stats.arenas.i.pdirty"> - <mallctl>stats.arenas.<i>.pdirty</mallctl></link> and pages - entirely devoted to allocator metadata.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.mapped</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Total number of bytes in chunks mapped on behalf of the - application. This is a multiple of the chunk size, and is at least as - large as <link - linkend="stats.active"><mallctl>stats.active</mallctl></link>. This - does not include inactive chunks.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.chunks.current</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Total number of chunks actively mapped on behalf of the - application. This does not include inactive chunks. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.chunks.total</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of chunks allocated.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.chunks.high</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Maximum number of active chunks at any time thus far. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.huge.allocated</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Number of bytes currently allocated by huge objects. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.huge.nmalloc</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of huge allocation requests. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.huge.ndalloc</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of huge deallocation requests. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.dss</mallctl> - (<type>const char *</type>) - <literal>r-</literal> - </term> - <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle> - <manvolnum>2</manvolnum></citerefentry>) allocation precedence as - related to <citerefentry><refentrytitle>mmap</refentrytitle> - <manvolnum>2</manvolnum></citerefentry> allocation. See <link - linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for details. 
- </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.nthreads</mallctl> - (<type>unsigned</type>) - <literal>r-</literal> - </term> - <listitem><para>Number of threads currently assigned to - arena.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.pactive</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Number of pages in active runs.</para></listitem> - </varlistentry> - - <varlistentry id="stats.arenas.i.pdirty"> - <term> - <mallctl>stats.arenas.<i>.pdirty</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - </term> - <listitem><para>Number of pages within unused runs that are potentially - dirty, and for which <function>madvise<parameter>...</parameter> - <parameter><constant>MADV_DONTNEED</constant></parameter></function> or - similar has not been called.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.mapped</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Number of mapped bytes.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.npurge</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Number of dirty page purge sweeps performed. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.nmadvise</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Number of <function>madvise<parameter>...</parameter> - <parameter><constant>MADV_DONTNEED</constant></parameter></function> or - similar calls made to purge dirty pages.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.npurged</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Number of pages purged.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.small.allocated</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Number of bytes currently allocated by small objects. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.small.nmalloc</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of allocation requests served by - small bins.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.small.ndalloc</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of small objects returned to bins. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.small.nrequests</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of small allocation requests. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.large.allocated</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Number of bytes currently allocated by large objects. 
- </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.large.nmalloc</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of large allocation requests served - directly by the arena.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.large.ndalloc</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of large deallocation requests served - directly by the arena.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.large.nrequests</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of large allocation requests. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.bins.<j>.allocated</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Current number of bytes allocated by - bin.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.bins.<j>.nmalloc</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of allocations served by bin. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.bins.<j>.ndalloc</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of allocations returned to bin. - </para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.bins.<j>.nrequests</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of allocation - requests.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.bins.<j>.nfills</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option> <option>--enable-tcache</option>] - </term> - <listitem><para>Cumulative number of tcache fills.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.bins.<j>.nflushes</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option> <option>--enable-tcache</option>] - </term> - <listitem><para>Cumulative number of tcache flushes.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.bins.<j>.nruns</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of runs created.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.bins.<j>.nreruns</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of times the current run from which - to allocate changed.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.bins.<j>.curruns</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Current number of runs.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - 
<mallctl>stats.arenas.<i>.lruns.<j>.nmalloc</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of allocation requests for this size - class served directly by the arena.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.lruns.<j>.ndalloc</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of deallocation requests for this - size class served directly by the arena.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.lruns.<j>.nrequests</mallctl> - (<type>uint64_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Cumulative number of allocation requests for this size - class.</para></listitem> - </varlistentry> - - <varlistentry> - <term> - <mallctl>stats.arenas.<i>.lruns.<j>.curruns</mallctl> - (<type>size_t</type>) - <literal>r-</literal> - [<option>--enable-stats</option>] - </term> - <listitem><para>Current number of runs for this size class. - </para></listitem> - </varlistentry> - </variablelist> - </refsect1> - <refsect1 id="debugging_malloc_problems"> - <title>DEBUGGING MALLOC PROBLEMS</title> - <para>When debugging, it is a good idea to configure/build jemalloc with - the <option>--enable-debug</option> and <option>--enable-fill</option> - options, and recompile the program with suitable options and symbols for - debugger support. When so configured, jemalloc incorporates a wide variety - of run-time assertions that catch application errors such as double-free, - write-after-free, etc.</para> - - <para>Programs often accidentally depend on “uninitialized” - memory actually being filled with zero bytes. Junk filling - (see the <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link> - option) tends to expose such bugs in the form of obviously incorrect - results and/or coredumps. Conversely, zero - filling (see the <link - linkend="opt.zero"><mallctl>opt.zero</mallctl></link> option) eliminates - the symptoms of such bugs. Between these two options, it is usually - possible to quickly detect, diagnose, and eliminate such bugs.</para> - - <para>This implementation does not provide much detail about the problems - it detects, because the performance impact for storing such information - would be prohibitive. However, jemalloc does integrate with the most - excellent <ulink url="http://valgrind.org/">Valgrind</ulink> tool if the - <option>--enable-valgrind</option> configuration option is enabled.</para> - </refsect1> - <refsect1 id="diagnostic_messages"> - <title>DIAGNOSTIC MESSAGES</title> - <para>If any of the memory allocation/deallocation functions detect an - error or warning condition, a message will be printed to file descriptor - <constant>STDERR_FILENO</constant>. Errors will result in the process - dumping core. If the <link - linkend="opt.abort"><mallctl>opt.abort</mallctl></link> option is set, most - warnings are treated as errors.</para> - - <para>The <varname>malloc_message</varname> variable allows the programmer - to override the function which emits the text strings forming the errors - and warnings if for some reason the <constant>STDERR_FILENO</constant> file - descriptor is not suitable for this. 
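For instance, an application might install a replacement along these lines (a sketch only; the handler name is illustrative, the header path assumes a standard jemalloc installation, and the handler must avoid allocating memory): <programlisting language="C"><![CDATA[
#include <string.h>
#include <unistd.h>
#include <jemalloc/jemalloc.h>

static void
log_malloc_message(void *cbopaque, const char *s)
{
	/* Forward allocator diagnostics to stdout without allocating. */
	(void)cbopaque;
	(void)write(STDOUT_FILENO, s, strlen(s));
}

void
install_message_handler(void)
{
	malloc_message = log_malloc_message;
}]]></programlisting>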
- <function>malloc_message<parameter/></function> takes the - <parameter>cbopaque</parameter> pointer argument that is - <constant>NULL</constant> unless overridden by the arguments in a call to - <function>malloc_stats_print<parameter/></function>, followed by a string - pointer. Please note that doing anything which tries to allocate memory in - this function is likely to result in a crash or deadlock.</para> - - <para>All messages are prefixed by - “<computeroutput><jemalloc>: </computeroutput>”.</para> - </refsect1> - <refsect1 id="return_values"> - <title>RETURN VALUES</title> - <refsect2> - <title>Standard API</title> - <para>The <function>malloc<parameter/></function> and - <function>calloc<parameter/></function> functions return a pointer to the - allocated memory if successful; otherwise a <constant>NULL</constant> - pointer is returned and <varname>errno</varname> is set to - <errorname>ENOMEM</errorname>.</para> - - <para>The <function>posix_memalign<parameter/></function> function - returns the value 0 if successful; otherwise it returns an error value. - The <function>posix_memalign<parameter/></function> function will fail - if: - <variablelist> - <varlistentry> - <term><errorname>EINVAL</errorname></term> - - <listitem><para>The <parameter>alignment</parameter> parameter is - not a power of 2 at least as large as - <code language="C">sizeof(<type>void *</type>)</code>. - </para></listitem> - </varlistentry> - <varlistentry> - <term><errorname>ENOMEM</errorname></term> - - <listitem><para>Memory allocation error.</para></listitem> - </varlistentry> - </variablelist> - </para> - - <para>The <function>aligned_alloc<parameter/></function> function returns - a pointer to the allocated memory if successful; otherwise a - <constant>NULL</constant> pointer is returned and - <varname>errno</varname> is set. The - <function>aligned_alloc<parameter/></function> function will fail if: - <variablelist> - <varlistentry> - <term><errorname>EINVAL</errorname></term> - - <listitem><para>The <parameter>alignment</parameter> parameter is - not a power of 2. - </para></listitem> - </varlistentry> - <varlistentry> - <term><errorname>ENOMEM</errorname></term> - - <listitem><para>Memory allocation error.</para></listitem> - </varlistentry> - </variablelist> - </para> - - <para>The <function>realloc<parameter/></function> function returns a - pointer, possibly identical to <parameter>ptr</parameter>, to the - allocated memory if successful; otherwise a <constant>NULL</constant> - pointer is returned, and <varname>errno</varname> is set to - <errorname>ENOMEM</errorname> if the error was the result of an - allocation failure. The <function>realloc<parameter/></function> - function always leaves the original buffer intact when an error occurs. - </para> - - <para>The <function>free<parameter/></function> function returns no - value.</para> - </refsect2> - <refsect2> - <title>Non-standard API</title> - <para>The <function>malloc_usable_size<parameter/></function> function - returns the usable size of the allocation pointed to by - <parameter>ptr</parameter>. </para> - - <para>The <function>mallctl<parameter/></function>, - <function>mallctlnametomib<parameter/></function>, and - <function>mallctlbymib<parameter/></function> functions return 0 on - success; otherwise they return an error value. 
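A short, hedged illustration of the return-value conventions just described: malloc() signals failure with a null pointer plus errno, posix_memalign() hands back the error value itself, and realloc() leaves the original buffer intact on failure. Nothing here is specific to jemalloc.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        void *p = malloc(1);
        if (p == NULL && errno == ENOMEM)       /* NULL + errno, per above */
                perror("malloc");

        void *q;
        int err = posix_memalign(&q, 3, 64);    /* 3: not a power of two */
        if (err == EINVAL)                      /* error value returned directly */
                fprintf(stderr, "posix_memalign: bad alignment\n");

        if (posix_memalign(&q, sizeof(void *), 64) == 0) {
                void *r = realloc(q, 1024);     /* on failure, q stays valid */
                if (r != NULL)
                        q = r;
                free(q);
        }
        free(p);
        return (0);
}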
The functions will fail - if: - <variablelist> - <varlistentry> - <term><errorname>EINVAL</errorname></term> - - <listitem><para><parameter>newp</parameter> is not - <constant>NULL</constant>, and <parameter>newlen</parameter> is too - large or too small. Alternatively, <parameter>*oldlenp</parameter> - is too large or too small; in this case as much data as possible - are read despite the error.</para></listitem> - </varlistentry> - <varlistentry> - <term><errorname>ENOMEM</errorname></term> - - <listitem><para><parameter>*oldlenp</parameter> is too short to - hold the requested value.</para></listitem> - </varlistentry> - <varlistentry> - <term><errorname>ENOENT</errorname></term> - - <listitem><para><parameter>name</parameter> or - <parameter>mib</parameter> specifies an unknown/invalid - value.</para></listitem> - </varlistentry> - <varlistentry> - <term><errorname>EPERM</errorname></term> - - <listitem><para>Attempt to read or write void value, or attempt to - write read-only value.</para></listitem> - </varlistentry> - <varlistentry> - <term><errorname>EAGAIN</errorname></term> - - <listitem><para>A memory allocation failure - occurred.</para></listitem> - </varlistentry> - <varlistentry> - <term><errorname>EFAULT</errorname></term> - - <listitem><para>An interface with side effects failed in some way - not directly related to <function>mallctl*<parameter/></function> - read/write processing.</para></listitem> - </varlistentry> - </variablelist> - </para> - </refsect2> - <refsect2> - <title>Experimental API</title> - <para>The <function>allocm<parameter/></function>, - <function>rallocm<parameter/></function>, - <function>sallocm<parameter/></function>, - <function>dallocm<parameter/></function>, and - <function>nallocm<parameter/></function> functions return - <constant>ALLOCM_SUCCESS</constant> on success; otherwise they return an - error value. The <function>allocm<parameter/></function>, - <function>rallocm<parameter/></function>, and - <function>nallocm<parameter/></function> functions will fail if: - <variablelist> - <varlistentry> - <term><errorname>ALLOCM_ERR_OOM</errorname></term> - - <listitem><para>Out of memory. Insufficient contiguous memory was - available to service the allocation request. 
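To make the error list above concrete, a sketch of reading one of the per-bin statistics documented earlier through mallctl(); it assumes an un-prefixed jemalloc built with --enable-stats, and the arena/bin indices 0 are arbitrary.

#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
        uint64_t nmalloc;
        size_t len = sizeof(nmalloc);
        int err = mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &len,
            NULL, 0);

        if (err == 0)
                printf("arena 0, bin 0: %" PRIu64 " allocations\n", nmalloc);
        else if (err == ENOENT)
                fprintf(stderr, "unknown mallctl name\n");
        else
                fprintf(stderr, "mallctl: %s\n", strerror(err));
        return (err);
}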
The - <function>allocm<parameter/></function> function additionally sets - <parameter>*ptr</parameter> to <constant>NULL</constant>, whereas - the <function>rallocm<parameter/></function> function leaves - <constant>*ptr</constant> unmodified.</para></listitem> - </varlistentry> - </variablelist> - The <function>rallocm<parameter/></function> function will also - fail if: - <variablelist> - <varlistentry> - <term><errorname>ALLOCM_ERR_NOT_MOVED</errorname></term> - - <listitem><para><constant>ALLOCM_NO_MOVE</constant> was specified, - but the reallocation request could not be serviced without moving - the object.</para></listitem> - </varlistentry> - </variablelist> - </para> - </refsect2> - </refsect1> - <refsect1 id="environment"> - <title>ENVIRONMENT</title> - <para>The following environment variable affects the execution of the - allocation functions: - <variablelist> - <varlistentry> - <term><envar>MALLOC_CONF</envar></term> - - <listitem><para>If the environment variable - <envar>MALLOC_CONF</envar> is set, the characters it contains - will be interpreted as options.</para></listitem> - </varlistentry> - </variablelist> - </para> - </refsect1> - <refsect1 id="examples"> - <title>EXAMPLES</title> - <para>To dump core whenever a problem occurs: - <screen>ln -s 'abort:true' /etc/malloc.conf</screen> - </para> - <para>To specify in the source a chunk size that is 16 MiB: - <programlisting language="C"><![CDATA[ -malloc_conf = "lg_chunk:24";]]></programlisting></para> - </refsect1> - <refsect1 id="see_also"> - <title>SEE ALSO</title> - <para><citerefentry><refentrytitle>madvise</refentrytitle> - <manvolnum>2</manvolnum></citerefentry>, - <citerefentry><refentrytitle>mmap</refentrytitle> - <manvolnum>2</manvolnum></citerefentry>, - <citerefentry><refentrytitle>sbrk</refentrytitle> - <manvolnum>2</manvolnum></citerefentry>, - <citerefentry><refentrytitle>utrace</refentrytitle> - <manvolnum>2</manvolnum></citerefentry>, - <citerefentry><refentrytitle>alloca</refentrytitle> - <manvolnum>3</manvolnum></citerefentry>, - <citerefentry><refentrytitle>atexit</refentrytitle> - <manvolnum>3</manvolnum></citerefentry>, - <citerefentry><refentrytitle>getpagesize</refentrytitle> - <manvolnum>3</manvolnum></citerefentry></para> - </refsect1> - <refsect1 id="standards"> - <title>STANDARDS</title> - <para>The <function>malloc<parameter/></function>, - <function>calloc<parameter/></function>, - <function>realloc<parameter/></function>, and - <function>free<parameter/></function> functions conform to ISO/IEC - 9899:1990 (“ISO C90”).</para> - - <para>The <function>posix_memalign<parameter/></function> function conforms - to IEEE Std 1003.1-2001 (“POSIX.1”).</para> - </refsect1> -</refentry> diff --git a/extra/jemalloc/doc/manpages.xsl.in b/extra/jemalloc/doc/manpages.xsl.in deleted file mode 100644 index 88b2626b958..00000000000 --- a/extra/jemalloc/doc/manpages.xsl.in +++ /dev/null @@ -1,4 +0,0 @@ -<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> - <xsl:import href="@XSLROOT@/manpages/docbook.xsl"/> - <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/> -</xsl:stylesheet> diff --git a/extra/jemalloc/doc/stylesheet.xsl b/extra/jemalloc/doc/stylesheet.xsl deleted file mode 100644 index 4e334a86f87..00000000000 --- a/extra/jemalloc/doc/stylesheet.xsl +++ /dev/null @@ -1,7 +0,0 @@ -<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> - <xsl:param name="funcsynopsis.style">ansi</xsl:param> - <xsl:param name="function.parens" select="1"/> - <xsl:template 
match="mallctl"> - "<xsl:call-template name="inline.monoseq"/>" - </xsl:template> -</xsl:stylesheet> diff --git a/extra/jemalloc/include/jemalloc/internal/arena.h b/extra/jemalloc/include/jemalloc/internal/arena.h deleted file mode 100644 index bbcfedacead..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/arena.h +++ /dev/null @@ -1,1038 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized - * as small as possible such that this setting is still honored, without - * violating other constraints. The goal is to make runs as small as possible - * without exceeding a per run external fragmentation threshold. - * - * We use binary fixed point math for overhead computations, where the binary - * point is implicitly RUN_BFP bits to the left. - * - * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be - * honored for some/all object sizes, since when heap profiling is enabled - * there is one pointer of header overhead per object (plus a constant). This - * constraint is relaxed (ignored) for runs that are so small that the - * per-region overhead is greater than: - * - * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)) - */ -#define RUN_BFP 12 -/* \/ Implicit binary fixed point. */ -#define RUN_MAX_OVRHD 0x0000003dU -#define RUN_MAX_OVRHD_RELAX 0x00001800U - -/* Maximum number of regions in one run. */ -#define LG_RUN_MAXREGS 11 -#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) - -/* - * Minimum redzone size. Redzones may be larger than this if necessary to - * preserve region alignment. - */ -#define REDZONE_MINSIZE 16 - -/* - * The minimum ratio of active:dirty pages per arena is computed as: - * - * (nactive >> opt_lg_dirty_mult) >= ndirty - * - * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times - * as many active pages as dirty pages. - */ -#define LG_DIRTY_MULT_DEFAULT 3 - -typedef struct arena_chunk_map_s arena_chunk_map_t; -typedef struct arena_chunk_s arena_chunk_t; -typedef struct arena_run_s arena_run_t; -typedef struct arena_bin_info_s arena_bin_info_t; -typedef struct arena_bin_s arena_bin_t; -typedef struct arena_s arena_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Each element of the chunk map corresponds to one page within the chunk. */ -struct arena_chunk_map_s { -#ifndef JEMALLOC_PROF - /* - * Overlay prof_ctx in order to allow it to be referenced by dead code. - * Such antics aren't warranted for per arena data structures, but - * chunk map overhead accounts for a percentage of memory, rather than - * being just a fixed cost. - */ - union { -#endif - union { - /* - * Linkage for run trees. There are two disjoint uses: - * - * 1) arena_t's runs_avail tree. - * 2) arena_run_t conceptually uses this linkage for in-use - * non-full runs, rather than directly embedding linkage. - */ - rb_node(arena_chunk_map_t) rb_link; - /* - * List of runs currently in purgatory. arena_chunk_purge() - * temporarily allocates runs that contain dirty pages while - * purging, so that other threads cannot use the runs while the - * purging thread is operating without the arena lock held. - */ - ql_elm(arena_chunk_map_t) ql_link; - } u; - - /* Profile counters, used for large object runs. */ - prof_ctx_t *prof_ctx; -#ifndef JEMALLOC_PROF - }; /* union { ... 
}; */ -#endif - - /* - * Run address (or size) and various flags are stored together. The bit - * layout looks like (assuming 32-bit system): - * - * ???????? ???????? ????nnnn nnnndula - * - * ? : Unallocated: Run address for first/last pages, unset for internal - * pages. - * Small: Run page offset. - * Large: Run size for first page, unset for trailing pages. - * n : binind for small size class, BININD_INVALID for large size class. - * d : dirty? - * u : unzeroed? - * l : large? - * a : allocated? - * - * Following are example bit patterns for the three types of runs. - * - * p : run page offset - * s : run size - * n : binind for size class; large objects set these to BININD_INVALID - * except for promoted allocations (see prof_promote) - * x : don't care - * - : 0 - * + : 1 - * [DULA] : bit set - * [dula] : bit unset - * - * Unallocated (clean): - * ssssssss ssssssss ssss++++ ++++du-a - * xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx - * ssssssss ssssssss ssss++++ ++++dU-a - * - * Unallocated (dirty): - * ssssssss ssssssss ssss++++ ++++D--a - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * ssssssss ssssssss ssss++++ ++++D--a - * - * Small: - * pppppppp pppppppp ppppnnnn nnnnd--A - * pppppppp pppppppp ppppnnnn nnnn---A - * pppppppp pppppppp ppppnnnn nnnnd--A - * - * Large: - * ssssssss ssssssss ssss++++ ++++D-LA - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * -------- -------- ----++++ ++++D-LA - * - * Large (sampled, size <= PAGE): - * ssssssss ssssssss ssssnnnn nnnnD-LA - * - * Large (not sampled, size == PAGE): - * ssssssss ssssssss ssss++++ ++++D-LA - */ - size_t bits; -#define CHUNK_MAP_BININD_SHIFT 4 -#define BININD_INVALID ((size_t)0xffU) -/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */ -#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U) -#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK -#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU) -#define CHUNK_MAP_DIRTY ((size_t)0x8U) -#define CHUNK_MAP_UNZEROED ((size_t)0x4U) -#define CHUNK_MAP_LARGE ((size_t)0x2U) -#define CHUNK_MAP_ALLOCATED ((size_t)0x1U) -#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED -}; -typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t; -typedef rb_tree(arena_chunk_map_t) arena_run_tree_t; - -/* Arena chunk header. */ -struct arena_chunk_s { - /* Arena that owns the chunk. */ - arena_t *arena; - - /* Linkage for tree of arena chunks that contain dirty runs. */ - rb_node(arena_chunk_t) dirty_link; - - /* Number of dirty pages. */ - size_t ndirty; - - /* Number of available runs. */ - size_t nruns_avail; - - /* - * Number of available run adjacencies. Clean and dirty available runs - * are not coalesced, which causes virtual memory fragmentation. The - * ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking - * this fragmentation. - * */ - size_t nruns_adjac; - - /* - * Map of pages within chunk that keeps track of free/large/small. The - * first map_bias entries are omitted, since the chunk header does not - * need to be tracked in the map. This omission saves a header page - * for common chunk sizes (e.g. 4 MiB). - */ - arena_chunk_map_t map[1]; /* Dynamically sized. */ -}; -typedef rb_tree(arena_chunk_t) arena_chunk_tree_t; - -struct arena_run_s { - /* Bin this run is associated with. */ - arena_bin_t *bin; - - /* Index of next region that has never been allocated, or nregs. */ - uint32_t nextind; - - /* Number of free regions in run. 
*/ - unsigned nfree; -}; - -/* - * Read-only information associated with each element of arena_t's bins array - * is stored separately, partly to reduce memory usage (only one copy, rather - * than one per arena), but mainly to avoid false cacheline sharing. - * - * Each run has the following layout: - * - * /--------------------\ - * | arena_run_t header | - * | ... | - * bitmap_offset | bitmap | - * | ... | - * ctx0_offset | ctx map | - * | ... | - * |--------------------| - * | redzone | - * reg0_offset | region 0 | - * | redzone | - * |--------------------| \ - * | redzone | | - * | region 1 | > reg_interval - * | redzone | / - * |--------------------| - * | ... | - * | ... | - * | ... | - * |--------------------| - * | redzone | - * | region nregs-1 | - * | redzone | - * |--------------------| - * | alignment pad? | - * \--------------------/ - * - * reg_interval has at least the same minimum alignment as reg_size; this - * preserves the alignment constraint that sa2u() depends on. Alignment pad is - * either 0 or redzone_size; it is present only if needed to align reg0_offset. - */ -struct arena_bin_info_s { - /* Size of regions in a run for this bin's size class. */ - size_t reg_size; - - /* Redzone size. */ - size_t redzone_size; - - /* Interval between regions (reg_size + (redzone_size << 1)). */ - size_t reg_interval; - - /* Total size of a run for this bin's size class. */ - size_t run_size; - - /* Total number of regions in a run for this bin's size class. */ - uint32_t nregs; - - /* - * Offset of first bitmap_t element in a run header for this bin's size - * class. - */ - uint32_t bitmap_offset; - - /* - * Metadata used to manipulate bitmaps for runs associated with this - * bin. - */ - bitmap_info_t bitmap_info; - - /* - * Offset of first (prof_ctx_t *) in a run header for this bin's size - * class, or 0 if (config_prof == false || opt_prof == false). - */ - uint32_t ctx0_offset; - - /* Offset of first region in a run for this bin's size class. */ - uint32_t reg0_offset; -}; - -struct arena_bin_s { - /* - * All operations on runcur, runs, and stats require that lock be - * locked. Run allocation/deallocation are protected by the arena lock, - * which may be acquired while holding one or more bin locks, but not - * vise versa. - */ - malloc_mutex_t lock; - - /* - * Current run being used to service allocations of this bin's size - * class. - */ - arena_run_t *runcur; - - /* - * Tree of non-full runs. This tree is used when looking for an - * existing run when runcur is no longer usable. We choose the - * non-full run that is lowest in memory; this policy tends to keep - * objects packed well, and it can also help reduce the number of - * almost-empty chunks. - */ - arena_run_tree_t runs; - - /* Bin statistics. */ - malloc_bin_stats_t stats; -}; - -struct arena_s { - /* This arena's index within the arenas array. */ - unsigned ind; - - /* - * Number of threads currently assigned to this arena. This field is - * protected by arenas_lock. - */ - unsigned nthreads; - - /* - * There are three classes of arena operations from a locking - * perspective: - * 1) Thread asssignment (modifies nthreads) is protected by - * arenas_lock. - * 2) Bin-related operations are protected by bin locks. - * 3) Chunk- and run-related operations are protected by this mutex. - */ - malloc_mutex_t lock; - - arena_stats_t stats; - /* - * List of tcaches for extant threads associated with this arena. - * Stats from these are merged incrementally, and at exit. 
- */ - ql_head(tcache_t) tcache_ql; - - uint64_t prof_accumbytes; - - dss_prec_t dss_prec; - - /* Tree of dirty-page-containing chunks this arena manages. */ - arena_chunk_tree_t chunks_dirty; - - /* - * In order to avoid rapid chunk allocation/deallocation when an arena - * oscillates right on the cusp of needing a new chunk, cache the most - * recently freed chunk. The spare is left in the arena's chunk trees - * until it is deleted. - * - * There is one spare chunk per arena, rather than one spare total, in - * order to avoid interactions between multiple threads that could make - * a single spare inadequate. - */ - arena_chunk_t *spare; - - /* Number of pages in active runs. */ - size_t nactive; - - /* - * Current count of pages within unused runs that are potentially - * dirty, and for which madvise(... MADV_DONTNEED) has not been called. - * By tracking this, we can institute a limit on how much dirty unused - * memory is mapped for each arena. - */ - size_t ndirty; - - /* - * Approximate number of pages being purged. It is possible for - * multiple threads to purge dirty pages concurrently, and they use - * npurgatory to indicate the total number of pages all threads are - * attempting to purge. - */ - size_t npurgatory; - - /* - * Size/address-ordered trees of this arena's available runs. The trees - * are used for first-best-fit run allocation. - */ - arena_avail_tree_t runs_avail; - - /* bins is used to store trees of free regions. */ - arena_bin_t bins[NBINS]; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern ssize_t opt_lg_dirty_mult; -/* - * small_size2bin is a compact lookup table that rounds request sizes up to - * size classes. In order to reduce cache footprint, the table is compressed, - * and all accesses are via the SMALL_SIZE2BIN macro. - */ -extern uint8_t const small_size2bin[]; -#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN]) - -extern arena_bin_info_t arena_bin_info[NBINS]; - -/* Number of large size classes. 
*/ -#define nlclasses (chunk_npages - map_bias) - -void arena_purge_all(arena_t *arena); -void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, - size_t binind, uint64_t prof_accumbytes); -void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, - bool zero); -void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); -void *arena_malloc_small(arena_t *arena, size_t size, bool zero); -void *arena_malloc_large(arena_t *arena, size_t size, bool zero); -void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero); -void arena_prof_promoted(const void *ptr, size_t size); -void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_t *mapelm); -void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind, arena_chunk_map_t *mapelm); -void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind); -void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, - void *ptr); -void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); -void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, - size_t extra, bool zero); -void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, - bool try_tcache_dalloc); -dss_prec_t arena_dss_prec_get(arena_t *arena); -void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); -void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, - size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, - malloc_large_stats_t *lstats); -bool arena_new(arena_t *arena, unsigned ind); -void arena_boot(void); -void arena_prefork(arena_t *arena); -void arena_postfork_parent(arena_t *arena); -void arena_postfork_child(arena_t *arena); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind); -size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbitsp_read(size_t *mapbitsp); -size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, - size_t pageind); -size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind); -void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); -void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, - size_t size, size_t flags); -void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, - size_t size); -void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, - size_t size, size_t flags); -void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - size_t binind); -void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, - size_t runind, size_t binind, size_t flags); -void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, - size_t unzeroed); -bool 
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum(arena_t *arena, uint64_t accumbytes); -size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); -size_t arena_bin_index(arena_t *arena, arena_bin_t *bin); -unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, - const void *ptr); -prof_ctx_t *arena_prof_ctx_get(const void *ptr); -void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); -void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache); -size_t arena_salloc(const void *ptr, bool demote); -void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, - bool try_tcache); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) -# ifdef JEMALLOC_ARENA_INLINE_A -JEMALLOC_ALWAYS_INLINE arena_chunk_map_t * -arena_mapp_get(arena_chunk_t *chunk, size_t pageind) -{ - - assert(pageind >= map_bias); - assert(pageind < chunk_npages); - - return (&chunk->map[pageind-map_bias]); -} - -JEMALLOC_ALWAYS_INLINE size_t * -arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind) -{ - - return (&arena_mapp_get(chunk, pageind)->bits); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbitsp_read(size_t *mapbitsp) -{ - - return (*mapbitsp); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) -{ - - return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind))); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - return (mapbits & ~PAGE_MASK); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == - (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); - return (mapbits & ~PAGE_MASK); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == - CHUNK_MAP_ALLOCATED); - return (mapbits >> LG_PAGE); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - size_t binind; - - mapbits = arena_mapbits_get(chunk, pageind); - binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; - assert(binind < NBINS || binind == BININD_INVALID); - return (binind); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_DIRTY); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_UNZEROED); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_LARGE); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_ALLOCATED); -} - 
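/*
 * Illustrative sketch, not part of the original header: build the mapbits
 * word for the first page of a dirty large run the same way
 * arena_mapbits_large_set() below does, then recover the fields with the
 * masks that the getters above apply.  `size' stands for a hypothetical,
 * page-aligned run size.
 */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_example(size_t size)
{
	size_t bits = size | CHUNK_MAP_BININD_INVALID | CHUNK_MAP_DIRTY |
	    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

	/* What the getters above report for such a word. */
	assert((bits & ~PAGE_MASK) == size);		/* large_size_get() */
	assert(((bits & CHUNK_MAP_BININD_MASK) >>
	    CHUNK_MAP_BININD_SHIFT) == BININD_INVALID);	/* binind_get() */
	assert((bits & CHUNK_MAP_DIRTY) != 0);		/* dirty_get() */

	return (bits);
}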
-JEMALLOC_ALWAYS_INLINE void -arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) -{ - - *mapbitsp = mapbits; -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, - size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - - assert((size & PAGE_MASK) == 0); - assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0); - assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags); - arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, - size_t size) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - assert((size & PAGE_MASK) == 0); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK)); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, - size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - size_t unzeroed; - - assert((size & PAGE_MASK) == 0); - assert((flags & CHUNK_MAP_DIRTY) == flags); - unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ - arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags - | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - size_t binind) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - assert(binind <= BININD_INVALID); - assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE); - arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | - (binind << CHUNK_MAP_BININD_SHIFT)); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, - size_t binind, size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - size_t unzeroed; - - assert(binind < BININD_INVALID); - assert(pageind - runind >= map_bias); - assert((flags & CHUNK_MAP_DIRTY) == flags); - unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. 
*/ - arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind << - CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, - size_t unzeroed) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) | - unzeroed); -} - -JEMALLOC_INLINE bool -arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - assert(prof_interval != 0); - - arena->prof_accumbytes += accumbytes; - if (arena->prof_accumbytes >= prof_interval) { - arena->prof_accumbytes -= prof_interval; - return (true); - } - return (false); -} - -JEMALLOC_INLINE bool -arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - - if (prof_interval == 0) - return (false); - return (arena_prof_accum_impl(arena, accumbytes)); -} - -JEMALLOC_INLINE bool -arena_prof_accum(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - - if (prof_interval == 0) - return (false); - - { - bool ret; - - malloc_mutex_lock(&arena->lock); - ret = arena_prof_accum_impl(arena, accumbytes); - malloc_mutex_unlock(&arena->lock); - return (ret); - } -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_ptr_small_binind_get(const void *ptr, size_t mapbits) -{ - size_t binind; - - binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; - - if (config_debug) { - arena_chunk_t *chunk; - arena_t *arena; - size_t pageind; - size_t actual_mapbits; - arena_run_t *run; - arena_bin_t *bin; - size_t actual_binind; - arena_bin_info_t *bin_info; - - assert(binind != BININD_INVALID); - assert(binind < NBINS); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - actual_mapbits = arena_mapbits_get(chunk, pageind); - assert(mapbits == actual_mapbits); - assert(arena_mapbits_large_get(chunk, pageind) == 0); - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - (actual_mapbits >> LG_PAGE)) << LG_PAGE)); - bin = run->bin; - actual_binind = bin - arena->bins; - assert(binind == actual_binind); - bin_info = &arena_bin_info[actual_binind]; - assert(((uintptr_t)ptr - ((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval - == 0); - } - - return (binind); -} -# endif /* JEMALLOC_ARENA_INLINE_A */ - -# ifdef JEMALLOC_ARENA_INLINE_B -JEMALLOC_INLINE size_t -arena_bin_index(arena_t *arena, arena_bin_t *bin) -{ - size_t binind = bin - arena->bins; - assert(binind < NBINS); - return (binind); -} - -JEMALLOC_INLINE unsigned -arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) -{ - unsigned shift, diff, regind; - size_t interval; - - /* - * Freeing a pointer lower than region zero can cause assertion - * failure. - */ - assert((uintptr_t)ptr >= (uintptr_t)run + - (uintptr_t)bin_info->reg0_offset); - - /* - * Avoid doing division with a variable divisor if possible. Using - * actual division here can reduce allocator throughput by over 20%! - */ - diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - - bin_info->reg0_offset); - - /* Rescale (factor powers of 2 out of the numerator and denominator). */ - interval = bin_info->reg_interval; - shift = ffs(interval) - 1; - diff >>= shift; - interval >>= shift; - - if (interval == 1) { - /* The divisor was a power of 2. 
*/ - regind = diff; - } else { - /* - * To divide by a number D that is not a power of two we - * multiply by (2^21 / D) and then right shift by 21 positions. - * - * X / D - * - * becomes - * - * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT - * - * We can omit the first three elements, because we never - * divide by 0, and 1 and 2 are both powers of two, which are - * handled above. - */ -#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS) -#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) - static const unsigned interval_invs[] = { - SIZE_INV(3), - SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), - SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), - SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), - SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), - SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), - SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), - SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) - }; - - if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) + - 2)) { - regind = (diff * interval_invs[interval - 3]) >> - SIZE_INV_SHIFT; - } else - regind = diff / interval; -#undef SIZE_INV -#undef SIZE_INV_SHIFT - } - assert(diff == regind * interval); - assert(regind < bin_info->nregs); - - return (regind); -} - -JEMALLOC_INLINE prof_ctx_t * -arena_prof_ctx_get(const void *ptr) -{ - prof_ctx_t *ret; - arena_chunk_t *chunk; - size_t pageind, mapbits; - - cassert(config_prof); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); - if ((mapbits & CHUNK_MAP_LARGE) == 0) { - if (prof_promote) - ret = (prof_ctx_t *)(uintptr_t)1U; - else { - arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + - (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << - LG_PAGE)); - size_t binind = arena_ptr_small_binind_get(ptr, - mapbits); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - unsigned regind; - - regind = arena_run_regind(run, bin_info, ptr); - ret = *(prof_ctx_t **)((uintptr_t)run + - bin_info->ctx0_offset + (regind * - sizeof(prof_ctx_t *))); - } - } else - ret = arena_mapp_get(chunk, pageind)->prof_ctx; - - return (ret); -} - -JEMALLOC_INLINE void -arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) -{ - arena_chunk_t *chunk; - size_t pageind, mapbits; - - cassert(config_prof); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); - if ((mapbits & CHUNK_MAP_LARGE) == 0) { - if (prof_promote == false) { - arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + - (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << - LG_PAGE)); - size_t binind; - arena_bin_info_t *bin_info; - unsigned regind; - - binind = arena_ptr_small_binind_get(ptr, mapbits); - bin_info = &arena_bin_info[binind]; - regind = arena_run_regind(run, bin_info, ptr); - - *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset - + (regind * sizeof(prof_ctx_t *)))) = ctx; - } else - assert((uintptr_t)ctx == (uintptr_t)1U); - } else - arena_mapp_get(chunk, pageind)->prof_ctx = ctx; -} - -JEMALLOC_ALWAYS_INLINE void * -arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache) -{ - tcache_t *tcache; - - assert(size != 0); - assert(size <= 
arena_maxclass); - - if (size <= SMALL_MAXCLASS) { - if (try_tcache && (tcache = tcache_get(true)) != NULL) - return (tcache_alloc_small(tcache, size, zero)); - else { - return (arena_malloc_small(choose_arena(arena), size, - zero)); - } - } else { - /* - * Initialize tcache after checking size in order to avoid - * infinite recursion during tcache initialization. - */ - if (try_tcache && size <= tcache_maxclass && (tcache = - tcache_get(true)) != NULL) - return (tcache_alloc_large(tcache, size, zero)); - else { - return (arena_malloc_large(choose_arena(arena), size, - zero)); - } - } -} - -/* Return the size of the allocation pointed to by ptr. */ -JEMALLOC_ALWAYS_INLINE size_t -arena_salloc(const void *ptr, bool demote) -{ - size_t ret; - arena_chunk_t *chunk; - size_t pageind, binind; - - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - binind = arena_mapbits_binind_get(chunk, pageind); - if (binind == BININD_INVALID || (config_prof && demote == false && - prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) { - /* - * Large allocation. In the common case (demote == true), and - * as this is an inline function, most callers will only end up - * looking at binind to determine that ptr is a small - * allocation. - */ - assert(((uintptr_t)ptr & PAGE_MASK) == 0); - ret = arena_mapbits_large_size_get(chunk, pageind); - assert(ret != 0); - assert(pageind + (ret>>LG_PAGE) <= chunk_npages); - assert(ret == PAGE || arena_mapbits_large_size_get(chunk, - pageind+(ret>>LG_PAGE)-1) == 0); - assert(binind == arena_mapbits_binind_get(chunk, - pageind+(ret>>LG_PAGE)-1)); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1)); - } else { - /* - * Small allocation (possibly promoted to a large object due to - * prof_promote). - */ - assert(arena_mapbits_large_get(chunk, pageind) != 0 || - arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, - pageind)) == binind); - ret = arena_bin_info[binind].reg_size; - } - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache) -{ - size_t pageind, mapbits; - tcache_t *tcache; - - assert(arena != NULL); - assert(chunk->arena == arena); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = arena_mapbits_get(chunk, pageind); - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - if ((mapbits & CHUNK_MAP_LARGE) == 0) { - /* Small allocation. 
*/ - if (try_tcache && (tcache = tcache_get(false)) != NULL) { - size_t binind; - - binind = arena_ptr_small_binind_get(ptr, mapbits); - tcache_dalloc_small(tcache, ptr, binind); - } else - arena_dalloc_small(arena, chunk, ptr, pageind); - } else { - size_t size = arena_mapbits_large_size_get(chunk, pageind); - - assert(((uintptr_t)ptr & PAGE_MASK) == 0); - - if (try_tcache && size <= tcache_maxclass && (tcache = - tcache_get(false)) != NULL) { - tcache_dalloc_large(tcache, ptr, size); - } else - arena_dalloc_large(arena, chunk, ptr); - } -} -# endif /* JEMALLOC_ARENA_INLINE_B */ -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/atomic.h b/extra/jemalloc/include/jemalloc/internal/atomic.h deleted file mode 100644 index 11a7b47fe0f..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/atomic.h +++ /dev/null @@ -1,304 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#define atomic_read_uint64(p) atomic_add_uint64(p, 0) -#define atomic_read_uint32(p) atomic_add_uint32(p, 0) -#define atomic_read_z(p) atomic_add_z(p, 0) -#define atomic_read_u(p) atomic_add_u(p, 0) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); -uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); -uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); -uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); -size_t atomic_add_z(size_t *p, size_t x); -size_t atomic_sub_z(size_t *p, size_t x); -unsigned atomic_add_u(unsigned *p, unsigned x); -unsigned atomic_sub_u(unsigned *p, unsigned x); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) -/******************************************************************************/ -/* 64-bit operations. */ -#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) -# ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, -((int64_t)x))); -} -#elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); -} -# elif (defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. 
*/ - ); - - return (x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - x = (uint64_t)(-(int64_t)x); - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} -# elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - /* - * atomic_fetchadd_64() doesn't exist, but we only ever use this - * function on LP64 systems, so atomic_fetchadd_long() will do. - */ - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); -} -# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -# else -# error "Missing implementation for 64-bit atomic operations" -# endif -#endif - -/******************************************************************************/ -/* 32-bit operations. */ -#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (InterlockedExchangeAdd(p, x)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (InterlockedExchangeAdd(p, -((int32_t)x))); -} -#elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); -} -#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - x = (uint32_t)(-(int32_t)x); - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} -#elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); -} -#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -#else -# error "Missing implementation for 32-bit atomic operations" -#endif - -/******************************************************************************/ -/* size_t operations. 
*/ -JEMALLOC_INLINE size_t -atomic_add_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE size_t -atomic_sub_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} - -/******************************************************************************/ -/* unsigned operations. */ -JEMALLOC_INLINE unsigned -atomic_add_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE unsigned -atomic_sub_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} -/******************************************************************************/ -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/base.h b/extra/jemalloc/include/jemalloc/internal/base.h deleted file mode 100644 index 9cf75ffb0b3..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/base.h +++ /dev/null @@ -1,26 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *base_alloc(size_t size); -void *base_calloc(size_t number, size_t size); -extent_node_t *base_node_alloc(void); -void base_node_dealloc(extent_node_t *node); -bool base_boot(void); -void base_prefork(void); -void base_postfork_parent(void); -void base_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/bitmap.h b/extra/jemalloc/include/jemalloc/internal/bitmap.h deleted file mode 100644 index 605ebac58c1..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/bitmap.h +++ /dev/null @@ -1,184 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ -#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS - -typedef struct bitmap_level_s bitmap_level_t; -typedef struct bitmap_info_s bitmap_info_t; -typedef unsigned long bitmap_t; -#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG - -/* Number of bits per group. */ -#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) -#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) -#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) - -/* Maximum number of levels possible. 
*/ -#define BITMAP_MAX_LEVELS \ - (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ - + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct bitmap_level_s { - /* Offset of this level's groups within the array of groups. */ - size_t group_offset; -}; - -struct bitmap_info_s { - /* Logical number of bits in bitmap (stored at bottom level). */ - size_t nbits; - - /* Number of levels necessary for nbits. */ - unsigned nlevels; - - /* - * Only the first (nlevels+1) elements are used, and levels are ordered - * bottom to top (e.g. the bottom level is stored in levels[0]). - */ - bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); -size_t bitmap_info_ngroups(const bitmap_info_t *binfo); -size_t bitmap_size(size_t nbits); -void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); -bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); -void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) -JEMALLOC_INLINE bool -bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1; - bitmap_t rg = bitmap[rgoff]; - /* The bitmap is full iff the root group is 0. */ - return (rg == 0); -} - -JEMALLOC_INLINE bool -bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t g; - - assert(bit < binfo->nbits); - goff = bit >> LG_BITMAP_GROUP_NBITS; - g = bitmap[goff]; - return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))); -} - -JEMALLOC_INLINE void -bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t *gp; - bitmap_t g; - - assert(bit < binfo->nbits); - assert(bitmap_get(bitmap, binfo, bit) == false); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; - assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - assert(bitmap_get(bitmap, binfo, bit)); - /* Propagate group state transitions up the tree. */ - if (g == 0) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; - assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - if (g != 0) - break; - } - } -} - -/* sfu: set first unset. 
*/ -JEMALLOC_INLINE size_t -bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - size_t bit; - bitmap_t g; - unsigned i; - - assert(bitmap_full(bitmap, binfo) == false); - - i = binfo->nlevels - 1; - g = bitmap[binfo->levels[i].group_offset]; - bit = ffsl(g) - 1; - while (i > 0) { - i--; - g = bitmap[binfo->levels[i].group_offset + bit]; - bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1); - } - - bitmap_set(bitmap, binfo, bit); - return (bit); -} - -JEMALLOC_INLINE void -bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t *gp; - bitmap_t g; - bool propagate; - - assert(bit < binfo->nbits); - assert(bitmap_get(bitmap, binfo, bit)); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; - propagate = (g == 0); - assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - assert(bitmap_get(bitmap, binfo, bit) == false); - /* Propagate group state transitions up the tree. */ - if (propagate) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; - propagate = (g == 0); - assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) - == 0); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - if (propagate == false) - break; - } - } -} - -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/chunk.h b/extra/jemalloc/include/jemalloc/internal/chunk.h deleted file mode 100644 index 87d8700dac8..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/chunk.h +++ /dev/null @@ -1,63 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * Size and alignment of memory chunks that are allocated by the OS's virtual - * memory system. - */ -#define LG_CHUNK_DEFAULT 22 - -/* Return the chunk address for allocation address a. */ -#define CHUNK_ADDR2BASE(a) \ - ((void *)((uintptr_t)(a) & ~chunksize_mask)) - -/* Return the chunk offset of address a. */ -#define CHUNK_ADDR2OFFSET(a) \ - ((size_t)((uintptr_t)(a) & chunksize_mask)) - -/* Return the smallest chunk multiple that is >= s. */ -#define CHUNK_CEILING(s) \ - (((s) + chunksize_mask) & ~chunksize_mask) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern size_t opt_lg_chunk; -extern const char *opt_dss; - -/* Protects stats_chunks; currently not used for any other purpose. */ -extern malloc_mutex_t chunks_mtx; -/* Chunk statistics. */ -extern chunk_stats_t stats_chunks; - -extern rtree_t *chunks_rtree; - -extern size_t chunksize; -extern size_t chunksize_mask; /* (chunksize - 1). */ -extern size_t chunk_npages; -extern size_t map_bias; /* Number of arena chunk header pages. */ -extern size_t arena_maxclass; /* Max size class for arenas. 
*/ - -void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero, - dss_prec_t dss_prec); -void chunk_unmap(void *chunk, size_t size); -void chunk_dealloc(void *chunk, size_t size, bool unmap); -bool chunk_boot(void); -void chunk_prefork(void); -void chunk_postfork_parent(void); -void chunk_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - -#include "jemalloc/internal/chunk_dss.h" -#include "jemalloc/internal/chunk_mmap.h" diff --git a/extra/jemalloc/include/jemalloc/internal/chunk_dss.h b/extra/jemalloc/include/jemalloc/internal/chunk_dss.h deleted file mode 100644 index 6585f071bbe..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/chunk_dss.h +++ /dev/null @@ -1,38 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef enum { - dss_prec_disabled = 0, - dss_prec_primary = 1, - dss_prec_secondary = 2, - - dss_prec_limit = 3 -} dss_prec_t ; -#define DSS_PREC_DEFAULT dss_prec_secondary -#define DSS_DEFAULT "secondary" - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -extern const char *dss_prec_names[]; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -dss_prec_t chunk_dss_prec_get(void); -bool chunk_dss_prec_set(dss_prec_t dss_prec); -void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero); -bool chunk_in_dss(void *chunk); -bool chunk_dss_boot(void); -void chunk_dss_prefork(void); -void chunk_dss_postfork_parent(void); -void chunk_dss_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/chunk_mmap.h b/extra/jemalloc/include/jemalloc/internal/chunk_mmap.h deleted file mode 100644 index f24abac7538..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/chunk_mmap.h +++ /dev/null @@ -1,22 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -bool pages_purge(void *addr, size_t length); - -void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero); -bool chunk_dealloc_mmap(void *chunk, size_t size); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/ckh.h b/extra/jemalloc/include/jemalloc/internal/ckh.h deleted file mode 100644 index 50c39ed9581..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/ckh.h +++ /dev/null @@ -1,88 +0,0 @@ 
-/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ckh_s ckh_t; -typedef struct ckhc_s ckhc_t; - -/* Typedefs to allow easy function pointer passing. */ -typedef void ckh_hash_t (const void *, size_t[2]); -typedef bool ckh_keycomp_t (const void *, const void *); - -/* Maintain counters used to get an idea of performance. */ -/* #define CKH_COUNT */ -/* Print counter values in ckh_delete() (requires CKH_COUNT). */ -/* #define CKH_VERBOSE */ - -/* - * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit - * one bucket per L1 cache line. - */ -#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Hash table cell. */ -struct ckhc_s { - const void *key; - const void *data; -}; - -struct ckh_s { -#ifdef CKH_COUNT - /* Counters used to get an idea of performance. */ - uint64_t ngrows; - uint64_t nshrinks; - uint64_t nshrinkfails; - uint64_t ninserts; - uint64_t nrelocs; -#endif - - /* Used for pseudo-random number generation. */ -#define CKH_A 1103515241 -#define CKH_C 12347 - uint32_t prng_state; - - /* Total number of items. */ - size_t count; - - /* - * Minimum and current number of hash table buckets. There are - * 2^LG_CKH_BUCKET_CELLS cells per bucket. - */ - unsigned lg_minbuckets; - unsigned lg_curbuckets; - - /* Hash and comparison functions. */ - ckh_hash_t *hash; - ckh_keycomp_t *keycomp; - - /* Hash table with 2^lg_curbuckets buckets. */ - ckhc_t *tab; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, - ckh_keycomp_t *keycomp); -void ckh_delete(ckh_t *ckh); -size_t ckh_count(ckh_t *ckh); -bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); -bool ckh_insert(ckh_t *ckh, const void *key, const void *data); -bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key, - void **data); -bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data); -void ckh_string_hash(const void *key, size_t r_hash[2]); -bool ckh_string_keycomp(const void *k1, const void *k2); -void ckh_pointer_hash(const void *key, size_t r_hash[2]); -bool ckh_pointer_keycomp(const void *k1, const void *k2); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/ctl.h b/extra/jemalloc/include/jemalloc/internal/ctl.h deleted file mode 100644 index 0ffecc5f2a2..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/ctl.h +++ /dev/null @@ -1,117 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ctl_node_s ctl_node_t; -typedef struct ctl_named_node_s ctl_named_node_t; -typedef struct ctl_indexed_node_s ctl_indexed_node_t; -typedef struct ctl_arena_stats_s ctl_arena_stats_t; -typedef struct ctl_stats_s ctl_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct ctl_node_s { - bool named; -}; - -struct ctl_named_node_s { - struct ctl_node_s node; 
- const char *name; - /* If (nchildren == 0), this is a terminal node. */ - unsigned nchildren; - const ctl_node_t *children; - int (*ctl)(const size_t *, size_t, void *, size_t *, - void *, size_t); -}; - -struct ctl_indexed_node_s { - struct ctl_node_s node; - const ctl_named_node_t *(*index)(const size_t *, size_t, size_t); -}; - -struct ctl_arena_stats_s { - bool initialized; - unsigned nthreads; - const char *dss; - size_t pactive; - size_t pdirty; - arena_stats_t astats; - - /* Aggregate stats for small size classes, based on bin stats. */ - size_t allocated_small; - uint64_t nmalloc_small; - uint64_t ndalloc_small; - uint64_t nrequests_small; - - malloc_bin_stats_t bstats[NBINS]; - malloc_large_stats_t *lstats; /* nlclasses elements. */ -}; - -struct ctl_stats_s { - size_t allocated; - size_t active; - size_t mapped; - struct { - size_t current; /* stats_chunks.curchunks */ - uint64_t total; /* stats_chunks.nchunks */ - size_t high; /* stats_chunks.highchunks */ - } chunks; - struct { - size_t allocated; /* huge_allocated */ - uint64_t nmalloc; /* huge_nmalloc */ - uint64_t ndalloc; /* huge_ndalloc */ - } huge; - unsigned narenas; - ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen); -int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); - -int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen); -bool ctl_boot(void); -void ctl_prefork(void); -void ctl_postfork_parent(void); -void ctl_postfork_child(void); - -#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ - != 0) { \ - malloc_printf( \ - "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \ - name); \ - abort(); \ - } \ -} while (0) - -#define xmallctlnametomib(name, mibp, miblenp) do { \ - if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ - malloc_printf("<jemalloc>: Failure in " \ - "xmallctlnametomib(\"%s\", ...)\n", name); \ - abort(); \ - } \ -} while (0) - -#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ - newlen) != 0) { \ - malloc_write( \ - "<jemalloc>: Failure in xmallctlbymib()\n"); \ - abort(); \ - } \ -} while (0) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/extra/jemalloc/include/jemalloc/internal/extent.h b/extra/jemalloc/include/jemalloc/internal/extent.h deleted file mode 100644 index ba95ca816bd..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/extent.h +++ /dev/null @@ -1,46 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct extent_node_s extent_node_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Tree of extents. */ -struct extent_node_s { - /* Linkage for the size/address-ordered tree. */ - rb_node(extent_node_t) link_szad; - - /* Linkage for the address-ordered tree. 
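The ctl_* tree above is the machinery behind jemalloc's public mallctl() interface, which the xmallctl() wrappers call and abort on failure. A minimal sketch of reading one statistic through that public interface, assuming <jemalloc/jemalloc.h> is available, the library was built with stats enabled and without a symbol prefix (otherwise the calls are je_mallctl() etc.):

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t allocated, sz = sizeof(allocated);

	free(malloc(4096));	/* touch the allocator so the counters move */

	/* Advance the epoch so the stats snapshot is refreshed... */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	/* ...then read the "stats.allocated" leaf of the ctl tree. */
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0)
		return (1);
	printf("stats.allocated = %zu bytes\n", allocated);
	return (0);
}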
*/ - rb_node(extent_node_t) link_ad; - - /* Profile counters, used for huge objects. */ - prof_ctx_t *prof_ctx; - - /* Pointer to the extent that this tree node is responsible for. */ - void *addr; - - /* Total region size. */ - size_t size; - - /* True if zero-filled; used by chunk recycling code. */ - bool zeroed; -}; -typedef rb_tree(extent_node_t) extent_tree_t; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t) - -rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/extra/jemalloc/include/jemalloc/internal/hash.h b/extra/jemalloc/include/jemalloc/internal/hash.h deleted file mode 100644 index 56ecc793b36..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/hash.h +++ /dev/null @@ -1,331 +0,0 @@ -/* - * The following hash function is based on MurmurHash3, placed into the public - * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for - * details. - */ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void hash(const void *key, size_t len, const uint32_t seed, - size_t r_hash[2]); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) -/******************************************************************************/ -/* Internal implementation. 
*/ -JEMALLOC_INLINE uint32_t -hash_rotl_32(uint32_t x, int8_t r) -{ - - return (x << r) | (x >> (32 - r)); -} - -JEMALLOC_INLINE uint64_t -hash_rotl_64(uint64_t x, int8_t r) -{ - return (x << r) | (x >> (64 - r)); -} - -JEMALLOC_INLINE uint32_t -hash_get_block_32(const uint32_t *p, int i) -{ - - return p[i]; -} - -JEMALLOC_INLINE uint64_t -hash_get_block_64(const uint64_t *p, int i) -{ - - return p[i]; -} - -JEMALLOC_INLINE uint32_t -hash_fmix_32(uint32_t h) -{ - - h ^= h >> 16; - h *= 0x85ebca6b; - h ^= h >> 13; - h *= 0xc2b2ae35; - h ^= h >> 16; - - return h; -} - -JEMALLOC_INLINE uint64_t -hash_fmix_64(uint64_t k) -{ - - k ^= k >> 33; - k *= QU(0xff51afd7ed558ccdLLU); - k ^= k >> 33; - k *= QU(0xc4ceb9fe1a85ec53LLU); - k ^= k >> 33; - - return k; -} - -JEMALLOC_INLINE uint32_t -hash_x86_32(const void *key, int len, uint32_t seed) -{ - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 4; - - uint32_t h1 = seed; - - const uint32_t c1 = 0xcc9e2d51; - const uint32_t c2 = 0x1b873593; - - /* body */ - { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); - int i; - - for (i = -nblocks; i; i++) { - uint32_t k1 = hash_get_block_32(blocks, i); - - k1 *= c1; - k1 = hash_rotl_32(k1, 15); - k1 *= c2; - - h1 ^= k1; - h1 = hash_rotl_32(h1, 13); - h1 = h1*5 + 0xe6546b64; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t *) (data + nblocks*4); - - uint32_t k1 = 0; - - switch (len & 3) { - case 3: k1 ^= tail[2] << 16; - case 2: k1 ^= tail[1] << 8; - case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); - k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; - - h1 = hash_fmix_32(h1); - - return h1; -} - -UNUSED JEMALLOC_INLINE void -hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]) -{ - const uint8_t * data = (const uint8_t *) key; - const int nblocks = len / 16; - - uint32_t h1 = seed; - uint32_t h2 = seed; - uint32_t h3 = seed; - uint32_t h4 = seed; - - const uint32_t c1 = 0x239b961b; - const uint32_t c2 = 0xab0e9789; - const uint32_t c3 = 0x38b34ae5; - const uint32_t c4 = 0xa1e38b93; - - /* body */ - { - const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); - int i; - - for (i = -nblocks; i; i++) { - uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); - uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); - uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); - uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); - - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; - - h1 = hash_rotl_32(h1, 19); h1 += h2; - h1 = h1*5 + 0x561ccd1b; - - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; - - h2 = hash_rotl_32(h2, 17); h2 += h3; - h2 = h2*5 + 0x0bcaa747; - - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; - - h3 = hash_rotl_32(h3, 15); h3 += h4; - h3 = h3*5 + 0x96cd1c35; - - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; - - h4 = hash_rotl_32(h4, 13); h4 += h1; - h4 = h4*5 + 0x32ac3b17; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t *) (data + nblocks*16); - uint32_t k1 = 0; - uint32_t k2 = 0; - uint32_t k3 = 0; - uint32_t k4 = 0; - - switch (len & 15) { - case 15: k4 ^= tail[14] << 16; - case 14: k4 ^= tail[13] << 8; - case 13: k4 ^= tail[12] << 0; - k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; - - case 12: k3 ^= tail[11] << 24; - case 11: k3 ^= tail[10] << 16; - case 10: k3 ^= tail[ 9] << 8; - case 9: k3 ^= tail[ 8] << 0; - k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; - - case 8: k2 ^= tail[ 7] << 24; - case 7: k2 ^= tail[ 6] << 
16; - case 6: k2 ^= tail[ 5] << 8; - case 5: k2 ^= tail[ 4] << 0; - k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; - - case 4: k1 ^= tail[ 3] << 24; - case 3: k1 ^= tail[ 2] << 16; - case 2: k1 ^= tail[ 1] << 8; - case 1: k1 ^= tail[ 0] << 0; - k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; - - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; - - h1 = hash_fmix_32(h1); - h2 = hash_fmix_32(h2); - h3 = hash_fmix_32(h3); - h4 = hash_fmix_32(h4); - - h1 += h2; h1 += h3; h1 += h4; - h2 += h1; h3 += h1; h4 += h1; - - r_out[0] = (((uint64_t) h2) << 32) | h1; - r_out[1] = (((uint64_t) h4) << 32) | h3; -} - -UNUSED JEMALLOC_INLINE void -hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]) -{ - const uint8_t *data = (const uint8_t *) key; - const int nblocks = len / 16; - - uint64_t h1 = seed; - uint64_t h2 = seed; - - const uint64_t c1 = QU(0x87c37b91114253d5LLU); - const uint64_t c2 = QU(0x4cf5ad432745937fLLU); - - /* body */ - { - const uint64_t *blocks = (const uint64_t *) (data); - int i; - - for (i = 0; i < nblocks; i++) { - uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); - uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); - - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; - - h1 = hash_rotl_64(h1, 27); h1 += h2; - h1 = h1*5 + 0x52dce729; - - k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - - h2 = hash_rotl_64(h2, 31); h2 += h1; - h2 = h2*5 + 0x38495ab5; - } - } - - /* tail */ - { - const uint8_t *tail = (const uint8_t*)(data + nblocks*16); - uint64_t k1 = 0; - uint64_t k2 = 0; - - switch (len & 15) { - case 15: k2 ^= ((uint64_t)(tail[14])) << 48; - case 14: k2 ^= ((uint64_t)(tail[13])) << 40; - case 13: k2 ^= ((uint64_t)(tail[12])) << 32; - case 12: k2 ^= ((uint64_t)(tail[11])) << 24; - case 11: k2 ^= ((uint64_t)(tail[10])) << 16; - case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; - case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; - k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - - case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; - case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; - case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; - case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; - case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; - case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; - case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; - case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; - k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; - } - } - - /* finalization */ - h1 ^= len; h2 ^= len; - - h1 += h2; - h2 += h1; - - h1 = hash_fmix_64(h1); - h2 = hash_fmix_64(h2); - - h1 += h2; - h2 += h1; - - r_out[0] = h1; - r_out[1] = h2; -} - - -/******************************************************************************/ -/* API. 
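For illustration, the shared finalizer used by the functions above (hash_fmix_32) can be exercised standalone; the constants are MurmurHash3's published ones, copied from the header as shown:

#include <stdint.h>
#include <stdio.h>

static uint32_t
fmix32(uint32_t h)	/* same steps as hash_fmix_32() above */
{
	h ^= h >> 16;
	h *= 0x85ebca6b;
	h ^= h >> 13;
	h *= 0xc2b2ae35;
	h ^= h >> 16;
	return (h);
}

int
main(void)
{
	/* Inputs differing in one bit typically give very different outputs (avalanche). */
	printf("fmix32(1) = %#010x\n", fmix32(1));
	printf("fmix32(3) = %#010x\n", fmix32(3));
	return (0);
}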
*/ -JEMALLOC_INLINE void -hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) -{ -#if (LG_SIZEOF_PTR == 3) - hash_x64_128(key, len, seed, (uint64_t *)r_hash); -#else - uint64_t hashes[2]; - hash_x86_128(key, len, seed, hashes); - r_hash[0] = (size_t)hashes[0]; - r_hash[1] = (size_t)hashes[1]; -#endif -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/huge.h b/extra/jemalloc/include/jemalloc/internal/huge.h deleted file mode 100644 index d987d370767..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/huge.h +++ /dev/null @@ -1,40 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -/* Huge allocation statistics. */ -extern uint64_t huge_nmalloc; -extern uint64_t huge_ndalloc; -extern size_t huge_allocated; - -/* Protects chunk-related data structures. */ -extern malloc_mutex_t huge_mtx; - -void *huge_malloc(size_t size, bool zero); -void *huge_palloc(size_t size, size_t alignment, bool zero); -void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, - size_t extra); -void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero, bool try_tcache_dalloc); -void huge_dalloc(void *ptr, bool unmap); -size_t huge_salloc(const void *ptr); -prof_ctx_t *huge_prof_ctx_get(const void *ptr); -void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); -bool huge_boot(void); -void huge_prefork(void); -void huge_postfork_parent(void); -void huge_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/extra/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in deleted file mode 100644 index 124ec34bddf..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in +++ /dev/null @@ -1,1030 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_H -#define JEMALLOC_INTERNAL_H -#include <math.h> -#ifdef _WIN32 -# include <windows.h> -# define ENOENT ERROR_PATH_NOT_FOUND -# define EINVAL ERROR_BAD_ARGUMENTS -# define EAGAIN ERROR_OUTOFMEMORY -# define EPERM ERROR_WRITE_FAULT -# define EFAULT ERROR_INVALID_ADDRESS -# define ENOMEM ERROR_NOT_ENOUGH_MEMORY -# undef ERANGE -# define ERANGE ERROR_INVALID_DATA -#else -# include <sys/param.h> -# include <sys/mman.h> -# include <sys/syscall.h> -# if !defined(SYS_write) && defined(__NR_write) -# define SYS_write __NR_write -# endif -# include <sys/uio.h> -# include <pthread.h> -# include <errno.h> -#endif -#include <sys/types.h> - -#include <limits.h> -#ifndef SIZE_T_MAX -# define SIZE_T_MAX SIZE_MAX -#endif -#include <stdarg.h> -#include <stdbool.h> -#include <stdio.h> -#include <stdlib.h> -#include <stdint.h> -#include <stddef.h> -#ifndef offsetof -# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) -#endif -#include <inttypes.h> -#include <string.h> -#include <strings.h> -#include <ctype.h> -#ifdef 
_MSC_VER -# include <io.h> -typedef intptr_t ssize_t; -# define PATH_MAX 1024 -# define STDERR_FILENO 2 -# define __func__ __FUNCTION__ -/* Disable warnings about deprecated system functions */ -# pragma warning(disable: 4996) -#else -# include <unistd.h> -#endif -#include <fcntl.h> - -#define JEMALLOC_NO_DEMANGLE -#include "../jemalloc@install_suffix@.h" - -#ifdef JEMALLOC_UTRACE -#include <sys/ktrace.h> -#endif - -#ifdef JEMALLOC_VALGRIND -#include <valgrind/valgrind.h> -#include <valgrind/memcheck.h> -#endif - -#include "jemalloc/internal/private_namespace.h" - -#ifdef JEMALLOC_CC_SILENCE -#define UNUSED JEMALLOC_ATTR(unused) -#else -#define UNUSED -#endif - -static const bool config_debug = -#ifdef JEMALLOC_DEBUG - true -#else - false -#endif - ; -static const bool config_dss = -#ifdef JEMALLOC_DSS - true -#else - false -#endif - ; -static const bool config_fill = -#ifdef JEMALLOC_FILL - true -#else - false -#endif - ; -static const bool config_lazy_lock = -#ifdef JEMALLOC_LAZY_LOCK - true -#else - false -#endif - ; -static const bool config_prof = -#ifdef JEMALLOC_PROF - true -#else - false -#endif - ; -static const bool config_prof_libgcc = -#ifdef JEMALLOC_PROF_LIBGCC - true -#else - false -#endif - ; -static const bool config_prof_libunwind = -#ifdef JEMALLOC_PROF_LIBUNWIND - true -#else - false -#endif - ; -static const bool config_mremap = -#ifdef JEMALLOC_MREMAP - true -#else - false -#endif - ; -static const bool config_munmap = -#ifdef JEMALLOC_MUNMAP - true -#else - false -#endif - ; -static const bool config_stats = -#ifdef JEMALLOC_STATS - true -#else - false -#endif - ; -static const bool config_tcache = -#ifdef JEMALLOC_TCACHE - true -#else - false -#endif - ; -static const bool config_tls = -#ifdef JEMALLOC_TLS - true -#else - false -#endif - ; -static const bool config_utrace = -#ifdef JEMALLOC_UTRACE - true -#else - false -#endif - ; -static const bool config_valgrind = -#ifdef JEMALLOC_VALGRIND - true -#else - false -#endif - ; -static const bool config_xmalloc = -#ifdef JEMALLOC_XMALLOC - true -#else - false -#endif - ; -static const bool config_ivsalloc = -#ifdef JEMALLOC_IVSALLOC - true -#else - false -#endif - ; - -#ifdef JEMALLOC_ATOMIC9 -#include <machine/atomic.h> -#endif - -#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) -#include <libkern/OSAtomic.h> -#endif - -#ifdef JEMALLOC_ZONE -#include <mach/mach_error.h> -#include <mach/mach_init.h> -#include <mach/vm_map.h> -#include <malloc/malloc.h> -#endif - -#define RB_COMPACT -#include "jemalloc/internal/rb.h" -#include "jemalloc/internal/qr.h" -#include "jemalloc/internal/ql.h" - -/* - * jemalloc can conceptually be broken into components (arena, tcache, etc.), - * but there are circular dependencies that cannot be broken without - * substantial performance degradation. In order to reduce the effect on - * visual code flow, read the header files in multiple passes, with one of the - * following cpp variables defined during each pass: - * - * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data - * types. - * JEMALLOC_H_STRUCTS : Data structures. - * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. - * JEMALLOC_H_INLINES : Inline functions. 
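The pass scheme described above can be illustrated with a hypothetical two-file sketch (names are invented for illustration): the same header is read once per pass, and only the part selected by the macro currently defined is emitted.

/* widget.h -- intentionally has no include guard */
#ifdef WIDGET_H_TYPES
typedef struct widget_s widget_t;
#endif
#ifdef WIDGET_H_STRUCTS
struct widget_s { int id; };
#endif
#ifdef WIDGET_H_EXTERNS
void widget_print(const widget_t *w);
#endif

/* consumer.c -- includes the header once per pass, as jemalloc_internal.h does */
#define WIDGET_H_TYPES
#include "widget.h"
#undef WIDGET_H_TYPES

#define WIDGET_H_STRUCTS
#include "widget.h"
#undef WIDGET_H_STRUCTS

#define WIDGET_H_EXTERNS
#include "widget.h"
#undef WIDGET_H_EXTERNS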
- */ -/******************************************************************************/ -#define JEMALLOC_H_TYPES - -#define ALLOCM_LG_ALIGN_MASK ((int)0x3f) - -#define ZU(z) ((size_t)z) -#define QU(q) ((uint64_t)q) - -#ifndef __DECONST -# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) -#endif - -#ifdef JEMALLOC_DEBUG - /* Disable inlining to make debugging easier. */ -# define JEMALLOC_ALWAYS_INLINE -# define JEMALLOC_ALWAYS_INLINE_C static -# define JEMALLOC_INLINE -# define inline -#else -# define JEMALLOC_ENABLE_INLINE -# ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ALWAYS_INLINE \ - static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) -# define JEMALLOC_ALWAYS_INLINE_C \ - static inline JEMALLOC_ATTR(always_inline) -# else -# define JEMALLOC_ALWAYS_INLINE static inline -# define JEMALLOC_ALWAYS_INLINE_C static inline -# endif -# define JEMALLOC_INLINE static inline -# ifdef _MSC_VER -# define inline _inline -# endif -#endif - -/* Smallest size class to support. */ -#define LG_TINY_MIN 3 -#define TINY_MIN (1U << LG_TINY_MIN) - -/* - * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size - * classes). - */ -#ifndef LG_QUANTUM -# if (defined(__i386__) || defined(_M_IX86)) -# define LG_QUANTUM 4 -# endif -# ifdef __ia64__ -# define LG_QUANTUM 4 -# endif -# ifdef __alpha__ -# define LG_QUANTUM 4 -# endif -# ifdef __sparc64__ -# define LG_QUANTUM 4 -# endif -# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) -# define LG_QUANTUM 4 -# endif -# ifdef __arm__ -# define LG_QUANTUM 3 -# endif -# ifdef __hppa__ -# define LG_QUANTUM 4 -# endif -# ifdef __mips__ -# define LG_QUANTUM 3 -# endif -# ifdef __powerpc__ -# define LG_QUANTUM 4 -# endif -# ifdef __s390__ -# define LG_QUANTUM 4 -# endif -# ifdef __SH4__ -# define LG_QUANTUM 4 -# endif -# ifdef __tile__ -# define LG_QUANTUM 4 -# endif -# ifndef LG_QUANTUM -# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS" -# endif -#endif - -#define QUANTUM ((size_t)(1U << LG_QUANTUM)) -#define QUANTUM_MASK (QUANTUM - 1) - -/* Return the smallest quantum multiple that is >= a. */ -#define QUANTUM_CEILING(a) \ - (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) - -#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) -#define LONG_MASK (LONG - 1) - -/* Return the smallest long multiple that is >= a. */ -#define LONG_CEILING(a) \ - (((a) + LONG_MASK) & ~LONG_MASK) - -#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) -#define PTR_MASK (SIZEOF_PTR - 1) - -/* Return the smallest (void *) multiple that is >= a. */ -#define PTR_CEILING(a) \ - (((a) + PTR_MASK) & ~PTR_MASK) - -/* - * Maximum size of L1 cache line. This is used to avoid cache line aliasing. - * In addition, this controls the spacing of cacheline-spaced size classes. - * - * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can - * only handle raw constants. - */ -#define LG_CACHELINE 6 -#define CACHELINE 64 -#define CACHELINE_MASK (CACHELINE - 1) - -/* Return the smallest cacheline multiple that is >= s. */ -#define CACHELINE_CEILING(s) \ - (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) - -/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */ -#ifdef PAGE_MASK -# undef PAGE_MASK -#endif -#define LG_PAGE STATIC_PAGE_SHIFT -#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT)) -#define PAGE_MASK ((size_t)(PAGE - 1)) - -/* Return the smallest pagesize multiple that is >= s. */ -#define PAGE_CEILING(s) \ - (((s) + PAGE_MASK) & ~PAGE_MASK) - -/* Return the nearest aligned address at or below a. 
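A worked example of the power-of-two rounding above, assuming LG_QUANTUM == 4 (16-byte quantum), the 64-byte cache line and 4 KiB pages; CEIL() below simply mirrors QUANTUM_CEILING / CACHELINE_CEILING / PAGE_CEILING:

#include <stddef.h>
#include <stdio.h>

#define CEIL(s, mask)	(((s) + (size_t)(mask)) & ~(size_t)(mask))

int
main(void)
{
	printf("QUANTUM_CEILING(25)   = %zu\n", CEIL(25, 15));	/* 32 */
	printf("CACHELINE_CEILING(65) = %zu\n", CEIL(65, 63));	/* 128 */
	printf("PAGE_CEILING(4097)    = %zu\n", CEIL(4097, 4095));	/* 8192 */
	return (0);
}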
*/ -#define ALIGNMENT_ADDR2BASE(a, alignment) \ - ((void *)((uintptr_t)(a) & (-(alignment)))) - -/* Return the offset between a and the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ - ((size_t)((uintptr_t)(a) & (alignment - 1))) - -/* Return the smallest alignment multiple that is >= s. */ -#define ALIGNMENT_CEILING(s, alignment) \ - (((s) + (alignment - 1)) & (-(alignment))) - -/* Declare a variable length array */ -#if __STDC_VERSION__ < 199901L -# ifdef _MSC_VER -# include <malloc.h> -# define alloca _alloca -# else -# ifdef JEMALLOC_HAS_ALLOCA_H -# include <alloca.h> -# else -# include <stdlib.h> -# endif -# endif -# define VARIABLE_ARRAY(type, name, count) \ - type *name = alloca(sizeof(type) * count) -#else -# define VARIABLE_ARRAY(type, name, count) type name[count] -#endif - -#ifdef JEMALLOC_VALGRIND -/* - * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions - * so that when Valgrind reports errors, there are no extra stack frames - * in the backtraces. - * - * The size that is reported to valgrind must be consistent through a chain of - * malloc..realloc..realloc calls. Request size isn't recorded anywhere in - * jemalloc, so it is critical that all callers of these macros provide usize - * rather than request size. As a result, buffer overflow detection is - * technically weakened for the standard API, though it is generally accepted - * practice to consider any extra bytes reported by malloc_usable_size() as - * usable space. - */ -#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \ - if (config_valgrind && opt_valgrind && cond) \ - VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \ -} while (0) -#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ - old_rzsize, zero) do { \ - if (config_valgrind && opt_valgrind) { \ - size_t rzsize = p2rz(ptr); \ - \ - if (ptr == old_ptr) { \ - VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ - usize, rzsize); \ - if (zero && old_usize < usize) { \ - VALGRIND_MAKE_MEM_DEFINED( \ - (void *)((uintptr_t)ptr + \ - old_usize), usize - old_usize); \ - } \ - } else { \ - if (old_ptr != NULL) { \ - VALGRIND_FREELIKE_BLOCK(old_ptr, \ - old_rzsize); \ - } \ - if (ptr != NULL) { \ - size_t copy_size = (old_usize < usize) \ - ? 
old_usize : usize; \ - size_t tail_size = usize - copy_size; \ - VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ - rzsize, false); \ - if (copy_size > 0) { \ - VALGRIND_MAKE_MEM_DEFINED(ptr, \ - copy_size); \ - } \ - if (zero && tail_size > 0) { \ - VALGRIND_MAKE_MEM_DEFINED( \ - (void *)((uintptr_t)ptr + \ - copy_size), tail_size); \ - } \ - } \ - } \ - } \ -} while (0) -#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ - if (config_valgrind && opt_valgrind) \ - VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \ -} while (0) -#else -#define RUNNING_ON_VALGRIND ((unsigned)0) -#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \ - do {} while (0) -#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \ - do {} while (0) -#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0) -#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0) -#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0) -#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0) -#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0) -#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ - old_rzsize, zero) do {} while (0) -#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) -#endif - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_TYPES -/******************************************************************************/ -#define JEMALLOC_H_STRUCTS - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -typedef struct { - uint64_t allocated; - uint64_t deallocated; -} thread_allocated_t; -/* - * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro - * argument. 
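The reason for that wrapper, in a standalone sketch: a braced initializer containing a comma would otherwise be split into two macro arguments, and the parentheses of the wrapper invocation protect the comma. CONCAT here stands in for JEMALLOC_CONCAT, whose definition is not part of this hunk and is assumed to be a plain __VA_ARGS__ pass-through.

#include <stdio.h>

typedef struct {
	unsigned long long allocated;
	unsigned long long deallocated;
} thread_allocated_t;

#define CONCAT(...)	__VA_ARGS__
#define DEFINE_COUNTER(name, init)	thread_allocated_t name = init

/* DEFINE_COUNTER(c, {0, 0}) would fail: "{0" and "0}" look like two
 * arguments.  CONCAT({0, 0}) keeps them together, then expands to {0, 0}. */
DEFINE_COUNTER(counter, CONCAT({0, 0}));

int
main(void)
{
	printf("%llu %llu\n", counter.allocated, counter.deallocated);
	return (0);
}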
- */ -#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0}) - -#undef JEMALLOC_H_STRUCTS -/******************************************************************************/ -#define JEMALLOC_H_EXTERNS - -extern bool opt_abort; -extern bool opt_junk; -extern size_t opt_quarantine; -extern bool opt_redzone; -extern bool opt_utrace; -extern bool opt_valgrind; -extern bool opt_xmalloc; -extern bool opt_zero; -extern size_t opt_narenas; - -/* Number of CPUs. */ -extern unsigned ncpus; - -/* Protects arenas initialization (arenas, arenas_total). */ -extern malloc_mutex_t arenas_lock; -/* - * Arenas that are used to service external requests. Not all elements of the - * arenas array are necessarily used; arenas are created lazily as needed. - * - * arenas[0..narenas_auto) are used for automatic multiplexing of threads and - * arenas. arenas[narenas_auto..narenas_total) are only used if the application - * takes some action to create them and allocate from them. - */ -extern arena_t **arenas; -extern unsigned narenas_total; -extern unsigned narenas_auto; /* Read-only after initialization. */ - -arena_t *arenas_extend(unsigned ind); -void arenas_cleanup(void *arg); -arena_t *choose_arena_hard(void); -void jemalloc_prefork(void); -void jemalloc_postfork_parent(void); -void jemalloc_postfork_child(void); - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_EXTERNS -/******************************************************************************/ -#define JEMALLOC_H_INLINES - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *) - -size_t s2u(size_t size); -size_t sa2u(size_t size, size_t alignment); -unsigned narenas_total_get(void); -arena_t *choose_arena(arena_t *arena); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -/* - * Map of pthread_self() --> arenas[???], used for selecting an arena to use - * for allocations. - */ -malloc_tsd_externs(arenas, arena_t *) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL, - arenas_cleanup) - -/* - * Compute usable size that would result from allocating an object with the - * specified size. 
- */ -JEMALLOC_ALWAYS_INLINE size_t -s2u(size_t size) -{ - - if (size <= SMALL_MAXCLASS) - return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size); - if (size <= arena_maxclass) - return (PAGE_CEILING(size)); - return (CHUNK_CEILING(size)); -} - -/* - * Compute usable size that would result from allocating an object with the - * specified size and alignment. - */ -JEMALLOC_ALWAYS_INLINE size_t -sa2u(size_t size, size_t alignment) -{ - size_t usize; - - assert(alignment != 0 && ((alignment - 1) & alignment) == 0); - - /* - * Round size up to the nearest multiple of alignment. - * - * This done, we can take advantage of the fact that for each small - * size class, every object is aligned at the smallest power of two - * that is non-zero in the base two representation of the size. For - * example: - * - * Size | Base 2 | Minimum alignment - * -----+----------+------------------ - * 96 | 1100000 | 32 - * 144 | 10100000 | 32 - * 192 | 11000000 | 64 - */ - usize = ALIGNMENT_CEILING(size, alignment); - /* - * (usize < size) protects against the combination of maximal - * alignment and size greater than maximal alignment. - */ - if (usize < size) { - /* size_t overflow. */ - return (0); - } - - if (usize <= arena_maxclass && alignment <= PAGE) { - if (usize <= SMALL_MAXCLASS) - return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size); - return (PAGE_CEILING(usize)); - } else { - size_t run_size; - - /* - * We can't achieve subpage alignment, so round up alignment - * permanently; it makes later calculations simpler. - */ - alignment = PAGE_CEILING(alignment); - usize = PAGE_CEILING(size); - /* - * (usize < size) protects against very large sizes within - * PAGE of SIZE_T_MAX. - * - * (usize + alignment < usize) protects against the - * combination of maximal alignment and usize large enough - * to cause overflow. This is similar to the first overflow - * check above, but it needs to be repeated due to the new - * usize value, which may now be *equal* to maximal - * alignment, whereas before we only detected overflow if the - * original size was *greater* than maximal alignment. - */ - if (usize < size || usize + alignment < usize) { - /* size_t overflow. */ - return (0); - } - - /* - * Calculate the size of the over-size run that arena_palloc() - * would need to allocate in order to guarantee the alignment. - * If the run wouldn't fit within a chunk, round up to a huge - * allocation size. - */ - run_size = usize + alignment - PAGE; - if (run_size <= arena_maxclass) - return (PAGE_CEILING(usize)); - return (CHUNK_CEILING(usize)); - } -} - -JEMALLOC_INLINE unsigned -narenas_total_get(void) -{ - unsigned narenas; - - malloc_mutex_lock(&arenas_lock); - narenas = narenas_total; - malloc_mutex_unlock(&arenas_lock); - - return (narenas); -} - -/* Choose an arena based on a per-thread value. */ -JEMALLOC_INLINE arena_t * -choose_arena(arena_t *arena) -{ - arena_t *ret; - - if (arena != NULL) - return (arena); - - if ((ret = *arenas_tsd_get()) == NULL) { - ret = choose_arena_hard(); - assert(ret != NULL); - } - - return (ret); -} -#endif - -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/rtree.h" -/* - * Include arena.h twice in order to resolve circular dependencies with - * tcache.h. 
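A quick standalone check of the alignment table in the sa2u() comment above: the alignment guaranteed by a small size class is its lowest set bit (size & -size).

#include <stddef.h>
#include <stdio.h>

int
main(void)
{
	size_t sizes[] = {96, 192};	/* entries from the table above */
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t sz = sizes[i];
		/* lowest set bit: 96 -> 32, 192 -> 64 */
		printf("size %3zu -> minimum alignment %zu\n", sz, sz & -sz);
	}
	return (0);
}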
- */ -#define JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/tcache.h" -#define JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" - -#ifndef JEMALLOC_ENABLE_INLINE -void *imallocx(size_t size, bool try_tcache, arena_t *arena); -void *imalloc(size_t size); -void *icallocx(size_t size, bool try_tcache, arena_t *arena); -void *icalloc(size_t size); -void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena); -void *ipalloc(size_t usize, size_t alignment, bool zero); -size_t isalloc(const void *ptr, bool demote); -size_t ivsalloc(const void *ptr, bool demote); -size_t u2rz(size_t usize); -size_t p2rz(const void *ptr); -void idallocx(void *ptr, bool try_tcache); -void idalloc(void *ptr); -void iqallocx(void *ptr, bool try_tcache); -void iqalloc(void *ptr); -void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment, - bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, - arena_t *arena); -void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, - bool zero, bool no_move); -malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t) -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -JEMALLOC_ALWAYS_INLINE void * -imallocx(size_t size, bool try_tcache, arena_t *arena) -{ - - assert(size != 0); - - if (size <= arena_maxclass) - return (arena_malloc(arena, size, false, try_tcache)); - else - return (huge_malloc(size, false)); -} - -JEMALLOC_ALWAYS_INLINE void * -imalloc(size_t size) -{ - - return (imallocx(size, true, NULL)); -} - -JEMALLOC_ALWAYS_INLINE void * -icallocx(size_t size, bool try_tcache, arena_t *arena) -{ - - if (size <= arena_maxclass) - return (arena_malloc(arena, size, true, try_tcache)); - else - return (huge_malloc(size, true)); -} - -JEMALLOC_ALWAYS_INLINE void * -icalloc(size_t size) -{ - - return (icallocx(size, true, NULL)); -} - -JEMALLOC_ALWAYS_INLINE void * -ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena) -{ - void *ret; - - assert(usize != 0); - assert(usize == sa2u(usize, alignment)); - - if (usize <= arena_maxclass && alignment <= PAGE) - ret = arena_malloc(arena, usize, zero, try_tcache); - else { - if (usize <= arena_maxclass) { - ret = arena_palloc(choose_arena(arena), usize, - alignment, zero); - } else if (alignment <= chunksize) - ret = huge_malloc(usize, zero); - else - ret = huge_palloc(usize, alignment, zero); - } - - assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloc(size_t usize, size_t alignment, bool zero) -{ - - return (ipallocx(usize, alignment, zero, true, NULL)); -} - -/* - * Typical usage: - * void *ptr = [...] - * size_t sz = isalloc(ptr, config_prof); - */ -JEMALLOC_ALWAYS_INLINE size_t -isalloc(const void *ptr, bool demote) -{ - size_t ret; - arena_chunk_t *chunk; - - assert(ptr != NULL); - /* Demotion only makes sense if config_prof is true. */ - assert(config_prof || demote == false); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) - ret = arena_salloc(ptr, demote); - else - ret = huge_salloc(ptr); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE size_t -ivsalloc(const void *ptr, bool demote) -{ - - /* Return 0 if ptr is not within a chunk managed by jemalloc. 
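The chunk-alignment test that isalloc() above relies on, as a standalone sketch with the default 4 MiB chunk size assumed: a huge allocation is returned chunk-aligned, so its chunk base equals the pointer itself, while small and large allocations live inside an arena chunk behind the chunk header.

#include <stdint.h>
#include <stdio.h>

#define CHUNKSIZE_MASK		((uintptr_t)((1UL << 22) - 1))
#define CHUNK_ADDR2BASE(a)	((uintptr_t)(a) & ~CHUNKSIZE_MASK)

static const char *
kind(uintptr_t ptr)
{
	return (CHUNK_ADDR2BASE(ptr) == ptr ?
	    "huge (chunk-aligned)" : "arena-backed");
}

int
main(void)
{
	printf("%#lx -> %s\n", 0xc00000UL, kind(0xc00000));	/* 3 * 4 MiB exactly */
	printf("%#lx -> %s\n", 0xc00040UL, kind(0xc00040));	/* inside a chunk */
	return (0);
}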
*/ - if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL) - return (0); - - return (isalloc(ptr, demote)); -} - -JEMALLOC_INLINE size_t -u2rz(size_t usize) -{ - size_t ret; - - if (usize <= SMALL_MAXCLASS) { - size_t binind = SMALL_SIZE2BIN(usize); - ret = arena_bin_info[binind].redzone_size; - } else - ret = 0; - - return (ret); -} - -JEMALLOC_INLINE size_t -p2rz(const void *ptr) -{ - size_t usize = isalloc(ptr, false); - - return (u2rz(usize)); -} - -JEMALLOC_ALWAYS_INLINE void -idallocx(void *ptr, bool try_tcache) -{ - arena_chunk_t *chunk; - - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) - arena_dalloc(chunk->arena, chunk, ptr, try_tcache); - else - huge_dalloc(ptr, true); -} - -JEMALLOC_ALWAYS_INLINE void -idalloc(void *ptr) -{ - - idallocx(ptr, true); -} - -JEMALLOC_ALWAYS_INLINE void -iqallocx(void *ptr, bool try_tcache) -{ - - if (config_fill && opt_quarantine) - quarantine(ptr); - else - idallocx(ptr, try_tcache); -} - -JEMALLOC_ALWAYS_INLINE void -iqalloc(void *ptr) -{ - - iqallocx(ptr, true); -} - -JEMALLOC_ALWAYS_INLINE void * -irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, - bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena) -{ - void *ret; - size_t oldsize; - - assert(ptr != NULL); - assert(size != 0); - - oldsize = isalloc(ptr, config_prof); - - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { - size_t usize, copysize; - - /* - * Existing object alignment is inadequate; allocate new space - * and copy. - */ - if (no_move) - return (NULL); - usize = sa2u(size + extra, alignment); - if (usize == 0) - return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena); - if (ret == NULL) { - if (extra == 0) - return (NULL); - /* Try again, without extra this time. */ - usize = sa2u(size, alignment); - if (usize == 0) - return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, - arena); - if (ret == NULL) - return (NULL); - } - /* - * Copy at most size bytes (not size+extra), since the caller - * has no expectation that the extra bytes will be reliably - * preserved. - */ - copysize = (size < oldsize) ? 
size : oldsize; - memcpy(ret, ptr, copysize); - iqallocx(ptr, try_tcache_dalloc); - return (ret); - } - - if (no_move) { - if (size <= arena_maxclass) { - return (arena_ralloc_no_move(ptr, oldsize, size, - extra, zero)); - } else { - return (huge_ralloc_no_move(ptr, oldsize, size, - extra)); - } - } else { - if (size + extra <= arena_maxclass) { - return (arena_ralloc(arena, ptr, oldsize, size, extra, - alignment, zero, try_tcache_alloc, - try_tcache_dalloc)); - } else { - return (huge_ralloc(ptr, oldsize, size, extra, - alignment, zero, try_tcache_dalloc)); - } - } -} - -JEMALLOC_ALWAYS_INLINE void * -iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, - bool no_move) -{ - - return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true, - NULL)); -} - -malloc_tsd_externs(thread_allocated, thread_allocated_t) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t, - THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup) -#endif - -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_INLINES -/******************************************************************************/ -#endif /* JEMALLOC_INTERNAL_H */ diff --git a/extra/jemalloc/include/jemalloc/internal/mb.h b/extra/jemalloc/include/jemalloc/internal/mb.h deleted file mode 100644 index 3cfa7872942..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/mb.h +++ /dev/null @@ -1,115 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void mb_write(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) -#ifdef __i386__ -/* - * According to the Intel Architecture Software Developer's Manual, current - * processors execute instructions in order from the perspective of other - * processors in a multiprocessor system, but 1) Intel reserves the right to - * change that, and 2) the compiler's optimizer could re-order instructions if - * there weren't some form of barrier. Therefore, even if running on an - * architecture that does not need memory barriers (everything through at least - * i686), an "optimizer barrier" is necessary. - */ -JEMALLOC_INLINE void -mb_write(void) -{ - -# if 0 - /* This is a true memory barrier. */ - asm volatile ("pusha;" - "xor %%eax,%%eax;" - "cpuid;" - "popa;" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -#else - /* - * This is hopefully enough to keep the compiler from reordering - * instructions around this one. - */ - asm volatile ("nop;" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -#endif -} -#elif (defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("sfence" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__powerpc__) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("eieio" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. 
*/ - ); -} -#elif defined(__sparc64__) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("membar #StoreStore" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__tile__) -JEMALLOC_INLINE void -mb_write(void) -{ - - __sync_synchronize(); -} -#else -/* - * This is much slower than a simple memory barrier, but the semantics of mutex - * unlock make this work. - */ -JEMALLOC_INLINE void -mb_write(void) -{ - malloc_mutex_t mtx; - - malloc_mutex_init(&mtx); - malloc_mutex_lock(&mtx); - malloc_mutex_unlock(&mtx); -} -#endif -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/mutex.h b/extra/jemalloc/include/jemalloc/internal/mutex.h deleted file mode 100644 index de44e1435ad..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/mutex.h +++ /dev/null @@ -1,99 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct malloc_mutex_s malloc_mutex_t; - -#ifdef _WIN32 -# define MALLOC_MUTEX_INITIALIZER -#elif (defined(JEMALLOC_OSSPIN)) -# define MALLOC_MUTEX_INITIALIZER {0} -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} -#else -# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \ - defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} -# else -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} -# endif -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct malloc_mutex_s { -#ifdef _WIN32 - CRITICAL_SECTION lock; -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLock lock; -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) - pthread_mutex_t lock; - malloc_mutex_t *postponed_next; -#else - pthread_mutex_t lock; -#endif -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#ifdef JEMALLOC_LAZY_LOCK -extern bool isthreaded; -#else -# undef isthreaded /* Undo private_namespace.h definition. 
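Relating to the mb_write() discussion above, a standalone sketch contrasting a pure compiler barrier with the hardware store fence emitted on x86-64; assumes GCC/Clang inline asm on x86-64 and is illustrative only:

#include <stdio.h>

/* Stops the compiler from reordering across this point (the role of the "nop" variant above). */
#define compiler_barrier()	__asm__ __volatile__("" ::: "memory")
/* Also orders stores at the CPU level (what mb_write() emits on x86-64). */
#define store_barrier()		__asm__ __volatile__("sfence" ::: "memory")

static int data, ready;

int
main(void)
{
	data = 42;
	store_barrier();	/* publish data before setting the flag */
	ready = 1;
	compiler_barrier();
	printf("data=%d ready=%d\n", data, ready);
	return (0);
}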
*/ -# define isthreaded true -#endif - -bool malloc_mutex_init(malloc_mutex_t *mutex); -void malloc_mutex_prefork(malloc_mutex_t *mutex); -void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); -void malloc_mutex_postfork_child(malloc_mutex_t *mutex); -bool mutex_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void malloc_mutex_lock(malloc_mutex_t *mutex); -void malloc_mutex_unlock(malloc_mutex_t *mutex); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) -JEMALLOC_INLINE void -malloc_mutex_lock(malloc_mutex_t *mutex) -{ - - if (isthreaded) { -#ifdef _WIN32 - EnterCriticalSection(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockLock(&mutex->lock); -#else - pthread_mutex_lock(&mutex->lock); -#endif - } -} - -JEMALLOC_INLINE void -malloc_mutex_unlock(malloc_mutex_t *mutex) -{ - - if (isthreaded) { -#ifdef _WIN32 - LeaveCriticalSection(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockUnlock(&mutex->lock); -#else - pthread_mutex_unlock(&mutex->lock); -#endif - } -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/private_namespace.h b/extra/jemalloc/include/jemalloc/internal/private_namespace.h deleted file mode 100644 index cdb0b0eb1c4..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/private_namespace.h +++ /dev/null @@ -1,392 +0,0 @@ -#define a0calloc JEMALLOC_N(a0calloc) -#define a0free JEMALLOC_N(a0free) -#define a0malloc JEMALLOC_N(a0malloc) -#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) -#define arena_bin_index JEMALLOC_N(arena_bin_index) -#define arena_bin_info JEMALLOC_N(arena_bin_info) -#define arena_boot JEMALLOC_N(arena_boot) -#define arena_dalloc JEMALLOC_N(arena_dalloc) -#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) -#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked) -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) -#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) -#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked) -#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) -#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get) -#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set) -#define arena_malloc JEMALLOC_N(arena_malloc) -#define arena_malloc_large JEMALLOC_N(arena_malloc_large) -#define arena_malloc_small JEMALLOC_N(arena_malloc_small) -#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get) -#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get) -#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get) -#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get) -#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set) -#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get) -#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set) -#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get) -#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get) -#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set) -#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set) -#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get) -#define 
arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set) -#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get) -#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set) -#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get) -#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read) -#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write) -#define arena_mapp_get JEMALLOC_N(arena_mapp_get) -#define arena_maxclass JEMALLOC_N(arena_maxclass) -#define arena_new JEMALLOC_N(arena_new) -#define arena_palloc JEMALLOC_N(arena_palloc) -#define arena_postfork_child JEMALLOC_N(arena_postfork_child) -#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) -#define arena_prefork JEMALLOC_N(arena_prefork) -#define arena_prof_accum JEMALLOC_N(arena_prof_accum) -#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl) -#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked) -#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get) -#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set) -#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) -#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get) -#define arena_purge_all JEMALLOC_N(arena_purge_all) -#define arena_ralloc JEMALLOC_N(arena_ralloc) -#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) -#define arena_run_regind JEMALLOC_N(arena_run_regind) -#define arena_salloc JEMALLOC_N(arena_salloc) -#define arena_stats_merge JEMALLOC_N(arena_stats_merge) -#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) -#define arenas JEMALLOC_N(arenas) -#define arenas_booted JEMALLOC_N(arenas_booted) -#define arenas_cleanup JEMALLOC_N(arenas_cleanup) -#define arenas_extend JEMALLOC_N(arenas_extend) -#define arenas_initialized JEMALLOC_N(arenas_initialized) -#define arenas_lock JEMALLOC_N(arenas_lock) -#define arenas_tls JEMALLOC_N(arenas_tls) -#define arenas_tsd JEMALLOC_N(arenas_tsd) -#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot) -#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper) -#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get) -#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper) -#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set) -#define atomic_add_u JEMALLOC_N(atomic_add_u) -#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32) -#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64) -#define atomic_add_z JEMALLOC_N(atomic_add_z) -#define atomic_sub_u JEMALLOC_N(atomic_sub_u) -#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32) -#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64) -#define atomic_sub_z JEMALLOC_N(atomic_sub_z) -#define base_alloc JEMALLOC_N(base_alloc) -#define base_boot JEMALLOC_N(base_boot) -#define base_calloc JEMALLOC_N(base_calloc) -#define base_node_alloc JEMALLOC_N(base_node_alloc) -#define base_node_dealloc JEMALLOC_N(base_node_dealloc) -#define base_postfork_child JEMALLOC_N(base_postfork_child) -#define base_postfork_parent JEMALLOC_N(base_postfork_parent) -#define base_prefork JEMALLOC_N(base_prefork) -#define bitmap_full JEMALLOC_N(bitmap_full) -#define bitmap_get JEMALLOC_N(bitmap_get) -#define bitmap_info_init JEMALLOC_N(bitmap_info_init) -#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups) -#define bitmap_init JEMALLOC_N(bitmap_init) -#define bitmap_set JEMALLOC_N(bitmap_set) -#define bitmap_sfu JEMALLOC_N(bitmap_sfu) -#define bitmap_size JEMALLOC_N(bitmap_size) -#define bitmap_unset JEMALLOC_N(bitmap_unset) -#define 
bt_init JEMALLOC_N(bt_init) -#define buferror JEMALLOC_N(buferror) -#define choose_arena JEMALLOC_N(choose_arena) -#define choose_arena_hard JEMALLOC_N(choose_arena_hard) -#define chunk_alloc JEMALLOC_N(chunk_alloc) -#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss) -#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap) -#define chunk_boot JEMALLOC_N(chunk_boot) -#define chunk_dealloc JEMALLOC_N(chunk_dealloc) -#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap) -#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) -#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child) -#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent) -#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get) -#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set) -#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork) -#define chunk_in_dss JEMALLOC_N(chunk_in_dss) -#define chunk_npages JEMALLOC_N(chunk_npages) -#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child) -#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent) -#define chunk_prefork JEMALLOC_N(chunk_prefork) -#define chunk_unmap JEMALLOC_N(chunk_unmap) -#define chunks_mtx JEMALLOC_N(chunks_mtx) -#define chunks_rtree JEMALLOC_N(chunks_rtree) -#define chunksize JEMALLOC_N(chunksize) -#define chunksize_mask JEMALLOC_N(chunksize_mask) -#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search) -#define ckh_count JEMALLOC_N(ckh_count) -#define ckh_delete JEMALLOC_N(ckh_delete) -#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert) -#define ckh_insert JEMALLOC_N(ckh_insert) -#define ckh_isearch JEMALLOC_N(ckh_isearch) -#define ckh_iter JEMALLOC_N(ckh_iter) -#define ckh_new JEMALLOC_N(ckh_new) -#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash) -#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp) -#define ckh_rebuild JEMALLOC_N(ckh_rebuild) -#define ckh_remove JEMALLOC_N(ckh_remove) -#define ckh_search JEMALLOC_N(ckh_search) -#define ckh_string_hash JEMALLOC_N(ckh_string_hash) -#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) -#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert) -#define ckh_try_insert JEMALLOC_N(ckh_try_insert) -#define ctl_boot JEMALLOC_N(ctl_boot) -#define ctl_bymib JEMALLOC_N(ctl_bymib) -#define ctl_byname JEMALLOC_N(ctl_byname) -#define ctl_nametomib JEMALLOC_N(ctl_nametomib) -#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child) -#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent) -#define ctl_prefork JEMALLOC_N(ctl_prefork) -#define dss_prec_names JEMALLOC_N(dss_prec_names) -#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) -#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) -#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) -#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse) -#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start) -#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last) -#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new) -#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next) -#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch) -#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev) -#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch) -#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove) -#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter) -#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse) -#define 
extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start) -#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search) -#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first) -#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert) -#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter) -#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse) -#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start) -#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last) -#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new) -#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next) -#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch) -#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev) -#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch) -#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove) -#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter) -#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse) -#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start) -#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search) -#define get_errno JEMALLOC_N(get_errno) -#define hash JEMALLOC_N(hash) -#define hash_fmix_32 JEMALLOC_N(hash_fmix_32) -#define hash_fmix_64 JEMALLOC_N(hash_fmix_64) -#define hash_get_block_32 JEMALLOC_N(hash_get_block_32) -#define hash_get_block_64 JEMALLOC_N(hash_get_block_64) -#define hash_rotl_32 JEMALLOC_N(hash_rotl_32) -#define hash_rotl_64 JEMALLOC_N(hash_rotl_64) -#define hash_x64_128 JEMALLOC_N(hash_x64_128) -#define hash_x86_128 JEMALLOC_N(hash_x86_128) -#define hash_x86_32 JEMALLOC_N(hash_x86_32) -#define huge_allocated JEMALLOC_N(huge_allocated) -#define huge_boot JEMALLOC_N(huge_boot) -#define huge_dalloc JEMALLOC_N(huge_dalloc) -#define huge_malloc JEMALLOC_N(huge_malloc) -#define huge_mtx JEMALLOC_N(huge_mtx) -#define huge_ndalloc JEMALLOC_N(huge_ndalloc) -#define huge_nmalloc JEMALLOC_N(huge_nmalloc) -#define huge_palloc JEMALLOC_N(huge_palloc) -#define huge_postfork_child JEMALLOC_N(huge_postfork_child) -#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent) -#define huge_prefork JEMALLOC_N(huge_prefork) -#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get) -#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set) -#define huge_ralloc JEMALLOC_N(huge_ralloc) -#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move) -#define huge_salloc JEMALLOC_N(huge_salloc) -#define iallocm JEMALLOC_N(iallocm) -#define icalloc JEMALLOC_N(icalloc) -#define icallocx JEMALLOC_N(icallocx) -#define idalloc JEMALLOC_N(idalloc) -#define idallocx JEMALLOC_N(idallocx) -#define imalloc JEMALLOC_N(imalloc) -#define imallocx JEMALLOC_N(imallocx) -#define ipalloc JEMALLOC_N(ipalloc) -#define ipallocx JEMALLOC_N(ipallocx) -#define iqalloc JEMALLOC_N(iqalloc) -#define iqallocx JEMALLOC_N(iqallocx) -#define iralloc JEMALLOC_N(iralloc) -#define irallocx JEMALLOC_N(irallocx) -#define isalloc JEMALLOC_N(isalloc) -#define isthreaded JEMALLOC_N(isthreaded) -#define ivsalloc JEMALLOC_N(ivsalloc) -#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) -#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent) -#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork) -#define malloc_cprintf JEMALLOC_N(malloc_cprintf) -#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init) -#define 
malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock) -#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child) -#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent) -#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork) -#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock) -#define malloc_printf JEMALLOC_N(malloc_printf) -#define malloc_snprintf JEMALLOC_N(malloc_snprintf) -#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax) -#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot) -#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register) -#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc) -#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc) -#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup) -#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf) -#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) -#define malloc_write JEMALLOC_N(malloc_write) -#define map_bias JEMALLOC_N(map_bias) -#define mb_write JEMALLOC_N(mb_write) -#define mutex_boot JEMALLOC_N(mutex_boot) -#define narenas_auto JEMALLOC_N(narenas_auto) -#define narenas_total JEMALLOC_N(narenas_total) -#define narenas_total_get JEMALLOC_N(narenas_total_get) -#define ncpus JEMALLOC_N(ncpus) -#define nhbins JEMALLOC_N(nhbins) -#define opt_abort JEMALLOC_N(opt_abort) -#define opt_junk JEMALLOC_N(opt_junk) -#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk) -#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult) -#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) -#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample) -#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max) -#define opt_narenas JEMALLOC_N(opt_narenas) -#define opt_prof JEMALLOC_N(opt_prof) -#define opt_prof_accum JEMALLOC_N(opt_prof_accum) -#define opt_prof_active JEMALLOC_N(opt_prof_active) -#define opt_prof_final JEMALLOC_N(opt_prof_final) -#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) -#define opt_prof_leak JEMALLOC_N(opt_prof_leak) -#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix) -#define opt_quarantine JEMALLOC_N(opt_quarantine) -#define opt_redzone JEMALLOC_N(opt_redzone) -#define opt_stats_print JEMALLOC_N(opt_stats_print) -#define opt_tcache JEMALLOC_N(opt_tcache) -#define opt_utrace JEMALLOC_N(opt_utrace) -#define opt_valgrind JEMALLOC_N(opt_valgrind) -#define opt_xmalloc JEMALLOC_N(opt_xmalloc) -#define opt_zero JEMALLOC_N(opt_zero) -#define p2rz JEMALLOC_N(p2rz) -#define pages_purge JEMALLOC_N(pages_purge) -#define pow2_ceil JEMALLOC_N(pow2_ceil) -#define prof_backtrace JEMALLOC_N(prof_backtrace) -#define prof_boot0 JEMALLOC_N(prof_boot0) -#define prof_boot1 JEMALLOC_N(prof_boot1) -#define prof_boot2 JEMALLOC_N(prof_boot2) -#define prof_ctx_get JEMALLOC_N(prof_ctx_get) -#define prof_ctx_set JEMALLOC_N(prof_ctx_set) -#define prof_free JEMALLOC_N(prof_free) -#define prof_gdump JEMALLOC_N(prof_gdump) -#define prof_idump JEMALLOC_N(prof_idump) -#define prof_interval JEMALLOC_N(prof_interval) -#define prof_lookup JEMALLOC_N(prof_lookup) -#define prof_malloc JEMALLOC_N(prof_malloc) -#define prof_mdump JEMALLOC_N(prof_mdump) -#define prof_postfork_child JEMALLOC_N(prof_postfork_child) -#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent) -#define prof_prefork JEMALLOC_N(prof_prefork) -#define prof_promote JEMALLOC_N(prof_promote) -#define prof_realloc JEMALLOC_N(prof_realloc) -#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) -#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) -#define prof_tdata_booted 
JEMALLOC_N(prof_tdata_booted) -#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup) -#define prof_tdata_get JEMALLOC_N(prof_tdata_get) -#define prof_tdata_init JEMALLOC_N(prof_tdata_init) -#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized) -#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls) -#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd) -#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot) -#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper) -#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get) -#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper) -#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set) -#define quarantine JEMALLOC_N(quarantine) -#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook) -#define quarantine_boot JEMALLOC_N(quarantine_boot) -#define quarantine_booted JEMALLOC_N(quarantine_booted) -#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup) -#define quarantine_init JEMALLOC_N(quarantine_init) -#define quarantine_tls JEMALLOC_N(quarantine_tls) -#define quarantine_tsd JEMALLOC_N(quarantine_tsd) -#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot) -#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper) -#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get) -#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper) -#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set) -#define register_zone JEMALLOC_N(register_zone) -#define rtree_get JEMALLOC_N(rtree_get) -#define rtree_get_locked JEMALLOC_N(rtree_get_locked) -#define rtree_new JEMALLOC_N(rtree_new) -#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child) -#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent) -#define rtree_prefork JEMALLOC_N(rtree_prefork) -#define rtree_set JEMALLOC_N(rtree_set) -#define s2u JEMALLOC_N(s2u) -#define sa2u JEMALLOC_N(sa2u) -#define set_errno JEMALLOC_N(set_errno) -#define stats_cactive JEMALLOC_N(stats_cactive) -#define stats_cactive_add JEMALLOC_N(stats_cactive_add) -#define stats_cactive_get JEMALLOC_N(stats_cactive_get) -#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub) -#define stats_chunks JEMALLOC_N(stats_chunks) -#define stats_print JEMALLOC_N(stats_print) -#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy) -#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large) -#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small) -#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) -#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate) -#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate) -#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) -#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small) -#define tcache_bin_info JEMALLOC_N(tcache_bin_info) -#define tcache_boot0 JEMALLOC_N(tcache_boot0) -#define tcache_boot1 JEMALLOC_N(tcache_boot1) -#define tcache_booted JEMALLOC_N(tcache_booted) -#define tcache_create JEMALLOC_N(tcache_create) -#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large) -#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small) -#define tcache_destroy JEMALLOC_N(tcache_destroy) -#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted) -#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get) -#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized) -#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set) -#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls) -#define 
tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd) -#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot) -#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper) -#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get) -#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper) -#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set) -#define tcache_event JEMALLOC_N(tcache_event) -#define tcache_event_hard JEMALLOC_N(tcache_event_hard) -#define tcache_flush JEMALLOC_N(tcache_flush) -#define tcache_get JEMALLOC_N(tcache_get) -#define tcache_initialized JEMALLOC_N(tcache_initialized) -#define tcache_maxclass JEMALLOC_N(tcache_maxclass) -#define tcache_salloc JEMALLOC_N(tcache_salloc) -#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) -#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup) -#define tcache_tls JEMALLOC_N(tcache_tls) -#define tcache_tsd JEMALLOC_N(tcache_tsd) -#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot) -#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper) -#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get) -#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper) -#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set) -#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted) -#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized) -#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls) -#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd) -#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot) -#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper) -#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get) -#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper) -#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set) -#define u2rz JEMALLOC_N(u2rz) diff --git a/extra/jemalloc/include/jemalloc/internal/prng.h b/extra/jemalloc/include/jemalloc/internal/prng.h deleted file mode 100644 index 83a5462b4dd..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/prng.h +++ /dev/null @@ -1,60 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * Simple linear congruential pseudo-random number generator: - * - * prng(y) = (a*x + c) % m - * - * where the following constants ensure maximal period: - * - * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. - * c == Odd number (relatively prime to 2^n). - * m == 2^32 - * - * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. - * - * This choice of m has the disadvantage that the quality of the bits is - * proportional to bit position. For example. the lowest bit has a cycle of 2, - * the next has a cycle of 4, etc. For this reason, we prefer to use the upper - * bits. - * - * Macro parameters: - * uint32_t r : Result. - * unsigned lg_range : (0..32], number of least significant bits to return. - * uint32_t state : Seed value. - * const uint32_t a, c : See above discussion. - */ -#define prng32(r, lg_range, state, a, c) do { \ - assert(lg_range > 0); \ - assert(lg_range <= 32); \ - \ - r = (state * (a)) + (c); \ - state = r; \ - r >>= (32 - lg_range); \ -} while (false) - -/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. 
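As a stand-alone illustration of the linear congruential scheme that prng32() above encodes, the sketch below advances the state as a*state + c mod 2^32 and returns only the upper lg_range bits, since the low-order bits have short cycles. The constants and the function name are example values chosen to satisfy the stated constraints (a odd with a-1 a multiple of 4, c odd); they are not the values jemalloc's call sites pass in.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-alone version of the prng32() idea: state = a*state + c (mod 2^32),
 * returning only the upper lg_range bits.  Constants are illustrative. */
static uint32_t
example_prng32(uint32_t *state, unsigned lg_range)
{
	const uint32_t a = 1103515245U;  /* odd, a-1 divisible by 4 */
	const uint32_t c = 12345U;       /* odd */
	uint32_t r;

	assert(lg_range > 0 && lg_range <= 32);
	r = (*state * a) + c;            /* wraps mod 2^32 */
	*state = r;
	return (r >> (32 - lg_range));   /* keep the higher-quality upper bits */
}

int
main(void)
{
	uint32_t state = 42;
	for (int i = 0; i < 4; i++)
		printf("%u\n", example_prng32(&state, 16));
	return 0;
}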
*/ -#define prng64(r, lg_range, state, a, c) do { \ - assert(lg_range > 0); \ - assert(lg_range <= 64); \ - \ - r = (state * (a)) + (c); \ - state = r; \ - r >>= (64 - lg_range); \ -} while (false) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/prof.h b/extra/jemalloc/include/jemalloc/internal/prof.h deleted file mode 100644 index 119a5b1bcb7..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/prof.h +++ /dev/null @@ -1,579 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct prof_bt_s prof_bt_t; -typedef struct prof_cnt_s prof_cnt_t; -typedef struct prof_thr_cnt_s prof_thr_cnt_t; -typedef struct prof_ctx_s prof_ctx_t; -typedef struct prof_tdata_s prof_tdata_t; - -/* Option defaults. */ -#define PROF_PREFIX_DEFAULT "jeprof" -#define LG_PROF_SAMPLE_DEFAULT 19 -#define LG_PROF_INTERVAL_DEFAULT -1 - -/* - * Hard limit on stack backtrace depth. The version of prof_backtrace() that - * is based on __builtin_return_address() necessarily has a hard-coded number - * of backtrace frame handlers, and should be kept in sync with this setting. - */ -#define PROF_BT_MAX 128 - -/* Maximum number of backtraces to store in each per thread LRU cache. */ -#define PROF_TCMAX 1024 - -/* Initial hash table size. */ -#define PROF_CKH_MINITEMS 64 - -/* Size of memory buffer to use when writing dump files. */ -#define PROF_DUMP_BUFSIZE 65536 - -/* Size of stack-allocated buffer used by prof_printf(). */ -#define PROF_PRINTF_BUFSIZE 128 - -/* - * Number of mutexes shared among all ctx's. No space is allocated for these - * unless profiling is enabled, so it's okay to over-provision. - */ -#define PROF_NCTX_LOCKS 1024 - -/* - * prof_tdata pointers close to NULL are used to encode state information that - * is used for cleaning up during thread shutdown. - */ -#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) -#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) -#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct prof_bt_s { - /* Backtrace, stored as len program counters. */ - void **vec; - unsigned len; -}; - -#ifdef JEMALLOC_PROF_LIBGCC -/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ -typedef struct { - prof_bt_t *bt; - unsigned nignore; - unsigned max; -} prof_unwind_data_t; -#endif - -struct prof_cnt_s { - /* - * Profiling counters. An allocation/deallocation pair can operate on - * different prof_thr_cnt_t objects that are linked into the same - * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go - * negative. In principle it is possible for the *bytes counters to - * overflow/underflow, but a general solution would require something - * like 128-bit counters; this implementation doesn't bother to solve - * that problem. 
- */ - int64_t curobjs; - int64_t curbytes; - uint64_t accumobjs; - uint64_t accumbytes; -}; - -struct prof_thr_cnt_s { - /* Linkage into prof_ctx_t's cnts_ql. */ - ql_elm(prof_thr_cnt_t) cnts_link; - - /* Linkage into thread's LRU. */ - ql_elm(prof_thr_cnt_t) lru_link; - - /* - * Associated context. If a thread frees an object that it did not - * allocate, it is possible that the context is not cached in the - * thread's hash table, in which case it must be able to look up the - * context, insert a new prof_thr_cnt_t into the thread's hash table, - * and link it into the prof_ctx_t's cnts_ql. - */ - prof_ctx_t *ctx; - - /* - * Threads use memory barriers to update the counters. Since there is - * only ever one writer, the only challenge is for the reader to get a - * consistent read of the counters. - * - * The writer uses this series of operations: - * - * 1) Increment epoch to an odd number. - * 2) Update counters. - * 3) Increment epoch to an even number. - * - * The reader must assure 1) that the epoch is even while it reads the - * counters, and 2) that the epoch doesn't change between the time it - * starts and finishes reading the counters. - */ - unsigned epoch; - - /* Profiling counters. */ - prof_cnt_t cnts; -}; - -struct prof_ctx_s { - /* Associated backtrace. */ - prof_bt_t *bt; - - /* Protects nlimbo, cnt_merged, and cnts_ql. */ - malloc_mutex_t *lock; - - /* - * Number of threads that currently cause this ctx to be in a state of - * limbo due to one of: - * - Initializing per thread counters associated with this ctx. - * - Preparing to destroy this ctx. - * nlimbo must be 1 (single destroyer) in order to safely destroy the - * ctx. - */ - unsigned nlimbo; - - /* Temporary storage for summation during dump. */ - prof_cnt_t cnt_summed; - - /* When threads exit, they merge their stats into cnt_merged. */ - prof_cnt_t cnt_merged; - - /* - * List of profile counters, one for each thread that has allocated in - * this context. - */ - ql_head(prof_thr_cnt_t) cnts_ql; -}; - -struct prof_tdata_s { - /* - * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a - * cache of backtraces, with associated thread-specific prof_thr_cnt_t - * objects. Other threads may read the prof_thr_cnt_t contents, but no - * others will ever write them. - * - * Upon thread exit, the thread must merge all the prof_thr_cnt_t - * counter data into the associated prof_ctx_t objects, and unlink/free - * the prof_thr_cnt_t objects. - */ - ckh_t bt2cnt; - - /* LRU for contents of bt2cnt. */ - ql_head(prof_thr_cnt_t) lru_ql; - - /* Backtrace vector, used for calls to prof_backtrace(). */ - void **vec; - - /* Sampling state. */ - uint64_t prng_state; - uint64_t threshold; - uint64_t accum; - - /* State used to avoid dumping while operating on prof internals. */ - bool enq; - bool enq_idump; - bool enq_gdump; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_prof; -/* - * Even if opt_prof is true, sampling can be temporarily disabled by setting - * opt_prof_active to false. No locking is used when updating opt_prof_active, - * so there are no guarantees regarding how long it will take for all threads - * to notice state changes. - */ -extern bool opt_prof_active; -extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ -extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ -extern bool opt_prof_gdump; /* High-water memory dumping. 
*/ -extern bool opt_prof_final; /* Final profile dumping. */ -extern bool opt_prof_leak; /* Dump leak summary at exit. */ -extern bool opt_prof_accum; /* Report cumulative bytes. */ -extern char opt_prof_prefix[PATH_MAX + 1]; - -/* - * Profile dump interval, measured in bytes allocated. Each arena triggers a - * profile dump when it reaches this threshold. The effect is that the - * interval between profile dumps averages prof_interval, though the actual - * interval between dumps will tend to be sporadic, and the interval will be a - * maximum of approximately (prof_interval * narenas). - */ -extern uint64_t prof_interval; - -/* - * If true, promote small sampled objects to large objects, since small run - * headers do not have embedded profile context pointers. - */ -extern bool prof_promote; - -void bt_init(prof_bt_t *bt, void **vec); -void prof_backtrace(prof_bt_t *bt, unsigned nignore); -prof_thr_cnt_t *prof_lookup(prof_bt_t *bt); -void prof_idump(void); -bool prof_mdump(const char *filename); -void prof_gdump(void); -prof_tdata_t *prof_tdata_init(void); -void prof_tdata_cleanup(void *arg); -void prof_boot0(void); -void prof_boot1(void); -bool prof_boot2(void); -void prof_prefork(void); -void prof_postfork_parent(void); -void prof_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#define PROF_ALLOC_PREP(nignore, size, ret) do { \ - prof_tdata_t *prof_tdata; \ - prof_bt_t bt; \ - \ - assert(size == s2u(size)); \ - \ - prof_tdata = prof_tdata_get(true); \ - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \ - if (prof_tdata != NULL) \ - ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ - else \ - ret = NULL; \ - break; \ - } \ - \ - if (opt_prof_active == false) { \ - /* Sampling is currently inactive, so avoid sampling. */\ - ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ - } else if (opt_lg_prof_sample == 0) { \ - /* Don't bother with sampling logic, since sampling */\ - /* interval is 1. */\ - bt_init(&bt, prof_tdata->vec); \ - prof_backtrace(&bt, nignore); \ - ret = prof_lookup(&bt); \ - } else { \ - if (prof_tdata->threshold == 0) { \ - /* Initialize. Seed the prng differently for */\ - /* each thread. */\ - prof_tdata->prng_state = \ - (uint64_t)(uintptr_t)&size; \ - prof_sample_threshold_update(prof_tdata); \ - } \ - \ - /* Determine whether to capture a backtrace based on */\ - /* whether size is enough for prof_accum to reach */\ - /* prof_tdata->threshold. However, delay updating */\ - /* these variables until prof_{m,re}alloc(), because */\ - /* we don't know for sure that the allocation will */\ - /* succeed. */\ - /* */\ - /* Use subtraction rather than addition to avoid */\ - /* potential integer overflow. 
*/\ - if (size >= prof_tdata->threshold - \ - prof_tdata->accum) { \ - bt_init(&bt, prof_tdata->vec); \ - prof_backtrace(&bt, nignore); \ - ret = prof_lookup(&bt); \ - } else \ - ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ - } \ -} while (0) - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *) - -prof_tdata_t *prof_tdata_get(bool create); -void prof_sample_threshold_update(prof_tdata_t *prof_tdata); -prof_ctx_t *prof_ctx_get(const void *ptr); -void prof_ctx_set(const void *ptr, prof_ctx_t *ctx); -bool prof_sample_accum_update(size_t size); -void prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt); -void prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, - size_t old_size, prof_ctx_t *old_ctx); -void prof_free(const void *ptr, size_t size); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) -/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */ -malloc_tsd_externs(prof_tdata, prof_tdata_t *) -malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL, - prof_tdata_cleanup) - -JEMALLOC_INLINE prof_tdata_t * -prof_tdata_get(bool create) -{ - prof_tdata_t *prof_tdata; - - cassert(config_prof); - - prof_tdata = *prof_tdata_tsd_get(); - if (create && prof_tdata == NULL) - prof_tdata = prof_tdata_init(); - - return (prof_tdata); -} - -JEMALLOC_INLINE void -prof_sample_threshold_update(prof_tdata_t *prof_tdata) -{ - uint64_t r; - double u; - - cassert(config_prof); - - /* - * Compute sample threshold as a geometrically distributed random - * variable with mean (2^opt_lg_prof_sample). - * - * __ __ - * | log(u) | 1 - * prof_tdata->threshold = | -------- |, where p = ------------------- - * | log(1-p) | opt_lg_prof_sample - * 2 - * - * For more information on the math, see: - * - * Non-Uniform Random Variate Generation - * Luc Devroye - * Springer-Verlag, New York, 1986 - * pp 500 - * (http://cg.scs.carleton.ca/~luc/rnbookindex.html) - */ - prng64(r, 53, prof_tdata->prng_state, - UINT64_C(6364136223846793005), UINT64_C(1442695040888963407)); - u = (double)r * (1.0/9007199254740992.0L); - prof_tdata->threshold = (uint64_t)(log(u) / - log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample)))) - + (uint64_t)1U; -} - -JEMALLOC_INLINE prof_ctx_t * -prof_ctx_get(const void *ptr) -{ - prof_ctx_t *ret; - arena_chunk_t *chunk; - - cassert(config_prof); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) { - /* Region. */ - ret = arena_prof_ctx_get(ptr); - } else - ret = huge_prof_ctx_get(ptr); - - return (ret); -} - -JEMALLOC_INLINE void -prof_ctx_set(const void *ptr, prof_ctx_t *ctx) -{ - arena_chunk_t *chunk; - - cassert(config_prof); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) { - /* Region. */ - arena_prof_ctx_set(ptr, ctx); - } else - huge_prof_ctx_set(ptr, ctx); -} - -JEMALLOC_INLINE bool -prof_sample_accum_update(size_t size) -{ - prof_tdata_t *prof_tdata; - - cassert(config_prof); - /* Sampling logic is unnecessary if the interval is 1. */ - assert(opt_lg_prof_sample != 0); - - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (true); - - /* Take care to avoid integer overflow. */ - if (size >= prof_tdata->threshold - prof_tdata->accum) { - prof_tdata->accum -= (prof_tdata->threshold - size); - /* Compute new sample threshold. 
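The threshold computation in prof_sample_threshold_update() above draws a geometrically distributed byte count with mean 2^opt_lg_prof_sample, using the ceil(log(u)/log(1-p)) formula quoted in its comment. A minimal sketch of the same formula is shown below; a plain rand()-based uniform draw stands in for jemalloc's prng64(), and the function name is invented for the example.

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Next sampling threshold: geometric with p = 1/2^lg_sample, so the
 * mean distance between samples is 2^lg_sample bytes. */
static uint64_t
example_sample_threshold(unsigned lg_sample)
{
	double p = 1.0 / (double)((uint64_t)1U << lg_sample);
	/* Uniform u strictly inside (0,1) so log(u) stays finite. */
	double u = ((double)rand() + 1.0) / ((double)RAND_MAX + 2.0);

	return (uint64_t)(log(u) / log(1.0 - p)) + 1;
}

int
main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%llu\n",
		    (unsigned long long)example_sample_threshold(19));
	return 0;
}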
*/ - prof_sample_threshold_update(prof_tdata); - while (prof_tdata->accum >= prof_tdata->threshold) { - prof_tdata->accum -= prof_tdata->threshold; - prof_sample_threshold_update(prof_tdata); - } - return (false); - } else { - prof_tdata->accum += size; - return (true); - } -} - -JEMALLOC_INLINE void -prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt) -{ - - cassert(config_prof); - assert(ptr != NULL); - assert(size == isalloc(ptr, true)); - - if (opt_lg_prof_sample != 0) { - if (prof_sample_accum_update(size)) { - /* - * Don't sample. For malloc()-like allocation, it is - * always possible to tell in advance how large an - * object's usable size will be, so there should never - * be a difference between the size passed to - * PROF_ALLOC_PREP() and prof_malloc(). - */ - assert((uintptr_t)cnt == (uintptr_t)1U); - } - } - - if ((uintptr_t)cnt > (uintptr_t)1U) { - prof_ctx_set(ptr, cnt->ctx); - - cnt->epoch++; - /*********/ - mb_write(); - /*********/ - cnt->cnts.curobjs++; - cnt->cnts.curbytes += size; - if (opt_prof_accum) { - cnt->cnts.accumobjs++; - cnt->cnts.accumbytes += size; - } - /*********/ - mb_write(); - /*********/ - cnt->epoch++; - /*********/ - mb_write(); - /*********/ - } else - prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U); -} - -JEMALLOC_INLINE void -prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, - size_t old_size, prof_ctx_t *old_ctx) -{ - prof_thr_cnt_t *told_cnt; - - cassert(config_prof); - assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U); - - if (ptr != NULL) { - assert(size == isalloc(ptr, true)); - if (opt_lg_prof_sample != 0) { - if (prof_sample_accum_update(size)) { - /* - * Don't sample. The size passed to - * PROF_ALLOC_PREP() was larger than what - * actually got allocated, so a backtrace was - * captured for this allocation, even though - * its actual size was insufficient to cross - * the sample threshold. - */ - cnt = (prof_thr_cnt_t *)(uintptr_t)1U; - } - } - } - - if ((uintptr_t)old_ctx > (uintptr_t)1U) { - told_cnt = prof_lookup(old_ctx->bt); - if (told_cnt == NULL) { - /* - * It's too late to propagate OOM for this realloc(), - * so operate directly on old_cnt->ctx->cnt_merged. - */ - malloc_mutex_lock(old_ctx->lock); - old_ctx->cnt_merged.curobjs--; - old_ctx->cnt_merged.curbytes -= old_size; - malloc_mutex_unlock(old_ctx->lock); - told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; - } - } else - told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; - - if ((uintptr_t)told_cnt > (uintptr_t)1U) - told_cnt->epoch++; - if ((uintptr_t)cnt > (uintptr_t)1U) { - prof_ctx_set(ptr, cnt->ctx); - cnt->epoch++; - } else if (ptr != NULL) - prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U); - /*********/ - mb_write(); - /*********/ - if ((uintptr_t)told_cnt > (uintptr_t)1U) { - told_cnt->cnts.curobjs--; - told_cnt->cnts.curbytes -= old_size; - } - if ((uintptr_t)cnt > (uintptr_t)1U) { - cnt->cnts.curobjs++; - cnt->cnts.curbytes += size; - if (opt_prof_accum) { - cnt->cnts.accumobjs++; - cnt->cnts.accumbytes += size; - } - } - /*********/ - mb_write(); - /*********/ - if ((uintptr_t)told_cnt > (uintptr_t)1U) - told_cnt->epoch++; - if ((uintptr_t)cnt > (uintptr_t)1U) - cnt->epoch++; - /*********/ - mb_write(); /* Not strictly necessary. 
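The epoch increments bracketed by mb_write() in prof_malloc() and prof_realloc() above are the writer half of the protocol described for prof_thr_cnt_s: make the epoch odd, update the counters, make it even again. A hedged sketch of the matching reader side follows; the reader is not part of this header, and the type name, volatile qualifiers, and __sync_synchronize() barrier are illustrative assumptions standing in for jemalloc's own primitives.

#include <stdint.h>

typedef struct {
	volatile unsigned epoch;
	volatile int64_t curobjs;
	volatile int64_t curbytes;
} cnt_t;

#define mb() __sync_synchronize()   /* full barrier (GCC/Clang builtin) */

/* Retry until the epoch is even and unchanged across the copy, so the
 * counters were not being updated while they were read. */
static void
cnt_read(const cnt_t *c, int64_t *objs, int64_t *bytes)
{
	unsigned e0, e1;

	do {
		e0 = c->epoch;
		mb();
		*objs = c->curobjs;
		*bytes = c->curbytes;
		mb();
		e1 = c->epoch;
	} while ((e0 & 1) != 0 || e0 != e1);
}

int
main(void)
{
	cnt_t c = {0, 0, 0};
	int64_t objs, bytes;

	cnt_read(&c, &objs, &bytes);
	return (int)(objs + bytes);
}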
*/ -} - -JEMALLOC_INLINE void -prof_free(const void *ptr, size_t size) -{ - prof_ctx_t *ctx = prof_ctx_get(ptr); - - cassert(config_prof); - - if ((uintptr_t)ctx > (uintptr_t)1) { - prof_thr_cnt_t *tcnt; - assert(size == isalloc(ptr, true)); - tcnt = prof_lookup(ctx->bt); - - if (tcnt != NULL) { - tcnt->epoch++; - /*********/ - mb_write(); - /*********/ - tcnt->cnts.curobjs--; - tcnt->cnts.curbytes -= size; - /*********/ - mb_write(); - /*********/ - tcnt->epoch++; - /*********/ - mb_write(); - /*********/ - } else { - /* - * OOM during free() cannot be propagated, so operate - * directly on cnt->ctx->cnt_merged. - */ - malloc_mutex_lock(ctx->lock); - ctx->cnt_merged.curobjs--; - ctx->cnt_merged.curbytes -= size; - malloc_mutex_unlock(ctx->lock); - } - } -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/ql.h b/extra/jemalloc/include/jemalloc/internal/ql.h deleted file mode 100644 index a9ed2393f0c..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/ql.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * List definitions. - */ -#define ql_head(a_type) \ -struct { \ - a_type *qlh_first; \ -} - -#define ql_head_initializer(a_head) {NULL} - -#define ql_elm(a_type) qr(a_type) - -/* List functions. */ -#define ql_new(a_head) do { \ - (a_head)->qlh_first = NULL; \ -} while (0) - -#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) - -#define ql_first(a_head) ((a_head)->qlh_first) - -#define ql_last(a_head, a_field) \ - ((ql_first(a_head) != NULL) \ - ? qr_prev(ql_first(a_head), a_field) : NULL) - -#define ql_next(a_head, a_elm, a_field) \ - ((ql_last(a_head, a_field) != (a_elm)) \ - ? qr_next((a_elm), a_field) : NULL) - -#define ql_prev(a_head, a_elm, a_field) \ - ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ - : NULL) - -#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ - qr_before_insert((a_qlelm), (a_elm), a_field); \ - if (ql_first(a_head) == (a_qlelm)) { \ - ql_first(a_head) = (a_elm); \ - } \ -} while (0) - -#define ql_after_insert(a_qlelm, a_elm, a_field) \ - qr_after_insert((a_qlelm), (a_elm), a_field) - -#define ql_head_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = (a_elm); \ -} while (0) - -#define ql_tail_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = qr_next((a_elm), a_field); \ -} while (0) - -#define ql_remove(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) == (a_elm)) { \ - ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ - } \ - if (ql_first(a_head) != (a_elm)) { \ - qr_remove((a_elm), a_field); \ - } else { \ - ql_first(a_head) = NULL; \ - } \ -} while (0) - -#define ql_head_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_first(a_head); \ - ql_remove((a_head), t, a_field); \ -} while (0) - -#define ql_tail_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_last(a_head, a_field); \ - ql_remove((a_head), t, a_field); \ -} while (0) - -#define ql_foreach(a_var, a_head, a_field) \ - qr_foreach((a_var), ql_first(a_head), a_field) - -#define ql_reverse_foreach(a_var, a_head, a_field) \ - qr_reverse_foreach((a_var), ql_first(a_head), a_field) diff --git a/extra/jemalloc/include/jemalloc/internal/qr.h b/extra/jemalloc/include/jemalloc/internal/qr.h deleted file mode 100644 index fe22352fedd..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/qr.h +++ /dev/null @@ -1,67 +0,0 @@ -/* Ring definitions. */ -#define qr(a_type) \ -struct { \ - a_type *qre_next; \ - a_type *qre_prev; \ -} - -/* Ring functions. */ -#define qr_new(a_qr, a_field) do { \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) - -#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) - -#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qrelm); \ - (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ - (a_qrelm)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_after_insert(a_qrelm, a_qr, a_field) \ - do \ - { \ - (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ - (a_qr)->a_field.qre_prev = (a_qrelm); \ - (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ - (a_qrelm)->a_field.qre_next = (a_qr); \ - } while (0) - -#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ - void *t; \ - (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ - (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ - t = (a_qr_a)->a_field.qre_prev; \ - (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ - (a_qr_b)->a_field.qre_prev = t; \ -} while (0) - -/* qr_meld() and qr_split() are functionally equivalent, so there's no need to - * have two copies of the code. 
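The ql_*() and qr_*() macros above build intrusive circular doubly linked lists whose linkage lives inside the client struct. The self-contained sketch below, with invented type and function names, spells out the same pointer splicing that qr_new() and qr_after_insert() perform.

#include <stdio.h>

typedef struct ring_node {
	struct ring_node *qre_next;
	struct ring_node *qre_prev;
	int value;
} ring_node_t;

/* qr_new(): a node starts as a one-element ring pointing at itself. */
static void
ring_new(ring_node_t *n)
{
	n->qre_next = n;
	n->qre_prev = n;
}

/* qr_after_insert(): splice n into the ring just after at. */
static void
ring_after_insert(ring_node_t *at, ring_node_t *n)
{
	n->qre_next = at->qre_next;
	n->qre_prev = at;
	n->qre_next->qre_prev = n;
	at->qre_next = n;
}

int
main(void)
{
	ring_node_t a = {0}, b = {0}, c = {0};

	a.value = 1; b.value = 2; c.value = 3;
	ring_new(&a);
	ring_after_insert(&a, &b);   /* ring: a -> b -> a */
	ring_after_insert(&b, &c);   /* ring: a -> b -> c -> a */
	for (ring_node_t *p = &a; ; p = p->qre_next) {
		printf("%d\n", p->value);
		if (p->qre_next == &a)
			break;
	}
	return 0;
}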
*/ -#define qr_split(a_qr_a, a_qr_b, a_field) \ - qr_meld((a_qr_a), (a_qr_b), a_field) - -#define qr_remove(a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev->a_field.qre_next \ - = (a_qr)->a_field.qre_next; \ - (a_qr)->a_field.qre_next->a_field.qre_prev \ - = (a_qr)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_foreach(var, a_qr, a_field) \ - for ((var) = (a_qr); \ - (var) != NULL; \ - (var) = (((var)->a_field.qre_next != (a_qr)) \ - ? (var)->a_field.qre_next : NULL)) - -#define qr_reverse_foreach(var, a_qr, a_field) \ - for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ - (var) != NULL; \ - (var) = (((var) != (a_qr)) \ - ? (var)->a_field.qre_prev : NULL)) diff --git a/extra/jemalloc/include/jemalloc/internal/quarantine.h b/extra/jemalloc/include/jemalloc/internal/quarantine.h deleted file mode 100644 index 16f677f73da..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/quarantine.h +++ /dev/null @@ -1,67 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct quarantine_obj_s quarantine_obj_t; -typedef struct quarantine_s quarantine_t; - -/* Default per thread quarantine size if valgrind is enabled. */ -#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct quarantine_obj_s { - void *ptr; - size_t usize; -}; - -struct quarantine_s { - size_t curbytes; - size_t curobjs; - size_t first; -#define LG_MAXOBJS_INIT 10 - size_t lg_maxobjs; - quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -quarantine_t *quarantine_init(size_t lg_maxobjs); -void quarantine(void *ptr); -void quarantine_cleanup(void *arg); -bool quarantine_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *) - -void quarantine_alloc_hook(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_)) -malloc_tsd_externs(quarantine, quarantine_t *) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL, - quarantine_cleanup) - -JEMALLOC_ALWAYS_INLINE void -quarantine_alloc_hook(void) -{ - quarantine_t *quarantine; - - assert(config_fill && opt_quarantine); - - quarantine = *quarantine_tsd_get(); - if (quarantine == NULL) - quarantine_init(LG_MAXOBJS_INIT); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/extra/jemalloc/include/jemalloc/internal/rb.h b/extra/jemalloc/include/jemalloc/internal/rb.h deleted file mode 100644 index 7b675f09051..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/rb.h +++ /dev/null @@ -1,973 +0,0 @@ -/*- - ******************************************************************************* - * - * cpp macro implementation of left-leaning 2-3 red-black trees. 
Parent - * pointers are not used, and color bits are stored in the least significant - * bit of right-child pointers (if RB_COMPACT is defined), thus making node - * linkage as compact as is possible for red-black trees. - * - * Usage: - * - * #include <stdint.h> - * #include <stdbool.h> - * #define NDEBUG // (Optional, see assert(3).) - * #include <assert.h> - * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) - * #include <rb.h> - * ... - * - ******************************************************************************* - */ - -#ifndef RB_H_ -#define RB_H_ - -#if 0 -__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 204493 2010-02-28 22:57:13Z jasone $"); -#endif - -#ifdef RB_COMPACT -/* Node structure. */ -#define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right_red; \ -} -#else -#define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right; \ - bool rbn_red; \ -} -#endif - -/* Root structure. */ -#define rb_tree(a_type) \ -struct { \ - a_type *rbt_root; \ - a_type rbt_nil; \ -} - -/* Left accessors. */ -#define rbtn_left_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_left) -#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ - (a_node)->a_field.rbn_left = a_left; \ -} while (0) - -#ifdef RB_COMPACT -/* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ - ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ - & ((ssize_t)-2))) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ - | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ -} while (0) - -/* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ - ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ - & ((size_t)1))) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ - | ((ssize_t)a_red)); \ -} while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ - (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ -} while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ -} while (0) -#else -/* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_right) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right = a_right; \ -} while (0) - -/* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_red) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_red = (a_red); \ -} while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = true; \ -} while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = false; \ -} while (0) -#endif - -/* Node initializer. */ -#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ - rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ - rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ - rbtn_red_set(a_type, a_field, (a_node)); \ -} while (0) - -/* Tree initializer. 
*/ -#define rb_new(a_type, a_field, a_rbt) do { \ - (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \ - rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \ - rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \ -} while (0) - -/* Internal utility macros. */ -#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ - for (; \ - rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\ - (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ - } \ - } \ -} while (0) - -#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ - for (; rbtn_right_get(a_type, a_field, (r_node)) != \ - &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \ - (r_node))) { \ - } \ - } \ -} while (0) - -#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ - rbtn_right_set(a_type, a_field, (a_node), \ - rbtn_left_get(a_type, a_field, (r_node))); \ - rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - -#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ - rbtn_left_set(a_type, a_field, (a_node), \ - rbtn_right_get(a_type, a_field, (r_node))); \ - rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - -/* - * The rb_proto() macro generates function prototypes that correspond to the - * functions generated by an equivalently parameterized call to rb_gen(). - */ - -#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key); \ -a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \ -a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg); \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); - -/* - * The rb_gen() macro generates a type-specific red-black tree implementation, - * based on the above cpp macros. - * - * Arguments: - * - * a_attr : Function attribute for generated functions (ex: static). - * a_prefix : Prefix for generated functions (ex: ex_). - * a_rb_type : Type for red-black tree data structure (ex: ex_t). - * a_type : Type for red-black tree node data structure (ex: ex_node_t). - * a_field : Name of red-black tree node linkage (ex: ex_link). 
- * a_cmp : Node comparison function name, with the following prototype: - * int (a_cmp *)(a_type *a_node, a_type *a_other); - * ^^^^^^ - * or a_key - * Interpretation of comparision function return values: - * -1 : a_node < a_other - * 0 : a_node == a_other - * 1 : a_node > a_other - * In all cases, the a_node or a_key macro argument is the first - * argument to the comparison function, which makes it possible - * to write comparison functions that treat the first argument - * specially. - * - * Assuming the following setup: - * - * typedef struct ex_node_s ex_node_t; - * struct ex_node_s { - * rb_node(ex_node_t) ex_link; - * }; - * typedef rb_tree(ex_node_t) ex_t; - * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) - * - * The following API is generated: - * - * static void - * ex_new(ex_t *tree); - * Description: Initialize a red-black tree structure. - * Args: - * tree: Pointer to an uninitialized red-black tree object. - * - * static ex_node_t * - * ex_first(ex_t *tree); - * static ex_node_t * - * ex_last(ex_t *tree); - * Description: Get the first/last node in tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * Ret: First/last node in tree, or NULL if tree is empty. - * - * static ex_node_t * - * ex_next(ex_t *tree, ex_node_t *node); - * static ex_node_t * - * ex_prev(ex_t *tree, ex_node_t *node); - * Description: Get node's successor/predecessor. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: A node in tree. - * Ret: node's successor/predecessor in tree, or NULL if node is - * last/first. - * - * static ex_node_t * - * ex_search(ex_t *tree, ex_node_t *key); - * Description: Search for node that matches key. - * Args: - * tree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in tree that matches key, or NULL if no match. - * - * static ex_node_t * - * ex_nsearch(ex_t *tree, ex_node_t *key); - * static ex_node_t * - * ex_psearch(ex_t *tree, ex_node_t *key); - * Description: Search for node that matches key. If no match is found, - * return what would be key's successor/predecessor, were - * key in tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in tree that matches key, or if no match, hypothetical node's - * successor/predecessor (NULL if no successor/predecessor). - * - * static void - * ex_insert(ex_t *tree, ex_node_t *node); - * Description: Insert node into tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: Node to be inserted into tree. - * - * static void - * ex_remove(ex_t *tree, ex_node_t *node); - * Description: Remove node from tree. - * Args: - * tree: Pointer to an initialized red-black tree object. - * node: Node in tree to be removed. - * - * static ex_node_t * - * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * static ex_node_t * - * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * Description: Iterate forward/backward over tree, starting at node. If - * tree is modified, iteration must be immediately - * terminated by the callback function that causes the - * modification. - * Args: - * tree : Pointer to an initialized red-black tree object. - * start: Node at which to start iteration, or NULL to start at - * first/last node. - * cb : Callback function, which is called for each node during - * iteration. 
Under normal circumstances the callback function - * should return NULL, which causes iteration to continue. If a - * callback function returns non-NULL, iteration is immediately - * terminated and the non-NULL return value is returned by the - * iterator. This is useful for re-starting iteration after - * modifying tree. - * arg : Opaque pointer passed to cb(). - * Ret: NULL if iteration completed, or the non-NULL callback return value - * that caused termination of the iteration. - */ -#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree) { \ - rb_new(a_type, a_field, rbtree); \ -} \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ - if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ - rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - break; \ - } \ - assert(tnode != &rbtree->rbt_nil); \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ - if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ - rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - break; \ - } \ - assert(tnode != &rbtree->rbt_nil); \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ - a_type *ret; \ - int cmp; \ - ret = rbtree->rbt_root; \ - while (ret != &rbtree->rbt_nil \ - && (cmp = (a_cmp)(key, ret)) != 0) { \ - if (cmp < 0) { \ - ret = rbtn_left_get(a_type, a_field, ret); \ - } else { \ - ret = rbtn_right_get(a_type, a_field, ret); \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ - ret = &rbtree->rbt_nil; \ - while (tnode != &rbtree->rbt_nil) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} 
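Following the usage outline in the rb.h comment block above, a minimal client of rb_gen() could look like the sketch below. It assumes the header is still available on the include path, and the ex_* names mirror the ones used in the header's own documentation; keys must be distinct, since the generated insert path asserts that the comparison never returns 0 for an existing node.

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include <stdio.h>
#define RB_COMPACT                  /* embed color bits in right-child pointers */
#include <rb.h>                     /* the header removed above, assumed on the include path */

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	rb_node(ex_node_t) ex_link;  /* intrusive linkage */
	int key;
};
typedef rb_tree(ex_node_t) ex_t;

static int
ex_cmp(ex_node_t *a, ex_node_t *b)
{
	return (a->key > b->key) - (a->key < b->key);
}

/* Generates static ex_new(), ex_insert(), ex_first(), ex_next(), ... */
rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)

int
main(void)
{
	ex_t tree;
	ex_node_t n1 = {.key = 2}, n2 = {.key = 1}, n3 = {.key = 3};

	ex_new(&tree);
	ex_insert(&tree, &n1);
	ex_insert(&tree, &n2);
	ex_insert(&tree, &n3);
	for (ex_node_t *n = ex_first(&tree); n != NULL; n = ex_next(&tree, n))
		printf("%d\n", n->key);  /* prints 1 2 3 in sorted order */
	return 0;
}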
\ -a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ - ret = &rbtree->rbt_nil; \ - while (tnode != &rbtree->rbt_nil) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } path[sizeof(void *) << 4], *pathp; \ - rbt_node_new(a_type, a_field, rbtree, node); \ - /* Wind. */ \ - path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - assert(cmp != 0); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - } \ - } \ - pathp->node = node; \ - /* Unwind. */ \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - a_type *cnode = pathp->node; \ - if (pathp->cmp < 0) { \ - a_type *left = pathp[1].node; \ - rbtn_left_set(a_type, a_field, cnode, left); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ - /* Fix up 4-node. */ \ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, cnode, tnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } else { \ - a_type *right = pathp[1].node; \ - rbtn_right_set(a_type, a_field, cnode, right); \ - if (rbtn_red_get(a_type, a_field, right)) { \ - a_type *left = rbtn_left_get(a_type, a_field, cnode); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - /* Split 4-node. */ \ - rbtn_black_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, right); \ - rbtn_red_set(a_type, a_field, cnode); \ - } else { \ - /* Lean left. */ \ - a_type *tnode; \ - bool tred = rbtn_red_get(a_type, a_field, cnode); \ - rbtn_rotate_left(a_type, a_field, cnode, tnode); \ - rbtn_color_set(a_type, a_field, tnode, tred); \ - rbtn_red_set(a_type, a_field, cnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } \ - pathp->node = cnode; \ - } \ - /* Set root, and make it black. */ \ - rbtree->rbt_root = path->node; \ - rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ -} \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } *pathp, *nodep, path[sizeof(void *) << 4]; \ - /* Wind. */ \ - nodep = NULL; /* Silence compiler warning. */ \ - path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - if (cmp == 0) { \ - /* Find node's successor, in preparation for swap. 
*/ \ - pathp->cmp = 1; \ - nodep = pathp; \ - for (pathp++; pathp->node != &rbtree->rbt_nil; \ - pathp++) { \ - pathp->cmp = -1; \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } \ - break; \ - } \ - } \ - } \ - assert(nodep->node == node); \ - pathp--; \ - if (pathp->node != node) { \ - /* Swap node with its successor. */ \ - bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ - rbtn_color_set(a_type, a_field, pathp->node, \ - rbtn_red_get(a_type, a_field, node)); \ - rbtn_left_set(a_type, a_field, pathp->node, \ - rbtn_left_get(a_type, a_field, node)); \ - /* If node's successor is its right child, the following code */\ - /* will do the wrong thing for the right child pointer. */\ - /* However, it doesn't matter, because the pointer will be */\ - /* properly set when the successor is pruned. */\ - rbtn_right_set(a_type, a_field, pathp->node, \ - rbtn_right_get(a_type, a_field, node)); \ - rbtn_color_set(a_type, a_field, node, tred); \ - /* The pruned leaf node's child pointers are never accessed */\ - /* again, so don't bother setting them to nil. */\ - nodep->node = pathp->node; \ - pathp->node = node; \ - if (nodep == path) { \ - rbtree->rbt_root = nodep->node; \ - } else { \ - if (nodep[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } else { \ - rbtn_right_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } \ - } \ - } else { \ - a_type *left = rbtn_left_get(a_type, a_field, node); \ - if (left != &rbtree->rbt_nil) { \ - /* node has no successor, but it has a left child. */\ - /* Splice node out, without losing the left child. */\ - assert(rbtn_red_get(a_type, a_field, node) == false); \ - assert(rbtn_red_get(a_type, a_field, left)); \ - rbtn_black_set(a_type, a_field, left); \ - if (pathp == path) { \ - rbtree->rbt_root = left; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - left); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - left); \ - } \ - } \ - return; \ - } else if (pathp == path) { \ - /* The tree only contained one node. */ \ - rbtree->rbt_root = &rbtree->rbt_nil; \ - return; \ - } \ - } \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - /* Prune red node, which requires no fixup. */ \ - assert(pathp[-1].cmp < 0); \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - &rbtree->rbt_nil); \ - return; \ - } \ - /* The node to be pruned is black, so unwind until balance is */\ - /* restored. */\ - pathp->node = &rbtree->rbt_nil; \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - assert(pathp->cmp != 0); \ - if (pathp->cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - assert(rbtn_red_get(a_type, a_field, pathp[1].node) \ - == false); \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ - a_type *tnode; \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ - /* In the following diagrams, ||, //, and \\ */\ - /* indicate the path to the removed node. 
*/\ - /* */\ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - /* */\ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - /* */\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root. */\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, rightleft); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subree root, which may actually be the tree */\ - /* root. */\ - if (pathp == path) { \ - /* Set root. */ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - a_type *tnode; \ - rbtn_red_set(a_type, a_field, pathp->node); \ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - pathp->node = tnode; \ - } \ - } \ - } else { \ - a_type *left; \ - rbtn_right_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - left = rbtn_left_get(a_type, a_field, pathp->node); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *tnode; \ - a_type *leftright = rbtn_right_get(a_type, a_field, \ - left); \ - a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ - leftright); \ - if (rbtn_red_get(a_type, a_field, leftrightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (r) */\ - a_type *unode; \ - rbtn_black_set(a_type, a_field, leftrightleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - unode); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_right_set(a_type, a_field, unode, tnode); \ - rbtn_rotate_left(a_type, a_field, unode, tnode); \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (b) */\ - assert(leftright != &rbtree->rbt_nil); \ - rbtn_red_set(a_type, a_field, leftright); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_black_set(a_type, a_field, tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root, which may actually be the tree root. */\ - if (pathp == path) { \ - /* Set root. 
*/ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - } \ - return; \ - } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root. */\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, pathp->node); \ - /* Balance restored. */ \ - return; \ - } \ - } else { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root, which may actually be the tree */\ - /* root. */\ - if (pathp == path) { \ - /* Set root. */ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - } \ - } \ - } \ - } \ - /* Set root. 
*/ \ - rbtree->rbt_root = path->node; \ - assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \ -} \ -a_attr a_type * \ -a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ - } else { \ - a_type *ret; \ - if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ - a_field, node), cb, arg)) != &rbtree->rbt_nil \ - || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp < 0) { \ - a_type *ret; \ - if ((ret = a_prefix##iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } else if (cmp > 0) { \ - return (a_prefix##iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; \ - if (start != NULL) { \ - ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ - cb, arg); \ - } else { \ - ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ - } else { \ - a_type *ret; \ - if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ - a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ - void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp > 0) { \ - a_type *ret; \ - if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } else if (cmp < 0) { \ - return (a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; 
\ - if (start != NULL) { \ - ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtree->rbt_root, cb, arg); \ - } else { \ - ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ - cb, arg); \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} - -#endif /* RB_H_ */ diff --git a/extra/jemalloc/include/jemalloc/internal/rtree.h b/extra/jemalloc/include/jemalloc/internal/rtree.h deleted file mode 100644 index 9bd98548cfe..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/rtree.h +++ /dev/null @@ -1,164 +0,0 @@ -/* - * This radix tree implementation is tailored to the singular purpose of - * tracking which chunks are currently owned by jemalloc. This functionality - * is mandatory for OS X, where jemalloc must be able to respond to object - * ownership queries. - * - ******************************************************************************* - */ -#ifdef JEMALLOC_H_TYPES - -typedef struct rtree_s rtree_t; - -/* - * Size of each radix tree node (must be a power of 2). This impacts tree - * depth. - */ -#if (LG_SIZEOF_PTR == 2) -# define RTREE_NODESIZE (1U << 14) -#else -# define RTREE_NODESIZE CACHELINE -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct rtree_s { - malloc_mutex_t mutex; - void **root; - unsigned height; - unsigned level2bits[1]; /* Dynamically sized. */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -rtree_t *rtree_new(unsigned bits); -void rtree_prefork(rtree_t *rtree); -void rtree_postfork_parent(rtree_t *rtree); -void rtree_postfork_child(rtree_t *rtree); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -#ifndef JEMALLOC_DEBUG -void *rtree_get_locked(rtree_t *rtree, uintptr_t key); -#endif -void *rtree_get(rtree_t *rtree, uintptr_t key); -bool rtree_set(rtree_t *rtree, uintptr_t key, void *val); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) -#define RTREE_GET_GENERATE(f) \ -/* The least significant bits of the key are ignored. */ \ -JEMALLOC_INLINE void * \ -f(rtree_t *rtree, uintptr_t key) \ -{ \ - void *ret; \ - uintptr_t subkey; \ - unsigned i, lshift, height, bits; \ - void **node, **child; \ - \ - RTREE_LOCK(&rtree->mutex); \ - for (i = lshift = 0, height = rtree->height, node = rtree->root;\ - i < height - 1; \ - i++, lshift += bits, node = child) { \ - bits = rtree->level2bits[i]; \ - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \ - 3)) - bits); \ - child = (void**)node[subkey]; \ - if (child == NULL) { \ - RTREE_UNLOCK(&rtree->mutex); \ - return (NULL); \ - } \ - } \ - \ - /* \ - * node is a leaf, so it contains values rather than node \ - * pointers. 
\ - */ \ - bits = rtree->level2bits[i]; \ - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \ - bits); \ - ret = node[subkey]; \ - RTREE_UNLOCK(&rtree->mutex); \ - \ - RTREE_GET_VALIDATE \ - return (ret); \ -} - -#ifdef JEMALLOC_DEBUG -# define RTREE_LOCK(l) malloc_mutex_lock(l) -# define RTREE_UNLOCK(l) malloc_mutex_unlock(l) -# define RTREE_GET_VALIDATE -RTREE_GET_GENERATE(rtree_get_locked) -# undef RTREE_LOCK -# undef RTREE_UNLOCK -# undef RTREE_GET_VALIDATE -#endif - -#define RTREE_LOCK(l) -#define RTREE_UNLOCK(l) -#ifdef JEMALLOC_DEBUG - /* - * Suppose that it were possible for a jemalloc-allocated chunk to be - * munmap()ped, followed by a different allocator in another thread re-using - * overlapping virtual memory, all without invalidating the cached rtree - * value. The result would be a false positive (the rtree would claim that - * jemalloc owns memory that it had actually discarded). This scenario - * seems impossible, but the following assertion is a prudent sanity check. - */ -# define RTREE_GET_VALIDATE \ - assert(rtree_get_locked(rtree, key) == ret); -#else -# define RTREE_GET_VALIDATE -#endif -RTREE_GET_GENERATE(rtree_get) -#undef RTREE_LOCK -#undef RTREE_UNLOCK -#undef RTREE_GET_VALIDATE - -JEMALLOC_INLINE bool -rtree_set(rtree_t *rtree, uintptr_t key, void *val) -{ - uintptr_t subkey; - unsigned i, lshift, height, bits; - void **node, **child; - - malloc_mutex_lock(&rtree->mutex); - for (i = lshift = 0, height = rtree->height, node = rtree->root; - i < height - 1; - i++, lshift += bits, node = child) { - bits = rtree->level2bits[i]; - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - - bits); - child = (void**)node[subkey]; - if (child == NULL) { - child = (void**)base_alloc(sizeof(void *) << - rtree->level2bits[i+1]); - if (child == NULL) { - malloc_mutex_unlock(&rtree->mutex); - return (true); - } - memset(child, 0, sizeof(void *) << - rtree->level2bits[i+1]); - node[subkey] = child; - } - } - - /* node is a leaf, so it contains values rather than node pointers. */ - bits = rtree->level2bits[i]; - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits); - node[subkey] = val; - malloc_mutex_unlock(&rtree->mutex); - - return (false); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/size_classes.sh b/extra/jemalloc/include/jemalloc/internal/size_classes.sh deleted file mode 100755 index 29c80c1fb8d..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/size_classes.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/bin/sh - -# The following limits are chosen such that they cover all supported platforms. - -# Range of quanta. -lg_qmin=3 -lg_qmax=4 - -# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)]. -lg_tmin=3 - -# Range of page sizes. -lg_pmin=12 -lg_pmax=16 - -pow2() { - e=$1 - pow2_result=1 - while [ ${e} -gt 0 ] ; do - pow2_result=$((${pow2_result} + ${pow2_result})) - e=$((${e} - 1)) - done -} - -cat <<EOF -/* This file was automatically generated by size_classes.sh. 
*/ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -EOF - -lg_q=${lg_qmin} -while [ ${lg_q} -le ${lg_qmax} ] ; do - lg_t=${lg_tmin} - while [ ${lg_t} -le ${lg_q} ] ; do - lg_p=${lg_pmin} - while [ ${lg_p} -le ${lg_pmax} ] ; do - echo "#if (LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})" - echo "#define SIZE_CLASSES_DEFINED" - pow2 ${lg_q}; q=${pow2_result} - pow2 ${lg_t}; t=${pow2_result} - pow2 ${lg_p}; p=${pow2_result} - bin=0 - psz=0 - sz=${t} - delta=$((${sz} - ${psz})) - echo "/* SIZE_CLASS(bin, delta, sz) */" - echo "#define SIZE_CLASSES \\" - - # Tiny size classes. - while [ ${sz} -lt ${q} ] ; do - echo " SIZE_CLASS(${bin}, ${delta}, ${sz}) \\" - bin=$((${bin} + 1)) - psz=${sz} - sz=$((${sz} + ${sz})) - delta=$((${sz} - ${psz})) - done - # Quantum-multiple size classes. For each doubling of sz, as many as 4 - # size classes exist. Their spacing is the greater of: - # - q - # - sz/4, where sz is a power of 2 - while [ ${sz} -lt ${p} ] ; do - if [ ${sz} -ge $((${q} * 4)) ] ; then - i=$((${sz} / 4)) - else - i=${q} - fi - next_2pow=$((${sz} * 2)) - while [ ${sz} -lt $next_2pow ] ; do - echo " SIZE_CLASS(${bin}, ${delta}, ${sz}) \\" - bin=$((${bin} + 1)) - psz=${sz} - sz=$((${sz} + ${i})) - delta=$((${sz} - ${psz})) - done - done - echo - echo "#define NBINS ${bin}" - echo "#define SMALL_MAXCLASS ${psz}" - echo "#endif" - echo - lg_p=$((${lg_p} + 1)) - done - lg_t=$((${lg_t} + 1)) - done - lg_q=$((${lg_q} + 1)) -done - -cat <<EOF -#ifndef SIZE_CLASSES_DEFINED -# error "No size class definitions match configuration" -#endif -#undef SIZE_CLASSES_DEFINED -/* - * The small_size2bin lookup table uses uint8_t to encode each bin index, so we - * cannot support more than 256 small size classes. Further constrain NBINS to - * 255 to support prof_promote, since all small size classes, plus a "not - * small" size class must be stored in 8 bits of arena_chunk_map_t's bits - * field. - */ -#if (NBINS > 255) -# error "Too many small size classes" -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ -EOF diff --git a/extra/jemalloc/include/jemalloc/internal/stats.h b/extra/jemalloc/include/jemalloc/internal/stats.h deleted file mode 100644 index 27f68e3681c..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/stats.h +++ /dev/null @@ -1,173 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_stats_s tcache_bin_stats_t; -typedef struct malloc_bin_stats_s malloc_bin_stats_t; -typedef struct malloc_large_stats_s malloc_large_stats_t; -typedef struct arena_stats_s arena_stats_t; -typedef struct chunk_stats_s chunk_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct tcache_bin_stats_s { - /* - * Number of allocation requests that corresponded to the size of this - * bin. 
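Hand-expanding the generator loops above for LG_TINY_MIN=3, LG_QUANTUM=4 and LG_PAGE=12 (a common 64-bit configuration) would yield a table along these lines; the values are derived by hand from the loop logic, so treat them as illustrative rather than authoritative output:

#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES_DEFINED
/* SIZE_CLASS(bin, delta, sz) */
#define SIZE_CLASSES \
    SIZE_CLASS(0,    8,    8) \
    SIZE_CLASS(1,    8,   16) \
    SIZE_CLASS(2,   16,   32) \
    SIZE_CLASS(3,   16,   48) \
    SIZE_CLASS(4,   16,   64) \
    SIZE_CLASS(5,   16,   80) \
    SIZE_CLASS(6,   16,   96) \
    SIZE_CLASS(7,   16,  112) \
    SIZE_CLASS(8,   16,  128) \
    SIZE_CLASS(9,   32,  160) \
    SIZE_CLASS(10,  32,  192) \
    SIZE_CLASS(11,  32,  224) \
    SIZE_CLASS(12,  32,  256) \
    SIZE_CLASS(13,  64,  320) \
    SIZE_CLASS(14,  64,  384) \
    SIZE_CLASS(15,  64,  448) \
    SIZE_CLASS(16,  64,  512) \
    SIZE_CLASS(17, 128,  640) \
    SIZE_CLASS(18, 128,  768) \
    SIZE_CLASS(19, 128,  896) \
    SIZE_CLASS(20, 128, 1024) \
    SIZE_CLASS(21, 256, 1280) \
    SIZE_CLASS(22, 256, 1536) \
    SIZE_CLASS(23, 256, 1792) \
    SIZE_CLASS(24, 256, 2048) \
    SIZE_CLASS(25, 512, 2560) \
    SIZE_CLASS(26, 512, 3072) \
    SIZE_CLASS(27, 512, 3584) \

#define NBINS           28
#define SMALL_MAXCLASS  3584
#endif

The tiny class (8) is followed by quantum multiples whose spacing doubles at each power of two, ending just below the 4 KiB page size.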
- */ - uint64_t nrequests; -}; - -struct malloc_bin_stats_s { - /* - * Current number of bytes allocated, including objects currently - * cached by tcache. - */ - size_t allocated; - - /* - * Total number of allocation/deallocation requests served directly by - * the bin. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* - * Number of allocation requests that correspond to the size of this - * bin. This includes requests served by tcache, though tcache only - * periodically merges into this counter. - */ - uint64_t nrequests; - - /* Number of tcache fills from this bin. */ - uint64_t nfills; - - /* Number of tcache flushes to this bin. */ - uint64_t nflushes; - - /* Total number of runs created for this bin's size class. */ - uint64_t nruns; - - /* - * Total number of runs reused by extracting them from the runs tree for - * this bin's size class. - */ - uint64_t reruns; - - /* Current number of runs in this bin. */ - size_t curruns; -}; - -struct malloc_large_stats_s { - /* - * Total number of allocation/deallocation requests served directly by - * the arena. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* - * Number of allocation requests that correspond to this size class. - * This includes requests served by tcache, though tcache only - * periodically merges into this counter. - */ - uint64_t nrequests; - - /* Current number of runs of this size class. */ - size_t curruns; -}; - -struct arena_stats_s { - /* Number of bytes currently mapped. */ - size_t mapped; - - /* - * Total number of purge sweeps, total number of madvise calls made, - * and total pages purged in order to keep dirty unused memory under - * control. - */ - uint64_t npurge; - uint64_t nmadvise; - uint64_t purged; - - /* Per-size-category statistics. */ - size_t allocated_large; - uint64_t nmalloc_large; - uint64_t ndalloc_large; - uint64_t nrequests_large; - - /* - * One element for each possible size class, including sizes that - * overlap with bin size classes. This is necessary because ipalloc() - * sometimes has to use such large objects in order to assure proper - * alignment. - */ - malloc_large_stats_t *lstats; -}; - -struct chunk_stats_s { - /* Number of chunks that were allocated. */ - uint64_t nchunks; - - /* High-water mark for number of chunks allocated. */ - size_t highchunks; - - /* - * Current number of chunks allocated. This value isn't maintained for - * any other purpose, so keep track of it in order to be able to set - * highchunks. 
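To make the nmalloc/ndalloc versus nrequests distinction above concrete (the numbers are illustrative):

/*
 * Example: a thread that allocates and frees one 16-byte object 1000
 * times through its tcache adds 1000 to the bin's nrequests (merged in
 * periodically), while nmalloc and ndalloc only count the objects that
 * actually moved between the tcache and the arena bin during fills and
 * flushes, typically far fewer than 1000.
 */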
- */ - size_t curchunks; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_stats_print; - -extern size_t stats_cactive; - -void stats_print(void (*write)(void *, const char *), void *cbopaque, - const char *opts); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -size_t stats_cactive_get(void); -void stats_cactive_add(size_t size); -void stats_cactive_sub(size_t size); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) -JEMALLOC_INLINE size_t -stats_cactive_get(void) -{ - - return (atomic_read_z(&stats_cactive)); -} - -JEMALLOC_INLINE void -stats_cactive_add(size_t size) -{ - - atomic_add_z(&stats_cactive, size); -} - -JEMALLOC_INLINE void -stats_cactive_sub(size_t size) -{ - - atomic_sub_z(&stats_cactive, size); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/tcache.h b/extra/jemalloc/include/jemalloc/internal/tcache.h deleted file mode 100644 index d4eecdee0dc..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/tcache.h +++ /dev/null @@ -1,442 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_info_s tcache_bin_info_t; -typedef struct tcache_bin_s tcache_bin_t; -typedef struct tcache_s tcache_t; - -/* - * tcache pointers close to NULL are used to encode state information that is - * used for two purposes: preventing thread caching on a per thread basis and - * cleaning up during thread shutdown. - */ -#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) -#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) -#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) -#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY - -/* - * Absolute maximum number of cache slots for each small bin in the thread - * cache. This is an additional constraint beyond that imposed as: twice the - * number of regions per run for this size class. - * - * This constant must be an even number. - */ -#define TCACHE_NSLOTS_SMALL_MAX 200 - -/* Number of cache slots for large size classes. */ -#define TCACHE_NSLOTS_LARGE 20 - -/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ -#define LG_TCACHE_MAXCLASS_DEFAULT 15 - -/* - * TCACHE_GC_SWEEP is the approximate number of allocation events between - * full GC sweeps. Integer rounding may cause the actual number to be - * slightly higher, since GC is performed incrementally. - */ -#define TCACHE_GC_SWEEP 8192 - -/* Number of tcache allocation/deallocation events between incremental GCs. */ -#define TCACHE_GC_INCR \ - ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -typedef enum { - tcache_enabled_false = 0, /* Enable cast to/from bool. */ - tcache_enabled_true = 1, - tcache_enabled_default = 2 -} tcache_enabled_t; - -/* - * Read-only information associated with each element of tcache_t's tbins array - * is stored separately, mainly to reduce memory usage. - */ -struct tcache_bin_info_s { - unsigned ncached_max; /* Upper limit on ncached. 
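A worked example of the TCACHE_GC_INCR arithmetic above, assuming NBINS is 28 (as in the size-class table sketched earlier):

/*
 * TCACHE_GC_INCR = (8192 / 28) + 1 = 292 + 1 = 293
 * One bin is garbage-collected every 293 tcache events, so a full sweep
 * of all 28 small bins takes roughly 28 * 293 = 8204 events, slightly
 * more than TCACHE_GC_SWEEP, as the rounding note above predicts.
 */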
*/ -}; - -struct tcache_bin_s { - tcache_bin_stats_t tstats; - int low_water; /* Min # cached since last GC. */ - unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ - unsigned ncached; /* # of cached objects. */ - void **avail; /* Stack of available objects. */ -}; - -struct tcache_s { - ql_elm(tcache_t) link; /* Used for aggregating stats. */ - uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */ - arena_t *arena; /* This thread's arena. */ - unsigned ev_cnt; /* Event count since incremental GC. */ - unsigned next_gc_bin; /* Next bin to GC. */ - tcache_bin_t tbins[1]; /* Dynamically sized. */ - /* - * The pointer stacks associated with tbins follow as a contiguous - * array. During tcache initialization, the avail pointer in each - * element of tbins is initialized to point to the proper offset within - * this array. - */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_tcache; -extern ssize_t opt_lg_tcache_max; - -extern tcache_bin_info_t *tcache_bin_info; - -/* - * Number of tcache bins. There are NBINS small-object bins, plus 0 or more - * large-object bins. - */ -extern size_t nhbins; - -/* Maximum cached size class. */ -extern size_t tcache_maxclass; - -size_t tcache_salloc(const void *ptr); -void tcache_event_hard(tcache_t *tcache); -void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, - size_t binind); -void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache); -void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache); -void tcache_arena_associate(tcache_t *tcache, arena_t *arena); -void tcache_arena_dissociate(tcache_t *tcache); -tcache_t *tcache_create(arena_t *arena); -void tcache_destroy(tcache_t *tcache); -void tcache_thread_cleanup(void *arg); -void tcache_stats_merge(tcache_t *tcache, arena_t *arena); -bool tcache_boot0(void); -bool tcache_boot1(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *) -malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t) - -void tcache_event(tcache_t *tcache); -void tcache_flush(void); -bool tcache_enabled_get(void); -tcache_t *tcache_get(bool create); -void tcache_enabled_set(bool enabled); -void *tcache_alloc_easy(tcache_bin_t *tbin); -void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero); -void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero); -void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind); -void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) -/* Map of thread-specific caches. */ -malloc_tsd_externs(tcache, tcache_t *) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL, - tcache_thread_cleanup) -/* Per thread flag that allows thread caches to be disabled. 
*/ -malloc_tsd_externs(tcache_enabled, tcache_enabled_t) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t, - tcache_enabled_default, malloc_tsd_no_cleanup) - -JEMALLOC_INLINE void -tcache_flush(void) -{ - tcache_t *tcache; - - cassert(config_tcache); - - tcache = *tcache_tsd_get(); - if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) - return; - tcache_destroy(tcache); - tcache = NULL; - tcache_tsd_set(&tcache); -} - -JEMALLOC_INLINE bool -tcache_enabled_get(void) -{ - tcache_enabled_t tcache_enabled; - - cassert(config_tcache); - - tcache_enabled = *tcache_enabled_tsd_get(); - if (tcache_enabled == tcache_enabled_default) { - tcache_enabled = (tcache_enabled_t)opt_tcache; - tcache_enabled_tsd_set(&tcache_enabled); - } - - return ((bool)tcache_enabled); -} - -JEMALLOC_INLINE void -tcache_enabled_set(bool enabled) -{ - tcache_enabled_t tcache_enabled; - tcache_t *tcache; - - cassert(config_tcache); - - tcache_enabled = (tcache_enabled_t)enabled; - tcache_enabled_tsd_set(&tcache_enabled); - tcache = *tcache_tsd_get(); - if (enabled) { - if (tcache == TCACHE_STATE_DISABLED) { - tcache = NULL; - tcache_tsd_set(&tcache); - } - } else /* disabled */ { - if (tcache > TCACHE_STATE_MAX) { - tcache_destroy(tcache); - tcache = NULL; - } - if (tcache == NULL) { - tcache = TCACHE_STATE_DISABLED; - tcache_tsd_set(&tcache); - } - } -} - -JEMALLOC_ALWAYS_INLINE tcache_t * -tcache_get(bool create) -{ - tcache_t *tcache; - - if (config_tcache == false) - return (NULL); - if (config_lazy_lock && isthreaded == false) - return (NULL); - - tcache = *tcache_tsd_get(); - if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) { - if (tcache == TCACHE_STATE_DISABLED) - return (NULL); - if (tcache == NULL) { - if (create == false) { - /* - * Creating a tcache here would cause - * allocation as a side effect of free(). - * Ordinarily that would be okay since - * tcache_create() failure is a soft failure - * that doesn't propagate. However, if TLS - * data are freed via free() as in glibc, - * subtle corruption could result from setting - * a TLS variable after its backing memory is - * freed. - */ - return (NULL); - } - if (tcache_enabled_get() == false) { - tcache_enabled_set(false); /* Memoize. */ - return (NULL); - } - return (tcache_create(choose_arena(NULL))); - } - if (tcache == TCACHE_STATE_PURGATORY) { - /* - * Make a note that an allocator function was called - * after tcache_thread_cleanup() was called. 
- */ - tcache = TCACHE_STATE_REINCARNATED; - tcache_tsd_set(&tcache); - return (NULL); - } - if (tcache == TCACHE_STATE_REINCARNATED) - return (NULL); - not_reached(); - } - - return (tcache); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_event(tcache_t *tcache) -{ - - if (TCACHE_GC_INCR == 0) - return; - - tcache->ev_cnt++; - assert(tcache->ev_cnt <= TCACHE_GC_INCR); - if (tcache->ev_cnt == TCACHE_GC_INCR) - tcache_event_hard(tcache); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_easy(tcache_bin_t *tbin) -{ - void *ret; - - if (tbin->ncached == 0) { - tbin->low_water = -1; - return (NULL); - } - tbin->ncached--; - if ((int)tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; - ret = tbin->avail[tbin->ncached]; - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_small(tcache_t *tcache, size_t size, bool zero) -{ - void *ret; - size_t binind; - tcache_bin_t *tbin; - - binind = SMALL_SIZE2BIN(size); - assert(binind < NBINS); - tbin = &tcache->tbins[binind]; - ret = tcache_alloc_easy(tbin); - if (ret == NULL) { - ret = tcache_alloc_small_hard(tcache, tbin, binind); - if (ret == NULL) - return (NULL); - } - assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size); - - if (zero == false) { - if (config_fill) { - if (opt_junk) { - arena_alloc_junk_small(ret, - &arena_bin_info[binind], false); - } else if (opt_zero) - memset(ret, 0, size); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } else { - if (config_fill && opt_junk) { - arena_alloc_junk_small(ret, &arena_bin_info[binind], - true); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - - if (config_stats) - tbin->tstats.nrequests++; - if (config_prof) - tcache->prof_accumbytes += arena_bin_info[binind].reg_size; - tcache_event(tcache); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_large(tcache_t *tcache, size_t size, bool zero) -{ - void *ret; - size_t binind; - tcache_bin_t *tbin; - - size = PAGE_CEILING(size); - assert(size <= tcache_maxclass); - binind = NBINS + (size >> LG_PAGE) - 1; - assert(binind < nhbins); - tbin = &tcache->tbins[binind]; - ret = tcache_alloc_easy(tbin); - if (ret == NULL) { - /* - * Only allocate one large object at a time, because it's quite - * expensive to create one and not use it. 
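A worked example of the large-bin index computation above, assuming 4 KiB pages (LG_PAGE=12), NBINS=28 and the default tcache_maxclass of 32 KiB:

/*
 *   4 KiB request:  binind = 28 + (4096  >> 12) - 1 = 28
 *   8 KiB request:  binind = 28 + (8192  >> 12) - 1 = 29
 *  32 KiB request:  binind = 28 + (32768 >> 12) - 1 = 35
 * nhbins would therefore be 36: 28 small bins plus 8 large ones.
 */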
- */ - ret = arena_malloc_large(tcache->arena, size, zero); - if (ret == NULL) - return (NULL); - } else { - if (config_prof && prof_promote && size == PAGE) { - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(ret); - size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> - LG_PAGE); - arena_mapbits_large_binind_set(chunk, pageind, - BININD_INVALID); - } - if (zero == false) { - if (config_fill) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } else { - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - - if (config_stats) - tbin->tstats.nrequests++; - if (config_prof) - tcache->prof_accumbytes += size; - } - - tcache_event(tcache); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind) -{ - tcache_bin_t *tbin; - tcache_bin_info_t *tbin_info; - - assert(tcache_salloc(ptr) <= SMALL_MAXCLASS); - - if (config_fill && opt_junk) - arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); - - tbin = &tcache->tbins[binind]; - tbin_info = &tcache_bin_info[binind]; - if (tbin->ncached == tbin_info->ncached_max) { - tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >> - 1), tcache); - } - assert(tbin->ncached < tbin_info->ncached_max); - tbin->avail[tbin->ncached] = ptr; - tbin->ncached++; - - tcache_event(tcache); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size) -{ - size_t binind; - tcache_bin_t *tbin; - tcache_bin_info_t *tbin_info; - - assert((size & PAGE_MASK) == 0); - assert(tcache_salloc(ptr) > SMALL_MAXCLASS); - assert(tcache_salloc(ptr) <= tcache_maxclass); - - binind = NBINS + (size >> LG_PAGE) - 1; - - if (config_fill && opt_junk) - memset(ptr, 0x5a, size); - - tbin = &tcache->tbins[binind]; - tbin_info = &tcache_bin_info[binind]; - if (tbin->ncached == tbin_info->ncached_max) { - tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >> - 1), tcache); - } - assert(tbin->ncached < tbin_info->ncached_max); - tbin->avail[tbin->ncached] = ptr; - tbin->ncached++; - - tcache_event(tcache); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/tsd.h b/extra/jemalloc/include/jemalloc/internal/tsd.h deleted file mode 100644 index 0037cf35e70..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/tsd.h +++ /dev/null @@ -1,397 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Maximum number of malloc_tsd users with cleanup functions. */ -#define MALLOC_TSD_CLEANUPS_MAX 8 - -typedef bool (*malloc_tsd_cleanup_t)(void); - -/* - * TLS/TSD-agnostic macro-based implementation of thread-specific data. There - * are four macros that support (at least) three use cases: file-private, - * library-private, and library-private inlined. 
Following is an example - * library-private tsd variable: - * - * In example.h: - * typedef struct { - * int x; - * int y; - * } example_t; - * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) - * malloc_tsd_protos(, example, example_t *) - * malloc_tsd_externs(example, example_t *) - * In example.c: - * malloc_tsd_data(, example, example_t *, EX_INITIALIZER) - * malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER, - * example_tsd_cleanup) - * - * The result is a set of generated functions, e.g.: - * - * bool example_tsd_boot(void) {...} - * example_t **example_tsd_get() {...} - * void example_tsd_set(example_t **val) {...} - * - * Note that all of the functions deal in terms of (a_type *) rather than - * (a_type) so that it is possible to support non-pointer types (unlike - * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is - * cast to (void *). This means that the cleanup function needs to cast *and* - * dereference the function argument, e.g.: - * - * void - * example_tsd_cleanup(void *arg) - * { - * example_t *example = *(example_t **)arg; - * - * [...] - * if ([want the cleanup function to be called again]) { - * example_tsd_set(&example); - * } - * } - * - * If example_tsd_set() is called within example_tsd_cleanup(), it will be - * called again. This is similar to how pthreads TSD destruction works, except - * that pthreads only calls the cleanup function again if the value was set to - * non-NULL. - */ - -/* malloc_tsd_protos(). */ -#define malloc_tsd_protos(a_attr, a_name, a_type) \ -a_attr bool \ -a_name##_tsd_boot(void); \ -a_attr a_type * \ -a_name##_tsd_get(void); \ -a_attr void \ -a_name##_tsd_set(a_type *val); - -/* malloc_tsd_externs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##_tls; \ -extern __thread bool a_name##_initialized; \ -extern bool a_name##_booted; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##_tls; \ -extern pthread_key_t a_name##_tsd; \ -extern bool a_name##_booted; -#elif (defined(_WIN32)) -#define malloc_tsd_externs(a_name, a_type) \ -extern DWORD a_name##_tsd; \ -extern bool a_name##_booted; -#else -#define malloc_tsd_externs(a_name, a_type) \ -extern pthread_key_t a_name##_tsd; \ -extern bool a_name##_booted; -#endif - -/* malloc_tsd_data(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##_tls = a_initializer; \ -a_attr __thread bool JEMALLOC_TLS_MODEL \ - a_name##_initialized = false; \ -a_attr bool a_name##_booted = false; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##_tls = a_initializer; \ -a_attr pthread_key_t a_name##_tsd; \ -a_attr bool a_name##_booted = false; -#elif (defined(_WIN32)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr DWORD a_name##_tsd; \ -a_attr bool a_name##_booted = false; -#else -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr pthread_key_t a_name##_tsd; \ -a_attr bool a_name##_booted = false; -#endif - -/* malloc_tsd_funcs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. 
*/ \ -a_attr bool \ -a_name##_tsd_cleanup_wrapper(void) \ -{ \ - \ - if (a_name##_initialized) { \ - a_name##_initialized = false; \ - a_cleanup(&a_name##_tls); \ - } \ - return (a_name##_initialized); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##_tsd_cleanup_wrapper); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - \ - assert(a_name##_booted); \ - return (&a_name##_tls); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##_booted); \ - a_name##_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - a_name##_initialized = true; \ -} -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \ - return (true); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - \ - assert(a_name##_booted); \ - return (&a_name##_tls); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##_booted); \ - a_name##_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)(&a_name##_tls))) { \ - malloc_write("<jemalloc>: Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - } \ -} -#elif (defined(_WIN32)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Data structure. */ \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##_tsd_wrapper_t; \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##_tsd_cleanup_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \ - if (wrapper == NULL) \ - return (false); \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - a_type val = wrapper->val; \ - a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - a_cleanup(&val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. */ \ - return (true); \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ - return (false); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - a_name##_tsd = TlsAlloc(); \ - if (a_name##_tsd == TLS_OUT_OF_INDEXES) \ - return (true); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##_tsd_cleanup_wrapper); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. 
*/ \ -a_attr a_name##_tsd_wrapper_t * \ -a_name##_tsd_get_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ - TlsGetValue(a_name##_tsd); \ - \ - if (wrapper == NULL) { \ - wrapper = (a_name##_tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ - if (wrapper == NULL) { \ - malloc_write("<jemalloc>: Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - static a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - } \ - if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \ - malloc_write("<jemalloc>: Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ - } \ - return (wrapper); \ -} \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ -} -#else -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Data structure. */ \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##_tsd_wrapper_t; \ -/* Initialization/cleanup. */ \ -a_attr void \ -a_name##_tsd_cleanup_wrapper(void *arg) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\ - \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - wrapper->initialized = false; \ - a_cleanup(&wrapper->val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. */ \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)wrapper)) { \ - malloc_write("<jemalloc>: Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - return; \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (pthread_key_create(&a_name##_tsd, \ - a_name##_tsd_cleanup_wrapper) != 0) \ - return (true); \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. 
*/ \ -a_attr a_name##_tsd_wrapper_t * \ -a_name##_tsd_get_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ - pthread_getspecific(a_name##_tsd); \ - \ - if (wrapper == NULL) { \ - wrapper = (a_name##_tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ - if (wrapper == NULL) { \ - malloc_write("<jemalloc>: Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - static a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - } \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)wrapper)) { \ - malloc_write("<jemalloc>: Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ - } \ - return (wrapper); \ -} \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ -} -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *malloc_tsd_malloc(size_t size); -void malloc_tsd_dalloc(void *wrapper); -void malloc_tsd_no_cleanup(void *); -void malloc_tsd_cleanup_register(bool (*f)(void)); -void malloc_tsd_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/internal/util.h b/extra/jemalloc/include/jemalloc/internal/util.h deleted file mode 100644 index 8479693631a..00000000000 --- a/extra/jemalloc/include/jemalloc/internal/util.h +++ /dev/null @@ -1,160 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Size of stack-allocated buffer passed to buferror(). */ -#define BUFERROR_BUF 64 - -/* - * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be - * large enough for all possible uses within jemalloc. - */ -#define MALLOC_PRINTF_BUFSIZE 4096 - -/* - * Wrap a cpp argument that contains commas such that it isn't broken up into - * multiple arguments. - */ -#define JEMALLOC_CONCAT(...) __VA_ARGS__ - -/* - * Silence compiler warnings due to uninitialized values. This is used - * wherever the compiler fails to recognize that the variable is never used - * uninitialized. - */ -#ifdef JEMALLOC_CC_SILENCE -# define JEMALLOC_CC_SILENCE_INIT(v) = v -#else -# define JEMALLOC_CC_SILENCE_INIT(v) -#endif - -/* - * Define a custom assert() in order to reduce the chances of deadlock during - * assertion failure. - */ -#ifndef assert -#define assert(e) do { \ - if (config_debug && !(e)) { \ - malloc_printf( \ - "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ - __FILE__, __LINE__, #e); \ - abort(); \ - } \ -} while (0) -#endif - -/* Use to assert a particular configuration, e.g., cassert(config_debug). 
*/ -#define cassert(c) do { \ - if ((c) == false) \ - assert(false); \ -} while (0) - -#ifndef not_reached -#define not_reached() do { \ - if (config_debug) { \ - malloc_printf( \ - "<jemalloc>: %s:%d: Unreachable code reached\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef not_implemented -#define not_implemented() do { \ - if (config_debug) { \ - malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) -#endif - -#define assert_not_implemented(e) do { \ - if (config_debug && !(e)) \ - not_implemented(); \ -} while (0) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int buferror(char *buf, size_t buflen); -uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base); -void malloc_write(const char *s); - -/* - * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating - * point math. - */ -int malloc_vsnprintf(char *str, size_t size, const char *format, - va_list ap); -int malloc_snprintf(char *str, size_t size, const char *format, ...) - JEMALLOC_ATTR(format(printf, 3, 4)); -void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap); -void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, - const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4)); -void malloc_printf(const char *format, ...) - JEMALLOC_ATTR(format(printf, 1, 2)); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -size_t pow2_ceil(size_t x); -void malloc_write(const char *s); -void set_errno(int errnum); -int get_errno(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) -/* Compute the smallest power of 2 that is >= x. 
*/ -JEMALLOC_INLINE size_t -pow2_ceil(size_t x) -{ - - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; -#if (LG_SIZEOF_PTR == 3) - x |= x >> 32; -#endif - x++; - return (x); -} - -/* Sets error code */ -JEMALLOC_INLINE void -set_errno(int errnum) -{ - -#ifdef _WIN32 - SetLastError(errnum); -#else - errno = errnum; -#endif -} - -/* Get last error code */ -JEMALLOC_INLINE int -get_errno(void) -{ - -#ifdef _WIN32 - return (GetLastError()); -#else - return (errno); -#endif -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/extra/jemalloc/include/jemalloc/jemalloc.h.in b/extra/jemalloc/include/jemalloc/jemalloc.h.in deleted file mode 100644 index 31b1304a20a..00000000000 --- a/extra/jemalloc/include/jemalloc/jemalloc.h.in +++ /dev/null @@ -1,157 +0,0 @@ -#ifndef JEMALLOC_H_ -#define JEMALLOC_H_ -#ifdef __cplusplus -extern "C" { -#endif - -#include <limits.h> -#include <strings.h> - -#define JEMALLOC_VERSION "@jemalloc_version@" -#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ -#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ -#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ -#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ -#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" - -#include "jemalloc_defs@install_suffix@.h" - -#ifdef JEMALLOC_EXPERIMENTAL -#define ALLOCM_LG_ALIGN(la) (la) -#if LG_SIZEOF_PTR == 2 -#define ALLOCM_ALIGN(a) (ffs(a)-1) -#else -#define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) -#endif -#define ALLOCM_ZERO ((int)0x40) -#define ALLOCM_NO_MOVE ((int)0x80) -/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */ -#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8)) - -#define ALLOCM_SUCCESS 0 -#define ALLOCM_ERR_OOM 1 -#define ALLOCM_ERR_NOT_MOVED 2 -#endif - -/* - * The je_ prefix on the following public symbol declarations is an artifact of - * namespace management, and should be omitted in application code unless - * JEMALLOC_NO_DEMANGLE is defined (see below). 
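A worked example of the bit-smearing in pow2_ceil() above, for x = 37:

/*
 * x-- gives 36 = 0b100100; the cascading shift/OR steps copy the top set
 * bit into every lower position, producing 0b111111 = 63; the final x++
 * yields 64, the smallest power of two >= 37.  The initial decrement is
 * what keeps exact powers of two unchanged, e.g. pow2_ceil(64) == 64.
 */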
- */ -extern JEMALLOC_EXPORT const char *je_malloc_conf; -extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, - const char *s); - -JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size) - JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment, - size_t size) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size) - JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size); -JEMALLOC_EXPORT void je_free(void *ptr); - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size) - JEMALLOC_ATTR(malloc); -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc); -#endif - -JEMALLOC_EXPORT size_t je_malloc_usable_size( - JEMALLOC_USABLE_SIZE_CONST void *ptr); -JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *, - const char *), void *je_cbopaque, const char *opts); -JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp, - size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp, - size_t *miblenp); -JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen, - void *oldp, size_t *oldlenp, void *newp, size_t newlen); - -#ifdef JEMALLOC_EXPERIMENTAL -JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size, - int flags) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size, - size_t extra, int flags) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags) - JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags) - JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags); -#endif - -/* - * By default application code must explicitly refer to mangled symbol names, - * so that it is possible to use jemalloc in conjunction with another allocator - * in the same application. Define JEMALLOC_MANGLE in order to cause automatic - * name mangling that matches the API prefixing that happened as a result of - * --with-mangling and/or --with-jemalloc-prefix configuration settings. - */ -#ifdef JEMALLOC_MANGLE -#ifndef JEMALLOC_NO_DEMANGLE -#define JEMALLOC_NO_DEMANGLE -#endif -#define malloc_conf je_malloc_conf -#define malloc_message je_malloc_message -#define malloc je_malloc -#define calloc je_calloc -#define posix_memalign je_posix_memalign -#define aligned_alloc je_aligned_alloc -#define realloc je_realloc -#define free je_free -#define malloc_usable_size je_malloc_usable_size -#define malloc_stats_print je_malloc_stats_print -#define mallctl je_mallctl -#define mallctlnametomib je_mallctlnametomib -#define mallctlbymib je_mallctlbymib -#define memalign je_memalign -#define valloc je_valloc -#ifdef JEMALLOC_EXPERIMENTAL -#define allocm je_allocm -#define rallocm je_rallocm -#define sallocm je_sallocm -#define dallocm je_dallocm -#define nallocm je_nallocm -#endif -#endif - -/* - * The je_* macros can be used as stable alternative names for the public - * jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant - * for use in jemalloc itself, but it can be used by application code to - * provide isolation from the name mangling specified via --with-mangling - * and/or --with-jemalloc-prefix. 
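A minimal sketch of driving the control interface declared above from application code, assuming a default build (no --with-jemalloc-prefix, statistics enabled) so the unmangled names are available:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    uint64_t epoch = 1;
    size_t allocated, sz = sizeof(allocated);
    void *p = malloc(1 << 20);    /* Served by jemalloc when linked in. */

    /* Refresh the cached statistics, then read the global total. */
    mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
        printf("allocated: %zu bytes\n", allocated);

    free(p);
    return (0);
}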
- */ -#ifndef JEMALLOC_NO_DEMANGLE -#undef je_malloc_conf -#undef je_malloc_message -#undef je_malloc -#undef je_calloc -#undef je_posix_memalign -#undef je_aligned_alloc -#undef je_realloc -#undef je_free -#undef je_malloc_usable_size -#undef je_malloc_stats_print -#undef je_mallctl -#undef je_mallctlnametomib -#undef je_mallctlbymib -#undef je_memalign -#undef je_valloc -#ifdef JEMALLOC_EXPERIMENTAL -#undef je_allocm -#undef je_rallocm -#undef je_sallocm -#undef je_dallocm -#undef je_nallocm -#endif -#endif - -#ifdef __cplusplus -}; -#endif -#endif /* JEMALLOC_H_ */ diff --git a/extra/jemalloc/include/jemalloc/jemalloc_defs.h.in b/extra/jemalloc/include/jemalloc/jemalloc_defs.h.in deleted file mode 100644 index 3fcf93ce5d2..00000000000 --- a/extra/jemalloc/include/jemalloc/jemalloc_defs.h.in +++ /dev/null @@ -1,267 +0,0 @@ -/* - * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all - * public APIs to be prefixed. This makes it possible, with some care, to use - * multiple allocators simultaneously. - */ -#undef JEMALLOC_PREFIX -#undef JEMALLOC_CPREFIX - -/* - * Name mangling for public symbols is controlled by --with-mangling and - * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by - * these macro definitions. - */ -#undef je_malloc_conf -#undef je_malloc_message -#undef je_malloc -#undef je_calloc -#undef je_posix_memalign -#undef je_aligned_alloc -#undef je_realloc -#undef je_free -#undef je_malloc_usable_size -#undef je_malloc_stats_print -#undef je_mallctl -#undef je_mallctlnametomib -#undef je_mallctlbymib -#undef je_memalign -#undef je_valloc -#undef je_allocm -#undef je_rallocm -#undef je_sallocm -#undef je_dallocm -#undef je_nallocm - -/* - * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. - * For shared libraries, symbol visibility mechanisms prevent these symbols - * from being exported, but for static libraries, naming collisions are a real - * possibility. - */ -#undef JEMALLOC_PRIVATE_NAMESPACE -#undef JEMALLOC_N - -/* - * Hyper-threaded CPUs may need a special instruction inside spin loops in - * order to yield to another virtual CPU. - */ -#undef CPU_SPINWAIT - -/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ -#undef JEMALLOC_ATOMIC9 - -/* - * Defined if OSAtomic*() functions are available, as provided by Darwin, and - * documented in the atomic(3) manual page. - */ -#undef JEMALLOC_OSATOMIC - -/* - * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and - * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the - * functions are defined in libgcc instead of being inlines) - */ -#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 - -/* - * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and - * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the - * functions are defined in libgcc instead of being inlines) - */ -#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 - -/* - * Defined if OSSpin*() functions are available, as provided by Darwin, and - * documented in the spinlock(3) manual page. - */ -#undef JEMALLOC_OSSPIN - -/* - * Defined if _malloc_thread_cleanup() exists. At least in the case of - * FreeBSD, pthread_key_create() allocates, which if used during malloc - * bootstrapping will cause recursion into the pthreads library. 
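JE_FORCE_SYNC_COMPARE_AND_SWAP_4/_8 above concern the __sync_add_and_fetch / __sync_sub_and_fetch builtins that the configure step probes for. A minimal GCC/Clang-only sketch of those two calls (illustrative, not jemalloc code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t counter = 0;

    /* Each builtin performs the update atomically and returns the new value. */
    uint32_t after_add = __sync_add_and_fetch(&counter, 5);
    uint32_t after_sub = __sync_sub_and_fetch(&counter, 2);

    printf("%u %u\n", after_add, after_sub);  /* prints: 5 3 */
    return 0;
}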
Therefore, if - * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in - * malloc_tsd. - */ -#undef JEMALLOC_MALLOC_THREAD_CLEANUP - -/* - * Defined if threaded initialization is known to be safe on this platform. - * Among other things, it must be possible to initialize a mutex without - * triggering allocation in order for threaded allocation to be safe. - */ -#undef JEMALLOC_THREADED_INIT - -/* - * Defined if the pthreads implementation defines - * _pthread_mutex_init_calloc_cb(), in which case the function is used in order - * to avoid recursive allocation during mutex initialization. - */ -#undef JEMALLOC_MUTEX_INIT_CB - -/* Defined if __attribute__((...)) syntax is supported. */ -#undef JEMALLOC_HAVE_ATTR -#ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ATTR(s) __attribute__((s)) -# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) -# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) -# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) -# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) -#elif _MSC_VER -# define JEMALLOC_ATTR(s) -# ifdef DLLEXPORT -# define JEMALLOC_EXPORT __declspec(dllexport) -# else -# define JEMALLOC_EXPORT __declspec(dllimport) -# endif -# define JEMALLOC_ALIGNED(s) __declspec(align(s)) -# define JEMALLOC_SECTION(s) __declspec(allocate(s)) -# define JEMALLOC_NOINLINE __declspec(noinline) -#else -# define JEMALLOC_ATTR(s) -# define JEMALLOC_EXPORT -# define JEMALLOC_ALIGNED(s) -# define JEMALLOC_SECTION(s) -# define JEMALLOC_NOINLINE -#endif - -/* Defined if sbrk() is supported. */ -#undef JEMALLOC_HAVE_SBRK - -/* Non-empty if the tls_model attribute is supported. */ -#undef JEMALLOC_TLS_MODEL - -/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ -#undef JEMALLOC_CC_SILENCE - -/* - * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables - * inline functions. - */ -#undef JEMALLOC_DEBUG - -/* JEMALLOC_STATS enables statistics calculation. */ -#undef JEMALLOC_STATS - -/* JEMALLOC_PROF enables allocation profiling. */ -#undef JEMALLOC_PROF - -/* Use libunwind for profile backtracing if defined. */ -#undef JEMALLOC_PROF_LIBUNWIND - -/* Use libgcc for profile backtracing if defined. */ -#undef JEMALLOC_PROF_LIBGCC - -/* Use gcc intrinsics for profile backtracing if defined. */ -#undef JEMALLOC_PROF_GCC - -/* - * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. - * This makes it possible to allocate/deallocate objects without any locking - * when the cache is in the steady state. - */ -#undef JEMALLOC_TCACHE - -/* - * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage - * segment (DSS). - */ -#undef JEMALLOC_DSS - -/* Support memory filling (junk/zero/quarantine/redzone). */ -#undef JEMALLOC_FILL - -/* Support the experimental API. */ -#undef JEMALLOC_EXPERIMENTAL - -/* Support utrace(2)-based tracing. */ -#undef JEMALLOC_UTRACE - -/* Support Valgrind. */ -#undef JEMALLOC_VALGRIND - -/* Support optional abort() on OOM. */ -#undef JEMALLOC_XMALLOC - -/* Support lazy locking (avoid locking unless a second thread is launched). */ -#undef JEMALLOC_LAZY_LOCK - -/* One page is 2^STATIC_PAGE_SHIFT bytes. */ -#undef STATIC_PAGE_SHIFT - -/* - * If defined, use munmap() to unmap freed chunks, rather than storing them for - * later reuse. This is disabled by default on Linux because common sequences - * of mmap()/munmap() calls will cause virtual memory map holes. - */ -#undef JEMALLOC_MUNMAP - -/* - * If defined, use mremap(...MREMAP_FIXED...) 
for huge realloc(). This is - * disabled by default because it is Linux-specific and it will cause virtual - * memory map holes, much like munmap(2) does. - */ -#undef JEMALLOC_MREMAP - -/* TLS is used to map arenas and magazine caches to threads. */ -#undef JEMALLOC_TLS - -/* - * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside - * within jemalloc-owned chunks before dereferencing them. - */ -#undef JEMALLOC_IVSALLOC - -/* - * Define overrides for non-standard allocator-related functions if they - * are present on the system. - */ -#undef JEMALLOC_OVERRIDE_MEMALIGN -#undef JEMALLOC_OVERRIDE_VALLOC - -/* - * At least Linux omits the "const" in: - * - * size_t malloc_usable_size(const void *ptr); - * - * Match the operating system's prototype. - */ -#undef JEMALLOC_USABLE_SIZE_CONST - -/* - * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. - */ -#undef JEMALLOC_ZONE -#undef JEMALLOC_ZONE_VERSION - -/* - * Methods for purging unused pages differ between operating systems. - * - * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, - * such that new pages will be demand-zeroed if - * the address region is later touched. - * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being - * unused, such that they will be discarded rather - * than swapped out. - */ -#undef JEMALLOC_PURGE_MADVISE_DONTNEED -#undef JEMALLOC_PURGE_MADVISE_FREE - -/* - * Define if operating system has alloca.h header. - */ -#undef JEMALLOC_HAS_ALLOCA_H - -/* sizeof(void *) == 2^LG_SIZEOF_PTR. */ -#undef LG_SIZEOF_PTR - -/* sizeof(int) == 2^LG_SIZEOF_INT. */ -#undef LG_SIZEOF_INT - -/* sizeof(long) == 2^LG_SIZEOF_LONG. */ -#undef LG_SIZEOF_LONG - -/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ -#undef LG_SIZEOF_INTMAX_T diff --git a/extra/jemalloc/include/msvc_compat/inttypes.h b/extra/jemalloc/include/msvc_compat/inttypes.h deleted file mode 100644 index a4e6b75cb91..00000000000 --- a/extra/jemalloc/include/msvc_compat/inttypes.h +++ /dev/null @@ -1,313 +0,0 @@ -// ISO C9x compliant inttypes.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
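The JEMALLOC_PURGE_MADVISE_DONTNEED / JEMALLOC_PURGE_MADVISE_FREE switches above select how unused dirty pages are handed back to the kernel. A minimal POSIX sketch of the Linux MADV_DONTNEED path (illustrative only; the deleted code's pages_purge() is built on a call like this):

#define _DEFAULT_SOURCE
#include <sys/mman.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
    size_t len = 16 * 4096;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;

    ((char *)p)[0] = 1;               /* touch a page so it is backed */

    /* Tell the kernel the range may be discarded; on Linux the next
     * access sees demand-zeroed pages (MADV_DONTNEED semantics). */
    if (madvise(p, len, MADV_DONTNEED) != 0)
        perror("madvise");

    munmap(p, len);
    return 0;
}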
IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - -#ifndef _MSC_VER // [ -#error "Use this header only with Microsoft Visual C++ compilers!" -#endif // _MSC_VER ] - -#ifndef _MSC_INTTYPES_H_ // [ -#define _MSC_INTTYPES_H_ - -#if _MSC_VER > 1000 -#pragma once -#endif - -#include "stdint.h" - -// 7.8 Format conversion of integer types - -typedef struct { - intmax_t quot; - intmax_t rem; -} imaxdiv_t; - -// 7.8.1 Macros for format specifiers - -#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198 - -#ifdef _WIN64 -# define __PRI64_PREFIX "l" -# define __PRIPTR_PREFIX "l" -#else -# define __PRI64_PREFIX "ll" -# define __PRIPTR_PREFIX -#endif - -// The fprintf macros for signed integers are: -#define PRId8 "d" -#define PRIi8 "i" -#define PRIdLEAST8 "d" -#define PRIiLEAST8 "i" -#define PRIdFAST8 "d" -#define PRIiFAST8 "i" - -#define PRId16 "hd" -#define PRIi16 "hi" -#define PRIdLEAST16 "hd" -#define PRIiLEAST16 "hi" -#define PRIdFAST16 "hd" -#define PRIiFAST16 "hi" - -#define PRId32 "d" -#define PRIi32 "i" -#define PRIdLEAST32 "d" -#define PRIiLEAST32 "i" -#define PRIdFAST32 "d" -#define PRIiFAST32 "i" - -#define PRId64 __PRI64_PREFIX "d" -#define PRIi64 __PRI64_PREFIX "i" -#define PRIdLEAST64 __PRI64_PREFIX "d" -#define PRIiLEAST64 __PRI64_PREFIX "i" -#define PRIdFAST64 __PRI64_PREFIX "d" -#define PRIiFAST64 __PRI64_PREFIX "i" - -#define PRIdMAX __PRI64_PREFIX "d" -#define PRIiMAX __PRI64_PREFIX "i" - -#define PRIdPTR __PRIPTR_PREFIX "d" -#define PRIiPTR __PRIPTR_PREFIX "i" - -// The fprintf macros for unsigned integers are: -#define PRIo8 "o" -#define PRIu8 "u" -#define PRIx8 "x" -#define PRIX8 "X" -#define PRIoLEAST8 "o" -#define PRIuLEAST8 "u" -#define PRIxLEAST8 "x" -#define PRIXLEAST8 "X" -#define PRIoFAST8 "o" -#define PRIuFAST8 "u" -#define PRIxFAST8 "x" -#define PRIXFAST8 "X" - -#define PRIo16 "ho" -#define PRIu16 "hu" -#define PRIx16 "hx" -#define PRIX16 "hX" -#define PRIoLEAST16 "ho" -#define PRIuLEAST16 "hu" -#define PRIxLEAST16 "hx" -#define PRIXLEAST16 "hX" -#define PRIoFAST16 "ho" -#define PRIuFAST16 "hu" -#define PRIxFAST16 "hx" -#define PRIXFAST16 "hX" - -#define PRIo32 "o" -#define PRIu32 "u" -#define PRIx32 "x" -#define PRIX32 "X" -#define PRIoLEAST32 "o" -#define PRIuLEAST32 "u" -#define PRIxLEAST32 "x" -#define PRIXLEAST32 "X" -#define PRIoFAST32 "o" -#define PRIuFAST32 "u" -#define PRIxFAST32 "x" -#define PRIXFAST32 "X" - -#define PRIo64 __PRI64_PREFIX "o" -#define PRIu64 __PRI64_PREFIX "u" -#define PRIx64 __PRI64_PREFIX "x" -#define PRIX64 __PRI64_PREFIX "X" -#define PRIoLEAST64 __PRI64_PREFIX "o" -#define PRIuLEAST64 __PRI64_PREFIX "u" -#define PRIxLEAST64 __PRI64_PREFIX "x" -#define PRIXLEAST64 __PRI64_PREFIX "X" -#define PRIoFAST64 __PRI64_PREFIX "o" -#define PRIuFAST64 __PRI64_PREFIX "u" -#define PRIxFAST64 __PRI64_PREFIX "x" -#define PRIXFAST64 __PRI64_PREFIX "X" - -#define PRIoMAX __PRI64_PREFIX "o" -#define PRIuMAX __PRI64_PREFIX "u" -#define PRIxMAX __PRI64_PREFIX "x" -#define 
PRIXMAX __PRI64_PREFIX "X" - -#define PRIoPTR __PRIPTR_PREFIX "o" -#define PRIuPTR __PRIPTR_PREFIX "u" -#define PRIxPTR __PRIPTR_PREFIX "x" -#define PRIXPTR __PRIPTR_PREFIX "X" - -// The fscanf macros for signed integers are: -#define SCNd8 "d" -#define SCNi8 "i" -#define SCNdLEAST8 "d" -#define SCNiLEAST8 "i" -#define SCNdFAST8 "d" -#define SCNiFAST8 "i" - -#define SCNd16 "hd" -#define SCNi16 "hi" -#define SCNdLEAST16 "hd" -#define SCNiLEAST16 "hi" -#define SCNdFAST16 "hd" -#define SCNiFAST16 "hi" - -#define SCNd32 "ld" -#define SCNi32 "li" -#define SCNdLEAST32 "ld" -#define SCNiLEAST32 "li" -#define SCNdFAST32 "ld" -#define SCNiFAST32 "li" - -#define SCNd64 "I64d" -#define SCNi64 "I64i" -#define SCNdLEAST64 "I64d" -#define SCNiLEAST64 "I64i" -#define SCNdFAST64 "I64d" -#define SCNiFAST64 "I64i" - -#define SCNdMAX "I64d" -#define SCNiMAX "I64i" - -#ifdef _WIN64 // [ -# define SCNdPTR "I64d" -# define SCNiPTR "I64i" -#else // _WIN64 ][ -# define SCNdPTR "ld" -# define SCNiPTR "li" -#endif // _WIN64 ] - -// The fscanf macros for unsigned integers are: -#define SCNo8 "o" -#define SCNu8 "u" -#define SCNx8 "x" -#define SCNX8 "X" -#define SCNoLEAST8 "o" -#define SCNuLEAST8 "u" -#define SCNxLEAST8 "x" -#define SCNXLEAST8 "X" -#define SCNoFAST8 "o" -#define SCNuFAST8 "u" -#define SCNxFAST8 "x" -#define SCNXFAST8 "X" - -#define SCNo16 "ho" -#define SCNu16 "hu" -#define SCNx16 "hx" -#define SCNX16 "hX" -#define SCNoLEAST16 "ho" -#define SCNuLEAST16 "hu" -#define SCNxLEAST16 "hx" -#define SCNXLEAST16 "hX" -#define SCNoFAST16 "ho" -#define SCNuFAST16 "hu" -#define SCNxFAST16 "hx" -#define SCNXFAST16 "hX" - -#define SCNo32 "lo" -#define SCNu32 "lu" -#define SCNx32 "lx" -#define SCNX32 "lX" -#define SCNoLEAST32 "lo" -#define SCNuLEAST32 "lu" -#define SCNxLEAST32 "lx" -#define SCNXLEAST32 "lX" -#define SCNoFAST32 "lo" -#define SCNuFAST32 "lu" -#define SCNxFAST32 "lx" -#define SCNXFAST32 "lX" - -#define SCNo64 "I64o" -#define SCNu64 "I64u" -#define SCNx64 "I64x" -#define SCNX64 "I64X" -#define SCNoLEAST64 "I64o" -#define SCNuLEAST64 "I64u" -#define SCNxLEAST64 "I64x" -#define SCNXLEAST64 "I64X" -#define SCNoFAST64 "I64o" -#define SCNuFAST64 "I64u" -#define SCNxFAST64 "I64x" -#define SCNXFAST64 "I64X" - -#define SCNoMAX "I64o" -#define SCNuMAX "I64u" -#define SCNxMAX "I64x" -#define SCNXMAX "I64X" - -#ifdef _WIN64 // [ -# define SCNoPTR "I64o" -# define SCNuPTR "I64u" -# define SCNxPTR "I64x" -# define SCNXPTR "I64X" -#else // _WIN64 ][ -# define SCNoPTR "lo" -# define SCNuPTR "lu" -# define SCNxPTR "lx" -# define SCNXPTR "lX" -#endif // _WIN64 ] - -#endif // __STDC_FORMAT_MACROS ] - -// 7.8.2 Functions for greatest-width integer types - -// 7.8.2.1 The imaxabs function -#define imaxabs _abs64 - -// 7.8.2.2 The imaxdiv function - -// This is modified version of div() function from Microsoft's div.c found -// in %MSVC.NET%\crt\src\div.c -#ifdef STATIC_IMAXDIV // [ -static -#else // STATIC_IMAXDIV ][ -_inline -#endif // STATIC_IMAXDIV ] -imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom) -{ - imaxdiv_t result; - - result.quot = numer / denom; - result.rem = numer % denom; - - if (numer < 0 && result.rem > 0) { - // did division wrong; must fix up - ++result.quot; - result.rem -= denom; - } - - return result; -} - -// 7.8.2.3 The strtoimax and strtoumax functions -#define strtoimax _strtoi64 -#define strtoumax _strtoui64 - -// 7.8.2.4 The wcstoimax and wcstoumax functions -#define wcstoimax _wcstoi64 -#define wcstoumax _wcstoui64 - - -#endif // _MSC_INTTYPES_H_ ] diff --git 
a/extra/jemalloc/include/msvc_compat/stdbool.h b/extra/jemalloc/include/msvc_compat/stdbool.h deleted file mode 100644 index da9ee8b809b..00000000000 --- a/extra/jemalloc/include/msvc_compat/stdbool.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef stdbool_h -#define stdbool_h - -#include <wtypes.h> - -/* MSVC doesn't define _Bool or bool in C, but does have BOOL */ -/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ -typedef BOOL _Bool; - -#define bool _Bool -#define true 1 -#define false 0 - -#define __bool_true_false_are_defined 1 - -#endif /* stdbool_h */ diff --git a/extra/jemalloc/include/msvc_compat/stdint.h b/extra/jemalloc/include/msvc_compat/stdint.h deleted file mode 100644 index d02608a5972..00000000000 --- a/extra/jemalloc/include/msvc_compat/stdint.h +++ /dev/null @@ -1,247 +0,0 @@ -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2008 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - -#ifndef _MSC_VER // [ -#error "Use this header only with Microsoft Visual C++ compilers!" -#endif // _MSC_VER ] - -#ifndef _MSC_STDINT_H_ // [ -#define _MSC_STDINT_H_ - -#if _MSC_VER > 1000 -#pragma once -#endif - -#include <limits.h> - -// For Visual Studio 6 in C++ mode and for many Visual Studio versions when -// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}' -// or compiler give many errors like this: -// error C2733: second C linkage of overloaded function 'wmemchr' not allowed -#ifdef __cplusplus -extern "C" { -#endif -# include <wchar.h> -#ifdef __cplusplus -} -#endif - -// Define _W64 macros to mark types changing their size, like intptr_t. -#ifndef _W64 -# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 -# define _W64 __w64 -# else -# define _W64 -# endif -#endif - - -// 7.18.1 Integer types - -// 7.18.1.1 Exact-width integer types - -// Visual Studio 6 and Embedded Visual C++ 4 doesn't -// realize that, e.g. 
char has the same size as __int8 -// so we give up on __intX for them. -#if (_MSC_VER < 1300) - typedef signed char int8_t; - typedef signed short int16_t; - typedef signed int int32_t; - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; -#else - typedef signed __int8 int8_t; - typedef signed __int16 int16_t; - typedef signed __int32 int32_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; -#endif -typedef signed __int64 int64_t; -typedef unsigned __int64 uint64_t; - - -// 7.18.1.2 Minimum-width integer types -typedef int8_t int_least8_t; -typedef int16_t int_least16_t; -typedef int32_t int_least32_t; -typedef int64_t int_least64_t; -typedef uint8_t uint_least8_t; -typedef uint16_t uint_least16_t; -typedef uint32_t uint_least32_t; -typedef uint64_t uint_least64_t; - -// 7.18.1.3 Fastest minimum-width integer types -typedef int8_t int_fast8_t; -typedef int16_t int_fast16_t; -typedef int32_t int_fast32_t; -typedef int64_t int_fast64_t; -typedef uint8_t uint_fast8_t; -typedef uint16_t uint_fast16_t; -typedef uint32_t uint_fast32_t; -typedef uint64_t uint_fast64_t; - -// 7.18.1.4 Integer types capable of holding object pointers -#ifdef _WIN64 // [ - typedef signed __int64 intptr_t; - typedef unsigned __int64 uintptr_t; -#else // _WIN64 ][ - typedef _W64 signed int intptr_t; - typedef _W64 unsigned int uintptr_t; -#endif // _WIN64 ] - -// 7.18.1.5 Greatest-width integer types -typedef int64_t intmax_t; -typedef uint64_t uintmax_t; - - -// 7.18.2 Limits of specified-width integer types - -#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 - -// 7.18.2.1 Limits of exact-width integer types -#define INT8_MIN ((int8_t)_I8_MIN) -#define INT8_MAX _I8_MAX -#define INT16_MIN ((int16_t)_I16_MIN) -#define INT16_MAX _I16_MAX -#define INT32_MIN ((int32_t)_I32_MIN) -#define INT32_MAX _I32_MAX -#define INT64_MIN ((int64_t)_I64_MIN) -#define INT64_MAX _I64_MAX -#define UINT8_MAX _UI8_MAX -#define UINT16_MAX _UI16_MAX -#define UINT32_MAX _UI32_MAX -#define UINT64_MAX _UI64_MAX - -// 7.18.2.2 Limits of minimum-width integer types -#define INT_LEAST8_MIN INT8_MIN -#define INT_LEAST8_MAX INT8_MAX -#define INT_LEAST16_MIN INT16_MIN -#define INT_LEAST16_MAX INT16_MAX -#define INT_LEAST32_MIN INT32_MIN -#define INT_LEAST32_MAX INT32_MAX -#define INT_LEAST64_MIN INT64_MIN -#define INT_LEAST64_MAX INT64_MAX -#define UINT_LEAST8_MAX UINT8_MAX -#define UINT_LEAST16_MAX UINT16_MAX -#define UINT_LEAST32_MAX UINT32_MAX -#define UINT_LEAST64_MAX UINT64_MAX - -// 7.18.2.3 Limits of fastest minimum-width integer types -#define INT_FAST8_MIN INT8_MIN -#define INT_FAST8_MAX INT8_MAX -#define INT_FAST16_MIN INT16_MIN -#define INT_FAST16_MAX INT16_MAX -#define INT_FAST32_MIN INT32_MIN -#define INT_FAST32_MAX INT32_MAX -#define INT_FAST64_MIN INT64_MIN -#define INT_FAST64_MAX INT64_MAX -#define UINT_FAST8_MAX UINT8_MAX -#define UINT_FAST16_MAX UINT16_MAX -#define UINT_FAST32_MAX UINT32_MAX -#define UINT_FAST64_MAX UINT64_MAX - -// 7.18.2.4 Limits of integer types capable of holding object pointers -#ifdef _WIN64 // [ -# define INTPTR_MIN INT64_MIN -# define INTPTR_MAX INT64_MAX -# define UINTPTR_MAX UINT64_MAX -#else // _WIN64 ][ -# define INTPTR_MIN INT32_MIN -# define INTPTR_MAX INT32_MAX -# define UINTPTR_MAX UINT32_MAX -#endif // _WIN64 ] - -// 7.18.2.5 Limits of greatest-width integer types -#define INTMAX_MIN INT64_MIN -#define INTMAX_MAX INT64_MAX 
-#define UINTMAX_MAX UINT64_MAX - -// 7.18.3 Limits of other integer types - -#ifdef _WIN64 // [ -# define PTRDIFF_MIN _I64_MIN -# define PTRDIFF_MAX _I64_MAX -#else // _WIN64 ][ -# define PTRDIFF_MIN _I32_MIN -# define PTRDIFF_MAX _I32_MAX -#endif // _WIN64 ] - -#define SIG_ATOMIC_MIN INT_MIN -#define SIG_ATOMIC_MAX INT_MAX - -#ifndef SIZE_MAX // [ -# ifdef _WIN64 // [ -# define SIZE_MAX _UI64_MAX -# else // _WIN64 ][ -# define SIZE_MAX _UI32_MAX -# endif // _WIN64 ] -#endif // SIZE_MAX ] - -// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> -#ifndef WCHAR_MIN // [ -# define WCHAR_MIN 0 -#endif // WCHAR_MIN ] -#ifndef WCHAR_MAX // [ -# define WCHAR_MAX _UI16_MAX -#endif // WCHAR_MAX ] - -#define WINT_MIN 0 -#define WINT_MAX _UI16_MAX - -#endif // __STDC_LIMIT_MACROS ] - - -// 7.18.4 Limits of other integer types - -#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 - -// 7.18.4.1 Macros for minimum-width integer constants - -#define INT8_C(val) val##i8 -#define INT16_C(val) val##i16 -#define INT32_C(val) val##i32 -#define INT64_C(val) val##i64 - -#define UINT8_C(val) val##ui8 -#define UINT16_C(val) val##ui16 -#define UINT32_C(val) val##ui32 -#define UINT64_C(val) val##ui64 - -// 7.18.4.2 Macros for greatest-width integer constants -#define INTMAX_C INT64_C -#define UINTMAX_C UINT64_C - -#endif // __STDC_CONSTANT_MACROS ] - - -#endif // _MSC_STDINT_H_ ] diff --git a/extra/jemalloc/include/msvc_compat/strings.h b/extra/jemalloc/include/msvc_compat/strings.h deleted file mode 100644 index c84975b6b8e..00000000000 --- a/extra/jemalloc/include/msvc_compat/strings.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef strings_h -#define strings_h - -/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided - * for both */ -#include <intrin.h> -#pragma intrinsic(_BitScanForward) -static __forceinline int ffsl(long x) -{ - unsigned long i; - - if (_BitScanForward(&i, x)) - return (i + 1); - return (0); -} - -static __forceinline int ffs(int x) -{ - - return (ffsl(x)); -} - -#endif diff --git a/extra/jemalloc/install-sh b/extra/jemalloc/install-sh deleted file mode 100755 index ebc66913e94..00000000000 --- a/extra/jemalloc/install-sh +++ /dev/null @@ -1,250 +0,0 @@ -#! /bin/sh -# -# install - install a program, script, or datafile -# This comes from X11R5 (mit/util/scripts/install.sh). -# -# Copyright 1991 by the Massachusetts Institute of Technology -# -# Permission to use, copy, modify, distribute, and sell this software and its -# documentation for any purpose is hereby granted without fee, provided that -# the above copyright notice appear in all copies and that both that -# copyright notice and this permission notice appear in supporting -# documentation, and that the name of M.I.T. not be used in advertising or -# publicity pertaining to distribution of the software without specific, -# written prior permission. M.I.T. makes no representations about the -# suitability of this software for any purpose. It is provided "as is" -# without express or implied warranty. -# -# Calling this script install-sh is preferred over install.sh, to prevent -# `make' implicit rules from creating a file called install from it -# when there is no Makefile. -# -# This script is compatible with the BSD install script, but was written -# from scratch. It can only install one file at a time, a restriction -# shared with many OS's install programs. - - -# set DOITPROG to echo to test this script - -# Don't use :- since 4.3BSD and earlier shells don't like it. 
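The msvc_compat strings.h shim deleted above emulates ffs()/ffsl() on top of _BitScanForward. A portable sketch of the same contract (1-based index of the least-significant set bit, 0 when no bit is set); the intrinsic-based version is what the deleted header actually shipped:

#include <stdio.h>

static int ffsl_portable(long x)
{
    unsigned long ux = (unsigned long)x;
    int i;

    if (ux == 0)
        return 0;
    for (i = 1; (ux & 1UL) == 0; i++)
        ux >>= 1;
    return i;
}

int main(void)
{
    /* prints: 1 4 0 */
    printf("%d %d %d\n", ffsl_portable(1), ffsl_portable(8), ffsl_portable(0));
    return 0;
}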
-doit="${DOITPROG-}" - - -# put in absolute paths if you don't have them in your path; or use env. vars. - -mvprog="${MVPROG-mv}" -cpprog="${CPPROG-cp}" -chmodprog="${CHMODPROG-chmod}" -chownprog="${CHOWNPROG-chown}" -chgrpprog="${CHGRPPROG-chgrp}" -stripprog="${STRIPPROG-strip}" -rmprog="${RMPROG-rm}" -mkdirprog="${MKDIRPROG-mkdir}" - -transformbasename="" -transform_arg="" -instcmd="$mvprog" -chmodcmd="$chmodprog 0755" -chowncmd="" -chgrpcmd="" -stripcmd="" -rmcmd="$rmprog -f" -mvcmd="$mvprog" -src="" -dst="" -dir_arg="" - -while [ x"$1" != x ]; do - case $1 in - -c) instcmd="$cpprog" - shift - continue;; - - -d) dir_arg=true - shift - continue;; - - -m) chmodcmd="$chmodprog $2" - shift - shift - continue;; - - -o) chowncmd="$chownprog $2" - shift - shift - continue;; - - -g) chgrpcmd="$chgrpprog $2" - shift - shift - continue;; - - -s) stripcmd="$stripprog" - shift - continue;; - - -t=*) transformarg=`echo $1 | sed 's/-t=//'` - shift - continue;; - - -b=*) transformbasename=`echo $1 | sed 's/-b=//'` - shift - continue;; - - *) if [ x"$src" = x ] - then - src=$1 - else - # this colon is to work around a 386BSD /bin/sh bug - : - dst=$1 - fi - shift - continue;; - esac -done - -if [ x"$src" = x ] -then - echo "install: no input file specified" - exit 1 -else - true -fi - -if [ x"$dir_arg" != x ]; then - dst=$src - src="" - - if [ -d $dst ]; then - instcmd=: - else - instcmd=mkdir - fi -else - -# Waiting for this to be detected by the "$instcmd $src $dsttmp" command -# might cause directories to be created, which would be especially bad -# if $src (and thus $dsttmp) contains '*'. - - if [ -f $src -o -d $src ] - then - true - else - echo "install: $src does not exist" - exit 1 - fi - - if [ x"$dst" = x ] - then - echo "install: no destination specified" - exit 1 - else - true - fi - -# If destination is a directory, append the input filename; if your system -# does not like double slashes in filenames, you may need to add some logic - - if [ -d $dst ] - then - dst="$dst"/`basename $src` - else - true - fi -fi - -## this sed command emulates the dirname command -dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` - -# Make sure that the destination directory exists. -# this part is taken from Noah Friedman's mkinstalldirs script - -# Skip lots of stat calls in the usual case. -if [ ! -d "$dstdir" ]; then -defaultIFS=' -' -IFS="${IFS-${defaultIFS}}" - -oIFS="${IFS}" -# Some sh's can't handle IFS=/ for some reason. -IFS='%' -set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` -IFS="${oIFS}" - -pathcomp='' - -while [ $# -ne 0 ] ; do - pathcomp="${pathcomp}${1}" - shift - - if [ ! -d "${pathcomp}" ] ; - then - $mkdirprog "${pathcomp}" - else - true - fi - - pathcomp="${pathcomp}/" -done -fi - -if [ x"$dir_arg" != x ] -then - $doit $instcmd $dst && - - if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && - if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && - if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && - if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi -else - -# If we're going to rename the final executable, determine the name now. - - if [ x"$transformarg" = x ] - then - dstfile=`basename $dst` - else - dstfile=`basename $dst $transformbasename | - sed $transformarg`$transformbasename - fi - -# don't allow the sed command to completely eliminate the filename - - if [ x"$dstfile" = x ] - then - dstfile=`basename $dst` - else - true - fi - -# Make a temp file name in the proper directory. 
- - dsttmp=$dstdir/#inst.$$# - -# Move or copy the file name to the temp name - - $doit $instcmd $src $dsttmp && - - trap "rm -f ${dsttmp}" 0 && - -# and set any options; do chmod last to preserve setuid bits - -# If any of these fail, we abort the whole thing. If we want to -# ignore errors from any of these, just make sure not to ignore -# errors from the above "$doit $instcmd $src $dsttmp" command. - - if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && - if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && - if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && - if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && - -# Now rename the file to the real destination. - - $doit $rmcmd -f $dstdir/$dstfile && - $doit $mvcmd $dsttmp $dstdir/$dstfile - -fi && - - -exit 0 diff --git a/extra/jemalloc/src/arena.c b/extra/jemalloc/src/arena.c deleted file mode 100644 index d28b629a1e1..00000000000 --- a/extra/jemalloc/src/arena.c +++ /dev/null @@ -1,2385 +0,0 @@ -#define JEMALLOC_ARENA_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; -arena_bin_info_t arena_bin_info[NBINS]; - -JEMALLOC_ALIGNED(CACHELINE) -const uint8_t small_size2bin[] = { -#define S2B_8(i) i, -#define S2B_16(i) S2B_8(i) S2B_8(i) -#define S2B_32(i) S2B_16(i) S2B_16(i) -#define S2B_64(i) S2B_32(i) S2B_32(i) -#define S2B_128(i) S2B_64(i) S2B_64(i) -#define S2B_256(i) S2B_128(i) S2B_128(i) -#define S2B_512(i) S2B_256(i) S2B_256(i) -#define S2B_1024(i) S2B_512(i) S2B_512(i) -#define S2B_2048(i) S2B_1024(i) S2B_1024(i) -#define S2B_4096(i) S2B_2048(i) S2B_2048(i) -#define S2B_8192(i) S2B_4096(i) S2B_4096(i) -#define SIZE_CLASS(bin, delta, size) \ - S2B_##delta(bin) - SIZE_CLASSES -#undef S2B_8 -#undef S2B_16 -#undef S2B_32 -#undef S2B_64 -#undef S2B_128 -#undef S2B_256 -#undef S2B_512 -#undef S2B_1024 -#undef S2B_2048 -#undef S2B_4096 -#undef S2B_8192 -#undef SIZE_CLASS -}; - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
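At the top of the deleted arena.c, small_size2bin[] maps small request sizes (in 8-byte steps) onto bin indexes, generated at compile time by the S2B_* repetition macros expanding SIZE_CLASSES. A simplified runtime sketch of the same lookup idea, using made-up size classes of 8/16/32/64 bytes rather than jemalloc's real SIZE_CLASSES:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical small size classes, smallest first. */
static const size_t size_classes[] = { 8, 16, 32, 64 };

/* size2bin[i] is the bin index for a request of (i + 1) * 8 bytes,
 * mirroring how small_size2bin is indexed in 8-byte steps. */
static unsigned char size2bin[64 / 8];

static void size2bin_init(void)
{
    size_t i, bin = 0;

    for (i = 0; i < sizeof(size2bin); i++) {
        size_t size = (i + 1) * 8;
        while (size > size_classes[bin])
            bin++;
        size2bin[i] = (unsigned char)bin;
    }
}

int main(void)
{
    size2bin_init();
    /* 8 bytes -> bin 0, 24 bytes -> bin 2 (32-byte class), 64 bytes -> bin 3 */
    printf("%u %u %u\n", size2bin[0], size2bin[24 / 8 - 1], size2bin[64 / 8 - 1]);
    return 0;
}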
*/ - -static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, - size_t pageind, size_t npages, bool maybe_adjac_pred, - bool maybe_adjac_succ); -static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, - size_t pageind, size_t npages, bool maybe_adjac_pred, - bool maybe_adjac_succ); -static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size, - bool large, size_t binind, bool zero); -static arena_chunk_t *arena_chunk_alloc(arena_t *arena); -static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk); -static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size, - bool large, size_t binind, bool zero); -static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large, - size_t binind, bool zero); -static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree, - arena_chunk_t *chunk, void *arg); -static void arena_purge(arena_t *arena, bool all); -static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, - bool cleaned); -static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, size_t oldsize, size_t newsize); -static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, size_t oldsize, size_t newsize, bool dirty); -static arena_run_t *arena_bin_runs_first(arena_bin_t *bin); -static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run); -static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run); -static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin); -static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin); -static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin); -static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin); -static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); -static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); -static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, - void *ptr, size_t oldsize, size_t size); -static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, - void *ptr, size_t oldsize, size_t size, size_t extra, bool zero); -static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size, - size_t extra, bool zero); -static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info, - size_t min_run_size); -static void bin_info_init(void); - -/******************************************************************************/ - -static inline int -arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) -{ - uintptr_t a_mapelm = (uintptr_t)a; - uintptr_t b_mapelm = (uintptr_t)b; - - assert(a != NULL); - assert(b != NULL); - - return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm)); -} - -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, - u.rb_link, arena_run_comp) - -static inline int -arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) -{ - int ret; - size_t a_size = a->bits & ~PAGE_MASK; - size_t b_size = b->bits & ~PAGE_MASK; - - ret = (a_size > b_size) - (a_size < b_size); - if (ret == 0) { - uintptr_t a_mapelm, b_mapelm; - - if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY) - a_mapelm = (uintptr_t)a; - else { - /* - * Treat keys as though they are lower than anything - * else. 
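arena_run_comp() above orders map elements with the branch-free (a > b) - (a < b) idiom, which yields -1, 0 or +1 for the red-black tree without any conditionals. A tiny standalone illustration:

#include <stdio.h>
#include <stdint.h>

/* Same three-way comparison idiom as the deleted arena_run_comp(). */
static int cmp_uintptr(uintptr_t a, uintptr_t b)
{
    return (a > b) - (a < b);
}

int main(void)
{
    /* prints: -1 0 1 */
    printf("%d %d %d\n", cmp_uintptr(1, 2), cmp_uintptr(2, 2), cmp_uintptr(3, 2));
    return 0;
}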
- */ - a_mapelm = 0; - } - b_mapelm = (uintptr_t)b; - - ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm); - } - - return (ret); -} - -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, - u.rb_link, arena_avail_comp) - -static inline int -arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b) -{ - - assert(a != NULL); - assert(b != NULL); - - /* - * Short-circuit for self comparison. The following comparison code - * would come to the same result, but at the cost of executing the slow - * path. - */ - if (a == b) - return (0); - - /* - * Order such that chunks with higher fragmentation are "less than" - * those with lower fragmentation -- purging order is from "least" to - * "greatest". Fragmentation is measured as: - * - * mean current avail run size - * -------------------------------- - * mean defragmented avail run size - * - * navail - * ----------- - * nruns_avail nruns_avail-nruns_adjac - * = ========================= = ----------------------- - * navail nruns_avail - * ----------------------- - * nruns_avail-nruns_adjac - * - * The following code multiplies away the denominator prior to - * comparison, in order to avoid division. - * - */ - { - size_t a_val = (a->nruns_avail - a->nruns_adjac) * - b->nruns_avail; - size_t b_val = (b->nruns_avail - b->nruns_adjac) * - a->nruns_avail; - - if (a_val < b_val) - return (1); - if (a_val > b_val) - return (-1); - } - /* - * Break ties by chunk address. For fragmented chunks, report lower - * addresses as "lower", so that fragmentation reduction happens first - * at lower addresses. However, use the opposite ordering for - * unfragmented chunks, in order to increase the chances of - * re-allocating dirty runs. - */ - { - uintptr_t a_chunk = (uintptr_t)a; - uintptr_t b_chunk = (uintptr_t)b; - int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk)); - if (a->nruns_adjac == 0) { - assert(b->nruns_adjac == 0); - ret = -ret; - } - return (ret); - } -} - -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t, - dirty_link, arena_chunk_dirty_comp) - -static inline bool -arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind) -{ - bool ret; - - if (pageind-1 < map_bias) - ret = false; - else { - ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0); - assert(ret == false || arena_mapbits_dirty_get(chunk, - pageind-1) != arena_mapbits_dirty_get(chunk, pageind)); - } - return (ret); -} - -static inline bool -arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages) -{ - bool ret; - - if (pageind+npages == chunk_npages) - ret = false; - else { - assert(pageind+npages < chunk_npages); - ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0); - assert(ret == false || arena_mapbits_dirty_get(chunk, pageind) - != arena_mapbits_dirty_get(chunk, pageind+npages)); - } - return (ret); -} - -static inline bool -arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages) -{ - - return (arena_avail_adjac_pred(chunk, pageind) || - arena_avail_adjac_succ(chunk, pageind, npages)); -} - -static void -arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) -{ - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - - /* - * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be - * removed and reinserted even if the run to be inserted is clean. 
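The fragmentation ordering in arena_chunk_dirty_comp() above compares (nruns_avail - nruns_adjac) / nruns_avail between two chunks, but multiplies both sides by the denominators so that no division is needed. A small numeric sketch of that cross-multiplied comparison, with made-up run counts (illustration only):

#include <stdio.h>
#include <stddef.h>

/* Cross-multiplied comparison of (avail - adjac) / avail for two chunks,
 * mirroring the division-free trick and sign convention of the deleted
 * arena_chunk_dirty_comp(). */
static int frag_cmp(size_t a_avail, size_t a_adjac,
                    size_t b_avail, size_t b_adjac)
{
    size_t a_val = (a_avail - a_adjac) * b_avail;
    size_t b_val = (b_avail - b_adjac) * a_avail;

    if (a_val < b_val)
        return 1;
    if (a_val > b_val)
        return -1;
    return 0;
}

int main(void)
{
    /* Chunk A: 10 avail runs, 4 adjacent -> ratio 6/10.
     * Chunk B:  8 avail runs, 1 adjacent -> ratio 7/8.
     * Comparing 6*8 = 48 against 7*10 = 70 gives the same answer as
     * comparing 6/10 against 7/8, without any division; prints: 1 */
    printf("%d\n", frag_cmp(10, 4, 8, 1));
    return 0;
}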
- */ - if (chunk->ndirty != 0) - arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); - - if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) - chunk->nruns_adjac++; - if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) - chunk->nruns_adjac++; - chunk->nruns_avail++; - assert(chunk->nruns_avail > chunk->nruns_adjac); - - if (arena_mapbits_dirty_get(chunk, pageind) != 0) { - arena->ndirty += npages; - chunk->ndirty += npages; - } - if (chunk->ndirty != 0) - arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); - - arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk, - pageind)); -} - -static void -arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) -{ - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - - /* - * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be - * removed and reinserted even if the run to be removed is clean. - */ - if (chunk->ndirty != 0) - arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); - - if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) - chunk->nruns_adjac--; - if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) - chunk->nruns_adjac--; - chunk->nruns_avail--; - assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail - == 0 && chunk->nruns_adjac == 0)); - - if (arena_mapbits_dirty_get(chunk, pageind) != 0) { - arena->ndirty -= npages; - chunk->ndirty -= npages; - } - if (chunk->ndirty != 0) - arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); - - arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk, - pageind)); -} - -static inline void * -arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) -{ - void *ret; - unsigned regind; - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - assert(run->nfree > 0); - assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false); - - regind = bitmap_sfu(bitmap, &bin_info->bitmap_info); - ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset + - (uintptr_t)(bin_info->reg_interval * regind)); - run->nfree--; - if (regind == run->nextind) - run->nextind++; - assert(regind < run->nextind); - return (ret); -} - -static inline void -arena_run_reg_dalloc(arena_run_t *run, void *ptr) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t mapbits = arena_mapbits_get(chunk, pageind); - size_t binind = arena_ptr_small_binind_get(ptr, mapbits); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - unsigned regind = arena_run_regind(run, bin_info, ptr); - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - assert(run->nfree < bin_info->nregs); - /* Freeing an interior pointer can cause assertion failure. */ - assert(((uintptr_t)ptr - ((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset)) % - (uintptr_t)bin_info->reg_interval == 0); - assert((uintptr_t)ptr >= (uintptr_t)run + - (uintptr_t)bin_info->reg0_offset); - /* Freeing an unallocated pointer can cause assertion failure. 
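arena_run_reg_alloc() above converts the bitmap index returned by bitmap_sfu() into a region address with run + reg0_offset + reg_interval * regind. A tiny arithmetic sketch of that mapping; the run address, offsets and index below are made-up values, the real ones come from arena_bin_info_t and the run's bitmap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical run layout: 64 bytes of header/bitmap, then
     * 32-byte regions packed back to back. */
    uintptr_t run          = 0x100000;
    uintptr_t reg0_offset  = 64;
    uintptr_t reg_interval = 32;
    unsigned  regind       = 3;      /* index handed back by the bitmap */

    uintptr_t region = run + reg0_offset + reg_interval * regind;

    /* prints: region 3 lives at 0x1000a0 */
    printf("region %u lives at 0x%lx\n", regind, (unsigned long)region);
    return 0;
}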
*/ - assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind)); - - bitmap_unset(bitmap, &bin_info->bitmap_info, regind); - run->nfree++; -} - -static inline void -arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) -{ - - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), (npages << LG_PAGE)); - memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, - (npages << LG_PAGE)); -} - -static inline void -arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ - - VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), PAGE); -} - -static inline void -arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ - size_t i; - UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); - - arena_run_page_mark_zeroed(chunk, run_ind); - for (i = 0; i < PAGE / sizeof(size_t); i++) - assert(p[i] == 0); -} - -static void -arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large, - size_t binind, bool zero) -{ - arena_chunk_t *chunk; - size_t run_ind, total_pages, need_pages, rem_pages, i; - size_t flag_dirty; - - assert((large && binind == BININD_INVALID) || (large == false && binind - != BININD_INVALID)); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> - LG_PAGE; - assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == - flag_dirty); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); - assert(need_pages <= total_pages); - rem_pages = total_pages - need_pages; - - arena_avail_remove(arena, chunk, run_ind, total_pages, true, true); - if (config_stats) { - /* - * Update stats_cactive if nactive is crossing a chunk - * multiple. - */ - size_t cactive_diff = CHUNK_CEILING((arena->nactive + - need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << - LG_PAGE); - if (cactive_diff != 0) - stats_cactive_add(cactive_diff); - } - arena->nactive += need_pages; - - /* Keep track of trailing unused pages for later use. */ - if (rem_pages > 0) { - if (flag_dirty != 0) { - arena_mapbits_unallocated_set(chunk, run_ind+need_pages, - (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY); - arena_mapbits_unallocated_set(chunk, - run_ind+total_pages-1, (rem_pages << LG_PAGE), - CHUNK_MAP_DIRTY); - } else { - arena_mapbits_unallocated_set(chunk, run_ind+need_pages, - (rem_pages << LG_PAGE), - arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages)); - arena_mapbits_unallocated_set(chunk, - run_ind+total_pages-1, (rem_pages << LG_PAGE), - arena_mapbits_unzeroed_get(chunk, - run_ind+total_pages-1)); - } - arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages, - false, true); - } - - /* - * Update the page map separately for large vs. small runs, since it is - * possible to avoid iteration for large mallocs. - */ - if (large) { - if (zero) { - if (flag_dirty == 0) { - /* - * The run is clean, so some pages may be - * zeroed (i.e. never before touched). - */ - for (i = 0; i < need_pages; i++) { - if (arena_mapbits_unzeroed_get(chunk, - run_ind+i) != 0) { - arena_run_zero(chunk, run_ind+i, - 1); - } else if (config_debug) { - arena_run_page_validate_zeroed( - chunk, run_ind+i); - } else { - arena_run_page_mark_zeroed( - chunk, run_ind+i); - } - } - } else { - /* - * The run is dirty, so all pages must be - * zeroed. 
- */ - arena_run_zero(chunk, run_ind, need_pages); - } - } else { - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); - } - - /* - * Set the last element first, in case the run only contains one - * page (i.e. both statements set the same element). - */ - arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, - flag_dirty); - arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); - } else { - assert(zero == false); - /* - * Propagate the dirty and unzeroed flags to the allocated - * small run, so that arena_dalloc_bin_run() has the ability to - * conditionally trim clean pages. - */ - arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty); - /* - * The first page will always be dirtied during small run - * initialization, so a validation failure here would not - * actually cause an observable failure. - */ - if (config_debug && flag_dirty == 0 && - arena_mapbits_unzeroed_get(chunk, run_ind) == 0) - arena_run_page_validate_zeroed(chunk, run_ind); - for (i = 1; i < need_pages - 1; i++) { - arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); - if (config_debug && flag_dirty == 0 && - arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) { - arena_run_page_validate_zeroed(chunk, - run_ind+i); - } - } - arena_mapbits_small_set(chunk, run_ind+need_pages-1, - need_pages-1, binind, flag_dirty); - if (config_debug && flag_dirty == 0 && - arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) == - 0) { - arena_run_page_validate_zeroed(chunk, - run_ind+need_pages-1); - } - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); - } -} - -static arena_chunk_t * -arena_chunk_alloc(arena_t *arena) -{ - arena_chunk_t *chunk; - size_t i; - - if (arena->spare != NULL) { - chunk = arena->spare; - arena->spare = NULL; - - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxclass); - assert(arena_mapbits_unallocated_size_get(chunk, - chunk_npages-1) == arena_maxclass); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - } else { - bool zero; - size_t unzeroed; - - zero = false; - malloc_mutex_unlock(&arena->lock); - chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, - false, &zero, arena->dss_prec); - malloc_mutex_lock(&arena->lock); - if (chunk == NULL) - return (NULL); - if (config_stats) - arena->stats.mapped += chunksize; - - chunk->arena = arena; - - /* - * Claim that no pages are in use, since the header is merely - * overhead. - */ - chunk->ndirty = 0; - - chunk->nruns_avail = 0; - chunk->nruns_adjac = 0; - - /* - * Initialize the map to contain one maximal free untouched run. - * Mark the pages as zeroed iff chunk_alloc() returned a zeroed - * chunk. - */ - unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; - arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass, - unzeroed); - /* - * There is no need to initialize the internal page map entries - * unless the chunk is not zeroed. 
- */ - if (zero == false) { - VALGRIND_MAKE_MEM_UNDEFINED( - (void *)arena_mapp_get(chunk, map_bias+1), - (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, - map_bias+1))); - for (i = map_bias+1; i < chunk_npages-1; i++) - arena_mapbits_unzeroed_set(chunk, i, unzeroed); - } else { - VALGRIND_MAKE_MEM_DEFINED( - (void *)arena_mapp_get(chunk, map_bias+1), - (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, - map_bias+1))); - if (config_debug) { - for (i = map_bias+1; i < chunk_npages-1; i++) { - assert(arena_mapbits_unzeroed_get(chunk, - i) == unzeroed); - } - } - } - arena_mapbits_unallocated_set(chunk, chunk_npages-1, - arena_maxclass, unzeroed); - } - - /* Insert the run into the runs_avail tree. */ - arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias, - false, false); - - return (chunk); -} - -static void -arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) -{ - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxclass); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxclass); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - - /* - * Remove run from the runs_avail tree, so that the arena does not use - * it. - */ - arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias, - false, false); - - if (arena->spare != NULL) { - arena_chunk_t *spare = arena->spare; - - arena->spare = chunk; - malloc_mutex_unlock(&arena->lock); - chunk_dealloc((void *)spare, chunksize, true); - malloc_mutex_lock(&arena->lock); - if (config_stats) - arena->stats.mapped -= chunksize; - } else - arena->spare = chunk; -} - -static arena_run_t * -arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind, - bool zero) -{ - arena_run_t *run; - arena_chunk_map_t *mapelm, key; - - key.bits = size | CHUNK_MAP_KEY; - mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); - if (mapelm != NULL) { - arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); - size_t pageind = (((uintptr_t)mapelm - - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) - + map_bias; - - run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << - LG_PAGE)); - arena_run_split(arena, run, size, large, binind, zero); - return (run); - } - - return (NULL); -} - -static arena_run_t * -arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind, - bool zero) -{ - arena_chunk_t *chunk; - arena_run_t *run; - - assert(size <= arena_maxclass); - assert((size & PAGE_MASK) == 0); - assert((large && binind == BININD_INVALID) || (large == false && binind - != BININD_INVALID)); - - /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_helper(arena, size, large, binind, zero); - if (run != NULL) - return (run); - - /* - * No usable runs. Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(arena); - if (chunk != NULL) { - run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); - arena_run_split(arena, run, size, large, binind, zero); - return (run); - } - - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. 
- */ - return (arena_run_alloc_helper(arena, size, large, binind, zero)); -} - -static inline void -arena_maybe_purge(arena_t *arena) -{ - size_t npurgeable, threshold; - - /* Don't purge if the option is disabled. */ - if (opt_lg_dirty_mult < 0) - return; - /* Don't purge if all dirty pages are already being purged. */ - if (arena->ndirty <= arena->npurgatory) - return; - npurgeable = arena->ndirty - arena->npurgatory; - threshold = (arena->nactive >> opt_lg_dirty_mult); - /* - * Don't purge unless the number of purgeable pages exceeds the - * threshold. - */ - if (npurgeable <= threshold) - return; - - arena_purge(arena, false); -} - -static inline size_t -arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) -{ - size_t npurged; - ql_head(arena_chunk_map_t) mapelms; - arena_chunk_map_t *mapelm; - size_t pageind, npages; - size_t nmadvise; - - ql_new(&mapelms); - - /* - * If chunk is the spare, temporarily re-allocate it, 1) so that its - * run is reinserted into runs_avail, and 2) so that it cannot be - * completely discarded by another thread while arena->lock is dropped - * by this thread. Note that the arena_run_dalloc() call will - * implicitly deallocate the chunk, so no explicit action is required - * in this function to deallocate the chunk. - * - * Note that once a chunk contains dirty pages, it cannot again contain - * a single run unless 1) it is a dirty run, or 2) this function purges - * dirty pages and causes the transition to a single clean run. Thus - * (chunk == arena->spare) is possible, but it is not possible for - * this function to be called on the spare unless it contains a dirty - * run. - */ - if (chunk == arena->spare) { - assert(arena_mapbits_dirty_get(chunk, map_bias) != 0); - assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0); - - arena_chunk_alloc(arena); - } - - if (config_stats) - arena->stats.purged += chunk->ndirty; - - /* - * Operate on all dirty runs if there is no clean/dirty run - * fragmentation. - */ - if (chunk->nruns_adjac == 0) - all = true; - - /* - * Temporarily allocate free dirty runs within chunk. If all is false, - * only operate on dirty runs that are fragments; otherwise operate on - * all dirty runs. - */ - for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { - mapelm = arena_mapp_get(chunk, pageind); - if (arena_mapbits_allocated_get(chunk, pageind) == 0) { - size_t run_size = - arena_mapbits_unallocated_size_get(chunk, pageind); - - npages = run_size >> LG_PAGE; - assert(pageind + npages <= chunk_npages); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, pageind+npages-1)); - - if (arena_mapbits_dirty_get(chunk, pageind) != 0 && - (all || arena_avail_adjac(chunk, pageind, - npages))) { - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - arena_run_split(arena, run, run_size, true, - BININD_INVALID, false); - /* Append to list for later processing. */ - ql_elm_new(mapelm, u.ql_link); - ql_tail_insert(&mapelms, mapelm, u.ql_link); - } - } else { - /* Skip run. 
*/ - if (arena_mapbits_large_get(chunk, pageind) != 0) { - npages = arena_mapbits_large_size_get(chunk, - pageind) >> LG_PAGE; - } else { - size_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - assert(arena_mapbits_small_runind_get(chunk, - pageind) == 0); - binind = arena_bin_index(arena, run->bin); - bin_info = &arena_bin_info[binind]; - npages = bin_info->run_size >> LG_PAGE; - } - } - } - assert(pageind == chunk_npages); - assert(chunk->ndirty == 0 || all == false); - assert(chunk->nruns_adjac == 0); - - malloc_mutex_unlock(&arena->lock); - if (config_stats) - nmadvise = 0; - npurged = 0; - ql_foreach(mapelm, &mapelms, u.ql_link) { - bool unzeroed; - size_t flag_unzeroed, i; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - npages = arena_mapbits_large_size_get(chunk, pageind) >> - LG_PAGE; - assert(pageind + npages <= chunk_npages); - unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind << - LG_PAGE)), (npages << LG_PAGE)); - flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; - /* - * Set the unzeroed flag for all pages, now that pages_purge() - * has returned whether the pages were zeroed as a side effect - * of purging. This chunk map modification is safe even though - * the arena mutex isn't currently owned by this thread, - * because the run is marked as allocated, thus protecting it - * from being modified by any other thread. As long as these - * writes don't perturb the first and last elements' - * CHUNK_MAP_ALLOCATED bits, behavior is well defined. - */ - for (i = 0; i < npages; i++) { - arena_mapbits_unzeroed_set(chunk, pageind+i, - flag_unzeroed); - } - npurged += npages; - if (config_stats) - nmadvise++; - } - malloc_mutex_lock(&arena->lock); - if (config_stats) - arena->stats.nmadvise += nmadvise; - - /* Deallocate runs. */ - for (mapelm = ql_first(&mapelms); mapelm != NULL; - mapelm = ql_first(&mapelms)) { - arena_run_t *run; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind << - LG_PAGE)); - ql_remove(&mapelms, mapelm, u.ql_link); - arena_run_dalloc(arena, run, false, true); - } - - return (npurged); -} - -static arena_chunk_t * -chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg) -{ - size_t *ndirty = (size_t *)arg; - - assert(chunk->ndirty != 0); - *ndirty += chunk->ndirty; - return (NULL); -} - -static void -arena_purge(arena_t *arena, bool all) -{ - arena_chunk_t *chunk; - size_t npurgatory; - if (config_debug) { - size_t ndirty = 0; - - arena_chunk_dirty_iter(&arena->chunks_dirty, NULL, - chunks_dirty_iter_cb, (void *)&ndirty); - assert(ndirty == arena->ndirty); - } - assert(arena->ndirty > arena->npurgatory || all); - assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - - arena->npurgatory) || all); - - if (config_stats) - arena->stats.npurge++; - - /* - * Compute the minimum number of pages that this thread should try to - * purge, and add the result to arena->npurgatory. This will keep - * multiple threads from racing to reduce ndirty below the threshold. 
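A minimal standalone sketch of the purge trigger described above, using plain size_t parameters in place of the arena fields (the helper name and signature are hypothetical, not part of the removed source): purging starts only once the number of purgeable dirty pages exceeds nactive >> lg_dirty_mult.

#include <stdbool.h>
#include <stddef.h>

static bool
should_purge(size_t nactive, size_t ndirty, size_t npurgatory,
    int lg_dirty_mult)
{
	size_t npurgeable, threshold;

	if (lg_dirty_mult < 0)		/* Purging disabled. */
		return (false);
	if (ndirty <= npurgatory)	/* All dirty pages already being purged. */
		return (false);
	npurgeable = ndirty - npurgatory;
	threshold = nactive >> lg_dirty_mult;
	return (npurgeable > threshold);
}

For example, with nactive = 1024 and lg_dirty_mult = 3 the threshold is 128 pages, so 200 purgeable dirty pages trigger a purge while 100 do not.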
- */ - { - size_t npurgeable = arena->ndirty - arena->npurgatory; - - if (all == false) { - size_t threshold = (arena->nactive >> - opt_lg_dirty_mult); - - npurgatory = npurgeable - threshold; - } else - npurgatory = npurgeable; - } - arena->npurgatory += npurgatory; - - while (npurgatory > 0) { - size_t npurgeable, npurged, nunpurged; - - /* Get next chunk with dirty pages. */ - chunk = arena_chunk_dirty_first(&arena->chunks_dirty); - if (chunk == NULL) { - /* - * This thread was unable to purge as many pages as - * originally intended, due to races with other threads - * that either did some of the purging work, or re-used - * dirty pages. - */ - arena->npurgatory -= npurgatory; - return; - } - npurgeable = chunk->ndirty; - assert(npurgeable != 0); - - if (npurgeable > npurgatory && chunk->nruns_adjac == 0) { - /* - * This thread will purge all the dirty pages in chunk, - * so set npurgatory to reflect this thread's intent to - * purge the pages. This tends to reduce the chances - * of the following scenario: - * - * 1) This thread sets arena->npurgatory such that - * (arena->ndirty - arena->npurgatory) is at the - * threshold. - * 2) This thread drops arena->lock. - * 3) Another thread causes one or more pages to be - * dirtied, and immediately determines that it must - * purge dirty pages. - * - * If this scenario *does* play out, that's okay, - * because all of the purging work being done really - * needs to happen. - */ - arena->npurgatory += npurgeable - npurgatory; - npurgatory = npurgeable; - } - - /* - * Keep track of how many pages are purgeable, versus how many - * actually get purged, and adjust counters accordingly. - */ - arena->npurgatory -= npurgeable; - npurgatory -= npurgeable; - npurged = arena_chunk_purge(arena, chunk, all); - nunpurged = npurgeable - npurged; - arena->npurgatory += nunpurged; - npurgatory += nunpurged; - } -} - -void -arena_purge_all(arena_t *arena) -{ - - malloc_mutex_lock(&arena->lock); - arena_purge(arena, true); - malloc_mutex_unlock(&arena->lock); -} - -static void -arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) -{ - arena_chunk_t *chunk; - size_t size, run_ind, run_pages, flag_dirty; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - assert(run_ind >= map_bias); - assert(run_ind < chunk_npages); - if (arena_mapbits_large_get(chunk, run_ind) != 0) { - size = arena_mapbits_large_size_get(chunk, run_ind); - assert(size == PAGE || - arena_mapbits_large_size_get(chunk, - run_ind+(size>>LG_PAGE)-1) == 0); - } else { - size_t binind = arena_bin_index(arena, run->bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - size = bin_info->run_size; - } - run_pages = (size >> LG_PAGE); - if (config_stats) { - /* - * Update stats_cactive if nactive is crossing a chunk - * multiple. - */ - size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) - - CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE); - if (cactive_diff != 0) - stats_cactive_sub(cactive_diff); - } - arena->nactive -= run_pages; - - /* - * The run is dirty if the caller claims to have dirtied it, as well as - * if it was already dirty before being allocated and the caller - * doesn't claim to have cleaned it. - */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0) - dirty = true; - flag_dirty = dirty ? 
CHUNK_MAP_DIRTY : 0; - - /* Mark pages as unallocated in the chunk map. */ - if (dirty) { - arena_mapbits_unallocated_set(chunk, run_ind, size, - CHUNK_MAP_DIRTY); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - CHUNK_MAP_DIRTY); - } else { - arena_mapbits_unallocated_set(chunk, run_ind, size, - arena_mapbits_unzeroed_get(chunk, run_ind)); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); - } - - /* Try to coalesce forward. */ - if (run_ind + run_pages < chunk_npages && - arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && - arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { - size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages); - size_t nrun_pages = nrun_size >> LG_PAGE; - - /* - * Remove successor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages+nrun_pages-1) == nrun_size); - assert(arena_mapbits_dirty_get(chunk, - run_ind+run_pages+nrun_pages-1) == flag_dirty); - arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages, - false, true); - - size += nrun_size; - run_pages += nrun_pages; - - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); - } - - /* Try to coalesce backward. */ - if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1) - == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) { - size_t prun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind-1); - size_t prun_pages = prun_size >> LG_PAGE; - - run_ind -= prun_pages; - - /* - * Remove predecessor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - prun_size); - assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); - arena_avail_remove(arena, chunk, run_ind, prun_pages, true, - false); - - size += prun_size; - run_pages += prun_pages; - - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); - } - - /* Insert into runs_avail, now that coalescing is complete. */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - arena_avail_insert(arena, chunk, run_ind, run_pages, true, true); - - /* Deallocate chunk if it is now completely unused. */ - if (size == arena_maxclass) { - assert(run_ind == map_bias); - assert(run_pages == (arena_maxclass >> LG_PAGE)); - arena_chunk_dealloc(arena, chunk); - } - - /* - * It is okay to do dirty page processing here even if the chunk was - * deallocated above, since in that case it is the spare. Waiting - * until after possible chunk deallocation to do dirty processing - * allows for an old spare to be fully deallocated, thus decreasing the - * chances of spuriously crossing the dirty page purging threshold. 
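A standalone sketch of the boundary-tag coalescing used above, with hypothetical flat arrays standing in for the packed chunk map bits: a free range records its size at both its first and last page, so a newly freed run can find and merge with a free successor and a free predecessor without scanning the map.

#include <stdbool.h>
#include <stddef.h>

#define NPAGES 64
static bool   allocated[NPAGES];
static size_t free_size[NPAGES];	/* Valid at the first and last page of a free range. */

static void
free_run(size_t ind, size_t pages)
{
	/* Coalesce forward with a free successor. */
	if (ind + pages < NPAGES && !allocated[ind + pages])
		pages += free_size[ind + pages];
	/* Coalesce backward with a free predecessor. */
	if (ind > 0 && !allocated[ind - 1]) {
		size_t prev_pages = free_size[ind - 1];

		ind -= prev_pages;
		pages += prev_pages;
	}
	/* Mark the merged range free and restamp its boundary tags. */
	for (size_t i = 0; i < pages; i++)
		allocated[ind + i] = false;
	free_size[ind] = pages;
	free_size[ind + pages - 1] = pages;
}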
- */ - if (dirty) - arena_maybe_purge(arena); -} - -static void -arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize) -{ - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - size_t head_npages = (oldsize - newsize) >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - - assert(oldsize > newsize); - - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * leading run as separately allocated. Set the last element of each - * run first, in case of single-page runs. - */ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty); - - if (config_debug) { - UNUSED size_t tail_npages = newsize >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); - } - arena_mapbits_large_set(chunk, pageind+head_npages, newsize, - flag_dirty); - - arena_run_dalloc(arena, run, false, false); -} - -static void -arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize, bool dirty) -{ - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - size_t head_npages = newsize >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - - assert(oldsize > newsize); - - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * trailing run as separately allocated. Set the last element of each - * run first, in case of single-page runs. - */ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty); - - if (config_debug) { - UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); - } - arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, - flag_dirty); - - arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize), - dirty, false); -} - -static arena_run_t * -arena_bin_runs_first(arena_bin_t *bin) -{ - arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs); - if (mapelm != NULL) { - arena_chunk_t *chunk; - size_t pageind; - arena_run_t *run; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); - pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t))) + map_bias; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << - LG_PAGE)); - return (run); - } - - return (NULL); -} - -static void -arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_t *chunk = CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); - - assert(arena_run_tree_search(&bin->runs, mapelm) == NULL); - - arena_run_tree_insert(&bin->runs, mapelm); -} - -static void -arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, 
pageind); - - assert(arena_run_tree_search(&bin->runs, mapelm) != NULL); - - arena_run_tree_remove(&bin->runs, mapelm); -} - -static arena_run_t * -arena_bin_nonfull_run_tryget(arena_bin_t *bin) -{ - arena_run_t *run = arena_bin_runs_first(bin); - if (run != NULL) { - arena_bin_runs_remove(bin, run); - if (config_stats) - bin->stats.reruns++; - } - return (run); -} - -static arena_run_t * -arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) -{ - arena_run_t *run; - size_t binind; - arena_bin_info_t *bin_info; - - /* Look for a usable run. */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); - /* No existing runs have any space available. */ - - binind = arena_bin_index(arena, bin); - bin_info = &arena_bin_info[binind]; - - /* Allocate a new run. */ - malloc_mutex_unlock(&bin->lock); - /******************************/ - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc(arena, bin_info->run_size, false, binind, false); - if (run != NULL) { - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - /* Initialize run internals. */ - run->bin = bin; - run->nextind = 0; - run->nfree = bin_info->nregs; - bitmap_init(bitmap, &bin_info->bitmap_info); - } - malloc_mutex_unlock(&arena->lock); - /********************************/ - malloc_mutex_lock(&bin->lock); - if (run != NULL) { - if (config_stats) { - bin->stats.nruns++; - bin->stats.curruns++; - } - return (run); - } - - /* - * arena_run_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped bin->lock above, - * so search one more time. - */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); - - return (NULL); -} - -/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ -static void * -arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) -{ - void *ret; - size_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run; - - binind = arena_bin_index(arena, bin); - bin_info = &arena_bin_info[binind]; - bin->runcur = NULL; - run = arena_bin_nonfull_run_get(arena, bin); - if (bin->runcur != NULL && bin->runcur->nfree > 0) { - /* - * Another thread updated runcur while this one ran without the - * bin lock in arena_bin_nonfull_run_get(). - */ - assert(bin->runcur->nfree > 0); - ret = arena_run_reg_alloc(bin->runcur, bin_info); - if (run != NULL) { - arena_chunk_t *chunk; - - /* - * arena_run_alloc() may have allocated run, or it may - * have pulled run from the bin's run tree. Therefore - * it is unsafe to make any assumptions about how run - * has previously been used, and arena_bin_lower_run() - * must be called, as if a region were just deallocated - * from the run. 
- */ - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - if (run->nfree == bin_info->nregs) - arena_dalloc_bin_run(arena, chunk, run, bin); - else - arena_bin_lower_run(arena, chunk, run, bin); - } - return (ret); - } - - if (run == NULL) - return (NULL); - - bin->runcur = run; - - assert(bin->runcur->nfree > 0); - - return (arena_run_reg_alloc(bin->runcur, bin_info)); -} - -void -arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, - uint64_t prof_accumbytes) -{ - unsigned i, nfill; - arena_bin_t *bin; - arena_run_t *run; - void *ptr; - - assert(tbin->ncached == 0); - - if (config_prof && arena_prof_accum(arena, prof_accumbytes)) - prof_idump(); - bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); - for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> - tbin->lg_fill_div); i < nfill; i++) { - if ((run = bin->runcur) != NULL && run->nfree > 0) - ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ptr = arena_bin_malloc_hard(arena, bin); - if (ptr == NULL) - break; - if (config_fill && opt_junk) { - arena_alloc_junk_small(ptr, &arena_bin_info[binind], - true); - } - /* Insert such that low regions get used first. */ - tbin->avail[nfill - 1 - i] = ptr; - } - if (config_stats) { - bin->stats.allocated += i * arena_bin_info[binind].reg_size; - bin->stats.nmalloc += i; - bin->stats.nrequests += tbin->tstats.nrequests; - bin->stats.nfills++; - tbin->tstats.nrequests = 0; - } - malloc_mutex_unlock(&bin->lock); - tbin->ncached = i; -} - -void -arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) -{ - - if (zero) { - size_t redzone_size = bin_info->redzone_size; - memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, - redzone_size); - memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, - redzone_size); - } else { - memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, - bin_info->reg_interval); - } -} - -void -arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) -{ - size_t size = bin_info->reg_size; - size_t redzone_size = bin_info->redzone_size; - size_t i; - bool error = false; - - for (i = 1; i <= redzone_size; i++) { - unsigned byte; - if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) { - error = true; - malloc_printf("<jemalloc>: Corrupt redzone " - "%zu byte%s before %p (size %zu), byte=%#x\n", i, - (i == 1) ? "" : "s", ptr, size, byte); - } - } - for (i = 0; i < redzone_size; i++) { - unsigned byte; - if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) { - error = true; - malloc_printf("<jemalloc>: Corrupt redzone " - "%zu byte%s after end of %p (size %zu), byte=%#x\n", - i, (i == 1) ? 
"" : "s", ptr, size, byte); - } - } - if (opt_abort && error) - abort(); - - memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, - bin_info->reg_interval); -} - -void * -arena_malloc_small(arena_t *arena, size_t size, bool zero) -{ - void *ret; - arena_bin_t *bin; - arena_run_t *run; - size_t binind; - - binind = SMALL_SIZE2BIN(size); - assert(binind < NBINS); - bin = &arena->bins[binind]; - size = arena_bin_info[binind].reg_size; - - malloc_mutex_lock(&bin->lock); - if ((run = bin->runcur) != NULL && run->nfree > 0) - ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ret = arena_bin_malloc_hard(arena, bin); - - if (ret == NULL) { - malloc_mutex_unlock(&bin->lock); - return (NULL); - } - - if (config_stats) { - bin->stats.allocated += size; - bin->stats.nmalloc++; - bin->stats.nrequests++; - } - malloc_mutex_unlock(&bin->lock); - if (config_prof && isthreaded == false && arena_prof_accum(arena, size)) - prof_idump(); - - if (zero == false) { - if (config_fill) { - if (opt_junk) { - arena_alloc_junk_small(ret, - &arena_bin_info[binind], false); - } else if (opt_zero) - memset(ret, 0, size); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } else { - if (config_fill && opt_junk) { - arena_alloc_junk_small(ret, &arena_bin_info[binind], - true); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - - return (ret); -} - -void * -arena_malloc_large(arena_t *arena, size_t size, bool zero) -{ - void *ret; - UNUSED bool idump; - - /* Large allocation. */ - size = PAGE_CEILING(size); - malloc_mutex_lock(&arena->lock); - ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero); - if (ret == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - if (config_stats) { - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - if (config_prof) - idump = arena_prof_accum_locked(arena, size); - malloc_mutex_unlock(&arena->lock); - if (config_prof && idump) - prof_idump(); - - if (zero == false) { - if (config_fill) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } - } - - return (ret); -} - -/* Only handles large allocations that require more than page alignment. 
*/ -void * -arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) -{ - void *ret; - size_t alloc_size, leadsize, trailsize; - arena_run_t *run; - arena_chunk_t *chunk; - - assert((size & PAGE_MASK) == 0); - - alignment = PAGE_CEILING(alignment); - alloc_size = size + alignment - PAGE; - - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero); - if (run == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - - leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) - - (uintptr_t)run; - assert(alloc_size >= leadsize + size); - trailsize = alloc_size - leadsize - size; - ret = (void *)((uintptr_t)run + leadsize); - if (leadsize != 0) { - arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size - - leadsize); - } - if (trailsize != 0) { - arena_run_trim_tail(arena, chunk, ret, size + trailsize, size, - false); - } - - if (config_stats) { - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - malloc_mutex_unlock(&arena->lock); - - if (config_fill && zero == false) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } - return (ret); -} - -void -arena_prof_promoted(const void *ptr, size_t size) -{ - arena_chunk_t *chunk; - size_t pageind, binind; - - cassert(config_prof); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - assert(isalloc(ptr, false) == PAGE); - assert(isalloc(ptr, true) == PAGE); - assert(size <= SMALL_MAXCLASS); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - binind = SMALL_SIZE2BIN(size); - assert(binind < NBINS); - arena_mapbits_large_binind_set(chunk, pageind, binind); - - assert(isalloc(ptr, false) == PAGE); - assert(isalloc(ptr, true) == size); -} - -static void -arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - - /* Dissociate run from bin. */ - if (run == bin->runcur) - bin->runcur = NULL; - else { - size_t binind = arena_bin_index(chunk->arena, bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - - if (bin_info->nregs != 1) { - /* - * This block's conditional is necessary because if the - * run only contains one region, then it never gets - * inserted into the non-full runs tree. 
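A self-contained sketch of the over-allocate-and-trim arithmetic in arena_palloc() above, using hypothetical constants (4 KiB pages, a 16 KiB alignment request, a made-up run address) rather than real run pointers:

#include <assert.h>
#include <stdint.h>

#define PAGE_SZ		((uintptr_t)4096)
#define ALIGN_UP(a, b)	(((a) + (b) - 1) & ~((b) - 1))

int
main(void)
{
	uintptr_t alignment = 16384, size = 8192;
	uintptr_t alloc_size = size + alignment - PAGE_SZ;	/* 20480 bytes requested. */
	uintptr_t run = 0x803000;				/* Hypothetical, unaligned run start. */
	uintptr_t leadsize = ALIGN_UP(run, alignment) - run;	/* 4096 bytes trimmed from the head. */
	uintptr_t trailsize = alloc_size - leadsize - size;	/* 8192 bytes trimmed from the tail. */
	uintptr_t ret = run + leadsize;

	assert((ret & (alignment - 1)) == 0);	/* The surviving middle is 16 KiB aligned. */
	assert(leadsize + size + trailsize == alloc_size);
	return (0);
}

Requesting size + alignment - PAGE extra pages guarantees that some 16 KiB boundary falls within the run, and the head/tail trims return the unused pages on either side.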
- */ - arena_bin_runs_remove(bin, run); - } - } -} - -static void -arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - size_t binind; - arena_bin_info_t *bin_info; - size_t npages, run_ind, past; - - assert(run != bin->runcur); - assert(arena_run_tree_search(&bin->runs, - arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)) - == NULL); - - binind = arena_bin_index(chunk->arena, run->bin); - bin_info = &arena_bin_info[binind]; - - malloc_mutex_unlock(&bin->lock); - /******************************/ - npages = bin_info->run_size >> LG_PAGE; - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - past = (size_t)(PAGE_CEILING((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind * - bin_info->reg_interval - bin_info->redzone_size) - - (uintptr_t)chunk) >> LG_PAGE); - malloc_mutex_lock(&arena->lock); - - /* - * If the run was originally clean, and some pages were never touched, - * trim the clean pages before deallocating the dirty portion of the - * run. - */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+npages-1)); - if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind < - npages) { - /* Trim clean pages. Convert to large run beforehand. */ - assert(npages > 0); - arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0); - arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0); - arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE), - ((past - run_ind) << LG_PAGE), false); - /* npages = past - run_ind; */ - } - arena_run_dalloc(arena, run, true, false); - malloc_mutex_unlock(&arena->lock); - /****************************/ - malloc_mutex_lock(&bin->lock); - if (config_stats) - bin->stats.curruns--; -} - -static void -arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - - /* - * Make sure that if bin->runcur is non-NULL, it refers to the lowest - * non-full run. It is okay to NULL runcur out rather than proactively - * keeping it pointing at the lowest non-full run. - */ - if ((uintptr_t)run < (uintptr_t)bin->runcur) { - /* Switch runcur. 
*/ - if (bin->runcur->nfree > 0) - arena_bin_runs_insert(bin, bin->runcur); - bin->runcur = run; - if (config_stats) - bin->stats.reruns++; - } else - arena_bin_runs_insert(bin, run); -} - -void -arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_t *mapelm) -{ - size_t pageind; - arena_run_t *run; - arena_bin_t *bin; - arena_bin_info_t *bin_info; - size_t size, binind; - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); - bin = run->bin; - binind = arena_ptr_small_binind_get(ptr, mapelm->bits); - bin_info = &arena_bin_info[binind]; - if (config_fill || config_stats) - size = bin_info->reg_size; - - if (config_fill && opt_junk) - arena_dalloc_junk_small(ptr, bin_info); - - arena_run_reg_dalloc(run, ptr); - if (run->nfree == bin_info->nregs) { - arena_dissociate_bin_run(chunk, run, bin); - arena_dalloc_bin_run(arena, chunk, run, bin); - } else if (run->nfree == 1 && run != bin->runcur) - arena_bin_lower_run(arena, chunk, run, bin); - - if (config_stats) { - bin->stats.allocated -= size; - bin->stats.ndalloc++; - } -} - -void -arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind, arena_chunk_map_t *mapelm) -{ - arena_run_t *run; - arena_bin_t *bin; - - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); - bin = run->bin; - malloc_mutex_lock(&bin->lock); - arena_dalloc_bin_locked(arena, chunk, ptr, mapelm); - malloc_mutex_unlock(&bin->lock); -} - -void -arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind) -{ - arena_chunk_map_t *mapelm; - - if (config_debug) { - /* arena_ptr_small_binind_get() does extra sanity checking. */ - assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, - pageind)) != BININD_INVALID); - } - mapelm = arena_mapp_get(chunk, pageind); - arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm); -} - -void -arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ - - if (config_fill || config_stats) { - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t size = arena_mapbits_large_size_get(chunk, pageind); - - if (config_fill && config_stats && opt_junk) - memset(ptr, 0x5a, size); - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= size; - arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--; - } - } - - arena_run_dalloc(arena, (arena_run_t *)ptr, true, false); -} - -void -arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ - - malloc_mutex_lock(&arena->lock); - arena_dalloc_large_locked(arena, chunk, ptr); - malloc_mutex_unlock(&arena->lock); -} - -static void -arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size) -{ - - assert(size < oldsize); - - /* - * Shrink the run, and make trailing pages available for other - * allocations. 
- */ - malloc_mutex_lock(&arena->lock); - arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size, - true); - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - malloc_mutex_unlock(&arena->lock); -} - -static bool -arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size, size_t extra, bool zero) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t npages = oldsize >> LG_PAGE; - size_t followsize; - - assert(oldsize == arena_mapbits_large_size_get(chunk, pageind)); - - /* Try to extend the run. */ - assert(size + extra > oldsize); - malloc_mutex_lock(&arena->lock); - if (pageind + npages < chunk_npages && - arena_mapbits_allocated_get(chunk, pageind+npages) == 0 && - (followsize = arena_mapbits_unallocated_size_get(chunk, - pageind+npages)) >= size - oldsize) { - /* - * The next run is available and sufficiently large. Split the - * following run, then merge the first part with the existing - * allocation. - */ - size_t flag_dirty; - size_t splitsize = (oldsize + followsize <= size + extra) - ? followsize : size + extra - oldsize; - arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk + - ((pageind+npages) << LG_PAGE)), splitsize, true, - BININD_INVALID, zero); - - size = oldsize + splitsize; - npages = size >> LG_PAGE; - - /* - * Mark the extended run as dirty if either portion of the run - * was dirty before allocation. This is rather pedantic, - * because there's not actually any sequence of events that - * could cause the resulting run to be passed to - * arena_run_dalloc() with the dirty argument set to false - * (which is when dirty flag consistency would really matter). - */ - flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | - arena_mapbits_dirty_get(chunk, pageind+npages-1); - arena_mapbits_large_set(chunk, pageind, size, flag_dirty); - arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty); - - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - malloc_mutex_unlock(&arena->lock); - return (false); - } - malloc_mutex_unlock(&arena->lock); - - return (true); -} - -/* - * Try to resize a large allocation, in order to avoid copying. This will - * always fail if growing an object, and the following run is already in use. - */ -static bool -arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, - bool zero) -{ - size_t psize; - - psize = PAGE_CEILING(size + extra); - if (psize == oldsize) { - /* Same size class. 
*/ - if (config_fill && opt_junk && size < oldsize) { - memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - - size); - } - return (false); - } else { - arena_chunk_t *chunk; - arena_t *arena; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - - if (psize < oldsize) { - /* Fill before shrinking in order avoid a race. */ - if (config_fill && opt_junk) { - memset((void *)((uintptr_t)ptr + size), 0x5a, - oldsize - size); - } - arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, - psize); - return (false); - } else { - bool ret = arena_ralloc_large_grow(arena, chunk, ptr, - oldsize, PAGE_CEILING(size), - psize - PAGE_CEILING(size), zero); - if (config_fill && ret == false && zero == false && - opt_zero) { - memset((void *)((uintptr_t)ptr + oldsize), 0, - size - oldsize); - } - return (ret); - } - } -} - -void * -arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, - bool zero) -{ - - /* - * Avoid moving the allocation if the size class can be left the same. - */ - if (oldsize <= arena_maxclass) { - if (oldsize <= SMALL_MAXCLASS) { - assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size - == oldsize); - if ((size + extra <= SMALL_MAXCLASS && - SMALL_SIZE2BIN(size + extra) == - SMALL_SIZE2BIN(oldsize)) || (size <= oldsize && - size + extra >= oldsize)) { - if (config_fill && opt_junk && size < oldsize) { - memset((void *)((uintptr_t)ptr + size), - 0x5a, oldsize - size); - } - return (ptr); - } - } else { - assert(size <= arena_maxclass); - if (size + extra > SMALL_MAXCLASS) { - if (arena_ralloc_large(ptr, oldsize, size, - extra, zero) == false) - return (ptr); - } - } - } - - /* Reallocation would require a move. */ - return (NULL); -} - -void * -arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, - bool try_tcache_dalloc) -{ - void *ret; - size_t copysize; - - /* Try to avoid moving the allocation. */ - ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero); - if (ret != NULL) - return (ret); - - /* - * size and oldsize are different enough that we need to move the - * object. In that case, fall back to allocating new space and - * copying. - */ - if (alignment != 0) { - size_t usize = sa2u(size + extra, alignment); - if (usize == 0) - return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena); - } else - ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc); - - if (ret == NULL) { - if (extra == 0) - return (NULL); - /* Try again, this time without extra. */ - if (alignment != 0) { - size_t usize = sa2u(size, alignment); - if (usize == 0) - return (NULL); - ret = ipallocx(usize, alignment, zero, try_tcache_alloc, - arena); - } else - ret = arena_malloc(arena, size, zero, try_tcache_alloc); - - if (ret == NULL) - return (NULL); - } - - /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */ - - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. - */ - copysize = (size < oldsize) ? 
size : oldsize; - VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); - memcpy(ret, ptr, copysize); - iqallocx(ptr, try_tcache_dalloc); - return (ret); -} - -dss_prec_t -arena_dss_prec_get(arena_t *arena) -{ - dss_prec_t ret; - - malloc_mutex_lock(&arena->lock); - ret = arena->dss_prec; - malloc_mutex_unlock(&arena->lock); - return (ret); -} - -void -arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) -{ - - malloc_mutex_lock(&arena->lock); - arena->dss_prec = dss_prec; - malloc_mutex_unlock(&arena->lock); -} - -void -arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, - size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, - malloc_large_stats_t *lstats) -{ - unsigned i; - - malloc_mutex_lock(&arena->lock); - *dss = dss_prec_names[arena->dss_prec]; - *nactive += arena->nactive; - *ndirty += arena->ndirty; - - astats->mapped += arena->stats.mapped; - astats->npurge += arena->stats.npurge; - astats->nmadvise += arena->stats.nmadvise; - astats->purged += arena->stats.purged; - astats->allocated_large += arena->stats.allocated_large; - astats->nmalloc_large += arena->stats.nmalloc_large; - astats->ndalloc_large += arena->stats.ndalloc_large; - astats->nrequests_large += arena->stats.nrequests_large; - - for (i = 0; i < nlclasses; i++) { - lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; - lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; - lstats[i].nrequests += arena->stats.lstats[i].nrequests; - lstats[i].curruns += arena->stats.lstats[i].curruns; - } - malloc_mutex_unlock(&arena->lock); - - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - - malloc_mutex_lock(&bin->lock); - bstats[i].allocated += bin->stats.allocated; - bstats[i].nmalloc += bin->stats.nmalloc; - bstats[i].ndalloc += bin->stats.ndalloc; - bstats[i].nrequests += bin->stats.nrequests; - if (config_tcache) { - bstats[i].nfills += bin->stats.nfills; - bstats[i].nflushes += bin->stats.nflushes; - } - bstats[i].nruns += bin->stats.nruns; - bstats[i].reruns += bin->stats.reruns; - bstats[i].curruns += bin->stats.curruns; - malloc_mutex_unlock(&bin->lock); - } -} - -bool -arena_new(arena_t *arena, unsigned ind) -{ - unsigned i; - arena_bin_t *bin; - - arena->ind = ind; - arena->nthreads = 0; - - if (malloc_mutex_init(&arena->lock)) - return (true); - - if (config_stats) { - memset(&arena->stats, 0, sizeof(arena_stats_t)); - arena->stats.lstats = - (malloc_large_stats_t *)base_alloc(nlclasses * - sizeof(malloc_large_stats_t)); - if (arena->stats.lstats == NULL) - return (true); - memset(arena->stats.lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - if (config_tcache) - ql_new(&arena->tcache_ql); - } - - if (config_prof) - arena->prof_accumbytes = 0; - - arena->dss_prec = chunk_dss_prec_get(); - - /* Initialize chunks. */ - arena_chunk_dirty_new(&arena->chunks_dirty); - arena->spare = NULL; - - arena->nactive = 0; - arena->ndirty = 0; - arena->npurgatory = 0; - - arena_avail_tree_new(&arena->runs_avail); - - /* Initialize bins. */ - for (i = 0; i < NBINS; i++) { - bin = &arena->bins[i]; - if (malloc_mutex_init(&bin->lock)) - return (true); - bin->runcur = NULL; - arena_run_tree_new(&bin->runs); - if (config_stats) - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); - } - - return (false); -} - -/* - * Calculate bin_info->run_size such that it meets the following constraints: - * - * *) bin_info->run_size >= min_run_size - * *) bin_info->run_size <= arena_maxclass - * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed). 
- * *) bin_info->nregs <= RUN_MAXREGS - * - * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also - * calculated here, since these settings are all interdependent. - */ -static size_t -bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size) -{ - size_t pad_size; - size_t try_run_size, good_run_size; - uint32_t try_nregs, good_nregs; - uint32_t try_hdr_size, good_hdr_size; - uint32_t try_bitmap_offset, good_bitmap_offset; - uint32_t try_ctx0_offset, good_ctx0_offset; - uint32_t try_redzone0_offset, good_redzone0_offset; - - assert(min_run_size >= PAGE); - assert(min_run_size <= arena_maxclass); - - /* - * Determine redzone size based on minimum alignment and minimum - * redzone size. Add padding to the end of the run if it is needed to - * align the regions. The padding allows each redzone to be half the - * minimum alignment; without the padding, each redzone would have to - * be twice as large in order to maintain alignment. - */ - if (config_fill && opt_redzone) { - size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1); - if (align_min <= REDZONE_MINSIZE) { - bin_info->redzone_size = REDZONE_MINSIZE; - pad_size = 0; - } else { - bin_info->redzone_size = align_min >> 1; - pad_size = bin_info->redzone_size; - } - } else { - bin_info->redzone_size = 0; - pad_size = 0; - } - bin_info->reg_interval = bin_info->reg_size + - (bin_info->redzone_size << 1); - - /* - * Calculate known-valid settings before entering the run_size - * expansion loop, so that the first part of the loop always copies - * valid settings. - * - * The do..while loop iteratively reduces the number of regions until - * the run header and the regions no longer overlap. A closed formula - * would be quite messy, since there is an interdependency between the - * header's mask length and the number of regions. - */ - try_run_size = min_run_size; - try_nregs = ((try_run_size - sizeof(arena_run_t)) / - bin_info->reg_interval) - + 1; /* Counter-act try_nregs-- in loop. */ - if (try_nregs > RUN_MAXREGS) { - try_nregs = RUN_MAXREGS - + 1; /* Counter-act try_nregs-- in loop. */ - } - do { - try_nregs--; - try_hdr_size = sizeof(arena_run_t); - /* Pad to a long boundary. */ - try_hdr_size = LONG_CEILING(try_hdr_size); - try_bitmap_offset = try_hdr_size; - /* Add space for bitmap. */ - try_hdr_size += bitmap_size(try_nregs); - if (config_prof && opt_prof && prof_promote == false) { - /* Pad to a quantum boundary. */ - try_hdr_size = QUANTUM_CEILING(try_hdr_size); - try_ctx0_offset = try_hdr_size; - /* Add space for one (prof_ctx_t *) per region. */ - try_hdr_size += try_nregs * sizeof(prof_ctx_t *); - } else - try_ctx0_offset = 0; - try_redzone0_offset = try_run_size - (try_nregs * - bin_info->reg_interval) - pad_size; - } while (try_hdr_size > try_redzone0_offset); - - /* run_size expansion loop. */ - do { - /* - * Copy valid settings before trying more aggressive settings. - */ - good_run_size = try_run_size; - good_nregs = try_nregs; - good_hdr_size = try_hdr_size; - good_bitmap_offset = try_bitmap_offset; - good_ctx0_offset = try_ctx0_offset; - good_redzone0_offset = try_redzone0_offset; - - /* Try more aggressive settings. */ - try_run_size += PAGE; - try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) / - bin_info->reg_interval) - + 1; /* Counter-act try_nregs-- in loop. */ - if (try_nregs > RUN_MAXREGS) { - try_nregs = RUN_MAXREGS - + 1; /* Counter-act try_nregs-- in loop. */ - } - do { - try_nregs--; - try_hdr_size = sizeof(arena_run_t); - /* Pad to a long boundary. 
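A standalone sketch of the header/region fitting step above, with hypothetical sizes in place of arena_run_t and the real bitmap: the region count is reduced until the run header (a fixed part plus one bitmap bit per region) no longer overlaps the first region.

#include <stdio.h>

int
main(void)
{
	const unsigned run_size = 4096, fixed_hdr = 48, reg_interval = 80;
	unsigned nregs = run_size / reg_interval + 1;	/* Counter-act nregs-- in loop. */
	unsigned hdr_size, reg0_offset;

	do {
		nregs--;
		hdr_size = fixed_hdr + (nregs + 7) / 8;	/* One bitmap bit per region. */
		reg0_offset = run_size - nregs * reg_interval;
	} while (hdr_size > reg0_offset);
	printf("nregs=%u hdr_size=%u reg0_offset=%u\n", nregs, hdr_size, reg0_offset);
	return (0);
}

With these made-up numbers the loop settles on 50 regions: a 55-byte header fits below the first region at offset 96, whereas 51 regions would leave only 16 bytes for the header.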
*/ - try_hdr_size = LONG_CEILING(try_hdr_size); - try_bitmap_offset = try_hdr_size; - /* Add space for bitmap. */ - try_hdr_size += bitmap_size(try_nregs); - if (config_prof && opt_prof && prof_promote == false) { - /* Pad to a quantum boundary. */ - try_hdr_size = QUANTUM_CEILING(try_hdr_size); - try_ctx0_offset = try_hdr_size; - /* - * Add space for one (prof_ctx_t *) per region. - */ - try_hdr_size += try_nregs * - sizeof(prof_ctx_t *); - } - try_redzone0_offset = try_run_size - (try_nregs * - bin_info->reg_interval) - pad_size; - } while (try_hdr_size > try_redzone0_offset); - } while (try_run_size <= arena_maxclass - && try_run_size <= arena_maxclass - && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) > - RUN_MAX_OVRHD_RELAX - && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size - && try_nregs < RUN_MAXREGS); - - assert(good_hdr_size <= good_redzone0_offset); - - /* Copy final settings. */ - bin_info->run_size = good_run_size; - bin_info->nregs = good_nregs; - bin_info->bitmap_offset = good_bitmap_offset; - bin_info->ctx0_offset = good_ctx0_offset; - bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size; - - assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs - * bin_info->reg_interval) + pad_size == bin_info->run_size); - - return (good_run_size); -} - -static void -bin_info_init(void) -{ - arena_bin_info_t *bin_info; - size_t prev_run_size = PAGE; - -#define SIZE_CLASS(bin, delta, size) \ - bin_info = &arena_bin_info[bin]; \ - bin_info->reg_size = size; \ - prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\ - bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); - SIZE_CLASSES -#undef SIZE_CLASS -} - -void -arena_boot(void) -{ - size_t header_size; - unsigned i; - - /* - * Compute the header size such that it is large enough to contain the - * page map. The page map is biased to omit entries for the header - * itself, so some iteration is necessary to compute the map bias. - * - * 1) Compute safe header_size and map_bias values that include enough - * space for an unbiased page map. - * 2) Refine map_bias based on (1) to omit the header pages in the page - * map. The resulting map_bias may be one too small. - * 3) Refine map_bias based on (2). The result will be >= the result - * from (2), and will always be correct. 
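A self-contained sketch of the three-pass map_bias refinement described above, using hypothetical sizes (1024 pages per chunk, 4 KiB pages, a 64-byte fixed chunk header and 32 bytes of map entry per page):

#include <stdio.h>
#include <stddef.h>

int
main(void)
{
	const size_t chunk_npages = 1024, lg_page = 12, page_mask = 4096 - 1;
	const size_t fixed_hdr = 64, map_entry_size = 32;
	size_t map_bias = 0, header_size;
	int i;

	for (i = 0; i < 3; i++) {
		header_size = fixed_hdr +
		    map_entry_size * (chunk_npages - map_bias);
		map_bias = (header_size >> lg_page) +
		    ((header_size & page_mask) != 0);
		printf("pass %d: header_size=%zu map_bias=%zu\n",
		    i + 1, header_size, map_bias);
	}
	return (0);
}

With these numbers the first pass over-estimates the bias at 9 header pages, the second pass (which omits the header pages from the map) drops it to 8, and the third pass confirms that 8 is stable.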
- */ - map_bias = 0; - for (i = 0; i < 3; i++) { - header_size = offsetof(arena_chunk_t, map) + - (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias)); - map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK) - != 0); - } - assert(map_bias > 0); - - arena_maxclass = chunksize - (map_bias << LG_PAGE); - - bin_info_init(); -} - -void -arena_prefork(arena_t *arena) -{ - unsigned i; - - malloc_mutex_prefork(&arena->lock); - for (i = 0; i < NBINS; i++) - malloc_mutex_prefork(&arena->bins[i].lock); -} - -void -arena_postfork_parent(arena_t *arena) -{ - unsigned i; - - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_parent(&arena->bins[i].lock); - malloc_mutex_postfork_parent(&arena->lock); -} - -void -arena_postfork_child(arena_t *arena) -{ - unsigned i; - - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_child(&arena->bins[i].lock); - malloc_mutex_postfork_child(&arena->lock); -} diff --git a/extra/jemalloc/src/atomic.c b/extra/jemalloc/src/atomic.c deleted file mode 100644 index 77ee313113b..00000000000 --- a/extra/jemalloc/src/atomic.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_ATOMIC_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/extra/jemalloc/src/base.c b/extra/jemalloc/src/base.c deleted file mode 100644 index 4e62e8fa918..00000000000 --- a/extra/jemalloc/src/base.c +++ /dev/null @@ -1,142 +0,0 @@ -#define JEMALLOC_BASE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -static malloc_mutex_t base_mtx; - -/* - * Current pages that are being used for internal memory allocations. These - * pages are carved up in cacheline-size quanta, so that there is no chance of - * false cache line sharing. - */ -static void *base_pages; -static void *base_next_addr; -static void *base_past_addr; /* Addr immediately past base_pages. */ -static extent_node_t *base_nodes; - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static bool base_pages_alloc(size_t minsize); - -/******************************************************************************/ - -static bool -base_pages_alloc(size_t minsize) -{ - size_t csize; - bool zero; - - assert(minsize != 0); - csize = CHUNK_CEILING(minsize); - zero = false; - base_pages = chunk_alloc(csize, chunksize, true, &zero, - chunk_dss_prec_get()); - if (base_pages == NULL) - return (true); - base_next_addr = base_pages; - base_past_addr = (void *)((uintptr_t)base_pages + csize); - - return (false); -} - -void * -base_alloc(size_t size) -{ - void *ret; - size_t csize; - - /* Round size up to nearest multiple of the cacheline size. */ - csize = CACHELINE_CEILING(size); - - malloc_mutex_lock(&base_mtx); - /* Make sure there's enough space for the allocation. */ - if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) { - if (base_pages_alloc(csize)) { - malloc_mutex_unlock(&base_mtx); - return (NULL); - } - } - /* Allocate. 
*/ - ret = base_next_addr; - base_next_addr = (void *)((uintptr_t)base_next_addr + csize); - malloc_mutex_unlock(&base_mtx); - VALGRIND_MAKE_MEM_UNDEFINED(ret, csize); - - return (ret); -} - -void * -base_calloc(size_t number, size_t size) -{ - void *ret = base_alloc(number * size); - - if (ret != NULL) - memset(ret, 0, number * size); - - return (ret); -} - -extent_node_t * -base_node_alloc(void) -{ - extent_node_t *ret; - - malloc_mutex_lock(&base_mtx); - if (base_nodes != NULL) { - ret = base_nodes; - base_nodes = *(extent_node_t **)ret; - malloc_mutex_unlock(&base_mtx); - VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t)); - } else { - malloc_mutex_unlock(&base_mtx); - ret = (extent_node_t *)base_alloc(sizeof(extent_node_t)); - } - - return (ret); -} - -void -base_node_dealloc(extent_node_t *node) -{ - - VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); - malloc_mutex_lock(&base_mtx); - *(extent_node_t **)node = base_nodes; - base_nodes = node; - malloc_mutex_unlock(&base_mtx); -} - -bool -base_boot(void) -{ - - base_nodes = NULL; - if (malloc_mutex_init(&base_mtx)) - return (true); - - return (false); -} - -void -base_prefork(void) -{ - - malloc_mutex_prefork(&base_mtx); -} - -void -base_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&base_mtx); -} - -void -base_postfork_child(void) -{ - - malloc_mutex_postfork_child(&base_mtx); -} diff --git a/extra/jemalloc/src/bitmap.c b/extra/jemalloc/src/bitmap.c deleted file mode 100644 index b47e2629093..00000000000 --- a/extra/jemalloc/src/bitmap.c +++ /dev/null @@ -1,90 +0,0 @@ -#define JEMALLOC_BITMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static size_t bits2groups(size_t nbits); - -/******************************************************************************/ - -static size_t -bits2groups(size_t nbits) -{ - - return ((nbits >> LG_BITMAP_GROUP_NBITS) + - !!(nbits & BITMAP_GROUP_NBITS_MASK)); -} - -void -bitmap_info_init(bitmap_info_t *binfo, size_t nbits) -{ - unsigned i; - size_t group_count; - - assert(nbits > 0); - assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); - - /* - * Compute the number of groups necessary to store nbits bits, and - * progressively work upward through the levels until reaching a level - * that requires only one group. - */ - binfo->levels[0].group_offset = 0; - group_count = bits2groups(nbits); - for (i = 1; group_count > 1; i++) { - assert(i < BITMAP_MAX_LEVELS); - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset - + group_count; - group_count = bits2groups(group_count); - } - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset - + group_count; - binfo->nlevels = i; - binfo->nbits = nbits; -} - -size_t -bitmap_info_ngroups(const bitmap_info_t *binfo) -{ - - return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); -} - -size_t -bitmap_size(size_t nbits) -{ - bitmap_info_t binfo; - - bitmap_info_init(&binfo, nbits); - return (bitmap_info_ngroups(&binfo)); -} - -void -bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - size_t extra; - unsigned i; - - /* - * Bits are actually inverted with regard to the external bitmap - * interface, so the bitmap starts out with all 1 bits, except for - * trailing unused bits (if any). Note that each group uses bit 0 to - * correspond to the first logical bit in the group, so extra bits - * are the most significant bits of the last group. 
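A standalone sketch of the level sizing in bitmap_info_init() above, assuming hypothetical 64-bit groups: each level needs one bit per group of the level below, and levels are added until a single group remains.

#include <stdio.h>
#include <stddef.h>

static size_t
bits2groups(size_t nbits)
{
	return ((nbits + 63) / 64);	/* Round up to whole 64-bit groups. */
}

int
main(void)
{
	size_t nbits = 10000, level = 0, groups;

	for (groups = bits2groups(nbits); ; groups = bits2groups(groups)) {
		printf("level %zu: %zu group(s)\n", level++, groups);
		if (groups == 1)
			break;
	}
	return (0);
}

For 10000 bits this yields 157 groups at level 0, 3 at level 1, and a single group at level 2, which is what lets a "first free bit" query inspect only one group per level.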
- */ - memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << - LG_SIZEOF_BITMAP); - extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) - & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) - bitmap[binfo->levels[1].group_offset - 1] >>= extra; - for (i = 1; i < binfo->nlevels; i++) { - size_t group_count = binfo->levels[i].group_offset - - binfo->levels[i-1].group_offset; - extra = (BITMAP_GROUP_NBITS - (group_count & - BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) - bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; - } -} diff --git a/extra/jemalloc/src/chunk.c b/extra/jemalloc/src/chunk.c deleted file mode 100644 index 044f76be96c..00000000000 --- a/extra/jemalloc/src/chunk.c +++ /dev/null @@ -1,385 +0,0 @@ -#define JEMALLOC_CHUNK_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -const char *opt_dss = DSS_DEFAULT; -size_t opt_lg_chunk = LG_CHUNK_DEFAULT; - -malloc_mutex_t chunks_mtx; -chunk_stats_t stats_chunks; - -/* - * Trees of chunks that were previously allocated (trees differ only in node - * ordering). These are used when allocating chunks, in an attempt to re-use - * address space. Depending on function, different tree orderings are needed, - * which is why there are two trees with the same contents. - */ -static extent_tree_t chunks_szad_mmap; -static extent_tree_t chunks_ad_mmap; -static extent_tree_t chunks_szad_dss; -static extent_tree_t chunks_ad_dss; - -rtree_t *chunks_rtree; - -/* Various chunk-related settings. */ -size_t chunksize; -size_t chunksize_mask; /* (chunksize - 1). */ -size_t chunk_npages; -size_t map_bias; -size_t arena_maxclass; /* Max size class for arenas. */ - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static void *chunk_recycle(extent_tree_t *chunks_szad, - extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base, - bool *zero); -static void chunk_record(extent_tree_t *chunks_szad, - extent_tree_t *chunks_ad, void *chunk, size_t size); - -/******************************************************************************/ - -static void * -chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size, - size_t alignment, bool base, bool *zero) -{ - void *ret; - extent_node_t *node; - extent_node_t key; - size_t alloc_size, leadsize, trailsize; - bool zeroed; - - if (base) { - /* - * This function may need to call base_node_{,de}alloc(), but - * the current chunk allocation request is on behalf of the - * base allocator. Avoid deadlock (and if that weren't an - * issue, potential for infinite recursion) by returning NULL. - */ - return (NULL); - } - - alloc_size = size + alignment - chunksize; - /* Beware size_t wrap-around. */ - if (alloc_size < size) - return (NULL); - key.addr = NULL; - key.size = alloc_size; - malloc_mutex_lock(&chunks_mtx); - node = extent_tree_szad_nsearch(chunks_szad, &key); - if (node == NULL) { - malloc_mutex_unlock(&chunks_mtx); - return (NULL); - } - leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) - - (uintptr_t)node->addr; - assert(node->size >= leadsize + size); - trailsize = node->size - leadsize - size; - ret = (void *)((uintptr_t)node->addr + leadsize); - zeroed = node->zeroed; - if (zeroed) - *zero = true; - /* Remove node from the tree. 
*/ - extent_tree_szad_remove(chunks_szad, node); - extent_tree_ad_remove(chunks_ad, node); - if (leadsize != 0) { - /* Insert the leading space as a smaller chunk. */ - node->size = leadsize; - extent_tree_szad_insert(chunks_szad, node); - extent_tree_ad_insert(chunks_ad, node); - node = NULL; - } - if (trailsize != 0) { - /* Insert the trailing space as a smaller chunk. */ - if (node == NULL) { - /* - * An additional node is required, but - * base_node_alloc() can cause a new base chunk to be - * allocated. Drop chunks_mtx in order to avoid - * deadlock, and if node allocation fails, deallocate - * the result before returning an error. - */ - malloc_mutex_unlock(&chunks_mtx); - node = base_node_alloc(); - if (node == NULL) { - chunk_dealloc(ret, size, true); - return (NULL); - } - malloc_mutex_lock(&chunks_mtx); - } - node->addr = (void *)((uintptr_t)(ret) + size); - node->size = trailsize; - node->zeroed = zeroed; - extent_tree_szad_insert(chunks_szad, node); - extent_tree_ad_insert(chunks_ad, node); - node = NULL; - } - malloc_mutex_unlock(&chunks_mtx); - - if (node != NULL) - base_node_dealloc(node); - if (*zero) { - if (zeroed == false) - memset(ret, 0, size); - else if (config_debug) { - size_t i; - size_t *p = (size_t *)(uintptr_t)ret; - - VALGRIND_MAKE_MEM_DEFINED(ret, size); - for (i = 0; i < size / sizeof(size_t); i++) - assert(p[i] == 0); - } - } - return (ret); -} - -/* - * If the caller specifies (*zero == false), it is still possible to receive - * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc() - * takes advantage of this to avoid demanding zeroed chunks, but taking - * advantage of them if they are returned. - */ -void * -chunk_alloc(size_t size, size_t alignment, bool base, bool *zero, - dss_prec_t dss_prec) -{ - void *ret; - - assert(size != 0); - assert((size & chunksize_mask) == 0); - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - /* "primary" dss. */ - if (config_dss && dss_prec == dss_prec_primary) { - if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size, - alignment, base, zero)) != NULL) - goto label_return; - if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL) - goto label_return; - } - /* mmap. */ - if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size, - alignment, base, zero)) != NULL) - goto label_return; - if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL) - goto label_return; - /* "secondary" dss. */ - if (config_dss && dss_prec == dss_prec_secondary) { - if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size, - alignment, base, zero)) != NULL) - goto label_return; - if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL) - goto label_return; - } - - /* All strategies for allocation failed. 
*/ - ret = NULL; -label_return: - if (ret != NULL) { - if (config_ivsalloc && base == false) { - if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) { - chunk_dealloc(ret, size, true); - return (NULL); - } - } - if (config_stats || config_prof) { - bool gdump; - malloc_mutex_lock(&chunks_mtx); - if (config_stats) - stats_chunks.nchunks += (size / chunksize); - stats_chunks.curchunks += (size / chunksize); - if (stats_chunks.curchunks > stats_chunks.highchunks) { - stats_chunks.highchunks = - stats_chunks.curchunks; - if (config_prof) - gdump = true; - } else if (config_prof) - gdump = false; - malloc_mutex_unlock(&chunks_mtx); - if (config_prof && opt_prof && opt_prof_gdump && gdump) - prof_gdump(); - } - if (config_valgrind) - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } - assert(CHUNK_ADDR2BASE(ret) == ret); - return (ret); -} - -static void -chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk, - size_t size) -{ - bool unzeroed; - extent_node_t *xnode, *node, *prev, key; - - unzeroed = pages_purge(chunk, size); - VALGRIND_MAKE_MEM_NOACCESS(chunk, size); - - /* - * Allocate a node before acquiring chunks_mtx even though it might not - * be needed, because base_node_alloc() may cause a new base chunk to - * be allocated, which could cause deadlock if chunks_mtx were already - * held. - */ - xnode = base_node_alloc(); - - malloc_mutex_lock(&chunks_mtx); - key.addr = (void *)((uintptr_t)chunk + size); - node = extent_tree_ad_nsearch(chunks_ad, &key); - /* Try to coalesce forward. */ - if (node != NULL && node->addr == key.addr) { - /* - * Coalesce chunk with the following address range. This does - * not change the position within chunks_ad, so only - * remove/insert from/into chunks_szad. - */ - extent_tree_szad_remove(chunks_szad, node); - node->addr = chunk; - node->size += size; - node->zeroed = (node->zeroed && (unzeroed == false)); - extent_tree_szad_insert(chunks_szad, node); - if (xnode != NULL) - base_node_dealloc(xnode); - } else { - /* Coalescing forward failed, so insert a new node. */ - if (xnode == NULL) { - /* - * base_node_alloc() failed, which is an exceedingly - * unlikely failure. Leak chunk; its pages have - * already been purged, so this is only a virtual - * memory leak. - */ - malloc_mutex_unlock(&chunks_mtx); - return; - } - node = xnode; - node->addr = chunk; - node->size = size; - node->zeroed = (unzeroed == false); - extent_tree_ad_insert(chunks_ad, node); - extent_tree_szad_insert(chunks_szad, node); - } - - /* Try to coalesce backward. */ - prev = extent_tree_ad_prev(chunks_ad, node); - if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) == - chunk) { - /* - * Coalesce chunk with the previous address range. This does - * not change the position within chunks_ad, so only - * remove/insert node from/into chunks_szad. 
- */ - extent_tree_szad_remove(chunks_szad, prev); - extent_tree_ad_remove(chunks_ad, prev); - - extent_tree_szad_remove(chunks_szad, node); - node->addr = prev->addr; - node->size += prev->size; - node->zeroed = (node->zeroed && prev->zeroed); - extent_tree_szad_insert(chunks_szad, node); - - base_node_dealloc(prev); - } - malloc_mutex_unlock(&chunks_mtx); -} - -void -chunk_unmap(void *chunk, size_t size) -{ - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert(size != 0); - assert((size & chunksize_mask) == 0); - - if (config_dss && chunk_in_dss(chunk)) - chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size); - else if (chunk_dealloc_mmap(chunk, size)) - chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size); -} - -void -chunk_dealloc(void *chunk, size_t size, bool unmap) -{ - - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert(size != 0); - assert((size & chunksize_mask) == 0); - - if (config_ivsalloc) - rtree_set(chunks_rtree, (uintptr_t)chunk, NULL); - if (config_stats || config_prof) { - malloc_mutex_lock(&chunks_mtx); - assert(stats_chunks.curchunks >= (size / chunksize)); - stats_chunks.curchunks -= (size / chunksize); - malloc_mutex_unlock(&chunks_mtx); - } - - if (unmap) - chunk_unmap(chunk, size); -} - -bool -chunk_boot(void) -{ - - /* Set variables according to the value of opt_lg_chunk. */ - chunksize = (ZU(1) << opt_lg_chunk); - assert(chunksize >= PAGE); - chunksize_mask = chunksize - 1; - chunk_npages = (chunksize >> LG_PAGE); - - if (config_stats || config_prof) { - if (malloc_mutex_init(&chunks_mtx)) - return (true); - memset(&stats_chunks, 0, sizeof(chunk_stats_t)); - } - if (config_dss && chunk_dss_boot()) - return (true); - extent_tree_szad_new(&chunks_szad_mmap); - extent_tree_ad_new(&chunks_ad_mmap); - extent_tree_szad_new(&chunks_szad_dss); - extent_tree_ad_new(&chunks_ad_dss); - if (config_ivsalloc) { - chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - - opt_lg_chunk); - if (chunks_rtree == NULL) - return (true); - } - - return (false); -} - -void -chunk_prefork(void) -{ - - malloc_mutex_lock(&chunks_mtx); - if (config_ivsalloc) - rtree_prefork(chunks_rtree); - chunk_dss_prefork(); -} - -void -chunk_postfork_parent(void) -{ - - chunk_dss_postfork_parent(); - if (config_ivsalloc) - rtree_postfork_parent(chunks_rtree); - malloc_mutex_postfork_parent(&chunks_mtx); -} - -void -chunk_postfork_child(void) -{ - - chunk_dss_postfork_child(); - if (config_ivsalloc) - rtree_postfork_child(chunks_rtree); - malloc_mutex_postfork_child(&chunks_mtx); -} diff --git a/extra/jemalloc/src/chunk_dss.c b/extra/jemalloc/src/chunk_dss.c deleted file mode 100644 index 24781cc52dc..00000000000 --- a/extra/jemalloc/src/chunk_dss.c +++ /dev/null @@ -1,197 +0,0 @@ -#define JEMALLOC_CHUNK_DSS_C_ -#include "jemalloc/internal/jemalloc_internal.h" -/******************************************************************************/ -/* Data. */ - -const char *dss_prec_names[] = { - "disabled", - "primary", - "secondary", - "N/A" -}; - -/* Current dss precedence default, used when creating new arenas. */ -static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT; - -/* - * Protects sbrk() calls. This avoids malloc races among threads, though it - * does not protect against races with threads that call sbrk() directly. - */ -static malloc_mutex_t dss_mtx; - -/* Base address of the DSS. */ -static void *dss_base; -/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. 
*/ -static void *dss_prev; -/* Current upper limit on DSS addresses. */ -static void *dss_max; - -/******************************************************************************/ - -#ifndef JEMALLOC_HAVE_SBRK -static void * -sbrk(intptr_t increment) -{ - - not_implemented(); - - return (NULL); -} -#endif - -dss_prec_t -chunk_dss_prec_get(void) -{ - dss_prec_t ret; - - if (config_dss == false) - return (dss_prec_disabled); - malloc_mutex_lock(&dss_mtx); - ret = dss_prec_default; - malloc_mutex_unlock(&dss_mtx); - return (ret); -} - -bool -chunk_dss_prec_set(dss_prec_t dss_prec) -{ - - if (config_dss == false) - return (true); - malloc_mutex_lock(&dss_mtx); - dss_prec_default = dss_prec; - malloc_mutex_unlock(&dss_mtx); - return (false); -} - -void * -chunk_alloc_dss(size_t size, size_t alignment, bool *zero) -{ - void *ret; - - cassert(config_dss); - assert(size > 0 && (size & chunksize_mask) == 0); - assert(alignment > 0 && (alignment & chunksize_mask) == 0); - - /* - * sbrk() uses a signed increment argument, so take care not to - * interpret a huge allocation request as a negative increment. - */ - if ((intptr_t)size < 0) - return (NULL); - - malloc_mutex_lock(&dss_mtx); - if (dss_prev != (void *)-1) { - size_t gap_size, cpad_size; - void *cpad, *dss_next; - intptr_t incr; - - /* - * The loop is necessary to recover from races with other - * threads that are using the DSS for something other than - * malloc. - */ - do { - /* Get the current end of the DSS. */ - dss_max = sbrk(0); - /* - * Calculate how much padding is necessary to - * chunk-align the end of the DSS. - */ - gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) & - chunksize_mask; - /* - * Compute how much chunk-aligned pad space (if any) is - * necessary to satisfy alignment. This space can be - * recycled for later use. - */ - cpad = (void *)((uintptr_t)dss_max + gap_size); - ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max, - alignment); - cpad_size = (uintptr_t)ret - (uintptr_t)cpad; - dss_next = (void *)((uintptr_t)ret + size); - if ((uintptr_t)ret < (uintptr_t)dss_max || - (uintptr_t)dss_next < (uintptr_t)dss_max) { - /* Wrap-around. */ - malloc_mutex_unlock(&dss_mtx); - return (NULL); - } - incr = gap_size + cpad_size + size; - dss_prev = sbrk(incr); - if (dss_prev == dss_max) { - /* Success. 
*/ - dss_max = dss_next; - malloc_mutex_unlock(&dss_mtx); - if (cpad_size != 0) - chunk_unmap(cpad, cpad_size); - if (*zero) { - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - return (ret); - } - } while (dss_prev != (void *)-1); - } - malloc_mutex_unlock(&dss_mtx); - - return (NULL); -} - -bool -chunk_in_dss(void *chunk) -{ - bool ret; - - cassert(config_dss); - - malloc_mutex_lock(&dss_mtx); - if ((uintptr_t)chunk >= (uintptr_t)dss_base - && (uintptr_t)chunk < (uintptr_t)dss_max) - ret = true; - else - ret = false; - malloc_mutex_unlock(&dss_mtx); - - return (ret); -} - -bool -chunk_dss_boot(void) -{ - - cassert(config_dss); - - if (malloc_mutex_init(&dss_mtx)) - return (true); - dss_base = sbrk(0); - dss_prev = dss_base; - dss_max = dss_base; - - return (false); -} - -void -chunk_dss_prefork(void) -{ - - if (config_dss) - malloc_mutex_prefork(&dss_mtx); -} - -void -chunk_dss_postfork_parent(void) -{ - - if (config_dss) - malloc_mutex_postfork_parent(&dss_mtx); -} - -void -chunk_dss_postfork_child(void) -{ - - if (config_dss) - malloc_mutex_postfork_child(&dss_mtx); -} - -/******************************************************************************/ diff --git a/extra/jemalloc/src/chunk_mmap.c b/extra/jemalloc/src/chunk_mmap.c deleted file mode 100644 index 8a42e75915f..00000000000 --- a/extra/jemalloc/src/chunk_mmap.c +++ /dev/null @@ -1,210 +0,0 @@ -#define JEMALLOC_CHUNK_MMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static void *pages_map(void *addr, size_t size); -static void pages_unmap(void *addr, size_t size); -static void *chunk_alloc_mmap_slow(size_t size, size_t alignment, - bool *zero); - -/******************************************************************************/ - -static void * -pages_map(void *addr, size_t size) -{ - void *ret; - - assert(size != 0); - -#ifdef _WIN32 - /* - * If VirtualAlloc can't allocate at the given address when one is - * given, it fails and returns NULL. - */ - ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, - PAGE_READWRITE); -#else - /* - * We don't use MAP_FIXED here, because it can cause the *replacement* - * of existing mappings, and we only want to create new mappings. - */ - ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, - -1, 0); - assert(ret != NULL); - - if (ret == MAP_FAILED) - ret = NULL; - else if (addr != NULL && ret != addr) { - /* - * We succeeded in mapping memory, but not in the right place. 
- */ - if (munmap(ret, size) == -1) { - char buf[BUFERROR_BUF]; - - buferror(buf, sizeof(buf)); - malloc_printf("<jemalloc: Error in munmap(): %s\n", - buf); - if (opt_abort) - abort(); - } - ret = NULL; - } -#endif - assert(ret == NULL || (addr == NULL && ret != addr) - || (addr != NULL && ret == addr)); - return (ret); -} - -static void -pages_unmap(void *addr, size_t size) -{ - -#ifdef _WIN32 - if (VirtualFree(addr, 0, MEM_RELEASE) == 0) -#else - if (munmap(addr, size) == -1) -#endif - { - char buf[BUFERROR_BUF]; - - buferror(buf, sizeof(buf)); - malloc_printf("<jemalloc>: Error in " -#ifdef _WIN32 - "VirtualFree" -#else - "munmap" -#endif - "(): %s\n", buf); - if (opt_abort) - abort(); - } -} - -static void * -pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size) -{ - void *ret = (void *)((uintptr_t)addr + leadsize); - - assert(alloc_size >= leadsize + size); -#ifdef _WIN32 - { - void *new_addr; - - pages_unmap(addr, alloc_size); - new_addr = pages_map(ret, size); - if (new_addr == ret) - return (ret); - if (new_addr) - pages_unmap(new_addr, size); - return (NULL); - } -#else - { - size_t trailsize = alloc_size - leadsize - size; - - if (leadsize != 0) - pages_unmap(addr, leadsize); - if (trailsize != 0) - pages_unmap((void *)((uintptr_t)ret + size), trailsize); - return (ret); - } -#endif -} - -bool -pages_purge(void *addr, size_t length) -{ - bool unzeroed; - -#ifdef _WIN32 - VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE); - unzeroed = true; -#else -# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED -# define JEMALLOC_MADV_PURGE MADV_DONTNEED -# define JEMALLOC_MADV_ZEROS true -# elif defined(JEMALLOC_PURGE_MADVISE_FREE) -# define JEMALLOC_MADV_PURGE MADV_FREE -# define JEMALLOC_MADV_ZEROS false -# else -# error "No method defined for purging unused dirty pages." -# endif - int err = madvise(addr, length, JEMALLOC_MADV_PURGE); - unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0); -# undef JEMALLOC_MADV_PURGE -# undef JEMALLOC_MADV_ZEROS -#endif - return (unzeroed); -} - -static void * -chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero) -{ - void *ret, *pages; - size_t alloc_size, leadsize; - - alloc_size = size + alignment - PAGE; - /* Beware size_t wrap-around. */ - if (alloc_size < size) - return (NULL); - do { - pages = pages_map(NULL, alloc_size); - if (pages == NULL) - return (NULL); - leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - - (uintptr_t)pages; - ret = pages_trim(pages, alloc_size, leadsize, size); - } while (ret == NULL); - - assert(ret != NULL); - *zero = true; - return (ret); -} - -void * -chunk_alloc_mmap(size_t size, size_t alignment, bool *zero) -{ - void *ret; - size_t offset; - - /* - * Ideally, there would be a way to specify alignment to mmap() (like - * NetBSD has), but in the absence of such a feature, we have to work - * hard to efficiently create aligned mappings. The reliable, but - * slow method is to create a mapping that is over-sized, then trim the - * excess. However, that always results in one or two calls to - * pages_unmap(). - * - * Optimistically try mapping precisely the right amount before falling - * back to the slow method, with the expectation that the optimistic - * approach works most of the time. 
- */ - - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - ret = pages_map(NULL, size); - if (ret == NULL) - return (NULL); - offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); - if (offset != 0) { - pages_unmap(ret, size); - return (chunk_alloc_mmap_slow(size, alignment, zero)); - } - - assert(ret != NULL); - *zero = true; - return (ret); -} - -bool -chunk_dealloc_mmap(void *chunk, size_t size) -{ - - if (config_munmap) - pages_unmap(chunk, size); - - return (config_munmap == false); -} diff --git a/extra/jemalloc/src/ckh.c b/extra/jemalloc/src/ckh.c deleted file mode 100644 index 2f38348bb85..00000000000 --- a/extra/jemalloc/src/ckh.c +++ /dev/null @@ -1,563 +0,0 @@ -/* - ******************************************************************************* - * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each - * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash - * functions are employed. The original cuckoo hashing algorithm was described - * in: - * - * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms - * 51(2):122-144. - * - * Generalization of cuckoo hashing was discussed in: - * - * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical - * alternative to traditional hash tables. In Proceedings of the 7th - * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, - * January 2006. - * - * This implementation uses precisely two hash functions because that is the - * fewest that can work, and supporting multiple hashes is an implementation - * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) - * that shows approximate expected maximum load factors for various - * configurations: - * - * | #cells/bucket | - * #hashes | 1 | 2 | 4 | 8 | - * --------+-------+-------+-------+-------+ - * 1 | 0.006 | 0.006 | 0.03 | 0.12 | - * 2 | 0.49 | 0.86 |>0.93< |>0.96< | - * 3 | 0.91 | 0.97 | 0.98 | 0.999 | - * 4 | 0.97 | 0.99 | 0.999 | | - * - * The number of cells per bucket is chosen such that a bucket fits in one cache - * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, - * respectively. - * - ******************************************************************************/ -#define JEMALLOC_CKH_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static bool ckh_grow(ckh_t *ckh); -static void ckh_shrink(ckh_t *ckh); - -/******************************************************************************/ - -/* - * Search bucket for key and return the cell number if found; SIZE_T_MAX - * otherwise. - */ -JEMALLOC_INLINE size_t -ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) -{ - ckhc_t *cell; - unsigned i; - - for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - if (cell->key != NULL && ckh->keycomp(key, cell->key)) - return ((bucket << LG_CKH_BUCKET_CELLS) + i); - } - - return (SIZE_T_MAX); -} - -/* - * Search table for key and return cell number if found; SIZE_T_MAX otherwise. - */ -JEMALLOC_INLINE size_t -ckh_isearch(ckh_t *ckh, const void *key) -{ - size_t hashes[2], bucket, cell; - - assert(ckh != NULL); - - ckh->hash(key, hashes); - - /* Search primary bucket. 
*/ - bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - cell = ckh_bucket_search(ckh, bucket, key); - if (cell != SIZE_T_MAX) - return (cell); - - /* Search secondary bucket. */ - bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - cell = ckh_bucket_search(ckh, bucket, key); - return (cell); -} - -JEMALLOC_INLINE bool -ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, - const void *data) -{ - ckhc_t *cell; - unsigned offset, i; - - /* - * Cycle through the cells in the bucket, starting at a random position. - * The randomness avoids worst-case search overhead as buckets fill up. - */ - prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); - for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + - ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; - if (cell->key == NULL) { - cell->key = key; - cell->data = data; - ckh->count++; - return (false); - } - } - - return (true); -} - -/* - * No space is available in bucket. Randomly evict an item, then try to find an - * alternate location for that item. Iteratively repeat this - * eviction/relocation procedure until either success or detection of an - * eviction/relocation bucket cycle. - */ -JEMALLOC_INLINE bool -ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, - void const **argdata) -{ - const void *key, *data, *tkey, *tdata; - ckhc_t *cell; - size_t hashes[2], bucket, tbucket; - unsigned i; - - bucket = argbucket; - key = *argkey; - data = *argdata; - while (true) { - /* - * Choose a random item within the bucket to evict. This is - * critical to correct function, because without (eventually) - * evicting all items within a bucket during iteration, it - * would be possible to get stuck in an infinite loop if there - * were an item for which both hashes indicated the same - * bucket. - */ - prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); - cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - assert(cell->key != NULL); - - /* Swap cell->{key,data} and {key,data} (evict). */ - tkey = cell->key; tdata = cell->data; - cell->key = key; cell->data = data; - key = tkey; data = tdata; - -#ifdef CKH_COUNT - ckh->nrelocs++; -#endif - - /* Find the alternate bucket for the evicted item. */ - ckh->hash(key, hashes); - tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (tbucket == bucket) { - tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - - 1); - /* - * It may be that (tbucket == bucket) still, if the - * item's hashes both indicate this bucket. However, - * we are guaranteed to eventually escape this bucket - * during iteration, assuming pseudo-random item - * selection (true randomness would make infinite - * looping a remote possibility). The reason we can - * never get trapped forever is that there are two - * cases: - * - * 1) This bucket == argbucket, so we will quickly - * detect an eviction cycle and terminate. - * 2) An item was evicted to this bucket from another, - * which means that at least one item in this bucket - * has hashes that indicate distinct buckets. - */ - } - /* Check for a cycle. 
*/ - if (tbucket == argbucket) { - *argkey = key; - *argdata = data; - return (true); - } - - bucket = tbucket; - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); - } -} - -JEMALLOC_INLINE bool -ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) -{ - size_t hashes[2], bucket; - const void *key = *argkey; - const void *data = *argdata; - - ckh->hash(key, hashes); - - /* Try to insert in primary bucket. */ - bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); - - /* Try to insert in secondary bucket. */ - bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); - - /* - * Try to find a place for this item via iterative eviction/relocation. - */ - return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); -} - -/* - * Try to rebuild the hash table from scratch by inserting all items from the - * old table into the new. - */ -JEMALLOC_INLINE bool -ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) -{ - size_t count, i, nins; - const void *key, *data; - - count = ckh->count; - ckh->count = 0; - for (i = nins = 0; nins < count; i++) { - if (aTab[i].key != NULL) { - key = aTab[i].key; - data = aTab[i].data; - if (ckh_try_insert(ckh, &key, &data)) { - ckh->count = count; - return (true); - } - nins++; - } - } - - return (false); -} - -static bool -ckh_grow(ckh_t *ckh) -{ - bool ret; - ckhc_t *tab, *ttab; - size_t lg_curcells; - unsigned lg_prevbuckets; - -#ifdef CKH_COUNT - ckh->ngrows++; -#endif - - /* - * It is possible (though unlikely, given well behaved hashes) that the - * table will have to be doubled more than once in order to create a - * usable table. - */ - lg_prevbuckets = ckh->lg_curbuckets; - lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; - while (true) { - size_t usize; - - lg_curcells++; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) { - ret = true; - goto label_return; - } - tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); - if (tab == NULL) { - ret = true; - goto label_return; - } - /* Swap in new table. */ - ttab = ckh->tab; - ckh->tab = tab; - tab = ttab; - ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - - if (ckh_rebuild(ckh, tab) == false) { - idalloc(tab); - break; - } - - /* Rebuilding failed, so back out partially rebuilt table. */ - idalloc(ckh->tab); - ckh->tab = tab; - ckh->lg_curbuckets = lg_prevbuckets; - } - - ret = false; -label_return: - return (ret); -} - -static void -ckh_shrink(ckh_t *ckh) -{ - ckhc_t *tab, *ttab; - size_t lg_curcells, usize; - unsigned lg_prevbuckets; - - /* - * It is possible (though unlikely, given well behaved hashes) that the - * table rebuild will fail. - */ - lg_prevbuckets = ckh->lg_curbuckets; - lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) - return; - tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); - if (tab == NULL) { - /* - * An OOM error isn't worth propagating, since it doesn't - * prevent this or future operations from proceeding. - */ - return; - } - /* Swap in new table. */ - ttab = ckh->tab; - ckh->tab = tab; - tab = ttab; - ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - - if (ckh_rebuild(ckh, tab) == false) { - idalloc(tab); -#ifdef CKH_COUNT - ckh->nshrinks++; -#endif - return; - } - - /* Rebuilding failed, so back out partially rebuilt table. 
*/ - idalloc(ckh->tab); - ckh->tab = tab; - ckh->lg_curbuckets = lg_prevbuckets; -#ifdef CKH_COUNT - ckh->nshrinkfails++; -#endif -} - -bool -ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) -{ - bool ret; - size_t mincells, usize; - unsigned lg_mincells; - - assert(minitems > 0); - assert(hash != NULL); - assert(keycomp != NULL); - -#ifdef CKH_COUNT - ckh->ngrows = 0; - ckh->nshrinks = 0; - ckh->nshrinkfails = 0; - ckh->ninserts = 0; - ckh->nrelocs = 0; -#endif - ckh->prng_state = 42; /* Value doesn't really matter. */ - ckh->count = 0; - - /* - * Find the minimum power of 2 that is large enough to fit aBaseCount - * entries. We are using (2+,2) cuckoo hashing, which has an expected - * maximum load factor of at least ~0.86, so 0.75 is a conservative load - * factor that will typically allow 2^aLgMinItems to fit without ever - * growing the table. - */ - assert(LG_CKH_BUCKET_CELLS > 0); - mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; - for (lg_mincells = LG_CKH_BUCKET_CELLS; - (ZU(1) << lg_mincells) < mincells; - lg_mincells++) - ; /* Do nothing. */ - ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; - ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; - ckh->hash = hash; - ckh->keycomp = keycomp; - - usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); - if (usize == 0) { - ret = true; - goto label_return; - } - ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); - if (ckh->tab == NULL) { - ret = true; - goto label_return; - } - - ret = false; -label_return: - return (ret); -} - -void -ckh_delete(ckh_t *ckh) -{ - - assert(ckh != NULL); - -#ifdef CKH_VERBOSE - malloc_printf( - "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64"," - " nshrinkfails: %"PRIu64", ninserts: %"PRIu64"," - " nrelocs: %"PRIu64"\n", __func__, ckh, - (unsigned long long)ckh->ngrows, - (unsigned long long)ckh->nshrinks, - (unsigned long long)ckh->nshrinkfails, - (unsigned long long)ckh->ninserts, - (unsigned long long)ckh->nrelocs); -#endif - - idalloc(ckh->tab); - if (config_debug) - memset(ckh, 0x5a, sizeof(ckh_t)); -} - -size_t -ckh_count(ckh_t *ckh) -{ - - assert(ckh != NULL); - - return (ckh->count); -} - -bool -ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) -{ - size_t i, ncells; - - for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + - LG_CKH_BUCKET_CELLS)); i < ncells; i++) { - if (ckh->tab[i].key != NULL) { - if (key != NULL) - *key = (void *)ckh->tab[i].key; - if (data != NULL) - *data = (void *)ckh->tab[i].data; - *tabind = i + 1; - return (false); - } - } - - return (true); -} - -bool -ckh_insert(ckh_t *ckh, const void *key, const void *data) -{ - bool ret; - - assert(ckh != NULL); - assert(ckh_search(ckh, key, NULL, NULL)); - -#ifdef CKH_COUNT - ckh->ninserts++; -#endif - - while (ckh_try_insert(ckh, &key, &data)) { - if (ckh_grow(ckh)) { - ret = true; - goto label_return; - } - } - - ret = false; -label_return: - return (ret); -} - -bool -ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ - size_t cell; - - assert(ckh != NULL); - - cell = ckh_isearch(ckh, searchkey); - if (cell != SIZE_T_MAX) { - if (key != NULL) - *key = (void *)ckh->tab[cell].key; - if (data != NULL) - *data = (void *)ckh->tab[cell].data; - ckh->tab[cell].key = NULL; - ckh->tab[cell].data = NULL; /* Not necessary. */ - - ckh->count--; - /* Try to halve the table if it is less than 1/4 full. 
*/ - if (ckh->count < (ZU(1) << (ckh->lg_curbuckets - + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets - > ckh->lg_minbuckets) { - /* Ignore error due to OOM. */ - ckh_shrink(ckh); - } - - return (false); - } - - return (true); -} - -bool -ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ - size_t cell; - - assert(ckh != NULL); - - cell = ckh_isearch(ckh, searchkey); - if (cell != SIZE_T_MAX) { - if (key != NULL) - *key = (void *)ckh->tab[cell].key; - if (data != NULL) - *data = (void *)ckh->tab[cell].data; - return (false); - } - - return (true); -} - -void -ckh_string_hash(const void *key, size_t r_hash[2]) -{ - - hash(key, strlen((const char *)key), 0x94122f33U, r_hash); -} - -bool -ckh_string_keycomp(const void *k1, const void *k2) -{ - - assert(k1 != NULL); - assert(k2 != NULL); - - return (strcmp((char *)k1, (char *)k2) ? false : true); -} - -void -ckh_pointer_hash(const void *key, size_t r_hash[2]) -{ - union { - const void *v; - size_t i; - } u; - - assert(sizeof(u.v) == sizeof(u.i)); - u.v = key; - hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash); -} - -bool -ckh_pointer_keycomp(const void *k1, const void *k2) -{ - - return ((k1 == k2) ? true : false); -} diff --git a/extra/jemalloc/src/ctl.c b/extra/jemalloc/src/ctl.c deleted file mode 100644 index 7ce4fc4d573..00000000000 --- a/extra/jemalloc/src/ctl.c +++ /dev/null @@ -1,1673 +0,0 @@ -#define JEMALLOC_CTL_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -/* - * ctl_mtx protects the following: - * - ctl_stats.* - * - opt_prof_active - */ -static malloc_mutex_t ctl_mtx; -static bool ctl_initialized; -static uint64_t ctl_epoch; -static ctl_stats_t ctl_stats; - -/******************************************************************************/ -/* Helpers for named and indexed nodes. */ - -static inline const ctl_named_node_t * -ctl_named_node(const ctl_node_t *node) -{ - - return ((node->named) ? (const ctl_named_node_t *)node : NULL); -} - -static inline const ctl_named_node_t * -ctl_named_children(const ctl_named_node_t *node, int index) -{ - const ctl_named_node_t *children = ctl_named_node(node->children); - - return (children ? &children[index] : NULL); -} - -static inline const ctl_indexed_node_t * -ctl_indexed_node(const ctl_node_t *node) -{ - - return ((node->named == false) ? (const ctl_indexed_node_t *)node : - NULL); -} - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
*/ - -#define CTL_PROTO(n) \ -static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen); - -#define INDEX_PROTO(n) \ -static const ctl_named_node_t *n##_index(const size_t *mib, \ - size_t miblen, size_t i); - -static bool ctl_arena_init(ctl_arena_stats_t *astats); -static void ctl_arena_clear(ctl_arena_stats_t *astats); -static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, - arena_t *arena); -static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, - ctl_arena_stats_t *astats); -static void ctl_arena_refresh(arena_t *arena, unsigned i); -static bool ctl_grow(void); -static void ctl_refresh(void); -static bool ctl_init(void); -static int ctl_lookup(const char *name, ctl_node_t const **nodesp, - size_t *mibp, size_t *depthp); - -CTL_PROTO(version) -CTL_PROTO(epoch) -CTL_PROTO(thread_tcache_enabled) -CTL_PROTO(thread_tcache_flush) -CTL_PROTO(thread_arena) -CTL_PROTO(thread_allocated) -CTL_PROTO(thread_allocatedp) -CTL_PROTO(thread_deallocated) -CTL_PROTO(thread_deallocatedp) -CTL_PROTO(config_debug) -CTL_PROTO(config_dss) -CTL_PROTO(config_fill) -CTL_PROTO(config_lazy_lock) -CTL_PROTO(config_mremap) -CTL_PROTO(config_munmap) -CTL_PROTO(config_prof) -CTL_PROTO(config_prof_libgcc) -CTL_PROTO(config_prof_libunwind) -CTL_PROTO(config_stats) -CTL_PROTO(config_tcache) -CTL_PROTO(config_tls) -CTL_PROTO(config_utrace) -CTL_PROTO(config_valgrind) -CTL_PROTO(config_xmalloc) -CTL_PROTO(opt_abort) -CTL_PROTO(opt_dss) -CTL_PROTO(opt_lg_chunk) -CTL_PROTO(opt_narenas) -CTL_PROTO(opt_lg_dirty_mult) -CTL_PROTO(opt_stats_print) -CTL_PROTO(opt_junk) -CTL_PROTO(opt_zero) -CTL_PROTO(opt_quarantine) -CTL_PROTO(opt_redzone) -CTL_PROTO(opt_utrace) -CTL_PROTO(opt_valgrind) -CTL_PROTO(opt_xmalloc) -CTL_PROTO(opt_tcache) -CTL_PROTO(opt_lg_tcache_max) -CTL_PROTO(opt_prof) -CTL_PROTO(opt_prof_prefix) -CTL_PROTO(opt_prof_active) -CTL_PROTO(opt_lg_prof_sample) -CTL_PROTO(opt_lg_prof_interval) -CTL_PROTO(opt_prof_gdump) -CTL_PROTO(opt_prof_final) -CTL_PROTO(opt_prof_leak) -CTL_PROTO(opt_prof_accum) -CTL_PROTO(arena_i_purge) -static void arena_purge(unsigned arena_ind); -CTL_PROTO(arena_i_dss) -INDEX_PROTO(arena_i) -CTL_PROTO(arenas_bin_i_size) -CTL_PROTO(arenas_bin_i_nregs) -CTL_PROTO(arenas_bin_i_run_size) -INDEX_PROTO(arenas_bin_i) -CTL_PROTO(arenas_lrun_i_size) -INDEX_PROTO(arenas_lrun_i) -CTL_PROTO(arenas_narenas) -CTL_PROTO(arenas_initialized) -CTL_PROTO(arenas_quantum) -CTL_PROTO(arenas_page) -CTL_PROTO(arenas_tcache_max) -CTL_PROTO(arenas_nbins) -CTL_PROTO(arenas_nhbins) -CTL_PROTO(arenas_nlruns) -CTL_PROTO(arenas_purge) -CTL_PROTO(arenas_extend) -CTL_PROTO(prof_active) -CTL_PROTO(prof_dump) -CTL_PROTO(prof_interval) -CTL_PROTO(stats_chunks_current) -CTL_PROTO(stats_chunks_total) -CTL_PROTO(stats_chunks_high) -CTL_PROTO(stats_huge_allocated) -CTL_PROTO(stats_huge_nmalloc) -CTL_PROTO(stats_huge_ndalloc) -CTL_PROTO(stats_arenas_i_small_allocated) -CTL_PROTO(stats_arenas_i_small_nmalloc) -CTL_PROTO(stats_arenas_i_small_ndalloc) -CTL_PROTO(stats_arenas_i_small_nrequests) -CTL_PROTO(stats_arenas_i_large_allocated) -CTL_PROTO(stats_arenas_i_large_nmalloc) -CTL_PROTO(stats_arenas_i_large_ndalloc) -CTL_PROTO(stats_arenas_i_large_nrequests) -CTL_PROTO(stats_arenas_i_bins_j_allocated) -CTL_PROTO(stats_arenas_i_bins_j_nmalloc) -CTL_PROTO(stats_arenas_i_bins_j_ndalloc) -CTL_PROTO(stats_arenas_i_bins_j_nrequests) -CTL_PROTO(stats_arenas_i_bins_j_nfills) -CTL_PROTO(stats_arenas_i_bins_j_nflushes) -CTL_PROTO(stats_arenas_i_bins_j_nruns) 
-CTL_PROTO(stats_arenas_i_bins_j_nreruns) -CTL_PROTO(stats_arenas_i_bins_j_curruns) -INDEX_PROTO(stats_arenas_i_bins_j) -CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) -CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) -CTL_PROTO(stats_arenas_i_lruns_j_nrequests) -CTL_PROTO(stats_arenas_i_lruns_j_curruns) -INDEX_PROTO(stats_arenas_i_lruns_j) -CTL_PROTO(stats_arenas_i_nthreads) -CTL_PROTO(stats_arenas_i_dss) -CTL_PROTO(stats_arenas_i_pactive) -CTL_PROTO(stats_arenas_i_pdirty) -CTL_PROTO(stats_arenas_i_mapped) -CTL_PROTO(stats_arenas_i_npurge) -CTL_PROTO(stats_arenas_i_nmadvise) -CTL_PROTO(stats_arenas_i_purged) -INDEX_PROTO(stats_arenas_i) -CTL_PROTO(stats_cactive) -CTL_PROTO(stats_allocated) -CTL_PROTO(stats_active) -CTL_PROTO(stats_mapped) - -/******************************************************************************/ -/* mallctl tree. */ - -/* Maximum tree depth. */ -#define CTL_MAX_DEPTH 6 - -#define NAME(n) {true}, n -#define CHILD(t, c) \ - sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ - (ctl_node_t *)c##_node, \ - NULL -#define CTL(c) 0, NULL, c##_ctl - -/* - * Only handles internal indexed nodes, since there are currently no external - * ones. - */ -#define INDEX(i) {false}, i##_index - -static const ctl_named_node_t tcache_node[] = { - {NAME("enabled"), CTL(thread_tcache_enabled)}, - {NAME("flush"), CTL(thread_tcache_flush)} -}; - -static const ctl_named_node_t thread_node[] = { - {NAME("arena"), CTL(thread_arena)}, - {NAME("allocated"), CTL(thread_allocated)}, - {NAME("allocatedp"), CTL(thread_allocatedp)}, - {NAME("deallocated"), CTL(thread_deallocated)}, - {NAME("deallocatedp"), CTL(thread_deallocatedp)}, - {NAME("tcache"), CHILD(named, tcache)} -}; - -static const ctl_named_node_t config_node[] = { - {NAME("debug"), CTL(config_debug)}, - {NAME("dss"), CTL(config_dss)}, - {NAME("fill"), CTL(config_fill)}, - {NAME("lazy_lock"), CTL(config_lazy_lock)}, - {NAME("mremap"), CTL(config_mremap)}, - {NAME("munmap"), CTL(config_munmap)}, - {NAME("prof"), CTL(config_prof)}, - {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, - {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, - {NAME("stats"), CTL(config_stats)}, - {NAME("tcache"), CTL(config_tcache)}, - {NAME("tls"), CTL(config_tls)}, - {NAME("utrace"), CTL(config_utrace)}, - {NAME("valgrind"), CTL(config_valgrind)}, - {NAME("xmalloc"), CTL(config_xmalloc)} -}; - -static const ctl_named_node_t opt_node[] = { - {NAME("abort"), CTL(opt_abort)}, - {NAME("dss"), CTL(opt_dss)}, - {NAME("lg_chunk"), CTL(opt_lg_chunk)}, - {NAME("narenas"), CTL(opt_narenas)}, - {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, - {NAME("stats_print"), CTL(opt_stats_print)}, - {NAME("junk"), CTL(opt_junk)}, - {NAME("zero"), CTL(opt_zero)}, - {NAME("quarantine"), CTL(opt_quarantine)}, - {NAME("redzone"), CTL(opt_redzone)}, - {NAME("utrace"), CTL(opt_utrace)}, - {NAME("valgrind"), CTL(opt_valgrind)}, - {NAME("xmalloc"), CTL(opt_xmalloc)}, - {NAME("tcache"), CTL(opt_tcache)}, - {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, - {NAME("prof"), CTL(opt_prof)}, - {NAME("prof_prefix"), CTL(opt_prof_prefix)}, - {NAME("prof_active"), CTL(opt_prof_active)}, - {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, - {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, - {NAME("prof_gdump"), CTL(opt_prof_gdump)}, - {NAME("prof_final"), CTL(opt_prof_final)}, - {NAME("prof_leak"), CTL(opt_prof_leak)}, - {NAME("prof_accum"), CTL(opt_prof_accum)} -}; - -static const ctl_named_node_t arena_i_node[] = { - {NAME("purge"), CTL(arena_i_purge)}, - {NAME("dss"), CTL(arena_i_dss)} -}; -static 
const ctl_named_node_t super_arena_i_node[] = { - {NAME(""), CHILD(named, arena_i)} -}; - -static const ctl_indexed_node_t arena_node[] = { - {INDEX(arena_i)} -}; - -static const ctl_named_node_t arenas_bin_i_node[] = { - {NAME("size"), CTL(arenas_bin_i_size)}, - {NAME("nregs"), CTL(arenas_bin_i_nregs)}, - {NAME("run_size"), CTL(arenas_bin_i_run_size)} -}; -static const ctl_named_node_t super_arenas_bin_i_node[] = { - {NAME(""), CHILD(named, arenas_bin_i)} -}; - -static const ctl_indexed_node_t arenas_bin_node[] = { - {INDEX(arenas_bin_i)} -}; - -static const ctl_named_node_t arenas_lrun_i_node[] = { - {NAME("size"), CTL(arenas_lrun_i_size)} -}; -static const ctl_named_node_t super_arenas_lrun_i_node[] = { - {NAME(""), CHILD(named, arenas_lrun_i)} -}; - -static const ctl_indexed_node_t arenas_lrun_node[] = { - {INDEX(arenas_lrun_i)} -}; - -static const ctl_named_node_t arenas_node[] = { - {NAME("narenas"), CTL(arenas_narenas)}, - {NAME("initialized"), CTL(arenas_initialized)}, - {NAME("quantum"), CTL(arenas_quantum)}, - {NAME("page"), CTL(arenas_page)}, - {NAME("tcache_max"), CTL(arenas_tcache_max)}, - {NAME("nbins"), CTL(arenas_nbins)}, - {NAME("nhbins"), CTL(arenas_nhbins)}, - {NAME("bin"), CHILD(indexed, arenas_bin)}, - {NAME("nlruns"), CTL(arenas_nlruns)}, - {NAME("lrun"), CHILD(indexed, arenas_lrun)}, - {NAME("purge"), CTL(arenas_purge)}, - {NAME("extend"), CTL(arenas_extend)} -}; - -static const ctl_named_node_t prof_node[] = { - {NAME("active"), CTL(prof_active)}, - {NAME("dump"), CTL(prof_dump)}, - {NAME("interval"), CTL(prof_interval)} -}; - -static const ctl_named_node_t stats_chunks_node[] = { - {NAME("current"), CTL(stats_chunks_current)}, - {NAME("total"), CTL(stats_chunks_total)}, - {NAME("high"), CTL(stats_chunks_high)} -}; - -static const ctl_named_node_t stats_huge_node[] = { - {NAME("allocated"), CTL(stats_huge_allocated)}, - {NAME("nmalloc"), CTL(stats_huge_nmalloc)}, - {NAME("ndalloc"), CTL(stats_huge_ndalloc)} -}; - -static const ctl_named_node_t stats_arenas_i_small_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} -}; - -static const ctl_named_node_t stats_arenas_i_large_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} -}; - -static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, - {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, - {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, - {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, - {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, - {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} -}; -static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_bins_j)} -}; - -static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { - {INDEX(stats_arenas_i_bins_j)} -}; - -static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, - {NAME("ndalloc"), 
CTL(stats_arenas_i_lruns_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, - {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} -}; -static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_lruns_j)} -}; - -static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = { - {INDEX(stats_arenas_i_lruns_j)} -}; - -static const ctl_named_node_t stats_arenas_i_node[] = { - {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, - {NAME("dss"), CTL(stats_arenas_i_dss)}, - {NAME("pactive"), CTL(stats_arenas_i_pactive)}, - {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, - {NAME("mapped"), CTL(stats_arenas_i_mapped)}, - {NAME("npurge"), CTL(stats_arenas_i_npurge)}, - {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, - {NAME("purged"), CTL(stats_arenas_i_purged)}, - {NAME("small"), CHILD(named, stats_arenas_i_small)}, - {NAME("large"), CHILD(named, stats_arenas_i_large)}, - {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, - {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)} -}; -static const ctl_named_node_t super_stats_arenas_i_node[] = { - {NAME(""), CHILD(named, stats_arenas_i)} -}; - -static const ctl_indexed_node_t stats_arenas_node[] = { - {INDEX(stats_arenas_i)} -}; - -static const ctl_named_node_t stats_node[] = { - {NAME("cactive"), CTL(stats_cactive)}, - {NAME("allocated"), CTL(stats_allocated)}, - {NAME("active"), CTL(stats_active)}, - {NAME("mapped"), CTL(stats_mapped)}, - {NAME("chunks"), CHILD(named, stats_chunks)}, - {NAME("huge"), CHILD(named, stats_huge)}, - {NAME("arenas"), CHILD(indexed, stats_arenas)} -}; - -static const ctl_named_node_t root_node[] = { - {NAME("version"), CTL(version)}, - {NAME("epoch"), CTL(epoch)}, - {NAME("thread"), CHILD(named, thread)}, - {NAME("config"), CHILD(named, config)}, - {NAME("opt"), CHILD(named, opt)}, - {NAME("arena"), CHILD(indexed, arena)}, - {NAME("arenas"), CHILD(named, arenas)}, - {NAME("prof"), CHILD(named, prof)}, - {NAME("stats"), CHILD(named, stats)} -}; -static const ctl_named_node_t super_root_node[] = { - {NAME(""), CHILD(named, root)} -}; - -#undef NAME -#undef CHILD -#undef CTL -#undef INDEX - -/******************************************************************************/ - -static bool -ctl_arena_init(ctl_arena_stats_t *astats) -{ - - if (astats->lstats == NULL) { - astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses * - sizeof(malloc_large_stats_t)); - if (astats->lstats == NULL) - return (true); - } - - return (false); -} - -static void -ctl_arena_clear(ctl_arena_stats_t *astats) -{ - - astats->dss = dss_prec_names[dss_prec_limit]; - astats->pactive = 0; - astats->pdirty = 0; - if (config_stats) { - memset(&astats->astats, 0, sizeof(arena_stats_t)); - astats->allocated_small = 0; - astats->nmalloc_small = 0; - astats->ndalloc_small = 0; - astats->nrequests_small = 0; - memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); - memset(astats->lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - } -} - -static void -ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) -{ - unsigned i; - - arena_stats_merge(arena, &cstats->dss, &cstats->pactive, - &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats); - - for (i = 0; i < NBINS; i++) { - cstats->allocated_small += cstats->bstats[i].allocated; - cstats->nmalloc_small += cstats->bstats[i].nmalloc; - cstats->ndalloc_small += cstats->bstats[i].ndalloc; - cstats->nrequests_small += cstats->bstats[i].nrequests; - } -} - -static void 
-ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) -{ - unsigned i; - - sstats->pactive += astats->pactive; - sstats->pdirty += astats->pdirty; - - sstats->astats.mapped += astats->astats.mapped; - sstats->astats.npurge += astats->astats.npurge; - sstats->astats.nmadvise += astats->astats.nmadvise; - sstats->astats.purged += astats->astats.purged; - - sstats->allocated_small += astats->allocated_small; - sstats->nmalloc_small += astats->nmalloc_small; - sstats->ndalloc_small += astats->ndalloc_small; - sstats->nrequests_small += astats->nrequests_small; - - sstats->astats.allocated_large += astats->astats.allocated_large; - sstats->astats.nmalloc_large += astats->astats.nmalloc_large; - sstats->astats.ndalloc_large += astats->astats.ndalloc_large; - sstats->astats.nrequests_large += astats->astats.nrequests_large; - - for (i = 0; i < nlclasses; i++) { - sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; - sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; - sstats->lstats[i].nrequests += astats->lstats[i].nrequests; - sstats->lstats[i].curruns += astats->lstats[i].curruns; - } - - for (i = 0; i < NBINS; i++) { - sstats->bstats[i].allocated += astats->bstats[i].allocated; - sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; - sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; - sstats->bstats[i].nrequests += astats->bstats[i].nrequests; - if (config_tcache) { - sstats->bstats[i].nfills += astats->bstats[i].nfills; - sstats->bstats[i].nflushes += - astats->bstats[i].nflushes; - } - sstats->bstats[i].nruns += astats->bstats[i].nruns; - sstats->bstats[i].reruns += astats->bstats[i].reruns; - sstats->bstats[i].curruns += astats->bstats[i].curruns; - } -} - -static void -ctl_arena_refresh(arena_t *arena, unsigned i) -{ - ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; - ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; - - ctl_arena_clear(astats); - - sstats->nthreads += astats->nthreads; - if (config_stats) { - ctl_arena_stats_amerge(astats, arena); - /* Merge into sum stats as well. */ - ctl_arena_stats_smerge(sstats, astats); - } else { - astats->pactive += arena->nactive; - astats->pdirty += arena->ndirty; - /* Merge into sum stats as well. */ - sstats->pactive += arena->nactive; - sstats->pdirty += arena->ndirty; - } -} - -static bool -ctl_grow(void) -{ - size_t astats_size; - ctl_arena_stats_t *astats; - arena_t **tarenas; - - /* Extend arena stats and arenas arrays. */ - astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t); - if (ctl_stats.narenas == narenas_auto) { - /* ctl_stats.arenas and arenas came from base_alloc(). */ - astats = (ctl_arena_stats_t *)imalloc(astats_size); - if (astats == NULL) - return (true); - memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - - tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) * - sizeof(arena_t *)); - if (tarenas == NULL) { - idalloc(astats); - return (true); - } - memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *)); - } else { - astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas, - astats_size, 0, 0, false, false); - if (astats == NULL) - return (true); - - tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) * - sizeof(arena_t *), 0, 0, false, false); - if (tarenas == NULL) - return (true); - } - /* Initialize the new astats and arenas elements. 
*/ - memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t)); - if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) - return (true); - tarenas[ctl_stats.narenas] = NULL; - /* Swap merged stats to their new location. */ - { - ctl_arena_stats_t tstats; - memcpy(&tstats, &astats[ctl_stats.narenas], - sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas], - &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas + 1], &tstats, - sizeof(ctl_arena_stats_t)); - } - ctl_stats.arenas = astats; - ctl_stats.narenas++; - malloc_mutex_lock(&arenas_lock); - arenas = tarenas; - narenas_total++; - arenas_extend(narenas_total - 1); - malloc_mutex_unlock(&arenas_lock); - - return (false); -} - -static void -ctl_refresh(void) -{ - unsigned i; - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); - - if (config_stats) { - malloc_mutex_lock(&chunks_mtx); - ctl_stats.chunks.current = stats_chunks.curchunks; - ctl_stats.chunks.total = stats_chunks.nchunks; - ctl_stats.chunks.high = stats_chunks.highchunks; - malloc_mutex_unlock(&chunks_mtx); - - malloc_mutex_lock(&huge_mtx); - ctl_stats.huge.allocated = huge_allocated; - ctl_stats.huge.nmalloc = huge_nmalloc; - ctl_stats.huge.ndalloc = huge_ndalloc; - malloc_mutex_unlock(&huge_mtx); - } - - /* - * Clear sum stats, since they will be merged into by - * ctl_arena_refresh(). - */ - ctl_stats.arenas[ctl_stats.narenas].nthreads = 0; - ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); - - malloc_mutex_lock(&arenas_lock); - memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas); - for (i = 0; i < ctl_stats.narenas; i++) { - if (arenas[i] != NULL) - ctl_stats.arenas[i].nthreads = arenas[i]->nthreads; - else - ctl_stats.arenas[i].nthreads = 0; - } - malloc_mutex_unlock(&arenas_lock); - for (i = 0; i < ctl_stats.narenas; i++) { - bool initialized = (tarenas[i] != NULL); - - ctl_stats.arenas[i].initialized = initialized; - if (initialized) - ctl_arena_refresh(tarenas[i], i); - } - - if (config_stats) { - ctl_stats.allocated = - ctl_stats.arenas[ctl_stats.narenas].allocated_small - + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large - + ctl_stats.huge.allocated; - ctl_stats.active = - (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE) - + ctl_stats.huge.allocated; - ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk); - } - - ctl_epoch++; -} - -static bool -ctl_init(void) -{ - bool ret; - - malloc_mutex_lock(&ctl_mtx); - if (ctl_initialized == false) { - /* - * Allocate space for one extra arena stats element, which - * contains summed stats across all arenas. - */ - assert(narenas_auto == narenas_total_get()); - ctl_stats.narenas = narenas_auto; - ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc( - (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); - if (ctl_stats.arenas == NULL) { - ret = true; - goto label_return; - } - memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - - /* - * Initialize all stats structures, regardless of whether they - * ever get used. Lazy initialization would allow errors to - * cause inconsistent state to be viewable by the application. 
- */ - if (config_stats) { - unsigned i; - for (i = 0; i <= ctl_stats.narenas; i++) { - if (ctl_arena_init(&ctl_stats.arenas[i])) { - ret = true; - goto label_return; - } - } - } - ctl_stats.arenas[ctl_stats.narenas].initialized = true; - - ctl_epoch = 0; - ctl_refresh(); - ctl_initialized = true; - } - - ret = false; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, - size_t *depthp) -{ - int ret; - const char *elm, *tdot, *dot; - size_t elen, i, j; - const ctl_named_node_t *node; - - elm = name; - /* Equivalent to strchrnul(). */ - dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); - elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); - if (elen == 0) { - ret = ENOENT; - goto label_return; - } - node = super_root_node; - for (i = 0; i < *depthp; i++) { - assert(node); - assert(node->nchildren > 0); - if (ctl_named_node(node->children) != NULL) { - const ctl_named_node_t *pnode = node; - - /* Children are named. */ - for (j = 0; j < node->nchildren; j++) { - const ctl_named_node_t *child = - ctl_named_children(node, j); - if (strlen(child->name) == elen && - strncmp(elm, child->name, elen) == 0) { - node = child; - if (nodesp != NULL) - nodesp[i] = - (const ctl_node_t *)node; - mibp[i] = j; - break; - } - } - if (node == pnode) { - ret = ENOENT; - goto label_return; - } - } else { - uintmax_t index; - const ctl_indexed_node_t *inode; - - /* Children are indexed. */ - index = malloc_strtoumax(elm, NULL, 10); - if (index == UINTMAX_MAX || index > SIZE_T_MAX) { - ret = ENOENT; - goto label_return; - } - - inode = ctl_indexed_node(node->children); - node = inode->index(mibp, *depthp, (size_t)index); - if (node == NULL) { - ret = ENOENT; - goto label_return; - } - - if (nodesp != NULL) - nodesp[i] = (const ctl_node_t *)node; - mibp[i] = (size_t)index; - } - - if (node->ctl != NULL) { - /* Terminal node. */ - if (*dot != '\0') { - /* - * The name contains more elements than are - * in this path through the tree. - */ - ret = ENOENT; - goto label_return; - } - /* Complete lookup successful. */ - *depthp = i + 1; - break; - } - - /* Update elm. */ - if (*dot == '\0') { - /* No more elements. */ - ret = ENOENT; - goto label_return; - } - elm = &dot[1]; - dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : - strchr(elm, '\0'); - elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); - } - - ret = 0; -label_return: - return (ret); -} - -int -ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ - int ret; - size_t depth; - ctl_node_t const *nodes[CTL_MAX_DEPTH]; - size_t mib[CTL_MAX_DEPTH]; - const ctl_named_node_t *node; - - if (ctl_initialized == false && ctl_init()) { - ret = EAGAIN; - goto label_return; - } - - depth = CTL_MAX_DEPTH; - ret = ctl_lookup(name, nodes, mib, &depth); - if (ret != 0) - goto label_return; - - node = ctl_named_node(nodes[depth-1]); - if (node != NULL && node->ctl) - ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); - else { - /* The name refers to a partial path through the ctl tree. 
*/ - ret = ENOENT; - } - -label_return: - return(ret); -} - -int -ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) -{ - int ret; - - if (ctl_initialized == false && ctl_init()) { - ret = EAGAIN; - goto label_return; - } - - ret = ctl_lookup(name, NULL, mibp, miblenp); -label_return: - return(ret); -} - -int -ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - const ctl_named_node_t *node; - size_t i; - - if (ctl_initialized == false && ctl_init()) { - ret = EAGAIN; - goto label_return; - } - - /* Iterate down the tree. */ - node = super_root_node; - for (i = 0; i < miblen; i++) { - assert(node); - assert(node->nchildren > 0); - if (ctl_named_node(node->children) != NULL) { - /* Children are named. */ - if (node->nchildren <= mib[i]) { - ret = ENOENT; - goto label_return; - } - node = ctl_named_children(node, mib[i]); - } else { - const ctl_indexed_node_t *inode; - - /* Indexed element. */ - inode = ctl_indexed_node(node->children); - node = inode->index(mib, miblen, mib[i]); - if (node == NULL) { - ret = ENOENT; - goto label_return; - } - } - } - - /* Call the ctl function. */ - if (node && node->ctl) - ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); - else { - /* Partial MIB. */ - ret = ENOENT; - } - -label_return: - return(ret); -} - -bool -ctl_boot(void) -{ - - if (malloc_mutex_init(&ctl_mtx)) - return (true); - - ctl_initialized = false; - - return (false); -} - -void -ctl_prefork(void) -{ - - malloc_mutex_lock(&ctl_mtx); -} - -void -ctl_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&ctl_mtx); -} - -void -ctl_postfork_child(void) -{ - - malloc_mutex_postfork_child(&ctl_mtx); -} - -/******************************************************************************/ -/* *_ctl() functions. */ - -#define READONLY() do { \ - if (newp != NULL || newlen != 0) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - -#define WRITEONLY() do { \ - if (oldp != NULL || oldlenp != NULL) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - -#define READ(v, t) do { \ - if (oldp != NULL && oldlenp != NULL) { \ - if (*oldlenp != sizeof(t)) { \ - size_t copylen = (sizeof(t) <= *oldlenp) \ - ? sizeof(t) : *oldlenp; \ - memcpy(oldp, (void *)&(v), copylen); \ - ret = EINVAL; \ - goto label_return; \ - } else \ - *(t *)oldp = (v); \ - } \ -} while (0) - -#define WRITE(v, t) do { \ - if (newp != NULL) { \ - if (newlen != sizeof(t)) { \ - ret = EINVAL; \ - goto label_return; \ - } \ - (v) = *(t *)newp; \ - } \ -} while (0) - -/* - * There's a lot of code duplication in the following macros due to limitations - * in how nested cpp macros are expanded. 
- */ -#define CTL_RO_CLGEN(c, l, n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if ((c) == false) \ - return (ENOENT); \ - if (l) \ - malloc_mutex_lock(&ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - if (l) \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ -} - -#define CTL_RO_CGEN(c, n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if ((c) == false) \ - return (ENOENT); \ - malloc_mutex_lock(&ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ -} - -#define CTL_RO_GEN(n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - malloc_mutex_lock(&ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ -} - -/* - * ctl_mtx is not acquired, under the assumption that no pertinent data will - * mutate during the call. - */ -#define CTL_RO_NL_CGEN(c, n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if ((c) == false) \ - return (ENOENT); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -#define CTL_RO_NL_GEN(n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -#define CTL_RO_BOOL_CONFIG_GEN(n) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - bool oldval; \ - \ - READONLY(); \ - oldval = n; \ - READ(oldval, bool); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) - -static int -epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - uint64_t newval __attribute__((unused)); - - malloc_mutex_lock(&ctl_mtx); - WRITE(newval, uint64_t); - if (newp != NULL) - ctl_refresh(); - READ(ctl_epoch, uint64_t); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (config_tcache == false) - return (ENOENT); - - oldval = tcache_enabled_get(); - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - tcache_enabled_set(*(bool *)newp); - } - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -static int -thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - if (config_tcache == false) - return (ENOENT); - - READONLY(); - WRITEONLY(); - - tcache_flush(); - - ret = 0; -label_return: - return (ret); -} - -static int -thread_arena_ctl(const size_t *mib, size_t miblen, void 
*oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - unsigned newind, oldind; - - malloc_mutex_lock(&ctl_mtx); - newind = oldind = choose_arena(NULL)->ind; - WRITE(newind, unsigned); - READ(oldind, unsigned); - if (newind != oldind) { - arena_t *arena; - - if (newind >= ctl_stats.narenas) { - /* New arena index is out of range. */ - ret = EFAULT; - goto label_return; - } - - /* Initialize arena if necessary. */ - malloc_mutex_lock(&arenas_lock); - if ((arena = arenas[newind]) == NULL && (arena = - arenas_extend(newind)) == NULL) { - malloc_mutex_unlock(&arenas_lock); - ret = EAGAIN; - goto label_return; - } - assert(arena == arenas[newind]); - arenas[oldind]->nthreads--; - arenas[newind]->nthreads++; - malloc_mutex_unlock(&arenas_lock); - - /* Set new arena association. */ - if (config_tcache) { - tcache_t *tcache; - if ((uintptr_t)(tcache = *tcache_tsd_get()) > - (uintptr_t)TCACHE_STATE_MAX) { - tcache_arena_dissociate(tcache); - tcache_arena_associate(tcache, arena); - } - } - arenas_tsd_set(&arena); - } - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -CTL_RO_NL_CGEN(config_stats, thread_allocated, - thread_allocated_tsd_get()->allocated, uint64_t) -CTL_RO_NL_CGEN(config_stats, thread_allocatedp, - &thread_allocated_tsd_get()->allocated, uint64_t *) -CTL_RO_NL_CGEN(config_stats, thread_deallocated, - thread_allocated_tsd_get()->deallocated, uint64_t) -CTL_RO_NL_CGEN(config_stats, thread_deallocatedp, - &thread_allocated_tsd_get()->deallocated, uint64_t *) - -/******************************************************************************/ - -CTL_RO_BOOL_CONFIG_GEN(config_debug) -CTL_RO_BOOL_CONFIG_GEN(config_dss) -CTL_RO_BOOL_CONFIG_GEN(config_fill) -CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) -CTL_RO_BOOL_CONFIG_GEN(config_mremap) -CTL_RO_BOOL_CONFIG_GEN(config_munmap) -CTL_RO_BOOL_CONFIG_GEN(config_prof) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) -CTL_RO_BOOL_CONFIG_GEN(config_stats) -CTL_RO_BOOL_CONFIG_GEN(config_tcache) -CTL_RO_BOOL_CONFIG_GEN(config_tls) -CTL_RO_BOOL_CONFIG_GEN(config_utrace) -CTL_RO_BOOL_CONFIG_GEN(config_valgrind) -CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) - -/******************************************************************************/ - -CTL_RO_NL_GEN(opt_abort, opt_abort, bool) -CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) -CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) -CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) -CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) -CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) -CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool) -CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) -CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) -CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) -CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) -CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool) -CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) -CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) -CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) -CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) -CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. 
*/ -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) -CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) - -/******************************************************************************/ - -/* ctl_mutex must be held during execution of this function. */ -static void -arena_purge(unsigned arena_ind) -{ - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); - - malloc_mutex_lock(&arenas_lock); - memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas); - malloc_mutex_unlock(&arenas_lock); - - if (arena_ind == ctl_stats.narenas) { - unsigned i; - for (i = 0; i < ctl_stats.narenas; i++) { - if (tarenas[i] != NULL) - arena_purge_all(tarenas[i]); - } - } else { - assert(arena_ind < ctl_stats.narenas); - if (tarenas[arena_ind] != NULL) - arena_purge_all(tarenas[arena_ind]); - } -} - -static int -arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - - READONLY(); - WRITEONLY(); - malloc_mutex_lock(&ctl_mtx); - arena_purge(mib[1]); - malloc_mutex_unlock(&ctl_mtx); - - ret = 0; -label_return: - return (ret); -} - -static int -arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret, i; - bool match, err; - const char *dss; - unsigned arena_ind = mib[1]; - dss_prec_t dss_prec_old = dss_prec_limit; - dss_prec_t dss_prec = dss_prec_limit; - - malloc_mutex_lock(&ctl_mtx); - WRITE(dss, const char *); - match = false; - for (i = 0; i < dss_prec_limit; i++) { - if (strcmp(dss_prec_names[i], dss) == 0) { - dss_prec = i; - match = true; - break; - } - } - if (match == false) { - ret = EINVAL; - goto label_return; - } - - if (arena_ind < ctl_stats.narenas) { - arena_t *arena = arenas[arena_ind]; - if (arena != NULL) { - dss_prec_old = arena_dss_prec_get(arena); - arena_dss_prec_set(arena, dss_prec); - err = false; - } else - err = true; - } else { - dss_prec_old = chunk_dss_prec_get(); - err = chunk_dss_prec_set(dss_prec); - } - dss = dss_prec_names[dss_prec_old]; - READ(dss, const char *); - if (err) { - ret = EFAULT; - goto label_return; - } - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static const ctl_named_node_t * -arena_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; - - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas) { - ret = NULL; - goto label_return; - } - - ret = super_arena_i_node; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - - -/******************************************************************************/ - -CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) -CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) -CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) -static const ctl_named_node_t * -arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > NBINS) - return (NULL); - return (super_arenas_bin_i_node); -} - -CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t) -static const ctl_named_node_t * -arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > nlclasses) - return (NULL); - return 
(super_arenas_lrun_i_node); -} - -static int -arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned narenas; - - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (*oldlenp != sizeof(unsigned)) { - ret = EINVAL; - goto label_return; - } - narenas = ctl_stats.narenas; - READ(narenas, unsigned); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned nread, i; - - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { - ret = EINVAL; - nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) - ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas; - } else { - ret = 0; - nread = ctl_stats.narenas; - } - - for (i = 0; i < nread; i++) - ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; - -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) -CTL_RO_NL_GEN(arenas_page, PAGE, size_t) -CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) -CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) -CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) -CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t) - -static int -arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - unsigned arena_ind; - - malloc_mutex_lock(&ctl_mtx); - WRITEONLY(); - arena_ind = UINT_MAX; - WRITE(arena_ind, unsigned); - if (newp != NULL && arena_ind >= ctl_stats.narenas) - ret = EFAULT; - else { - if (arena_ind == UINT_MAX) - arena_ind = ctl_stats.narenas; - arena_purge(arena_ind); - ret = 0; - } - -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - unsigned narenas; - - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (ctl_grow()) { - ret = EAGAIN; - goto label_return; - } - narenas = ctl_stats.narenas - 1; - READ(narenas, unsigned); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -/******************************************************************************/ - -static int -prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (config_prof == false) - return (ENOENT); - - malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */ - oldval = opt_prof_active; - if (newp != NULL) { - /* - * The memory barriers will tend to make opt_prof_active - * propagate faster on systems with weak memory ordering. 
- */ - mb_write(); - WRITE(opt_prof_active, bool); - mb_write(); - } - READ(oldval, bool); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - const char *filename = NULL; - - if (config_prof == false) - return (ENOENT); - - WRITEONLY(); - WRITE(filename, const char *); - - if (prof_mdump(filename)) { - ret = EFAULT; - goto label_return; - } - - ret = 0; -label_return: - return (ret); -} - -CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) - -/******************************************************************************/ - -CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current, - size_t) -CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t) -CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t) -CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t) -CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, - ctl_stats.arenas[mib[2]].allocated_small, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, - ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, - ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, - ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, - ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, - ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, - ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, - ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) - -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated, - ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) - -static const ctl_named_node_t * -stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) -{ - - if (j > NBINS) - return (NULL); - return (super_stats_arenas_i_bins_j_node); -} - -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, 
stats_arenas_i_lruns_j_ndalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, - ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) - -static const ctl_named_node_t * -stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) -{ - - if (j > nlclasses) - return (NULL); - return (super_stats_arenas_i_lruns_j_node); -} - -CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) -CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) -CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) -CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, - ctl_stats.arenas[mib[2]].astats.mapped, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, - ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, - ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_purged, - ctl_stats.arenas[mib[2]].astats.purged, uint64_t) - -static const ctl_named_node_t * -stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; - - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) { - ret = NULL; - goto label_return; - } - - ret = super_stats_arenas_i_node; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) -CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) -CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) -CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) diff --git a/extra/jemalloc/src/extent.c b/extra/jemalloc/src/extent.c deleted file mode 100644 index 8c09b486ed8..00000000000 --- a/extra/jemalloc/src/extent.c +++ /dev/null @@ -1,39 +0,0 @@ -#define JEMALLOC_EXTENT_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ - -static inline int -extent_szad_comp(extent_node_t *a, extent_node_t *b) -{ - int ret; - size_t a_size = a->size; - size_t b_size = b->size; - - ret = (a_size > b_size) - (a_size < b_size); - if (ret == 0) { - uintptr_t a_addr = (uintptr_t)a->addr; - uintptr_t b_addr = (uintptr_t)b->addr; - - ret = (a_addr > b_addr) - (a_addr < b_addr); - } - - return (ret); -} - -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad, - extent_szad_comp) - -static inline int -extent_ad_comp(extent_node_t *a, extent_node_t *b) -{ - uintptr_t a_addr = (uintptr_t)a->addr; - uintptr_t b_addr = (uintptr_t)b->addr; - - return ((a_addr > b_addr) - (a_addr < b_addr)); -} - -/* Generate red-black tree functions. 
*/ -rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad, - extent_ad_comp) diff --git a/extra/jemalloc/src/hash.c b/extra/jemalloc/src/hash.c deleted file mode 100644 index cfa4da0275c..00000000000 --- a/extra/jemalloc/src/hash.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_HASH_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/extra/jemalloc/src/huge.c b/extra/jemalloc/src/huge.c deleted file mode 100644 index aa08d43d362..00000000000 --- a/extra/jemalloc/src/huge.c +++ /dev/null @@ -1,313 +0,0 @@ -#define JEMALLOC_HUGE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -uint64_t huge_nmalloc; -uint64_t huge_ndalloc; -size_t huge_allocated; - -malloc_mutex_t huge_mtx; - -/******************************************************************************/ - -/* Tree of chunks that are stand-alone huge allocations. */ -static extent_tree_t huge; - -void * -huge_malloc(size_t size, bool zero) -{ - - return (huge_palloc(size, chunksize, zero)); -} - -void * -huge_palloc(size_t size, size_t alignment, bool zero) -{ - void *ret; - size_t csize; - extent_node_t *node; - bool is_zeroed; - - /* Allocate one or more contiguous chunks for this request. */ - - csize = CHUNK_CEILING(size); - if (csize == 0) { - /* size is large enough to cause size_t wrap-around. */ - return (NULL); - } - - /* Allocate an extent node with which to track the chunk. */ - node = base_node_alloc(); - if (node == NULL) - return (NULL); - - /* - * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that - * it is possible to make correct junk/zero fill decisions below. - */ - is_zeroed = zero; - ret = chunk_alloc(csize, alignment, false, &is_zeroed, - chunk_dss_prec_get()); - if (ret == NULL) { - base_node_dealloc(node); - return (NULL); - } - - /* Insert node into huge. */ - node->addr = ret; - node->size = csize; - - malloc_mutex_lock(&huge_mtx); - extent_tree_ad_insert(&huge, node); - if (config_stats) { - stats_cactive_add(csize); - huge_nmalloc++; - huge_allocated += csize; - } - malloc_mutex_unlock(&huge_mtx); - - if (config_fill && zero == false) { - if (opt_junk) - memset(ret, 0xa5, csize); - else if (opt_zero && is_zeroed == false) - memset(ret, 0, csize); - } - - return (ret); -} - -void * -huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra) -{ - - /* - * Avoid moving the allocation if the size class can be left the same. - */ - if (oldsize > arena_maxclass - && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size) - && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) { - assert(CHUNK_CEILING(oldsize) == oldsize); - if (config_fill && opt_junk && size < oldsize) { - memset((void *)((uintptr_t)ptr + size), 0x5a, - oldsize - size); - } - return (ptr); - } - - /* Reallocation would require a move. */ - return (NULL); -} - -void * -huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero, bool try_tcache_dalloc) -{ - void *ret; - size_t copysize; - - /* Try to avoid moving the allocation. */ - ret = huge_ralloc_no_move(ptr, oldsize, size, extra); - if (ret != NULL) - return (ret); - - /* - * size and oldsize are different enough that we need to use a - * different size class. In that case, fall back to allocating new - * space and copying. 
- */ - if (alignment > chunksize) - ret = huge_palloc(size + extra, alignment, zero); - else - ret = huge_malloc(size + extra, zero); - - if (ret == NULL) { - if (extra == 0) - return (NULL); - /* Try again, this time without extra. */ - if (alignment > chunksize) - ret = huge_palloc(size, alignment, zero); - else - ret = huge_malloc(size, zero); - - if (ret == NULL) - return (NULL); - } - - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. - */ - copysize = (size < oldsize) ? size : oldsize; - -#ifdef JEMALLOC_MREMAP - /* - * Use mremap(2) if this is a huge-->huge reallocation, and neither the - * source nor the destination are in dss. - */ - if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr) - == false && chunk_in_dss(ret) == false))) { - size_t newsize = huge_salloc(ret); - - /* - * Remove ptr from the tree of huge allocations before - * performing the remap operation, in order to avoid the - * possibility of another thread acquiring that mapping before - * this one removes it from the tree. - */ - huge_dalloc(ptr, false); - if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED, - ret) == MAP_FAILED) { - /* - * Assuming no chunk management bugs in the allocator, - * the only documented way an error can occur here is - * if the application changed the map type for a - * portion of the old allocation. This is firmly in - * undefined behavior territory, so write a diagnostic - * message, and optionally abort. - */ - char buf[BUFERROR_BUF]; - - buferror(buf, sizeof(buf)); - malloc_printf("<jemalloc>: Error in mremap(): %s\n", - buf); - if (opt_abort) - abort(); - memcpy(ret, ptr, copysize); - chunk_dealloc_mmap(ptr, oldsize); - } - } else -#endif - { - memcpy(ret, ptr, copysize); - iqallocx(ptr, try_tcache_dalloc); - } - return (ret); -} - -void -huge_dalloc(void *ptr, bool unmap) -{ - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. */ - key.addr = ptr; - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - assert(node->addr == ptr); - extent_tree_ad_remove(&huge, node); - - if (config_stats) { - stats_cactive_sub(node->size); - huge_ndalloc++; - huge_allocated -= node->size; - } - - malloc_mutex_unlock(&huge_mtx); - - if (unmap && config_fill && config_dss && opt_junk) - memset(node->addr, 0x5a, node->size); - - chunk_dealloc(node->addr, node->size, unmap); - - base_node_dealloc(node); -} - -size_t -huge_salloc(const void *ptr) -{ - size_t ret; - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. */ - key.addr = __DECONST(void *, ptr); - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - - ret = node->size; - - malloc_mutex_unlock(&huge_mtx); - - return (ret); -} - -prof_ctx_t * -huge_prof_ctx_get(const void *ptr) -{ - prof_ctx_t *ret; - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. */ - key.addr = __DECONST(void *, ptr); - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - - ret = node->prof_ctx; - - malloc_mutex_unlock(&huge_mtx); - - return (ret); -} - -void -huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) -{ - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. 
*/ - key.addr = __DECONST(void *, ptr); - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - - node->prof_ctx = ctx; - - malloc_mutex_unlock(&huge_mtx); -} - -bool -huge_boot(void) -{ - - /* Initialize chunks data. */ - if (malloc_mutex_init(&huge_mtx)) - return (true); - extent_tree_ad_new(&huge); - - if (config_stats) { - huge_nmalloc = 0; - huge_ndalloc = 0; - huge_allocated = 0; - } - - return (false); -} - -void -huge_prefork(void) -{ - - malloc_mutex_prefork(&huge_mtx); -} - -void -huge_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&huge_mtx); -} - -void -huge_postfork_child(void) -{ - - malloc_mutex_postfork_child(&huge_mtx); -} diff --git a/extra/jemalloc/src/jemalloc.c b/extra/jemalloc/src/jemalloc.c deleted file mode 100644 index bc350ed953b..00000000000 --- a/extra/jemalloc/src/jemalloc.c +++ /dev/null @@ -1,1868 +0,0 @@ -#define JEMALLOC_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -malloc_tsd_data(, arenas, arena_t *, NULL) -malloc_tsd_data(, thread_allocated, thread_allocated_t, - THREAD_ALLOCATED_INITIALIZER) - -/* Runtime configuration options. */ -const char *je_malloc_conf; -bool opt_abort = -#ifdef JEMALLOC_DEBUG - true -#else - false -#endif - ; -bool opt_junk = -#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) - true -#else - false -#endif - ; -size_t opt_quarantine = ZU(0); -bool opt_redzone = false; -bool opt_utrace = false; -bool opt_valgrind = false; -bool opt_xmalloc = false; -bool opt_zero = false; -size_t opt_narenas = 0; - -unsigned ncpus; - -malloc_mutex_t arenas_lock; -arena_t **arenas; -unsigned narenas_total; -unsigned narenas_auto; - -/* Set to true once the allocator has been initialized. */ -static bool malloc_initialized = false; - -#ifdef JEMALLOC_THREADED_INIT -/* Used to let the initializing thread recursively allocate. */ -# define NO_INITIALIZER ((unsigned long)0) -# define INITIALIZER pthread_self() -# define IS_INITIALIZER (malloc_initializer == pthread_self()) -static pthread_t malloc_initializer = NO_INITIALIZER; -#else -# define NO_INITIALIZER false -# define INITIALIZER true -# define IS_INITIALIZER malloc_initializer -static bool malloc_initializer = NO_INITIALIZER; -#endif - -/* Used to avoid initialization races. */ -#ifdef _WIN32 -static malloc_mutex_t init_lock; - -JEMALLOC_ATTR(constructor) -static void WINAPI -_init_init_lock(void) -{ - - malloc_mutex_init(&init_lock); -} - -#ifdef _MSC_VER -# pragma section(".CRT$XCU", read) -JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) -static const void (WINAPI *init_init_lock)(void) = _init_init_lock; -#endif - -#else -static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; -#endif - -typedef struct { - void *p; /* Input pointer (as in realloc(p, s)). */ - size_t s; /* Request size. */ - void *r; /* Result pointer. */ -} malloc_utrace_t; - -#ifdef JEMALLOC_UTRACE -# define UTRACE(a, b, c) do { \ - if (opt_utrace) { \ - int utrace_serrno = errno; \ - malloc_utrace_t ut; \ - ut.p = (a); \ - ut.s = (b); \ - ut.r = (c); \ - utrace(&ut, sizeof(ut)); \ - errno = utrace_serrno; \ - } \ -} while (0) -#else -# define UTRACE(a, b, c) -#endif - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
*/ - -static void stats_print_atexit(void); -static unsigned malloc_ncpus(void); -static bool malloc_conf_next(char const **opts_p, char const **k_p, - size_t *klen_p, char const **v_p, size_t *vlen_p); -static void malloc_conf_error(const char *msg, const char *k, size_t klen, - const char *v, size_t vlen); -static void malloc_conf_init(void); -static bool malloc_init_hard(void); -static int imemalign(void **memptr, size_t alignment, size_t size, - size_t min_alignment); - -/******************************************************************************/ -/* - * Begin miscellaneous support functions. - */ - -/* Create a new arena and insert it into the arenas array at index ind. */ -arena_t * -arenas_extend(unsigned ind) -{ - arena_t *ret; - - ret = (arena_t *)base_alloc(sizeof(arena_t)); - if (ret != NULL && arena_new(ret, ind) == false) { - arenas[ind] = ret; - return (ret); - } - /* Only reached if there is an OOM error. */ - - /* - * OOM here is quite inconvenient to propagate, since dealing with it - * would require a check for failure in the fast path. Instead, punt - * by using arenas[0]. In practice, this is an extremely unlikely - * failure. - */ - malloc_write("<jemalloc>: Error initializing arena\n"); - if (opt_abort) - abort(); - - return (arenas[0]); -} - -/* Slow path, called only by choose_arena(). */ -arena_t * -choose_arena_hard(void) -{ - arena_t *ret; - - if (narenas_auto > 1) { - unsigned i, choose, first_null; - - choose = 0; - first_null = narenas_auto; - malloc_mutex_lock(&arenas_lock); - assert(arenas[0] != NULL); - for (i = 1; i < narenas_auto; i++) { - if (arenas[i] != NULL) { - /* - * Choose the first arena that has the lowest - * number of threads assigned to it. - */ - if (arenas[i]->nthreads < - arenas[choose]->nthreads) - choose = i; - } else if (first_null == narenas_auto) { - /* - * Record the index of the first uninitialized - * arena, in case all extant arenas are in use. - * - * NB: It is possible for there to be - * discontinuities in terms of initialized - * versus uninitialized arenas, due to the - * "thread.arena" mallctl. - */ - first_null = i; - } - } - - if (arenas[choose]->nthreads == 0 - || first_null == narenas_auto) { - /* - * Use an unloaded arena, or the least loaded arena if - * all arenas are already initialized. - */ - ret = arenas[choose]; - } else { - /* Initialize a new arena. */ - ret = arenas_extend(first_null); - } - ret->nthreads++; - malloc_mutex_unlock(&arenas_lock); - } else { - ret = arenas[0]; - malloc_mutex_lock(&arenas_lock); - ret->nthreads++; - malloc_mutex_unlock(&arenas_lock); - } - - arenas_tsd_set(&ret); - - return (ret); -} - -static void -stats_print_atexit(void) -{ - - if (config_tcache && config_stats) { - unsigned narenas, i; - - /* - * Merge stats from extant threads. This is racy, since - * individual threads do not lock when recording tcache stats - * events. As a consequence, the final stats may be slightly - * out of date by the time they are reported, if other threads - * continue to allocate. - */ - for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { - arena_t *arena = arenas[i]; - if (arena != NULL) { - tcache_t *tcache; - - /* - * tcache_stats_merge() locks bins, so if any - * code is introduced that acquires both arena - * and bin locks in the opposite order, - * deadlocks may result. 
- */ - malloc_mutex_lock(&arena->lock); - ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tcache, arena); - } - malloc_mutex_unlock(&arena->lock); - } - } - } - je_malloc_stats_print(NULL, NULL, NULL); -} - -/* - * End miscellaneous support functions. - */ -/******************************************************************************/ -/* - * Begin initialization functions. - */ - -static unsigned -malloc_ncpus(void) -{ - unsigned ret; - long result; - -#ifdef _WIN32 - SYSTEM_INFO si; - GetSystemInfo(&si); - result = si.dwNumberOfProcessors; -#else - result = sysconf(_SC_NPROCESSORS_ONLN); -#endif - if (result == -1) { - /* Error. */ - ret = 1; - } else { - ret = (unsigned)result; - } - - return (ret); -} - -void -arenas_cleanup(void *arg) -{ - arena_t *arena = *(arena_t **)arg; - - malloc_mutex_lock(&arenas_lock); - arena->nthreads--; - malloc_mutex_unlock(&arenas_lock); -} - -static JEMALLOC_ATTR(always_inline) void -malloc_thread_init(void) -{ - - /* - * TSD initialization can't be safely done as a side effect of - * deallocation, because it is possible for a thread to do nothing but - * deallocate its TLS data via free(), in which case writing to TLS - * would cause write-after-free memory corruption. The quarantine - * facility *only* gets used as a side effect of deallocation, so make - * a best effort attempt at initializing its TSD by hooking all - * allocation events. - */ - if (config_fill && opt_quarantine) - quarantine_alloc_hook(); -} - -static JEMALLOC_ATTR(always_inline) bool -malloc_init(void) -{ - - if (malloc_initialized == false && malloc_init_hard()) - return (true); - malloc_thread_init(); - - return (false); -} - -static bool -malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, - char const **v_p, size_t *vlen_p) -{ - bool accept; - const char *opts = *opts_p; - - *k_p = opts; - - for (accept = false; accept == false;) { - switch (*opts) { - case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': - case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': - case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': - case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': - case 'Y': case 'Z': - case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': - case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': - case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': - case 's': case 't': case 'u': case 'v': case 'w': case 'x': - case 'y': case 'z': - case '0': case '1': case '2': case '3': case '4': case '5': - case '6': case '7': case '8': case '9': - case '_': - opts++; - break; - case ':': - opts++; - *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; - *v_p = opts; - accept = true; - break; - case '\0': - if (opts != *opts_p) { - malloc_write("<jemalloc>: Conf string ends " - "with key\n"); - } - return (true); - default: - malloc_write("<jemalloc>: Malformed conf string\n"); - return (true); - } - } - - for (accept = false; accept == false;) { - switch (*opts) { - case ',': - opts++; - /* - * Look ahead one character here, because the next time - * this function is called, it will assume that end of - * input has been cleanly reached if no input remains, - * but we have optimistically already consumed the - * comma if one exists. 
- */ - if (*opts == '\0') { - malloc_write("<jemalloc>: Conf string ends " - "with comma\n"); - } - *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; - accept = true; - break; - case '\0': - *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; - accept = true; - break; - default: - opts++; - break; - } - } - - *opts_p = opts; - return (false); -} - -static void -malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, - size_t vlen) -{ - - malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, - (int)vlen, v); -} - -static void -malloc_conf_init(void) -{ - unsigned i; - char buf[PATH_MAX + 1]; - const char *opts, *k, *v; - size_t klen, vlen; - - /* - * Automatically configure valgrind before processing options. The - * valgrind option remains in jemalloc 3.x for compatibility reasons. - */ - if (config_valgrind) { - opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; - if (config_fill && opt_valgrind) { - opt_junk = false; - assert(opt_zero == false); - opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; - opt_redzone = true; - } - if (config_tcache && opt_valgrind) - opt_tcache = false; - } - - for (i = 0; i < 3; i++) { - /* Get runtime configuration. */ - switch (i) { - case 0: - if (je_malloc_conf != NULL) { - /* - * Use options that were compiled into the - * program. - */ - opts = je_malloc_conf; - } else { - /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; - } - break; - case 1: { -#ifndef _WIN32 - int linklen; - const char *linkname = -# ifdef JEMALLOC_PREFIX - "/etc/"JEMALLOC_PREFIX"malloc.conf" -# else - "/etc/malloc.conf" -# endif - ; - - if ((linklen = readlink(linkname, buf, - sizeof(buf) - 1)) != -1) { - /* - * Use the contents of the "/etc/malloc.conf" - * symbolic link's name. - */ - buf[linklen] = '\0'; - opts = buf; - } else -#endif - { - /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; - } - break; - } case 2: { - const char *envname = -#ifdef JEMALLOC_PREFIX - JEMALLOC_CPREFIX"MALLOC_CONF" -#else - "MALLOC_CONF" -#endif - ; - - if ((opts = getenv(envname)) != NULL) { - /* - * Do nothing; opts is already initialized to - * the value of the MALLOC_CONF environment - * variable. - */ - } else { - /* No configuration specified. 
*/ - buf[0] = '\0'; - opts = buf; - } - break; - } default: - /* NOTREACHED */ - assert(false); - buf[0] = '\0'; - opts = buf; - } - - while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v, - &vlen) == false) { -#define CONF_HANDLE_BOOL(o, n) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ - if (strncmp("true", v, vlen) == 0 && \ - vlen == sizeof("true")-1) \ - o = true; \ - else if (strncmp("false", v, vlen) == \ - 0 && vlen == sizeof("false")-1) \ - o = false; \ - else { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } \ - continue; \ - } -#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ - uintmax_t um; \ - char *end; \ - \ - set_errno(0); \ - um = malloc_strtoumax(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } else if (clip) { \ - if (um < min) \ - o = min; \ - else if (um > max) \ - o = max; \ - else \ - o = um; \ - } else { \ - if (um < min || um > max) { \ - malloc_conf_error( \ - "Out-of-range " \ - "conf value", \ - k, klen, v, vlen); \ - } else \ - o = um; \ - } \ - continue; \ - } -#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ - long l; \ - char *end; \ - \ - set_errno(0); \ - l = strtol(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } else if (l < (ssize_t)min || l > \ - (ssize_t)max) { \ - malloc_conf_error( \ - "Out-of-range conf value", \ - k, klen, v, vlen); \ - } else \ - o = l; \ - continue; \ - } -#define CONF_HANDLE_CHAR_P(o, n, d) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ - size_t cpylen = (vlen <= \ - sizeof(o)-1) ? vlen : \ - sizeof(o)-1; \ - strncpy(o, v, cpylen); \ - o[cpylen] = '\0'; \ - continue; \ - } - - CONF_HANDLE_BOOL(opt_abort, "abort") - /* - * Chunks always require at least one header page, plus - * one data page in the absence of redzones, or three - * pages in the presence of redzones. In order to - * simplify options processing, fix the limit based on - * config_fill. - */ - CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + - (config_fill ? 
2 : 1), (sizeof(size_t) << 3) - 1, - true) - if (strncmp("dss", k, klen) == 0) { - int i; - bool match = false; - for (i = 0; i < dss_prec_limit; i++) { - if (strncmp(dss_prec_names[i], v, vlen) - == 0) { - if (chunk_dss_prec_set(i)) { - malloc_conf_error( - "Error setting dss", - k, klen, v, vlen); - } else { - opt_dss = - dss_prec_names[i]; - match = true; - break; - } - } - } - if (match == false) { - malloc_conf_error("Invalid conf value", - k, klen, v, vlen); - } - continue; - } - CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, - SIZE_T_MAX, false) - CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", - -1, (sizeof(size_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_stats_print, "stats_print") - if (config_fill) { - CONF_HANDLE_BOOL(opt_junk, "junk") - CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", - 0, SIZE_T_MAX, false) - CONF_HANDLE_BOOL(opt_redzone, "redzone") - CONF_HANDLE_BOOL(opt_zero, "zero") - } - if (config_utrace) { - CONF_HANDLE_BOOL(opt_utrace, "utrace") - } - if (config_valgrind) { - CONF_HANDLE_BOOL(opt_valgrind, "valgrind") - } - if (config_xmalloc) { - CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") - } - if (config_tcache) { - CONF_HANDLE_BOOL(opt_tcache, "tcache") - CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, - "lg_tcache_max", -1, - (sizeof(size_t) << 3) - 1) - } - if (config_prof) { - CONF_HANDLE_BOOL(opt_prof, "prof") - CONF_HANDLE_CHAR_P(opt_prof_prefix, - "prof_prefix", "jeprof") - CONF_HANDLE_BOOL(opt_prof_active, "prof_active") - CONF_HANDLE_SSIZE_T(opt_lg_prof_sample, - "lg_prof_sample", 0, - (sizeof(uint64_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") - CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, - "lg_prof_interval", -1, - (sizeof(uint64_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") - CONF_HANDLE_BOOL(opt_prof_final, "prof_final") - CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") - } - malloc_conf_error("Invalid conf pair", k, klen, v, - vlen); -#undef CONF_HANDLE_BOOL -#undef CONF_HANDLE_SIZE_T -#undef CONF_HANDLE_SSIZE_T -#undef CONF_HANDLE_CHAR_P - } - } -} - -static bool -malloc_init_hard(void) -{ - arena_t *init_arenas[1]; - - malloc_mutex_lock(&init_lock); - if (malloc_initialized || IS_INITIALIZER) { - /* - * Another thread initialized the allocator before this one - * acquired init_lock, or this thread is the initializing - * thread, and it is recursively allocating. - */ - malloc_mutex_unlock(&init_lock); - return (false); - } -#ifdef JEMALLOC_THREADED_INIT - if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) { - /* Busy-wait until the initializing thread completes. */ - do { - malloc_mutex_unlock(&init_lock); - CPU_SPINWAIT; - malloc_mutex_lock(&init_lock); - } while (malloc_initialized == false); - malloc_mutex_unlock(&init_lock); - return (false); - } -#endif - malloc_initializer = INITIALIZER; - - malloc_tsd_boot(); - if (config_prof) - prof_boot0(); - - malloc_conf_init(); - -#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ - && !defined(_WIN32)) - /* Register fork handlers. */ - if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, - jemalloc_postfork_child) != 0) { - malloc_write("<jemalloc>: Error in pthread_atfork()\n"); - if (opt_abort) - abort(); - } -#endif - - if (opt_stats_print) { - /* Print statistics at exit. 
*/ - if (atexit(stats_print_atexit) != 0) { - malloc_write("<jemalloc>: Error in atexit()\n"); - if (opt_abort) - abort(); - } - } - - if (base_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (chunk_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (ctl_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (config_prof) - prof_boot1(); - - arena_boot(); - - if (config_tcache && tcache_boot0()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (huge_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (malloc_mutex_init(&arenas_lock)) - return (true); - - /* - * Create enough scaffolding to allow recursive allocation in - * malloc_ncpus(). - */ - narenas_total = narenas_auto = 1; - arenas = init_arenas; - memset(arenas, 0, sizeof(arena_t *) * narenas_auto); - - /* - * Initialize one arena here. The rest are lazily created in - * choose_arena_hard(). - */ - arenas_extend(0); - if (arenas[0] == NULL) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - /* Initialize allocation counters before any allocations can occur. */ - if (config_stats && thread_allocated_tsd_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (arenas_tsd_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (config_tcache && tcache_boot1()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (config_fill && quarantine_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (config_prof && prof_boot2()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - /* Get number of CPUs. */ - malloc_mutex_unlock(&init_lock); - ncpus = malloc_ncpus(); - malloc_mutex_lock(&init_lock); - - if (mutex_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (opt_narenas == 0) { - /* - * For SMP systems, create more than one arena per CPU by - * default. - */ - if (ncpus > 1) - opt_narenas = ncpus << 2; - else - opt_narenas = 1; - } - narenas_auto = opt_narenas; - /* - * Make sure that the arenas array can be allocated. In practice, this - * limit is enough to allow the allocator to function, but the ctl - * machinery will fail to allocate memory at far lower limits. - */ - if (narenas_auto > chunksize / sizeof(arena_t *)) { - narenas_auto = chunksize / sizeof(arena_t *); - malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", - narenas_auto); - } - narenas_total = narenas_auto; - - /* Allocate and initialize arenas. */ - arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total); - if (arenas == NULL) { - malloc_mutex_unlock(&init_lock); - return (true); - } - /* - * Zero the array. In practice, this should always be pre-zeroed, - * since it was just mmap()ed, but let's be sure. - */ - memset(arenas, 0, sizeof(arena_t *) * narenas_total); - /* Copy the pointer to the one arena that was already initialized. */ - arenas[0] = init_arenas[0]; - - malloc_initialized = true; - malloc_mutex_unlock(&init_lock); - return (false); -} - -/* - * End initialization functions. - */ -/******************************************************************************/ -/* - * Begin malloc(3)-compatible functions. 
- */ - -void * -je_malloc(size_t size) -{ - void *ret; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); - - if (malloc_init()) { - ret = NULL; - goto label_oom; - } - - if (size == 0) - size = 1; - - if (config_prof && opt_prof) { - usize = s2u(size); - PROF_ALLOC_PREP(1, usize, cnt); - if (cnt == NULL) { - ret = NULL; - goto label_oom; - } - if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= - SMALL_MAXCLASS) { - ret = imalloc(SMALL_MAXCLASS+1); - if (ret != NULL) - arena_prof_promoted(ret, usize); - } else - ret = imalloc(size); - } else { - if (config_stats || (config_valgrind && opt_valgrind)) - usize = s2u(size); - ret = imalloc(size); - } - -label_oom: - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in malloc(): " - "out of memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_prof && opt_prof && ret != NULL) - prof_malloc(ret, usize, cnt); - if (config_stats && ret != NULL) { - assert(usize == isalloc(ret, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); - return (ret); -} - -JEMALLOC_ATTR(nonnull(1)) -#ifdef JEMALLOC_PROF -/* - * Avoid any uncertainty as to how many backtrace frames to ignore in - * PROF_ALLOC_PREP(). - */ -JEMALLOC_NOINLINE -#endif -static int -imemalign(void **memptr, size_t alignment, size_t size, - size_t min_alignment) -{ - int ret; - size_t usize; - void *result; - prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); - - assert(min_alignment != 0); - - if (malloc_init()) - result = NULL; - else { - if (size == 0) - size = 1; - - /* Make sure that alignment is a large enough power of 2. */ - if (((alignment - 1) & alignment) != 0 - || (alignment < min_alignment)) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error allocating " - "aligned memory: invalid alignment\n"); - abort(); - } - result = NULL; - ret = EINVAL; - goto label_return; - } - - usize = sa2u(size, alignment); - if (usize == 0) { - result = NULL; - ret = ENOMEM; - goto label_return; - } - - if (config_prof && opt_prof) { - PROF_ALLOC_PREP(2, usize, cnt); - if (cnt == NULL) { - result = NULL; - ret = EINVAL; - } else { - if (prof_promote && (uintptr_t)cnt != - (uintptr_t)1U && usize <= SMALL_MAXCLASS) { - assert(sa2u(SMALL_MAXCLASS+1, - alignment) != 0); - result = ipalloc(sa2u(SMALL_MAXCLASS+1, - alignment), alignment, false); - if (result != NULL) { - arena_prof_promoted(result, - usize); - } - } else { - result = ipalloc(usize, alignment, - false); - } - } - } else - result = ipalloc(usize, alignment, false); - } - - if (result == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error allocating aligned " - "memory: out of memory\n"); - abort(); - } - ret = ENOMEM; - goto label_return; - } - - *memptr = result; - ret = 0; - -label_return: - if (config_stats && result != NULL) { - assert(usize == isalloc(result, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - if (config_prof && opt_prof && result != NULL) - prof_malloc(result, usize, cnt); - UTRACE(0, size, result); - return (ret); -} - -int -je_posix_memalign(void **memptr, size_t alignment, size_t size) -{ - int ret = imemalign(memptr, alignment, size, sizeof(void *)); - JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, - config_prof), false); - return (ret); -} - -void * -je_aligned_alloc(size_t alignment, size_t size) -{ - void *ret; - int 
err; - - if ((err = imemalign(&ret, alignment, size, 1)) != 0) { - ret = NULL; - set_errno(err); - } - JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), - false); - return (ret); -} - -void * -je_calloc(size_t num, size_t size) -{ - void *ret; - size_t num_size; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); - - if (malloc_init()) { - num_size = 0; - ret = NULL; - goto label_return; - } - - num_size = num * size; - if (num_size == 0) { - if (num == 0 || size == 0) - num_size = 1; - else { - ret = NULL; - goto label_return; - } - /* - * Try to avoid division here. We know that it isn't possible to - * overflow during multiplication if neither operand uses any of the - * most significant half of the bits in a size_t. - */ - } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) - && (num_size / size != num)) { - /* size_t overflow. */ - ret = NULL; - goto label_return; - } - - if (config_prof && opt_prof) { - usize = s2u(num_size); - PROF_ALLOC_PREP(1, usize, cnt); - if (cnt == NULL) { - ret = NULL; - goto label_return; - } - if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize - <= SMALL_MAXCLASS) { - ret = icalloc(SMALL_MAXCLASS+1); - if (ret != NULL) - arena_prof_promoted(ret, usize); - } else - ret = icalloc(num_size); - } else { - if (config_stats || (config_valgrind && opt_valgrind)) - usize = s2u(num_size); - ret = icalloc(num_size); - } - -label_return: - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in calloc(): out of " - "memory\n"); - abort(); - } - set_errno(ENOMEM); - } - - if (config_prof && opt_prof && ret != NULL) - prof_malloc(ret, usize, cnt); - if (config_stats && ret != NULL) { - assert(usize == isalloc(ret, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, num_size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); - return (ret); -} - -void * -je_realloc(void *ptr, size_t size) -{ - void *ret; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - size_t old_size = 0; - size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); - prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL); - - if (size == 0) { - if (ptr != NULL) { - /* realloc(ptr, 0) is equivalent to free(p). 
*/ - assert(malloc_initialized || IS_INITIALIZER); - if (config_prof) { - old_size = isalloc(ptr, true); - if (config_valgrind && opt_valgrind) - old_rzsize = p2rz(ptr); - } else if (config_stats) { - old_size = isalloc(ptr, false); - if (config_valgrind && opt_valgrind) - old_rzsize = u2rz(old_size); - } else if (config_valgrind && opt_valgrind) { - old_size = isalloc(ptr, false); - old_rzsize = u2rz(old_size); - } - if (config_prof && opt_prof) { - old_ctx = prof_ctx_get(ptr); - cnt = NULL; - } - iqalloc(ptr); - ret = NULL; - goto label_return; - } else - size = 1; - } - - if (ptr != NULL) { - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (config_prof) { - old_size = isalloc(ptr, true); - if (config_valgrind && opt_valgrind) - old_rzsize = p2rz(ptr); - } else if (config_stats) { - old_size = isalloc(ptr, false); - if (config_valgrind && opt_valgrind) - old_rzsize = u2rz(old_size); - } else if (config_valgrind && opt_valgrind) { - old_size = isalloc(ptr, false); - old_rzsize = u2rz(old_size); - } - if (config_prof && opt_prof) { - usize = s2u(size); - old_ctx = prof_ctx_get(ptr); - PROF_ALLOC_PREP(1, usize, cnt); - if (cnt == NULL) { - old_ctx = NULL; - ret = NULL; - goto label_oom; - } - if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && - usize <= SMALL_MAXCLASS) { - ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0, - false, false); - if (ret != NULL) - arena_prof_promoted(ret, usize); - else - old_ctx = NULL; - } else { - ret = iralloc(ptr, size, 0, 0, false, false); - if (ret == NULL) - old_ctx = NULL; - } - } else { - if (config_stats || (config_valgrind && opt_valgrind)) - usize = s2u(size); - ret = iralloc(ptr, size, 0, 0, false, false); - } - -label_oom: - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in realloc(): " - "out of memory\n"); - abort(); - } - set_errno(ENOMEM); - } - } else { - /* realloc(NULL, size) is equivalent to malloc(size). 
*/ - if (config_prof && opt_prof) - old_ctx = NULL; - if (malloc_init()) { - if (config_prof && opt_prof) - cnt = NULL; - ret = NULL; - } else { - if (config_prof && opt_prof) { - usize = s2u(size); - PROF_ALLOC_PREP(1, usize, cnt); - if (cnt == NULL) - ret = NULL; - else { - if (prof_promote && (uintptr_t)cnt != - (uintptr_t)1U && usize <= - SMALL_MAXCLASS) { - ret = imalloc(SMALL_MAXCLASS+1); - if (ret != NULL) { - arena_prof_promoted(ret, - usize); - } - } else - ret = imalloc(size); - } - } else { - if (config_stats || (config_valgrind && - opt_valgrind)) - usize = s2u(size); - ret = imalloc(size); - } - } - - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in realloc(): " - "out of memory\n"); - abort(); - } - set_errno(ENOMEM); - } - } - -label_return: - if (config_prof && opt_prof) - prof_realloc(ret, usize, cnt, old_size, old_ctx); - if (config_stats && ret != NULL) { - thread_allocated_t *ta; - assert(usize == isalloc(ret, config_prof)); - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_size; - } - UTRACE(ptr, size, ret); - JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false); - return (ret); -} - -void -je_free(void *ptr) -{ - - UTRACE(ptr, 0, 0); - if (ptr != NULL) { - size_t usize; - size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - - assert(malloc_initialized || IS_INITIALIZER); - - if (config_prof && opt_prof) { - usize = isalloc(ptr, config_prof); - prof_free(ptr, usize); - } else if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); - if (config_stats) - thread_allocated_tsd_get()->deallocated += usize; - if (config_valgrind && opt_valgrind) - rzsize = p2rz(ptr); - iqalloc(ptr); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); - } -} - -/* - * End malloc(3)-compatible functions. - */ -/******************************************************************************/ -/* - * Begin non-standard override functions. - */ - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN -void * -je_memalign(size_t alignment, size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - imemalign(&ret, alignment, size, 1); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); -} -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -void * -je_valloc(size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - imemalign(&ret, PAGE, size, 1); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); -} -#endif - -/* - * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has - * #define je_malloc malloc - */ -#define malloc_is_malloc 1 -#define is_malloc_(a) malloc_is_ ## a -#define is_malloc(a) is_malloc_(a) - -#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__)) -/* - * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible - * to inconsistently reference libc's malloc(3)-compatible functions - * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). - * - * These definitions interpose hooks in glibc. The functions are actually - * passed an extra argument for the caller return address, which will be - * ignored. - */ -JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free; -JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc; -JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc; -JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) = - je_memalign; -#endif - -/* - * End non-standard override functions. 
- */ -/******************************************************************************/ -/* - * Begin non-standard functions. - */ - -size_t -je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) -{ - size_t ret; - - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (config_ivsalloc) - ret = ivsalloc(ptr, config_prof); - else - ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; - - return (ret); -} - -void -je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ - - stats_print(write_cb, cbopaque, opts); -} - -int -je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ - - if (malloc_init()) - return (EAGAIN); - - return (ctl_byname(name, oldp, oldlenp, newp, newlen)); -} - -int -je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) -{ - - if (malloc_init()) - return (EAGAIN); - - return (ctl_nametomib(name, mibp, miblenp)); -} - -int -je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - - if (malloc_init()) - return (EAGAIN); - - return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); -} - -/* - * End non-standard functions. - */ -/******************************************************************************/ -/* - * Begin experimental functions. - */ -#ifdef JEMALLOC_EXPERIMENTAL - -static JEMALLOC_ATTR(always_inline) void * -iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena) -{ - - assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, - alignment))); - - if (alignment != 0) - return (ipallocx(usize, alignment, zero, try_tcache, arena)); - else if (zero) - return (icallocx(usize, try_tcache, arena)); - else - return (imallocx(usize, try_tcache, arena)); -} - -int -je_allocm(void **ptr, size_t *rsize, size_t size, int flags) -{ - void *p; - size_t usize; - size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - bool zero = flags & ALLOCM_ZERO; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - arena_t *arena; - bool try_tcache; - - assert(ptr != NULL); - assert(size != 0); - - if (malloc_init()) - goto label_oom; - - if (arena_ind != UINT_MAX) { - arena = arenas[arena_ind]; - try_tcache = false; - } else { - arena = NULL; - try_tcache = true; - } - - usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - if (usize == 0) - goto label_oom; - - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - PROF_ALLOC_PREP(1, usize, cnt); - if (cnt == NULL) - goto label_oom; - if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= - SMALL_MAXCLASS) { - size_t usize_promoted = (alignment == 0) ? 
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, - alignment); - assert(usize_promoted != 0); - p = iallocm(usize_promoted, alignment, zero, - try_tcache, arena); - if (p == NULL) - goto label_oom; - arena_prof_promoted(p, usize); - } else { - p = iallocm(usize, alignment, zero, try_tcache, arena); - if (p == NULL) - goto label_oom; - } - prof_malloc(p, usize, cnt); - } else { - p = iallocm(usize, alignment, zero, try_tcache, arena); - if (p == NULL) - goto label_oom; - } - if (rsize != NULL) - *rsize = usize; - - *ptr = p; - if (config_stats) { - assert(usize == isalloc(p, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, size, p); - JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); - return (ALLOCM_SUCCESS); -label_oom: - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in allocm(): " - "out of memory\n"); - abort(); - } - *ptr = NULL; - UTRACE(0, size, 0); - return (ALLOCM_ERR_OOM); -} - -int -je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) -{ - void *p, *q; - size_t usize; - size_t old_size; - size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - bool zero = flags & ALLOCM_ZERO; - bool no_move = flags & ALLOCM_NO_MOVE; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - bool try_tcache_alloc, try_tcache_dalloc; - arena_t *arena; - - assert(ptr != NULL); - assert(*ptr != NULL); - assert(size != 0); - assert(SIZE_T_MAX - size >= extra); - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (arena_ind != UINT_MAX) { - arena_chunk_t *chunk; - try_tcache_alloc = true; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr); - try_tcache_dalloc = (chunk == *ptr || chunk->arena != - arenas[arena_ind]); - arena = arenas[arena_ind]; - } else { - try_tcache_alloc = true; - try_tcache_dalloc = true; - arena = NULL; - } - - p = *ptr; - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - /* - * usize isn't knowable before iralloc() returns when extra is - * non-zero. Therefore, compute its maximum possible value and - * use that in PROF_ALLOC_PREP() to decide whether to capture a - * backtrace. prof_realloc() will use the actual usize to - * decide whether to sample. - */ - size_t max_usize = (alignment == 0) ? s2u(size+extra) : - sa2u(size+extra, alignment); - prof_ctx_t *old_ctx = prof_ctx_get(p); - old_size = isalloc(p, true); - if (config_valgrind && opt_valgrind) - old_rzsize = p2rz(p); - PROF_ALLOC_PREP(1, max_usize, cnt); - if (cnt == NULL) - goto label_oom; - /* - * Use minimum usize to determine whether promotion may happen. - */ - if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U - && ((alignment == 0) ? s2u(size) : sa2u(size, alignment)) - <= SMALL_MAXCLASS) { - q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= - size+extra) ? 
0 : size+extra - (SMALL_MAXCLASS+1), - alignment, zero, no_move, try_tcache_alloc, - try_tcache_dalloc, arena); - if (q == NULL) - goto label_err; - if (max_usize < PAGE) { - usize = max_usize; - arena_prof_promoted(q, usize); - } else - usize = isalloc(q, config_prof); - } else { - q = irallocx(p, size, extra, alignment, zero, no_move, - try_tcache_alloc, try_tcache_dalloc, arena); - if (q == NULL) - goto label_err; - usize = isalloc(q, config_prof); - } - prof_realloc(q, usize, cnt, old_size, old_ctx); - if (rsize != NULL) - *rsize = usize; - } else { - if (config_stats) { - old_size = isalloc(p, false); - if (config_valgrind && opt_valgrind) - old_rzsize = u2rz(old_size); - } else if (config_valgrind && opt_valgrind) { - old_size = isalloc(p, false); - old_rzsize = u2rz(old_size); - } - q = irallocx(p, size, extra, alignment, zero, no_move, - try_tcache_alloc, try_tcache_dalloc, arena); - if (q == NULL) - goto label_err; - if (config_stats) - usize = isalloc(q, config_prof); - if (rsize != NULL) { - if (config_stats == false) - usize = isalloc(q, config_prof); - *rsize = usize; - } - } - - *ptr = q; - if (config_stats) { - thread_allocated_t *ta; - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_size; - } - UTRACE(p, size, q); - JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero); - return (ALLOCM_SUCCESS); -label_err: - if (no_move) { - UTRACE(p, size, q); - return (ALLOCM_ERR_NOT_MOVED); - } -label_oom: - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in rallocm(): " - "out of memory\n"); - abort(); - } - UTRACE(p, size, 0); - return (ALLOCM_ERR_OOM); -} - -int -je_sallocm(const void *ptr, size_t *rsize, int flags) -{ - size_t sz; - - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (config_ivsalloc) - sz = ivsalloc(ptr, config_prof); - else { - assert(ptr != NULL); - sz = isalloc(ptr, config_prof); - } - assert(rsize != NULL); - *rsize = sz; - - return (ALLOCM_SUCCESS); -} - -int -je_dallocm(void *ptr, int flags) -{ - size_t usize; - size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - bool try_tcache; - - assert(ptr != NULL); - assert(malloc_initialized || IS_INITIALIZER); - - if (arena_ind != UINT_MAX) { - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - try_tcache = (chunk == ptr || chunk->arena != - arenas[arena_ind]); - } else - try_tcache = true; - - UTRACE(ptr, 0, 0); - if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); - if (config_prof && opt_prof) { - if (config_stats == false && config_valgrind == false) - usize = isalloc(ptr, config_prof); - prof_free(ptr, usize); - } - if (config_stats) - thread_allocated_tsd_get()->deallocated += usize; - if (config_valgrind && opt_valgrind) - rzsize = p2rz(ptr); - iqallocx(ptr, try_tcache); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); - - return (ALLOCM_SUCCESS); -} - -int -je_nallocm(size_t *rsize, size_t size, int flags) -{ - size_t usize; - size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - - assert(size != 0); - - if (malloc_init()) - return (ALLOCM_ERR_OOM); - - usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - if (usize == 0) - return (ALLOCM_ERR_OOM); - - if (rsize != NULL) - *rsize = usize; - return (ALLOCM_SUCCESS); -} - -#endif -/* - * End experimental functions. 
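The experimental je_*allocm() entry points above pack alignment, zeroing and the target arena into one flags word: the low six bits carry lg(alignment), one bit requests zeroed memory, and the arena index is stored biased by one above bit 8. A small self-contained sketch of that decoding; the FLAG_* names are local stand-ins, not jemalloc's ALLOCM_* macros:

#include <stdint.h>
#include <stdio.h>

#define LG_ALIGN_MASK   0x3f                    /* low 6 bits: lg(alignment) */
#define FLAG_ZERO       ((int)0x40)             /* request zeroed memory */
#define FLAG_ARENA(a)   ((int)(((a) + 1) << 8)) /* arena index, biased by 1 */

int
main(void)
{
    int flags = 6 | FLAG_ZERO | FLAG_ARENA(3);  /* 64-byte alignment, arena 3 */

    /*
     * A lg(alignment) field of 0 means "no alignment constraint"; masking
     * the computed 1 with SIZE_MAX - 1 maps it to 0, as in je_allocm().
     */
    size_t alignment = ((size_t)1 << (flags & LG_ALIGN_MASK)) & (SIZE_MAX - 1);
    int zero = (flags & FLAG_ZERO) != 0;
    unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;

    printf("alignment=%zu zero=%d arena=%u\n", alignment, zero, arena_ind);
    return 0;
}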
- */ -/******************************************************************************/ -/* - * The following functions are used by threading libraries for protection of - * malloc during fork(). - */ - -/* - * If an application creates a thread before doing any allocation in the main - * thread, then calls fork(2) in the main thread followed by memory allocation - * in the child process, a race can occur that results in deadlock within the - * child: the main thread may have forked while the created thread had - * partially initialized the allocator. Ordinarily jemalloc prevents - * fork/malloc races via the following functions it registers during - * initialization using pthread_atfork(), but of course that does no good if - * the allocator isn't fully initialized at fork time. The following library - * constructor is a partial solution to this problem. It may still possible to - * trigger the deadlock described above, but doing so would involve forking via - * a library constructor that runs before jemalloc's runs. - */ -JEMALLOC_ATTR(constructor) -static void -jemalloc_constructor(void) -{ - - malloc_init(); -} - -#ifndef JEMALLOC_MUTEX_INIT_CB -void -jemalloc_prefork(void) -#else -JEMALLOC_EXPORT void -_malloc_prefork(void) -#endif -{ - unsigned i; - -#ifdef JEMALLOC_MUTEX_INIT_CB - if (malloc_initialized == false) - return; -#endif - assert(malloc_initialized); - - /* Acquire all mutexes in a safe order. */ - ctl_prefork(); - prof_prefork(); - malloc_mutex_prefork(&arenas_lock); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_prefork(arenas[i]); - } - chunk_prefork(); - base_prefork(); - huge_prefork(); -} - -#ifndef JEMALLOC_MUTEX_INIT_CB -void -jemalloc_postfork_parent(void) -#else -JEMALLOC_EXPORT void -_malloc_postfork(void) -#endif -{ - unsigned i; - -#ifdef JEMALLOC_MUTEX_INIT_CB - if (malloc_initialized == false) - return; -#endif - assert(malloc_initialized); - - /* Release all mutexes, now that fork() has completed. */ - huge_postfork_parent(); - base_postfork_parent(); - chunk_postfork_parent(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_parent(arenas[i]); - } - malloc_mutex_postfork_parent(&arenas_lock); - prof_postfork_parent(); - ctl_postfork_parent(); -} - -void -jemalloc_postfork_child(void) -{ - unsigned i; - - assert(malloc_initialized); - - /* Release all mutexes, now that fork() has completed. */ - huge_postfork_child(); - base_postfork_child(); - chunk_postfork_child(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_child(arenas[i]); - } - malloc_mutex_postfork_child(&arenas_lock); - prof_postfork_child(); - ctl_postfork_child(); -} - -/******************************************************************************/ -/* - * The following functions are used for TLS allocation/deallocation in static - * binaries on FreeBSD. The primary difference between these and i[mcd]alloc() - * is that these avoid accessing TLS variables. 
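The prefork/postfork hooks above keep a fork() from snapshotting the child while jemalloc's mutexes are held by threads that will not exist in the child. A minimal sketch of the same pthread_atfork() pattern applied to a single application mutex (illustrative only, build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Acquire the lock before fork() so the child never inherits it held. */
static void prefork(void)         { pthread_mutex_lock(&lock); }
/* The parent simply releases it again. */
static void postfork_parent(void) { pthread_mutex_unlock(&lock); }
/* The child releases (or could re-initialize) its copy. */
static void postfork_child(void)  { pthread_mutex_unlock(&lock); }

int
main(void)
{
    if (pthread_atfork(prefork, postfork_parent, postfork_child) != 0)
        return 1;

    pid_t pid = fork();
    if (pid == 0) {
        /* Safe: the child's copy of the lock is not held. */
        pthread_mutex_lock(&lock);
        pthread_mutex_unlock(&lock);
        _exit(0);
    }
    waitpid(pid, NULL, 0);
    puts("fork-safe locking done");
    return 0;
}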
- */ - -static void * -a0alloc(size_t size, bool zero) -{ - - if (malloc_init()) - return (NULL); - - if (size == 0) - size = 1; - - if (size <= arena_maxclass) - return (arena_malloc(arenas[0], size, zero, false)); - else - return (huge_malloc(size, zero)); -} - -void * -a0malloc(size_t size) -{ - - return (a0alloc(size, false)); -} - -void * -a0calloc(size_t num, size_t size) -{ - - return (a0alloc(num * size, true)); -} - -void -a0free(void *ptr) -{ - arena_chunk_t *chunk; - - if (ptr == NULL) - return; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) - arena_dalloc(chunk->arena, chunk, ptr, false); - else - huge_dalloc(ptr, true); -} - -/******************************************************************************/ diff --git a/extra/jemalloc/src/mb.c b/extra/jemalloc/src/mb.c deleted file mode 100644 index dc2c0a256fd..00000000000 --- a/extra/jemalloc/src/mb.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_MB_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/extra/jemalloc/src/mutex.c b/extra/jemalloc/src/mutex.c deleted file mode 100644 index 55e18c23713..00000000000 --- a/extra/jemalloc/src/mutex.c +++ /dev/null @@ -1,149 +0,0 @@ -#define JEMALLOC_MUTEX_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -#include <dlfcn.h> -#endif - -#ifndef _CRT_SPINCOUNT -#define _CRT_SPINCOUNT 4000 -#endif - -/******************************************************************************/ -/* Data. */ - -#ifdef JEMALLOC_LAZY_LOCK -bool isthreaded = false; -#endif -#ifdef JEMALLOC_MUTEX_INIT_CB -static bool postpone_init = true; -static malloc_mutex_t *postponed_mutexes = NULL; -#endif - -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -static void pthread_create_once(void); -#endif - -/******************************************************************************/ -/* - * We intercept pthread_create() calls in order to toggle isthreaded if the - * process goes multi-threaded. 
- */ - -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, - void *(*)(void *), void *__restrict); - -static void -pthread_create_once(void) -{ - - pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); - if (pthread_create_fptr == NULL) { - malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " - "\"pthread_create\")\n"); - abort(); - } - - isthreaded = true; -} - -JEMALLOC_EXPORT int -pthread_create(pthread_t *__restrict thread, - const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), - void *__restrict arg) -{ - static pthread_once_t once_control = PTHREAD_ONCE_INIT; - - pthread_once(&once_control, pthread_create_once); - - return (pthread_create_fptr(thread, attr, start_routine, arg)); -} -#endif - -/******************************************************************************/ - -#ifdef JEMALLOC_MUTEX_INIT_CB -JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, - void *(calloc_cb)(size_t, size_t)); -#endif - -bool -malloc_mutex_init(malloc_mutex_t *mutex) -{ - -#ifdef _WIN32 - if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, - _CRT_SPINCOUNT)) - return (true); -#elif (defined(JEMALLOC_OSSPIN)) - mutex->lock = 0; -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) - if (postpone_init) { - mutex->postponed_next = postponed_mutexes; - postponed_mutexes = mutex; - } else { - if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) != - 0) - return (true); - } -#else - pthread_mutexattr_t attr; - - if (pthread_mutexattr_init(&attr) != 0) - return (true); - pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); - if (pthread_mutex_init(&mutex->lock, &attr) != 0) { - pthread_mutexattr_destroy(&attr); - return (true); - } - pthread_mutexattr_destroy(&attr); -#endif - return (false); -} - -void -malloc_mutex_prefork(malloc_mutex_t *mutex) -{ - - malloc_mutex_lock(mutex); -} - -void -malloc_mutex_postfork_parent(malloc_mutex_t *mutex) -{ - - malloc_mutex_unlock(mutex); -} - -void -malloc_mutex_postfork_child(malloc_mutex_t *mutex) -{ - -#ifdef JEMALLOC_MUTEX_INIT_CB - malloc_mutex_unlock(mutex); -#else - if (malloc_mutex_init(mutex)) { - malloc_printf("<jemalloc>: Error re-initializing mutex in " - "child\n"); - if (opt_abort) - abort(); - } -#endif -} - -bool -mutex_boot(void) -{ - -#ifdef JEMALLOC_MUTEX_INIT_CB - postpone_init = false; - while (postponed_mutexes != NULL) { - if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, - base_calloc) != 0) - return (true); - postponed_mutexes = postponed_mutexes->postponed_next; - } -#endif - return (false); -} diff --git a/extra/jemalloc/src/prof.c b/extra/jemalloc/src/prof.c deleted file mode 100644 index c133b95c2c6..00000000000 --- a/extra/jemalloc/src/prof.c +++ /dev/null @@ -1,1283 +0,0 @@ -#define JEMALLOC_PROF_C_ -#include "jemalloc/internal/jemalloc_internal.h" -/******************************************************************************/ - -#ifdef JEMALLOC_PROF_LIBUNWIND -#define UNW_LOCAL_ONLY -#include <libunwind.h> -#endif - -#ifdef JEMALLOC_PROF_LIBGCC -#include <unwind.h> -#endif - -/******************************************************************************/ -/* Data. 
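The lazy-locking code above resolves the real pthread_create() with dlsym(RTLD_NEXT, ...) so its own exported wrapper can set isthreaded and then forward the call. A minimal sketch of that interposition technique, wrapping write(2) purely for illustration; build it as a shared object (cc -shared -fPIC -ldl) and load it with LD_PRELOAD:

#define _GNU_SOURCE
#include <dlfcn.h>
#include <unistd.h>

static ssize_t (*real_write)(int, const void *, size_t);

ssize_t
write(int fd, const void *buf, size_t count)
{
    if (real_write == NULL) {
        /* Resolve the next definition in lookup order (normally libc's). */
        real_write = (ssize_t (*)(int, const void *, size_t))
            dlsym(RTLD_NEXT, "write");
        if (real_write == NULL)
            return -1;
    }
    /* A side effect could go here, e.g. flipping a flag as above. */
    return real_write(fd, buf, count);
}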
*/ - -malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL) - -bool opt_prof = false; -bool opt_prof_active = true; -size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; -ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; -bool opt_prof_gdump = false; -bool opt_prof_final = true; -bool opt_prof_leak = false; -bool opt_prof_accum = false; -char opt_prof_prefix[PATH_MAX + 1]; - -uint64_t prof_interval = 0; -bool prof_promote; - -/* - * Table of mutexes that are shared among ctx's. These are leaf locks, so - * there is no problem with using them for more than one ctx at the same time. - * The primary motivation for this sharing though is that ctx's are ephemeral, - * and destroying mutexes causes complications for systems that allocate when - * creating/destroying mutexes. - */ -static malloc_mutex_t *ctx_locks; -static unsigned cum_ctxs; /* Atomic counter. */ - -/* - * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data - * structure that knows about all backtraces currently captured. - */ -static ckh_t bt2ctx; -static malloc_mutex_t bt2ctx_mtx; - -static malloc_mutex_t prof_dump_seq_mtx; -static uint64_t prof_dump_seq; -static uint64_t prof_dump_iseq; -static uint64_t prof_dump_mseq; -static uint64_t prof_dump_useq; - -/* - * This buffer is rather large for stack allocation, so use a single buffer for - * all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since - * it must be locked anyway during dumping. - */ -static char prof_dump_buf[PROF_DUMP_BUFSIZE]; -static unsigned prof_dump_buf_end; -static int prof_dump_fd; - -/* Do not dump any profiles until bootstrapping is complete. */ -static bool prof_booted = false; - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static prof_bt_t *bt_dup(prof_bt_t *bt); -static void bt_destroy(prof_bt_t *bt); -#ifdef JEMALLOC_PROF_LIBGCC -static _Unwind_Reason_Code prof_unwind_init_callback( - struct _Unwind_Context *context, void *arg); -static _Unwind_Reason_Code prof_unwind_callback( - struct _Unwind_Context *context, void *arg); -#endif -static bool prof_flush(bool propagate_err); -static bool prof_write(bool propagate_err, const char *s); -static bool prof_printf(bool propagate_err, const char *format, ...) - JEMALLOC_ATTR(format(printf, 2, 3)); -static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, - size_t *leak_nctx); -static void prof_ctx_destroy(prof_ctx_t *ctx); -static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt); -static bool prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, - prof_bt_t *bt); -static bool prof_dump_maps(bool propagate_err); -static bool prof_dump(bool propagate_err, const char *filename, - bool leakcheck); -static void prof_dump_filename(char *filename, char v, int64_t vseq); -static void prof_fdump(void); -static void prof_bt_hash(const void *key, size_t r_hash[2]); -static bool prof_bt_keycomp(const void *k1, const void *k2); -static malloc_mutex_t *prof_ctx_mutex_choose(void); - -/******************************************************************************/ - -void -bt_init(prof_bt_t *bt, void **vec) -{ - - cassert(config_prof); - - bt->vec = vec; - bt->len = 0; -} - -static void -bt_destroy(prof_bt_t *bt) -{ - - cassert(config_prof); - - idalloc(bt); -} - -static prof_bt_t * -bt_dup(prof_bt_t *bt) -{ - prof_bt_t *ret; - - cassert(config_prof); - - /* - * Create a single allocation that has space for vec immediately - * following the prof_bt_t structure. 
The backtraces that get - * stored in the backtrace caches are copied from stack-allocated - * temporary variables, so size is known at creation time. Making this - * a contiguous object improves cache locality. - */ - ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) + - (bt->len * sizeof(void *))); - if (ret == NULL) - return (NULL); - ret->vec = (void **)((uintptr_t)ret + - QUANTUM_CEILING(sizeof(prof_bt_t))); - memcpy(ret->vec, bt->vec, bt->len * sizeof(void *)); - ret->len = bt->len; - - return (ret); -} - -static inline void -prof_enter(prof_tdata_t *prof_tdata) -{ - - cassert(config_prof); - - assert(prof_tdata->enq == false); - prof_tdata->enq = true; - - malloc_mutex_lock(&bt2ctx_mtx); -} - -static inline void -prof_leave(prof_tdata_t *prof_tdata) -{ - bool idump, gdump; - - cassert(config_prof); - - malloc_mutex_unlock(&bt2ctx_mtx); - - assert(prof_tdata->enq); - prof_tdata->enq = false; - idump = prof_tdata->enq_idump; - prof_tdata->enq_idump = false; - gdump = prof_tdata->enq_gdump; - prof_tdata->enq_gdump = false; - - if (idump) - prof_idump(); - if (gdump) - prof_gdump(); -} - -#ifdef JEMALLOC_PROF_LIBUNWIND -void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - unw_context_t uc; - unw_cursor_t cursor; - unsigned i; - int err; - - cassert(config_prof); - assert(bt->len == 0); - assert(bt->vec != NULL); - - unw_getcontext(&uc); - unw_init_local(&cursor, &uc); - - /* Throw away (nignore+1) stack frames, if that many exist. */ - for (i = 0; i < nignore + 1; i++) { - err = unw_step(&cursor); - if (err <= 0) - return; - } - - /* - * Iterate over stack frames until there are no more, or until no space - * remains in bt. - */ - for (i = 0; i < PROF_BT_MAX; i++) { - unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]); - bt->len++; - err = unw_step(&cursor); - if (err <= 0) - break; - } -} -#elif (defined(JEMALLOC_PROF_LIBGCC)) -static _Unwind_Reason_Code -prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) -{ - - cassert(config_prof); - - return (_URC_NO_REASON); -} - -static _Unwind_Reason_Code -prof_unwind_callback(struct _Unwind_Context *context, void *arg) -{ - prof_unwind_data_t *data = (prof_unwind_data_t *)arg; - - cassert(config_prof); - - if (data->nignore > 0) - data->nignore--; - else { - data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context); - data->bt->len++; - if (data->bt->len == data->max) - return (_URC_END_OF_STACK); - } - - return (_URC_NO_REASON); -} - -void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX}; - - cassert(config_prof); - - _Unwind_Backtrace(prof_unwind_callback, &data); -} -#elif (defined(JEMALLOC_PROF_GCC)) -void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ -#define BT_FRAME(i) \ - if ((i) < nignore + PROF_BT_MAX) { \ - void *p; \ - if (__builtin_frame_address(i) == 0) \ - return; \ - p = __builtin_return_address(i); \ - if (p == NULL) \ - return; \ - if (i >= nignore) { \ - bt->vec[(i) - nignore] = p; \ - bt->len = (i) - nignore + 1; \ - } \ - } else \ - return; - - cassert(config_prof); - assert(nignore <= 3); - - BT_FRAME(0) - BT_FRAME(1) - BT_FRAME(2) - BT_FRAME(3) - BT_FRAME(4) - BT_FRAME(5) - BT_FRAME(6) - BT_FRAME(7) - BT_FRAME(8) - BT_FRAME(9) - - BT_FRAME(10) - BT_FRAME(11) - BT_FRAME(12) - BT_FRAME(13) - BT_FRAME(14) - BT_FRAME(15) - BT_FRAME(16) - BT_FRAME(17) - BT_FRAME(18) - BT_FRAME(19) - - BT_FRAME(20) - BT_FRAME(21) - BT_FRAME(22) - BT_FRAME(23) - BT_FRAME(24) - BT_FRAME(25) - BT_FRAME(26) - BT_FRAME(27) - BT_FRAME(28) 
- BT_FRAME(29) - - BT_FRAME(30) - BT_FRAME(31) - BT_FRAME(32) - BT_FRAME(33) - BT_FRAME(34) - BT_FRAME(35) - BT_FRAME(36) - BT_FRAME(37) - BT_FRAME(38) - BT_FRAME(39) - - BT_FRAME(40) - BT_FRAME(41) - BT_FRAME(42) - BT_FRAME(43) - BT_FRAME(44) - BT_FRAME(45) - BT_FRAME(46) - BT_FRAME(47) - BT_FRAME(48) - BT_FRAME(49) - - BT_FRAME(50) - BT_FRAME(51) - BT_FRAME(52) - BT_FRAME(53) - BT_FRAME(54) - BT_FRAME(55) - BT_FRAME(56) - BT_FRAME(57) - BT_FRAME(58) - BT_FRAME(59) - - BT_FRAME(60) - BT_FRAME(61) - BT_FRAME(62) - BT_FRAME(63) - BT_FRAME(64) - BT_FRAME(65) - BT_FRAME(66) - BT_FRAME(67) - BT_FRAME(68) - BT_FRAME(69) - - BT_FRAME(70) - BT_FRAME(71) - BT_FRAME(72) - BT_FRAME(73) - BT_FRAME(74) - BT_FRAME(75) - BT_FRAME(76) - BT_FRAME(77) - BT_FRAME(78) - BT_FRAME(79) - - BT_FRAME(80) - BT_FRAME(81) - BT_FRAME(82) - BT_FRAME(83) - BT_FRAME(84) - BT_FRAME(85) - BT_FRAME(86) - BT_FRAME(87) - BT_FRAME(88) - BT_FRAME(89) - - BT_FRAME(90) - BT_FRAME(91) - BT_FRAME(92) - BT_FRAME(93) - BT_FRAME(94) - BT_FRAME(95) - BT_FRAME(96) - BT_FRAME(97) - BT_FRAME(98) - BT_FRAME(99) - - BT_FRAME(100) - BT_FRAME(101) - BT_FRAME(102) - BT_FRAME(103) - BT_FRAME(104) - BT_FRAME(105) - BT_FRAME(106) - BT_FRAME(107) - BT_FRAME(108) - BT_FRAME(109) - - BT_FRAME(110) - BT_FRAME(111) - BT_FRAME(112) - BT_FRAME(113) - BT_FRAME(114) - BT_FRAME(115) - BT_FRAME(116) - BT_FRAME(117) - BT_FRAME(118) - BT_FRAME(119) - - BT_FRAME(120) - BT_FRAME(121) - BT_FRAME(122) - BT_FRAME(123) - BT_FRAME(124) - BT_FRAME(125) - BT_FRAME(126) - BT_FRAME(127) - - /* Extras to compensate for nignore. */ - BT_FRAME(128) - BT_FRAME(129) - BT_FRAME(130) -#undef BT_FRAME -} -#else -void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - - cassert(config_prof); - assert(false); -} -#endif - -prof_thr_cnt_t * -prof_lookup(prof_bt_t *bt) -{ - union { - prof_thr_cnt_t *p; - void *v; - } ret; - prof_tdata_t *prof_tdata; - - cassert(config_prof); - - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (NULL); - - if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) { - union { - prof_bt_t *p; - void *v; - } btkey; - union { - prof_ctx_t *p; - void *v; - } ctx; - bool new_ctx; - - /* - * This thread's cache lacks bt. Look for it in the global - * cache. - */ - prof_enter(prof_tdata); - if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) { - /* bt has never been seen before. Insert it. */ - ctx.v = imalloc(sizeof(prof_ctx_t)); - if (ctx.v == NULL) { - prof_leave(prof_tdata); - return (NULL); - } - btkey.p = bt_dup(bt); - if (btkey.v == NULL) { - prof_leave(prof_tdata); - idalloc(ctx.v); - return (NULL); - } - ctx.p->bt = btkey.p; - ctx.p->lock = prof_ctx_mutex_choose(); - /* - * Set nlimbo to 1, in order to avoid a race condition - * with prof_ctx_merge()/prof_ctx_destroy(). - */ - ctx.p->nlimbo = 1; - memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t)); - ql_new(&ctx.p->cnts_ql); - if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) { - /* OOM. */ - prof_leave(prof_tdata); - idalloc(btkey.v); - idalloc(ctx.v); - return (NULL); - } - new_ctx = true; - } else { - /* - * Increment nlimbo, in order to avoid a race condition - * with prof_ctx_merge()/prof_ctx_destroy(). - */ - malloc_mutex_lock(ctx.p->lock); - ctx.p->nlimbo++; - malloc_mutex_unlock(ctx.p->lock); - new_ctx = false; - } - prof_leave(prof_tdata); - - /* Link a prof_thd_cnt_t into ctx for this thread. 
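The BT_FRAME() macro above is unrolled by hand because __builtin_frame_address() and __builtin_return_address() only accept compile-time constants. A standalone sketch of the same idea, capturing a few of its own return addresses; results are only meaningful when frame pointers are kept (e.g. -O0 or -fno-omit-frame-pointer), and the function names are invented for the example:

#include <stdio.h>

#define MAX_FRAMES 4

/* Record the return address at constant level i, or stop the walk. */
#define CAPTURE_FRAME(i)                                                \
    do {                                                                \
        if (len >= MAX_FRAMES || __builtin_frame_address(i) == NULL)   \
            return len;                                                 \
        vec[len++] = __builtin_return_address(i);                       \
    } while (0)

static int
capture_backtrace(void **vec)
{
    int len = 0;

    CAPTURE_FRAME(0);
    CAPTURE_FRAME(1);
    CAPTURE_FRAME(2);
    return len;
}

/* Nested callers keep every walked frame inside this translation unit. */
static int middle(void **vec) { return capture_backtrace(vec); }
static int outer(void **vec)  { return middle(vec); }

int
main(void)
{
    void *vec[MAX_FRAMES];
    int n = outer(vec);

    for (int i = 0; i < n; i++)
        printf("frame %d: %p\n", i, vec[i]);
    return 0;
}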
*/ - if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) { - assert(ckh_count(&prof_tdata->bt2cnt) > 0); - /* - * Flush the least recently used cnt in order to keep - * bt2cnt from becoming too large. - */ - ret.p = ql_last(&prof_tdata->lru_ql, lru_link); - assert(ret.v != NULL); - if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt, - NULL, NULL)) - assert(false); - ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); - prof_ctx_merge(ret.p->ctx, ret.p); - /* ret can now be re-used. */ - } else { - assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX); - /* Allocate and partially initialize a new cnt. */ - ret.v = imalloc(sizeof(prof_thr_cnt_t)); - if (ret.p == NULL) { - if (new_ctx) - prof_ctx_destroy(ctx.p); - return (NULL); - } - ql_elm_new(ret.p, cnts_link); - ql_elm_new(ret.p, lru_link); - } - /* Finish initializing ret. */ - ret.p->ctx = ctx.p; - ret.p->epoch = 0; - memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); - if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) { - if (new_ctx) - prof_ctx_destroy(ctx.p); - idalloc(ret.v); - return (NULL); - } - ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); - malloc_mutex_lock(ctx.p->lock); - ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link); - ctx.p->nlimbo--; - malloc_mutex_unlock(ctx.p->lock); - } else { - /* Move ret to the front of the LRU. */ - ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); - ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); - } - - return (ret.p); -} - -static bool -prof_flush(bool propagate_err) -{ - bool ret = false; - ssize_t err; - - cassert(config_prof); - - err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); - if (err == -1) { - if (propagate_err == false) { - malloc_write("<jemalloc>: write() failed during heap " - "profile flush\n"); - if (opt_abort) - abort(); - } - ret = true; - } - prof_dump_buf_end = 0; - - return (ret); -} - -static bool -prof_write(bool propagate_err, const char *s) -{ - unsigned i, slen, n; - - cassert(config_prof); - - i = 0; - slen = strlen(s); - while (i < slen) { - /* Flush the buffer if it is full. */ - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) - if (prof_flush(propagate_err) && propagate_err) - return (true); - - if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { - /* Finish writing. */ - n = slen - i; - } else { - /* Write as much of s as will fit. */ - n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; - } - memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); - prof_dump_buf_end += n; - i += n; - } - - return (false); -} - -JEMALLOC_ATTR(format(printf, 2, 3)) -static bool -prof_printf(bool propagate_err, const char *format, ...) -{ - bool ret; - va_list ap; - char buf[PROF_PRINTF_BUFSIZE]; - - va_start(ap, format); - malloc_vsnprintf(buf, sizeof(buf), format, ap); - va_end(ap); - ret = prof_write(propagate_err, buf); - - return (ret); -} - -static void -prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx) -{ - prof_thr_cnt_t *thr_cnt; - prof_cnt_t tcnt; - - cassert(config_prof); - - malloc_mutex_lock(ctx->lock); - - memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t)); - ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) { - volatile unsigned *epoch = &thr_cnt->epoch; - - while (true) { - unsigned epoch0 = *epoch; - - /* Make sure epoch is even. */ - if (epoch0 & 1U) - continue; - - memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t)); - - /* Terminate if epoch didn't change while reading. 
*/ - if (*epoch == epoch0) - break; - } - - ctx->cnt_summed.curobjs += tcnt.curobjs; - ctx->cnt_summed.curbytes += tcnt.curbytes; - if (opt_prof_accum) { - ctx->cnt_summed.accumobjs += tcnt.accumobjs; - ctx->cnt_summed.accumbytes += tcnt.accumbytes; - } - } - - if (ctx->cnt_summed.curobjs != 0) - (*leak_nctx)++; - - /* Add to cnt_all. */ - cnt_all->curobjs += ctx->cnt_summed.curobjs; - cnt_all->curbytes += ctx->cnt_summed.curbytes; - if (opt_prof_accum) { - cnt_all->accumobjs += ctx->cnt_summed.accumobjs; - cnt_all->accumbytes += ctx->cnt_summed.accumbytes; - } - - malloc_mutex_unlock(ctx->lock); -} - -static void -prof_ctx_destroy(prof_ctx_t *ctx) -{ - prof_tdata_t *prof_tdata; - - cassert(config_prof); - - /* - * Check that ctx is still unused by any thread cache before destroying - * it. prof_lookup() increments ctx->nlimbo in order to avoid a race - * condition with this function, as does prof_ctx_merge() in order to - * avoid a race between the main body of prof_ctx_merge() and entry - * into this function. - */ - prof_tdata = prof_tdata_get(false); - assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX); - prof_enter(prof_tdata); - malloc_mutex_lock(ctx->lock); - if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 && - ctx->nlimbo == 1) { - assert(ctx->cnt_merged.curbytes == 0); - assert(ctx->cnt_merged.accumobjs == 0); - assert(ctx->cnt_merged.accumbytes == 0); - /* Remove ctx from bt2ctx. */ - if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL)) - assert(false); - prof_leave(prof_tdata); - /* Destroy ctx. */ - malloc_mutex_unlock(ctx->lock); - bt_destroy(ctx->bt); - idalloc(ctx); - } else { - /* - * Compensate for increment in prof_ctx_merge() or - * prof_lookup(). - */ - ctx->nlimbo--; - malloc_mutex_unlock(ctx->lock); - prof_leave(prof_tdata); - } -} - -static void -prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt) -{ - bool destroy; - - cassert(config_prof); - - /* Merge cnt stats and detach from ctx. */ - malloc_mutex_lock(ctx->lock); - ctx->cnt_merged.curobjs += cnt->cnts.curobjs; - ctx->cnt_merged.curbytes += cnt->cnts.curbytes; - ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs; - ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes; - ql_remove(&ctx->cnts_ql, cnt, cnts_link); - if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL && - ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) { - /* - * Increment ctx->nlimbo in order to keep another thread from - * winning the race to destroy ctx while this one has ctx->lock - * dropped. Without this, it would be possible for another - * thread to: - * - * 1) Sample an allocation associated with ctx. - * 2) Deallocate the sampled object. - * 3) Successfully prof_ctx_destroy(ctx). - * - * The result would be that ctx no longer exists by the time - * this thread accesses it in prof_ctx_destroy(). - */ - ctx->nlimbo++; - destroy = true; - } else - destroy = false; - malloc_mutex_unlock(ctx->lock); - if (destroy) - prof_ctx_destroy(ctx); -} - -static bool -prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt) -{ - unsigned i; - - cassert(config_prof); - - /* - * Current statistics can sum to 0 as a result of unmerged per thread - * statistics. Additionally, interval- and growth-triggered dumps can - * occur between the time a ctx is created and when its statistics are - * filled in. Avoid dumping any ctx that is an artifact of either - * implementation detail. 
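prof_ctx_sum() above reads each thread's counters without a lock by using an epoch that is odd while an update is in progress; the reader retries until it sees the same even epoch before and after copying. A compact sketch of that seqlock-style protocol with C11 atomics; the memory ordering is deliberately simplified for illustration and would need careful review for production use:

#include <stdatomic.h>
#include <stdio.h>

struct counters {
    atomic_uint  epoch;     /* odd while an update is in progress */
    atomic_ulong curobjs;
    atomic_ulong curbytes;
};

/* Writer: make the epoch odd, publish the new values, make it even again. */
static void
counters_update(struct counters *c, unsigned long objs, unsigned long bytes)
{
    atomic_fetch_add_explicit(&c->epoch, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&c->curobjs, objs, memory_order_relaxed);
    atomic_store_explicit(&c->curbytes, bytes, memory_order_relaxed);
    atomic_fetch_add_explicit(&c->epoch, 1, memory_order_release);
}

/* Reader: retry until the same even epoch is seen before and after copying. */
static void
counters_snapshot(struct counters *c, unsigned long *objs, unsigned long *bytes)
{
    unsigned e0, e1;

    do {
        e0 = atomic_load_explicit(&c->epoch, memory_order_acquire);
        if (e0 & 1U)
            continue;       /* update in progress */
        *objs = atomic_load_explicit(&c->curobjs, memory_order_relaxed);
        *bytes = atomic_load_explicit(&c->curbytes, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);
        e1 = atomic_load_explicit(&c->epoch, memory_order_relaxed);
    } while (e0 != e1);
}

int
main(void)
{
    struct counters c;
    unsigned long objs, bytes;

    atomic_init(&c.epoch, 0);
    atomic_init(&c.curobjs, 0);
    atomic_init(&c.curbytes, 0);

    counters_update(&c, 3, 192);
    counters_snapshot(&c, &objs, &bytes);
    printf("snapshot: %lu objects, %lu bytes\n", objs, bytes);
    return 0;
}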
- */ - if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) || - (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) { - assert(ctx->cnt_summed.curobjs == 0); - assert(ctx->cnt_summed.curbytes == 0); - assert(ctx->cnt_summed.accumobjs == 0); - assert(ctx->cnt_summed.accumbytes == 0); - return (false); - } - - if (prof_printf(propagate_err, "%"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @", - ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes, - ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) - return (true); - - for (i = 0; i < bt->len; i++) { - if (prof_printf(propagate_err, " %#"PRIxPTR, - (uintptr_t)bt->vec[i])) - return (true); - } - - if (prof_write(propagate_err, "\n")) - return (true); - - return (false); -} - -static bool -prof_dump_maps(bool propagate_err) -{ - int mfd; - char filename[PATH_MAX + 1]; - - cassert(config_prof); - - malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps", - (int)getpid()); - mfd = open(filename, O_RDONLY); - if (mfd != -1) { - ssize_t nread; - - if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && - propagate_err) - return (true); - nread = 0; - do { - prof_dump_buf_end += nread; - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { - /* Make space in prof_dump_buf before read(). */ - if (prof_flush(propagate_err) && propagate_err) - return (true); - } - nread = read(mfd, &prof_dump_buf[prof_dump_buf_end], - PROF_DUMP_BUFSIZE - prof_dump_buf_end); - } while (nread > 0); - close(mfd); - } else - return (true); - - return (false); -} - -static bool -prof_dump(bool propagate_err, const char *filename, bool leakcheck) -{ - prof_tdata_t *prof_tdata; - prof_cnt_t cnt_all; - size_t tabind; - union { - prof_bt_t *p; - void *v; - } bt; - union { - prof_ctx_t *p; - void *v; - } ctx; - size_t leak_nctx; - - cassert(config_prof); - - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (true); - prof_enter(prof_tdata); - prof_dump_fd = creat(filename, 0644); - if (prof_dump_fd == -1) { - if (propagate_err == false) { - malloc_printf( - "<jemalloc>: creat(\"%s\"), 0644) failed\n", - filename); - if (opt_abort) - abort(); - } - goto label_error; - } - - /* Merge per thread profile stats, and sum them in cnt_all. */ - memset(&cnt_all, 0, sizeof(prof_cnt_t)); - leak_nctx = 0; - for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;) - prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx); - - /* Dump profile header. */ - if (opt_lg_prof_sample == 0) { - if (prof_printf(propagate_err, - "heap profile: %"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @ heapprofile\n", - cnt_all.curobjs, cnt_all.curbytes, - cnt_all.accumobjs, cnt_all.accumbytes)) - goto label_error; - } else { - if (prof_printf(propagate_err, - "heap profile: %"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n", - cnt_all.curobjs, cnt_all.curbytes, - cnt_all.accumobjs, cnt_all.accumbytes, - ((uint64_t)1U << opt_lg_prof_sample))) - goto label_error; - } - - /* Dump per ctx profile stats. */ - for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v) - == false;) { - if (prof_dump_ctx(propagate_err, ctx.p, bt.p)) - goto label_error; - } - - /* Dump /proc/<pid>/maps if possible. 
*/ - if (prof_dump_maps(propagate_err)) - goto label_error; - - if (prof_flush(propagate_err)) - goto label_error; - close(prof_dump_fd); - prof_leave(prof_tdata); - - if (leakcheck && cnt_all.curbytes != 0) { - malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %" - PRId64" object%s, %zu context%s\n", - cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "", - cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "", - leak_nctx, (leak_nctx != 1) ? "s" : ""); - malloc_printf( - "<jemalloc>: Run pprof on \"%s\" for leak detail\n", - filename); - } - - return (false); -label_error: - prof_leave(prof_tdata); - return (true); -} - -#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) -static void -prof_dump_filename(char *filename, char v, int64_t vseq) -{ - - cassert(config_prof); - - if (vseq != UINT64_C(0xffffffffffffffff)) { - /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"PRIu64".%c%"PRId64".heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq); - } else { - /* "<prefix>.<pid>.<seq>.<v>.heap" */ - malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"PRIu64".%c.heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v); - } - prof_dump_seq++; -} - -static void -prof_fdump(void) -{ - char filename[DUMP_FILENAME_BUFSIZE]; - - cassert(config_prof); - - if (prof_booted == false) - return; - - if (opt_prof_final && opt_prof_prefix[0] != '\0') { - malloc_mutex_lock(&prof_dump_seq_mtx); - prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff)); - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, opt_prof_leak); - } -} - -void -prof_idump(void) -{ - prof_tdata_t *prof_tdata; - char filename[PATH_MAX + 1]; - - cassert(config_prof); - - if (prof_booted == false) - return; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return; - if (prof_tdata->enq) { - prof_tdata->enq_idump = true; - return; - } - - if (opt_prof_prefix[0] != '\0') { - malloc_mutex_lock(&prof_dump_seq_mtx); - prof_dump_filename(filename, 'i', prof_dump_iseq); - prof_dump_iseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, false); - } -} - -bool -prof_mdump(const char *filename) -{ - char filename_buf[DUMP_FILENAME_BUFSIZE]; - - cassert(config_prof); - - if (opt_prof == false || prof_booted == false) - return (true); - - if (filename == NULL) { - /* No filename specified, so automatically generate one. 
*/ - if (opt_prof_prefix[0] == '\0') - return (true); - malloc_mutex_lock(&prof_dump_seq_mtx); - prof_dump_filename(filename_buf, 'm', prof_dump_mseq); - prof_dump_mseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); - filename = filename_buf; - } - return (prof_dump(true, filename, false)); -} - -void -prof_gdump(void) -{ - prof_tdata_t *prof_tdata; - char filename[DUMP_FILENAME_BUFSIZE]; - - cassert(config_prof); - - if (prof_booted == false) - return; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return; - if (prof_tdata->enq) { - prof_tdata->enq_gdump = true; - return; - } - - if (opt_prof_prefix[0] != '\0') { - malloc_mutex_lock(&prof_dump_seq_mtx); - prof_dump_filename(filename, 'u', prof_dump_useq); - prof_dump_useq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, false); - } -} - -static void -prof_bt_hash(const void *key, size_t r_hash[2]) -{ - prof_bt_t *bt = (prof_bt_t *)key; - - cassert(config_prof); - - hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); -} - -static bool -prof_bt_keycomp(const void *k1, const void *k2) -{ - const prof_bt_t *bt1 = (prof_bt_t *)k1; - const prof_bt_t *bt2 = (prof_bt_t *)k2; - - cassert(config_prof); - - if (bt1->len != bt2->len) - return (false); - return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); -} - -static malloc_mutex_t * -prof_ctx_mutex_choose(void) -{ - unsigned nctxs = atomic_add_u(&cum_ctxs, 1); - - return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]); -} - -prof_tdata_t * -prof_tdata_init(void) -{ - prof_tdata_t *prof_tdata; - - cassert(config_prof); - - /* Initialize an empty cache for this thread. */ - prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t)); - if (prof_tdata == NULL) - return (NULL); - - if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS, - prof_bt_hash, prof_bt_keycomp)) { - idalloc(prof_tdata); - return (NULL); - } - ql_new(&prof_tdata->lru_ql); - - prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX); - if (prof_tdata->vec == NULL) { - ckh_delete(&prof_tdata->bt2cnt); - idalloc(prof_tdata); - return (NULL); - } - - prof_tdata->prng_state = 0; - prof_tdata->threshold = 0; - prof_tdata->accum = 0; - - prof_tdata->enq = false; - prof_tdata->enq_idump = false; - prof_tdata->enq_gdump = false; - - prof_tdata_tsd_set(&prof_tdata); - - return (prof_tdata); -} - -void -prof_tdata_cleanup(void *arg) -{ - prof_thr_cnt_t *cnt; - prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg; - - cassert(config_prof); - - if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) { - /* - * Another destructor deallocated memory after this destructor - * was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY - * in order to receive another callback. - */ - prof_tdata = PROF_TDATA_STATE_PURGATORY; - prof_tdata_tsd_set(&prof_tdata); - } else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) { - /* - * The previous time this destructor was called, we set the key - * to PROF_TDATA_STATE_PURGATORY so that other destructors - * wouldn't cause re-creation of the prof_tdata. This time, do - * nothing, so that the destructor will not be called again. - */ - } else if (prof_tdata != NULL) { - /* - * Delete the hash table. All of its contents can still be - * iterated over via the LRU. - */ - ckh_delete(&prof_tdata->bt2cnt); - /* - * Iteratively merge cnt's into the global stats and delete - * them. 
- */ - while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) { - ql_remove(&prof_tdata->lru_ql, cnt, lru_link); - prof_ctx_merge(cnt->ctx, cnt); - idalloc(cnt); - } - idalloc(prof_tdata->vec); - idalloc(prof_tdata); - prof_tdata = PROF_TDATA_STATE_PURGATORY; - prof_tdata_tsd_set(&prof_tdata); - } -} - -void -prof_boot0(void) -{ - - cassert(config_prof); - - memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, - sizeof(PROF_PREFIX_DEFAULT)); -} - -void -prof_boot1(void) -{ - - cassert(config_prof); - - /* - * opt_prof and prof_promote must be in their final state before any - * arenas are initialized, so this function must be executed early. - */ - - if (opt_prof_leak && opt_prof == false) { - /* - * Enable opt_prof, but in such a way that profiles are never - * automatically dumped. - */ - opt_prof = true; - opt_prof_gdump = false; - } else if (opt_prof) { - if (opt_lg_prof_interval >= 0) { - prof_interval = (((uint64_t)1U) << - opt_lg_prof_interval); - } - } - - prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE); -} - -bool -prof_boot2(void) -{ - - cassert(config_prof); - - if (opt_prof) { - unsigned i; - - if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) - return (true); - if (malloc_mutex_init(&bt2ctx_mtx)) - return (true); - if (prof_tdata_tsd_boot()) { - malloc_write( - "<jemalloc>: Error in pthread_key_create()\n"); - abort(); - } - - if (malloc_mutex_init(&prof_dump_seq_mtx)) - return (true); - - if (atexit(prof_fdump) != 0) { - malloc_write("<jemalloc>: Error in atexit()\n"); - if (opt_abort) - abort(); - } - - ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS * - sizeof(malloc_mutex_t)); - if (ctx_locks == NULL) - return (true); - for (i = 0; i < PROF_NCTX_LOCKS; i++) { - if (malloc_mutex_init(&ctx_locks[i])) - return (true); - } - } - -#ifdef JEMALLOC_PROF_LIBGCC - /* - * Cause the backtracing machinery to allocate its internal state - * before enabling profiling. - */ - _Unwind_Backtrace(prof_unwind_init_callback, NULL); -#endif - - prof_booted = true; - - return (false); -} - -void -prof_prefork(void) -{ - - if (opt_prof) { - unsigned i; - - malloc_mutex_lock(&bt2ctx_mtx); - malloc_mutex_lock(&prof_dump_seq_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_lock(&ctx_locks[i]); - } -} - -void -prof_postfork_parent(void) -{ - - if (opt_prof) { - unsigned i; - - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_parent(&ctx_locks[i]); - malloc_mutex_postfork_parent(&prof_dump_seq_mtx); - malloc_mutex_postfork_parent(&bt2ctx_mtx); - } -} - -void -prof_postfork_child(void) -{ - - if (opt_prof) { - unsigned i; - - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_child(&ctx_locks[i]); - malloc_mutex_postfork_child(&prof_dump_seq_mtx); - malloc_mutex_postfork_child(&bt2ctx_mtx); - } -} - -/******************************************************************************/ diff --git a/extra/jemalloc/src/quarantine.c b/extra/jemalloc/src/quarantine.c deleted file mode 100644 index f96a948d5c7..00000000000 --- a/extra/jemalloc/src/quarantine.c +++ /dev/null @@ -1,190 +0,0 @@ -#define JEMALLOC_QUARANTINE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/* - * quarantine pointers close to NULL are used to encode state information that - * is used for cleaning up during thread shutdown. 
- */ -#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1) -#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2) -#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY - -/******************************************************************************/ -/* Data. */ - -malloc_tsd_data(, quarantine, quarantine_t *, NULL) - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static quarantine_t *quarantine_grow(quarantine_t *quarantine); -static void quarantine_drain_one(quarantine_t *quarantine); -static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound); - -/******************************************************************************/ - -quarantine_t * -quarantine_init(size_t lg_maxobjs) -{ - quarantine_t *quarantine; - - quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) + - ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t))); - if (quarantine == NULL) - return (NULL); - quarantine->curbytes = 0; - quarantine->curobjs = 0; - quarantine->first = 0; - quarantine->lg_maxobjs = lg_maxobjs; - - quarantine_tsd_set(&quarantine); - - return (quarantine); -} - -static quarantine_t * -quarantine_grow(quarantine_t *quarantine) -{ - quarantine_t *ret; - - ret = quarantine_init(quarantine->lg_maxobjs + 1); - if (ret == NULL) { - quarantine_drain_one(quarantine); - return (quarantine); - } - - ret->curbytes = quarantine->curbytes; - ret->curobjs = quarantine->curobjs; - if (quarantine->first + quarantine->curobjs <= (ZU(1) << - quarantine->lg_maxobjs)) { - /* objs ring buffer data are contiguous. */ - memcpy(ret->objs, &quarantine->objs[quarantine->first], - quarantine->curobjs * sizeof(quarantine_obj_t)); - } else { - /* objs ring buffer data wrap around. */ - size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) - - quarantine->first; - size_t ncopy_b = quarantine->curobjs - ncopy_a; - - memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a - * sizeof(quarantine_obj_t)); - memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * - sizeof(quarantine_obj_t)); - } - idalloc(quarantine); - - return (ret); -} - -static void -quarantine_drain_one(quarantine_t *quarantine) -{ - quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; - assert(obj->usize == isalloc(obj->ptr, config_prof)); - idalloc(obj->ptr); - quarantine->curbytes -= obj->usize; - quarantine->curobjs--; - quarantine->first = (quarantine->first + 1) & ((ZU(1) << - quarantine->lg_maxobjs) - 1); -} - -static void -quarantine_drain(quarantine_t *quarantine, size_t upper_bound) -{ - - while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) - quarantine_drain_one(quarantine); -} - -void -quarantine(void *ptr) -{ - quarantine_t *quarantine; - size_t usize = isalloc(ptr, config_prof); - - cassert(config_fill); - assert(opt_quarantine); - - quarantine = *quarantine_tsd_get(); - if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) { - if (quarantine == QUARANTINE_STATE_PURGATORY) { - /* - * Make a note that quarantine() was called after - * quarantine_cleanup() was called. - */ - quarantine = QUARANTINE_STATE_REINCARNATED; - quarantine_tsd_set(&quarantine); - } - idalloc(ptr); - return; - } - /* - * Drain one or more objects if the quarantine size limit would be - * exceeded by appending ptr. - */ - if (quarantine->curbytes + usize > opt_quarantine) { - size_t upper_bound = (opt_quarantine >= usize) ? 
opt_quarantine - - usize : 0; - quarantine_drain(quarantine, upper_bound); - } - /* Grow the quarantine ring buffer if it's full. */ - if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) - quarantine = quarantine_grow(quarantine); - /* quarantine_grow() must free a slot if it fails to grow. */ - assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); - /* Append ptr if its size doesn't exceed the quarantine size. */ - if (quarantine->curbytes + usize <= opt_quarantine) { - size_t offset = (quarantine->first + quarantine->curobjs) & - ((ZU(1) << quarantine->lg_maxobjs) - 1); - quarantine_obj_t *obj = &quarantine->objs[offset]; - obj->ptr = ptr; - obj->usize = usize; - quarantine->curbytes += usize; - quarantine->curobjs++; - if (opt_junk) - memset(ptr, 0x5a, usize); - } else { - assert(quarantine->curbytes == 0); - idalloc(ptr); - } -} - -void -quarantine_cleanup(void *arg) -{ - quarantine_t *quarantine = *(quarantine_t **)arg; - - if (quarantine == QUARANTINE_STATE_REINCARNATED) { - /* - * Another destructor deallocated memory after this destructor - * was called. Reset quarantine to QUARANTINE_STATE_PURGATORY - * in order to receive another callback. - */ - quarantine = QUARANTINE_STATE_PURGATORY; - quarantine_tsd_set(&quarantine); - } else if (quarantine == QUARANTINE_STATE_PURGATORY) { - /* - * The previous time this destructor was called, we set the key - * to QUARANTINE_STATE_PURGATORY so that other destructors - * wouldn't cause re-creation of the quarantine. This time, do - * nothing, so that the destructor will not be called again. - */ - } else if (quarantine != NULL) { - quarantine_drain(quarantine, 0); - idalloc(quarantine); - quarantine = QUARANTINE_STATE_PURGATORY; - quarantine_tsd_set(&quarantine); - } -} - -bool -quarantine_boot(void) -{ - - cassert(config_fill); - - if (quarantine_tsd_boot()) - return (true); - - return (false); -} diff --git a/extra/jemalloc/src/rtree.c b/extra/jemalloc/src/rtree.c deleted file mode 100644 index 90c6935a0ed..00000000000 --- a/extra/jemalloc/src/rtree.c +++ /dev/null @@ -1,67 +0,0 @@ -#define JEMALLOC_RTREE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -rtree_t * -rtree_new(unsigned bits) -{ - rtree_t *ret; - unsigned bits_per_level, height, i; - - bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1; - height = bits / bits_per_level; - if (height * bits_per_level != bits) - height++; - assert(height * bits_per_level >= bits); - - ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) + - (sizeof(unsigned) * height)); - if (ret == NULL) - return (NULL); - memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) * - height)); - - if (malloc_mutex_init(&ret->mutex)) { - /* Leak the rtree. */ - return (NULL); - } - ret->height = height; - if (bits_per_level * height > bits) - ret->level2bits[0] = bits % bits_per_level; - else - ret->level2bits[0] = bits_per_level; - for (i = 1; i < height; i++) - ret->level2bits[i] = bits_per_level; - - ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]); - if (ret->root == NULL) { - /* - * We leak the rtree here, since there's no generic base - * deallocation. 
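The quarantine above stores recently freed objects in a power-of-two ring buffer, with first marking the oldest slot and (first + curobjs) & (size - 1) the next free one. A standalone sketch of that indexing with plain integers instead of quarantine_obj_t entries:

#include <stdio.h>

#define LG_SLOTS 3
#define SLOTS    (1u << LG_SLOTS)

struct ring {
    unsigned first;     /* index of the oldest element */
    unsigned count;
    int slots[SLOTS];
};

static void
ring_push(struct ring *r, int v)
{
    if (r->count == SLOTS) {            /* full: drop the oldest entry */
        r->first = (r->first + 1) & (SLOTS - 1);
        r->count--;
    }
    r->slots[(r->first + r->count) & (SLOTS - 1)] = v;
    r->count++;
}

static int
ring_pop_oldest(struct ring *r)
{
    int v = r->slots[r->first];

    r->first = (r->first + 1) & (SLOTS - 1);
    r->count--;
    return v;
}

int
main(void)
{
    struct ring r = { 0, 0, { 0 } };

    for (int i = 0; i < 10; i++)        /* overflows the 8 slots, dropping 0 and 1 */
        ring_push(&r, i);
    while (r.count > 0)
        printf("%d ", ring_pop_oldest(&r));
    putchar('\n');                      /* prints: 2 3 4 5 6 7 8 9 */
    return 0;
}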
- */ - return (NULL); - } - memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]); - - return (ret); -} - -void -rtree_prefork(rtree_t *rtree) -{ - - malloc_mutex_prefork(&rtree->mutex); -} - -void -rtree_postfork_parent(rtree_t *rtree) -{ - - malloc_mutex_postfork_parent(&rtree->mutex); -} - -void -rtree_postfork_child(rtree_t *rtree) -{ - - malloc_mutex_postfork_child(&rtree->mutex); -} diff --git a/extra/jemalloc/src/stats.c b/extra/jemalloc/src/stats.c deleted file mode 100644 index 43f87af6700..00000000000 --- a/extra/jemalloc/src/stats.c +++ /dev/null @@ -1,549 +0,0 @@ -#define JEMALLOC_STATS_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -#define CTL_GET(n, v, t) do { \ - size_t sz = sizeof(t); \ - xmallctl(n, v, &sz, NULL, 0); \ -} while (0) - -#define CTL_I_GET(n, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = i; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ -} while (0) - -#define CTL_J_GET(n, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = j; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ -} while (0) - -#define CTL_IJ_GET(n, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = i; \ - mib[4] = j; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ -} while (0) - -/******************************************************************************/ -/* Data. */ - -bool opt_stats_print = false; - -size_t stats_cactive = 0; - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static void stats_arena_bins_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i, bool bins, bool large); - -/******************************************************************************/ - -static void -stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ - size_t page; - bool config_tcache; - unsigned nbins, j, gap_start; - - CTL_GET("arenas.page", &page, size_t); - - CTL_GET("config.tcache", &config_tcache, bool); - if (config_tcache) { - malloc_cprintf(write_cb, cbopaque, - "bins: bin size regs pgs allocated nmalloc" - " ndalloc nrequests nfills nflushes" - " newruns reruns curruns\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "bins: bin size regs pgs allocated nmalloc" - " ndalloc newruns reruns curruns\n"); - } - CTL_GET("arenas.nbins", &nbins, unsigned); - for (j = 0, gap_start = UINT_MAX; j < nbins; j++) { - uint64_t nruns; - - CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t); - if (nruns == 0) { - if (gap_start == UINT_MAX) - gap_start = j; - } else { - size_t reg_size, run_size, allocated; - uint32_t nregs; - uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t reruns; - size_t curruns; - - if (gap_start != UINT_MAX) { - if (j > gap_start + 1) { - /* Gap of more than one size class. */ - malloc_cprintf(write_cb, cbopaque, - "[%u..%u]\n", gap_start, - j - 1); - } else { - /* Gap of one size class. 
*/ - malloc_cprintf(write_cb, cbopaque, - "[%u]\n", gap_start); - } - gap_start = UINT_MAX; - } - CTL_J_GET("arenas.bin.0.size", ®_size, size_t); - CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t); - CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t); - CTL_IJ_GET("stats.arenas.0.bins.0.allocated", - &allocated, size_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc", - &nmalloc, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc", - &ndalloc, uint64_t); - if (config_tcache) { - CTL_IJ_GET("stats.arenas.0.bins.0.nrequests", - &nrequests, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nfills", - &nfills, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nflushes", - &nflushes, uint64_t); - } - CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns, - uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns, - size_t); - if (config_tcache) { - malloc_cprintf(write_cb, cbopaque, - "%13u %5zu %4u %3zu %12zu %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - j, reg_size, nregs, run_size / page, - allocated, nmalloc, ndalloc, nrequests, - nfills, nflushes, nruns, reruns, curruns); - } else { - malloc_cprintf(write_cb, cbopaque, - "%13u %5zu %4u %3zu %12zu %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - j, reg_size, nregs, run_size / page, - allocated, nmalloc, ndalloc, nruns, reruns, - curruns); - } - } - } - if (gap_start != UINT_MAX) { - if (j > gap_start + 1) { - /* Gap of more than one size class. */ - malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n", - gap_start, j - 1); - } else { - /* Gap of one size class. */ - malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start); - } - } -} - -static void -stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ - size_t page, nlruns, j; - ssize_t gap_start; - - CTL_GET("arenas.page", &page, size_t); - - malloc_cprintf(write_cb, cbopaque, - "large: size pages nmalloc ndalloc nrequests" - " curruns\n"); - CTL_GET("arenas.nlruns", &nlruns, size_t); - for (j = 0, gap_start = -1; j < nlruns; j++) { - uint64_t nmalloc, ndalloc, nrequests; - size_t run_size, curruns; - - CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc, - uint64_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc, - uint64_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests, - uint64_t); - if (nrequests == 0) { - if (gap_start == -1) - gap_start = j; - } else { - CTL_J_GET("arenas.lrun.0.size", &run_size, size_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns, - size_t); - if (gap_start != -1) { - malloc_cprintf(write_cb, cbopaque, "[%zu]\n", - j - gap_start); - gap_start = -1; - } - malloc_cprintf(write_cb, cbopaque, - "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - run_size, run_size / page, nmalloc, ndalloc, - nrequests, curruns); - } - } - if (gap_start != -1) - malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start); -} - -static void -stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i, bool bins, bool large) -{ - unsigned nthreads; - const char *dss; - size_t page, pactive, pdirty, mapped; - uint64_t npurge, nmadvise, purged; - size_t small_allocated; - uint64_t small_nmalloc, small_ndalloc, small_nrequests; - size_t large_allocated; - uint64_t large_nmalloc, large_ndalloc, large_nrequests; - - CTL_GET("arenas.page", &page, size_t); - - CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned); - malloc_cprintf(write_cb, cbopaque, - "assigned threads: %u\n", nthreads); - 
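The CTL_GET()/CTL_I_GET()/CTL_IJ_GET() macros used throughout this deleted stats.c are thin wrappers around jemalloc's mallctl interface: translate a dotted name to a MIB once, patch the arena (and bin/run) index into it, then read the value by MIB. A hedged usage sketch of the same pattern through the public API (assumes a jemalloc build that exposes the unprefixed mallctl*() entry points; the function name is invented):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    /* Print the number of active pages for every arena. */
    static int
    print_pactive_per_arena(void)
    {
            unsigned narenas, i;
            size_t sz = sizeof(narenas);
            size_t mib[4];
            size_t miblen = sizeof(mib) / sizeof(size_t);

            if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) != 0)
                    return (-1);
            if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) != 0)
                    return (-1);
            for (i = 0; i < narenas; i++) {
                    size_t pactive;

                    sz = sizeof(pactive);
                    mib[2] = i;     /* substitute the arena index into the MIB */
                    if (mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0) == 0)
                            printf("arena %u: %zu active pages\n", i, pactive);
            }
            return (0);
    }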
CTL_I_GET("stats.arenas.0.dss", &dss, const char *); - malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n", - dss); - CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t); - CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t); - CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t); - CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t); - CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s," - " %"PRIu64" madvise%s, %"PRIu64" purged\n", - pactive, pdirty, npurge, npurge == 1 ? "" : "s", - nmadvise, nmadvise == 1 ? "" : "s", purged); - - malloc_cprintf(write_cb, cbopaque, - " allocated nmalloc ndalloc nrequests\n"); - CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t); - CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t); - CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t); - CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - small_allocated, small_nmalloc, small_ndalloc, small_nrequests); - CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t); - CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t); - CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t); - CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - large_allocated, large_nmalloc, large_ndalloc, large_nrequests); - malloc_cprintf(write_cb, cbopaque, - "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - small_allocated + large_allocated, - small_nmalloc + large_nmalloc, - small_ndalloc + large_ndalloc, - small_nrequests + large_nrequests); - malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page); - CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped); - - if (bins) - stats_arena_bins_print(write_cb, cbopaque, i); - if (large) - stats_arena_lruns_print(write_cb, cbopaque, i); -} - -void -stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ - int err; - uint64_t epoch; - size_t u64sz; - bool general = true; - bool merged = true; - bool unmerged = true; - bool bins = true; - bool large = true; - - /* - * Refresh stats, in case mallctl() was called by the application. - * - * Check for OOM here, since refreshing the ctl cache can trigger - * allocation. In practice, none of the subsequent mallctl()-related - * calls in this function will cause OOM if this one succeeds. 
- * */ - epoch = 1; - u64sz = sizeof(uint64_t); - err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); - if (err != 0) { - if (err == EAGAIN) { - malloc_write("<jemalloc>: Memory allocation failure in " - "mallctl(\"epoch\", ...)\n"); - return; - } - malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", " - "...)\n"); - abort(); - } - - if (opts != NULL) { - unsigned i; - - for (i = 0; opts[i] != '\0'; i++) { - switch (opts[i]) { - case 'g': - general = false; - break; - case 'm': - merged = false; - break; - case 'a': - unmerged = false; - break; - case 'b': - bins = false; - break; - case 'l': - large = false; - break; - default:; - } - } - } - - malloc_cprintf(write_cb, cbopaque, - "___ Begin jemalloc statistics ___\n"); - if (general) { - int err; - const char *cpv; - bool bv; - unsigned uv; - ssize_t ssv; - size_t sv, bsz, ssz, sssz, cpsz; - - bsz = sizeof(bool); - ssz = sizeof(size_t); - sssz = sizeof(ssize_t); - cpsz = sizeof(const char *); - - CTL_GET("version", &cpv, const char *); - malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); - CTL_GET("config.debug", &bv, bool); - malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", - bv ? "enabled" : "disabled"); - -#define OPT_WRITE_BOOL(n) \ - if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \ - == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %s\n", bv ? "true" : "false"); \ - } -#define OPT_WRITE_SIZE_T(n) \ - if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \ - == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zu\n", sv); \ - } -#define OPT_WRITE_SSIZE_T(n) \ - if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \ - == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zd\n", ssv); \ - } -#define OPT_WRITE_CHAR_P(n) \ - if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \ - == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": \"%s\"\n", cpv); \ - } - - malloc_cprintf(write_cb, cbopaque, - "Run-time option settings:\n"); - OPT_WRITE_BOOL(abort) - OPT_WRITE_SIZE_T(lg_chunk) - OPT_WRITE_CHAR_P(dss) - OPT_WRITE_SIZE_T(narenas) - OPT_WRITE_SSIZE_T(lg_dirty_mult) - OPT_WRITE_BOOL(stats_print) - OPT_WRITE_BOOL(junk) - OPT_WRITE_SIZE_T(quarantine) - OPT_WRITE_BOOL(redzone) - OPT_WRITE_BOOL(zero) - OPT_WRITE_BOOL(utrace) - OPT_WRITE_BOOL(valgrind) - OPT_WRITE_BOOL(xmalloc) - OPT_WRITE_BOOL(tcache) - OPT_WRITE_SSIZE_T(lg_tcache_max) - OPT_WRITE_BOOL(prof) - OPT_WRITE_CHAR_P(prof_prefix) - OPT_WRITE_BOOL(prof_active) - OPT_WRITE_SSIZE_T(lg_prof_sample) - OPT_WRITE_BOOL(prof_accum) - OPT_WRITE_SSIZE_T(lg_prof_interval) - OPT_WRITE_BOOL(prof_gdump) - OPT_WRITE_BOOL(prof_final) - OPT_WRITE_BOOL(prof_leak) - -#undef OPT_WRITE_BOOL -#undef OPT_WRITE_SIZE_T -#undef OPT_WRITE_SSIZE_T -#undef OPT_WRITE_CHAR_P - - malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); - - CTL_GET("arenas.narenas", &uv, unsigned); - malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); - - malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", - sizeof(void *)); - - CTL_GET("arenas.quantum", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); - - CTL_GET("arenas.page", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); - - CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: %u:1\n", - (1U << ssv)); - } else { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: N/A\n"); - } - if ((err = 
je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0)) - == 0) { - malloc_cprintf(write_cb, cbopaque, - "Maximum thread-cached size class: %zu\n", sv); - } - if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 && - bv) { - CTL_GET("opt.lg_prof_sample", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, - "Average profile sample interval: %"PRIu64 - " (2^%zu)\n", (((uint64_t)1U) << sv), sv); - - CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: %"PRIu64 - " (2^%zd)\n", - (((uint64_t)1U) << ssv), ssv); - } else { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: N/A\n"); - } - } - CTL_GET("opt.lg_chunk", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n", - (ZU(1) << sv), sv); - } - - if (config_stats) { - size_t *cactive; - size_t allocated, active, mapped; - size_t chunks_current, chunks_high; - uint64_t chunks_total; - size_t huge_allocated; - uint64_t huge_nmalloc, huge_ndalloc; - - CTL_GET("stats.cactive", &cactive, size_t *); - CTL_GET("stats.allocated", &allocated, size_t); - CTL_GET("stats.active", &active, size_t); - CTL_GET("stats.mapped", &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, - "Allocated: %zu, active: %zu, mapped: %zu\n", - allocated, active, mapped); - malloc_cprintf(write_cb, cbopaque, - "Current active ceiling: %zu\n", atomic_read_z(cactive)); - - /* Print chunk stats. */ - CTL_GET("stats.chunks.total", &chunks_total, uint64_t); - CTL_GET("stats.chunks.high", &chunks_high, size_t); - CTL_GET("stats.chunks.current", &chunks_current, size_t); - malloc_cprintf(write_cb, cbopaque, "chunks: nchunks " - "highchunks curchunks\n"); - malloc_cprintf(write_cb, cbopaque, - " %13"PRIu64" %12zu %12zu\n", - chunks_total, chunks_high, chunks_current); - - /* Print huge stats. */ - CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t); - CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t); - CTL_GET("stats.huge.allocated", &huge_allocated, size_t); - malloc_cprintf(write_cb, cbopaque, - "huge: nmalloc ndalloc allocated\n"); - malloc_cprintf(write_cb, cbopaque, - " %12"PRIu64" %12"PRIu64" %12zu\n", - huge_nmalloc, huge_ndalloc, huge_allocated); - - if (merged) { - unsigned narenas; - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i, ninitialized; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - for (i = ninitialized = 0; i < narenas; i++) { - if (initialized[i]) - ninitialized++; - } - - if (ninitialized > 1 || unmerged == false) { - /* Print merged arena stats. */ - malloc_cprintf(write_cb, cbopaque, - "\nMerged arenas stats:\n"); - stats_arena_print(write_cb, cbopaque, - narenas, bins, large); - } - } - } - - if (unmerged) { - unsigned narenas; - - /* Print stats for each arena. 
*/ - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - - for (i = 0; i < narenas; i++) { - if (initialized[i]) { - malloc_cprintf(write_cb, - cbopaque, - "\narenas[%u]:\n", i); - stats_arena_print(write_cb, - cbopaque, i, bins, large); - } - } - } - } - } - malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n"); -} diff --git a/extra/jemalloc/src/tcache.c b/extra/jemalloc/src/tcache.c deleted file mode 100644 index 98ed19edd52..00000000000 --- a/extra/jemalloc/src/tcache.c +++ /dev/null @@ -1,476 +0,0 @@ -#define JEMALLOC_TCACHE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -malloc_tsd_data(, tcache, tcache_t *, NULL) -malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default) - -bool opt_tcache = true; -ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; - -tcache_bin_info_t *tcache_bin_info; -static unsigned stack_nelms; /* Total stack elms per tcache. */ - -size_t nhbins; -size_t tcache_maxclass; - -/******************************************************************************/ - -size_t tcache_salloc(const void *ptr) -{ - - return (arena_salloc(ptr, false)); -} - -void -tcache_event_hard(tcache_t *tcache) -{ - size_t binind = tcache->next_gc_bin; - tcache_bin_t *tbin = &tcache->tbins[binind]; - tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; - - if (tbin->low_water > 0) { - /* - * Flush (ceiling) 3/4 of the objects below the low water mark. - */ - if (binind < NBINS) { - tcache_bin_flush_small(tbin, binind, tbin->ncached - - tbin->low_water + (tbin->low_water >> 2), tcache); - } else { - tcache_bin_flush_large(tbin, binind, tbin->ncached - - tbin->low_water + (tbin->low_water >> 2), tcache); - } - /* - * Reduce fill count by 2X. Limit lg_fill_div such that the - * fill count is always at least 1. - */ - if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) - tbin->lg_fill_div++; - } else if (tbin->low_water < 0) { - /* - * Increase fill count by 2X. Make sure lg_fill_div stays - * greater than 0. - */ - if (tbin->lg_fill_div > 1) - tbin->lg_fill_div--; - } - tbin->low_water = tbin->ncached; - - tcache->next_gc_bin++; - if (tcache->next_gc_bin == nhbins) - tcache->next_gc_bin = 0; - tcache->ev_cnt = 0; -} - -void * -tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind) -{ - void *ret; - - arena_tcache_fill_small(tcache->arena, tbin, binind, - config_prof ? tcache->prof_accumbytes : 0); - if (config_prof) - tcache->prof_accumbytes = 0; - ret = tcache_alloc_easy(tbin); - - return (ret); -} - -void -tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache) -{ - void *ptr; - unsigned i, nflush, ndeferred; - bool merged_stats = false; - - assert(binind < NBINS); - assert(rem <= tbin->ncached); - - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { - /* Lock the arena bin associated with the first object. 
*/ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *arena = chunk->arena; - arena_bin_t *bin = &arena->bins[binind]; - - if (config_prof && arena == tcache->arena) { - if (arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); - tcache->prof_accumbytes = 0; - } - - malloc_mutex_lock(&bin->lock); - if (config_stats && arena == tcache->arena) { - assert(merged_stats == false); - merged_stats = true; - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk->arena == arena) { - size_t pageind = ((uintptr_t)ptr - - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = - arena_mapp_get(chunk, pageind); - if (config_fill && opt_junk) { - arena_alloc_junk_small(ptr, - &arena_bin_info[binind], true); - } - arena_dalloc_bin_locked(arena, chunk, ptr, - mapelm); - } else { - /* - * This object was allocated via a different - * arena bin than the one that is currently - * locked. Stash the object, so that it can be - * handled in a future pass. - */ - tbin->avail[ndeferred] = ptr; - ndeferred++; - } - } - malloc_mutex_unlock(&bin->lock); - } - if (config_stats && merged_stats == false) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ - arena_bin_t *bin = &tcache->arena->bins[binind]; - malloc_mutex_lock(&bin->lock); - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&bin->lock); - } - - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); - tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; -} - -void -tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache) -{ - void *ptr; - unsigned i, nflush, ndeferred; - bool merged_stats = false; - - assert(binind < nhbins); - assert(rem <= tbin->ncached); - - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { - /* Lock the arena associated with the first object. */ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *arena = chunk->arena; - UNUSED bool idump; - - if (config_prof) - idump = false; - malloc_mutex_lock(&arena->lock); - if ((config_prof || config_stats) && arena == tcache->arena) { - if (config_prof) { - idump = arena_prof_accum_locked(arena, - tcache->prof_accumbytes); - tcache->prof_accumbytes = 0; - } - if (config_stats) { - merged_stats = true; - arena->stats.nrequests_large += - tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } - } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk->arena == arena) - arena_dalloc_large_locked(arena, chunk, ptr); - else { - /* - * This object was allocated via a different - * arena than the one that is currently locked. - * Stash the object, so that it can be handled - * in a future pass. 
- */ - tbin->avail[ndeferred] = ptr; - ndeferred++; - } - } - malloc_mutex_unlock(&arena->lock); - if (config_prof && idump) - prof_idump(); - } - if (config_stats && merged_stats == false) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ - arena_t *arena = tcache->arena; - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&arena->lock); - } - - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); - tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; -} - -void -tcache_arena_associate(tcache_t *tcache, arena_t *arena) -{ - - if (config_stats) { - /* Link into list of extant tcaches. */ - malloc_mutex_lock(&arena->lock); - ql_elm_new(tcache, link); - ql_tail_insert(&arena->tcache_ql, tcache, link); - malloc_mutex_unlock(&arena->lock); - } - tcache->arena = arena; -} - -void -tcache_arena_dissociate(tcache_t *tcache) -{ - - if (config_stats) { - /* Unlink from list of extant tcaches. */ - malloc_mutex_lock(&tcache->arena->lock); - ql_remove(&tcache->arena->tcache_ql, tcache, link); - malloc_mutex_unlock(&tcache->arena->lock); - tcache_stats_merge(tcache, tcache->arena); - } -} - -tcache_t * -tcache_create(arena_t *arena) -{ - tcache_t *tcache; - size_t size, stack_offset; - unsigned i; - - size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); - /* Naturally align the pointer stacks. */ - size = PTR_CEILING(size); - stack_offset = size; - size += stack_nelms * sizeof(void *); - /* - * Round up to the nearest multiple of the cacheline size, in order to - * avoid the possibility of false cacheline sharing. - * - * That this works relies on the same logic as in ipalloc(), but we - * cannot directly call ipalloc() here due to tcache bootstrapping - * issues. 
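The rounding that this comment motivates, and that the statement below performs with CACHELINE_MASK and -CACHELINE, is the usual power-of-two ceiling. A minimal sketch that assumes a 64-byte cacheline (the constant is only assumed for the illustration):

    #include <stddef.h>

    #define CACHELINE       ((size_t)64)            /* assumed for the sketch */
    #define CACHELINE_MASK  (CACHELINE - 1)

    /* Round size up to the next multiple of the cacheline size;
     * equivalent to (size + CACHELINE_MASK) & -CACHELINE. */
    static inline size_t
    cacheline_ceiling(size_t size)
    {
            return ((size + CACHELINE_MASK) & ~CACHELINE_MASK);
    }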
- */ - size = (size + CACHELINE_MASK) & (-CACHELINE); - - if (size <= SMALL_MAXCLASS) - tcache = (tcache_t *)arena_malloc_small(arena, size, true); - else if (size <= tcache_maxclass) - tcache = (tcache_t *)arena_malloc_large(arena, size, true); - else - tcache = (tcache_t *)icallocx(size, false, arena); - - if (tcache == NULL) - return (NULL); - - tcache_arena_associate(tcache, arena); - - assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); - for (i = 0; i < nhbins; i++) { - tcache->tbins[i].lg_fill_div = 1; - tcache->tbins[i].avail = (void **)((uintptr_t)tcache + - (uintptr_t)stack_offset); - stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); - } - - tcache_tsd_set(&tcache); - - return (tcache); -} - -void -tcache_destroy(tcache_t *tcache) -{ - unsigned i; - size_t tcache_size; - - tcache_arena_dissociate(tcache); - - for (i = 0; i < NBINS; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_small(tbin, i, 0, tcache); - - if (config_stats && tbin->tstats.nrequests != 0) { - arena_t *arena = tcache->arena; - arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(&bin->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); - } - } - - for (; i < nhbins; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_large(tbin, i, 0, tcache); - - if (config_stats && tbin->tstats.nrequests != 0) { - arena_t *arena = tcache->arena; - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[i - NBINS].nrequests += - tbin->tstats.nrequests; - malloc_mutex_unlock(&arena->lock); - } - } - - if (config_prof && tcache->prof_accumbytes > 0 && - arena_prof_accum(tcache->arena, tcache->prof_accumbytes)) - prof_idump(); - - tcache_size = arena_salloc(tcache, false); - if (tcache_size <= SMALL_MAXCLASS) { - arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); - arena_t *arena = chunk->arena; - size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >> - LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); - - arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm); - } else if (tcache_size <= tcache_maxclass) { - arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); - arena_t *arena = chunk->arena; - - arena_dalloc_large(arena, chunk, tcache); - } else - idallocx(tcache, false); -} - -void -tcache_thread_cleanup(void *arg) -{ - tcache_t *tcache = *(tcache_t **)arg; - - if (tcache == TCACHE_STATE_DISABLED) { - /* Do nothing. */ - } else if (tcache == TCACHE_STATE_REINCARNATED) { - /* - * Another destructor called an allocator function after this - * destructor was called. Reset tcache to - * TCACHE_STATE_PURGATORY in order to receive another callback. - */ - tcache = TCACHE_STATE_PURGATORY; - tcache_tsd_set(&tcache); - } else if (tcache == TCACHE_STATE_PURGATORY) { - /* - * The previous time this destructor was called, we set the key - * to TCACHE_STATE_PURGATORY so that other destructors wouldn't - * cause re-creation of the tcache. This time, do nothing, so - * that the destructor will not be called again. - */ - } else if (tcache != NULL) { - assert(tcache != TCACHE_STATE_PURGATORY); - tcache_destroy(tcache); - tcache = TCACHE_STATE_PURGATORY; - tcache_tsd_set(&tcache); - } -} - -void -tcache_stats_merge(tcache_t *tcache, arena_t *arena) -{ - unsigned i; - - /* Merge and reset tcache stats. 
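tcache_thread_cleanup() above (like quarantine_cleanup() earlier in this commit) guards against thread-specific-data destructors being re-entered: another destructor may repopulate the key, so a PURGATORY sentinel marks state that has already been torn down. A rough pthreads-only analogue of that pattern (names invented; jemalloc's own TSD wrapper differs in detail):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define STATE_PURGATORY ((void *)(uintptr_t)1)

    static pthread_key_t data_key;

    static void
    data_cleanup(void *arg)
    {
            if (arg == NULL || arg == STATE_PURGATORY)
                    return;                 /* nothing left to release */
            free(arg);                      /* the real per-thread state */
            /* Re-arm the key so a later re-entry sees the sentinel. */
            pthread_setspecific(data_key, STATE_PURGATORY);
    }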
*/ - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - tcache_bin_t *tbin = &tcache->tbins[i]; - malloc_mutex_lock(&bin->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); - tbin->tstats.nrequests = 0; - } - - for (; i < nhbins; i++) { - malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; - tcache_bin_t *tbin = &tcache->tbins[i]; - arena->stats.nrequests_large += tbin->tstats.nrequests; - lstats->nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } -} - -bool -tcache_boot0(void) -{ - unsigned i; - - /* - * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is - * known. - */ - if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) - tcache_maxclass = SMALL_MAXCLASS; - else if ((1U << opt_lg_tcache_max) > arena_maxclass) - tcache_maxclass = arena_maxclass; - else - tcache_maxclass = (1U << opt_lg_tcache_max); - - nhbins = NBINS + (tcache_maxclass >> LG_PAGE); - - /* Initialize tcache_bin_info. */ - tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * - sizeof(tcache_bin_info_t)); - if (tcache_bin_info == NULL) - return (true); - stack_nelms = 0; - for (i = 0; i < NBINS; i++) { - if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) { - tcache_bin_info[i].ncached_max = - (arena_bin_info[i].nregs << 1); - } else { - tcache_bin_info[i].ncached_max = - TCACHE_NSLOTS_SMALL_MAX; - } - stack_nelms += tcache_bin_info[i].ncached_max; - } - for (; i < nhbins; i++) { - tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; - stack_nelms += tcache_bin_info[i].ncached_max; - } - - return (false); -} - -bool -tcache_boot1(void) -{ - - if (tcache_tsd_boot() || tcache_enabled_tsd_boot()) - return (true); - - return (false); -} diff --git a/extra/jemalloc/src/tsd.c b/extra/jemalloc/src/tsd.c deleted file mode 100644 index 961a546329c..00000000000 --- a/extra/jemalloc/src/tsd.c +++ /dev/null @@ -1,107 +0,0 @@ -#define JEMALLOC_TSD_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -static unsigned ncleanups; -static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; - -/******************************************************************************/ - -void * -malloc_tsd_malloc(size_t size) -{ - - /* Avoid choose_arena() in order to dodge bootstrapping issues. 
*/ - return (arena_malloc(arenas[0], size, false, false)); -} - -void -malloc_tsd_dalloc(void *wrapper) -{ - - idalloc(wrapper); -} - -void -malloc_tsd_no_cleanup(void *arg) -{ - - not_reached(); -} - -#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) -#ifndef _WIN32 -JEMALLOC_EXPORT -#endif -void -_malloc_thread_cleanup(void) -{ - bool pending[MALLOC_TSD_CLEANUPS_MAX], again; - unsigned i; - - for (i = 0; i < ncleanups; i++) - pending[i] = true; - - do { - again = false; - for (i = 0; i < ncleanups; i++) { - if (pending[i]) { - pending[i] = cleanups[i](); - if (pending[i]) - again = true; - } - } - } while (again); -} -#endif - -void -malloc_tsd_cleanup_register(bool (*f)(void)) -{ - - assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); - cleanups[ncleanups] = f; - ncleanups++; -} - -void -malloc_tsd_boot(void) -{ - - ncleanups = 0; -} - -#ifdef _WIN32 -static BOOL WINAPI -_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) -{ - - switch (fdwReason) { -#ifdef JEMALLOC_LAZY_LOCK - case DLL_THREAD_ATTACH: - isthreaded = true; - break; -#endif - case DLL_THREAD_DETACH: - _malloc_thread_cleanup(); - break; - default: - break; - } - return (true); -} - -#ifdef _MSC_VER -# ifdef _M_IX86 -# pragma comment(linker, "/INCLUDE:__tls_used") -# else -# pragma comment(linker, "/INCLUDE:_tls_used") -# endif -# pragma section(".CRT$XLY",long,read) -#endif -JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) -static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL, - DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; -#endif diff --git a/extra/jemalloc/src/util.c b/extra/jemalloc/src/util.c deleted file mode 100644 index b3a01143698..00000000000 --- a/extra/jemalloc/src/util.c +++ /dev/null @@ -1,641 +0,0 @@ -#define assert(e) do { \ - if (config_debug && !(e)) { \ - malloc_write("<jemalloc>: Failed assertion\n"); \ - abort(); \ - } \ -} while (0) - -#define not_reached() do { \ - if (config_debug) { \ - malloc_write("<jemalloc>: Unreachable code reached\n"); \ - abort(); \ - } \ -} while (0) - -#define not_implemented() do { \ - if (config_debug) { \ - malloc_write("<jemalloc>: Not implemented\n"); \ - abort(); \ - } \ -} while (0) - -#define JEMALLOC_UTIL_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static void wrtmessage(void *cbopaque, const char *s); -#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) -static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, - size_t *slen_p); -#define D2S_BUFSIZE (1 + U2S_BUFSIZE) -static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); -#define O2S_BUFSIZE (1 + U2S_BUFSIZE) -static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); -#define X2S_BUFSIZE (2 + U2S_BUFSIZE) -static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, - size_t *slen_p); - -/******************************************************************************/ - -/* malloc_message() setup. */ -static void -wrtmessage(void *cbopaque, const char *s) -{ - -#ifdef SYS_write - /* - * Use syscall(2) rather than write(2) when possible in order to avoid - * the possibility of memory allocation within libc. This is necessary - * on FreeBSD; most operating systems do not have this problem though. 
- */ - UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); -#else - UNUSED int result = write(STDERR_FILENO, s, strlen(s)); -#endif -} - -JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); - -/* - * Wrapper around malloc_message() that avoids the need for - * je_malloc_message(...) throughout the code. - */ -void -malloc_write(const char *s) -{ - - if (je_malloc_message != NULL) - je_malloc_message(NULL, s); - else - wrtmessage(NULL, s); -} - -/* - * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so - * provide a wrapper. - */ -int -buferror(char *buf, size_t buflen) -{ - -#ifdef _WIN32 - FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0, - (LPSTR)buf, buflen, NULL); - return (0); -#elif defined(_GNU_SOURCE) - char *b = strerror_r(errno, buf, buflen); - if (b != buf) { - strncpy(buf, b, buflen); - buf[buflen-1] = '\0'; - } - return (0); -#else - return (strerror_r(errno, buf, buflen)); -#endif -} - -uintmax_t -malloc_strtoumax(const char *nptr, char **endptr, int base) -{ - uintmax_t ret, digit; - int b; - bool neg; - const char *p, *ns; - - if (base < 0 || base == 1 || base > 36) { - set_errno(EINVAL); - return (UINTMAX_MAX); - } - b = base; - - /* Swallow leading whitespace and get sign, if any. */ - neg = false; - p = nptr; - while (true) { - switch (*p) { - case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': - p++; - break; - case '-': - neg = true; - /* Fall through. */ - case '+': - p++; - /* Fall through. */ - default: - goto label_prefix; - } - } - - /* Get prefix, if any. */ - label_prefix: - /* - * Note where the first non-whitespace/sign character is so that it is - * possible to tell whether any digits are consumed (e.g., " 0" vs. - * " -x"). - */ - ns = p; - if (*p == '0') { - switch (p[1]) { - case '0': case '1': case '2': case '3': case '4': case '5': - case '6': case '7': - if (b == 0) - b = 8; - if (b == 8) - p++; - break; - case 'x': - switch (p[2]) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - case 'A': case 'B': case 'C': case 'D': case 'E': - case 'F': - case 'a': case 'b': case 'c': case 'd': case 'e': - case 'f': - if (b == 0) - b = 16; - if (b == 16) - p += 2; - break; - default: - break; - } - break; - default: - break; - } - } - if (b == 0) - b = 10; - - /* Convert. */ - ret = 0; - while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) - || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) - || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { - uintmax_t pret = ret; - ret *= b; - ret += digit; - if (ret < pret) { - /* Overflow. */ - set_errno(ERANGE); - return (UINTMAX_MAX); - } - p++; - } - if (neg) - ret = -ret; - - if (endptr != NULL) { - if (p == ns) { - /* No characters were converted. */ - *endptr = (char *)nptr; - } else - *endptr = (char *)p; - } - - return (ret); -} - -static char * -u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) -{ - unsigned i; - - i = U2S_BUFSIZE - 1; - s[i] = '\0'; - switch (base) { - case 10: - do { - i--; - s[i] = "0123456789"[x % (uint64_t)10]; - x /= (uint64_t)10; - } while (x > 0); - break; - case 16: { - const char *digits = (uppercase) - ? "0123456789ABCDEF" - : "0123456789abcdef"; - - do { - i--; - s[i] = digits[x & 0xf]; - x >>= 4; - } while (x > 0); - break; - } default: { - const char *digits = (uppercase) - ? 
"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - : "0123456789abcdefghijklmnopqrstuvwxyz"; - - assert(base >= 2 && base <= 36); - do { - i--; - s[i] = digits[x % (uint64_t)base]; - x /= (uint64_t)base; - } while (x > 0); - }} - - *slen_p = U2S_BUFSIZE - 1 - i; - return (&s[i]); -} - -static char * -d2s(intmax_t x, char sign, char *s, size_t *slen_p) -{ - bool neg; - - if ((neg = (x < 0))) - x = -x; - s = u2s(x, 10, false, s, slen_p); - if (neg) - sign = '-'; - switch (sign) { - case '-': - if (neg == false) - break; - /* Fall through. */ - case ' ': - case '+': - s--; - (*slen_p)++; - *s = sign; - break; - default: not_reached(); - } - return (s); -} - -static char * -o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) -{ - - s = u2s(x, 8, false, s, slen_p); - if (alt_form && *s != '0') { - s--; - (*slen_p)++; - *s = '0'; - } - return (s); -} - -static char * -x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) -{ - - s = u2s(x, 16, uppercase, s, slen_p); - if (alt_form) { - s -= 2; - (*slen_p) += 2; - memcpy(s, uppercase ? "0X" : "0x", 2); - } - return (s); -} - -int -malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) -{ - int ret; - size_t i; - const char *f; - -#define APPEND_C(c) do { \ - if (i < size) \ - str[i] = (c); \ - i++; \ -} while (0) -#define APPEND_S(s, slen) do { \ - if (i < size) { \ - size_t cpylen = (slen <= size - i) ? slen : size - i; \ - memcpy(&str[i], s, cpylen); \ - } \ - i += slen; \ -} while (0) -#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ - /* Left padding. */ \ - size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ - (size_t)width - slen : 0); \ - if (left_justify == false && pad_len != 0) { \ - size_t j; \ - for (j = 0; j < pad_len; j++) \ - APPEND_C(' '); \ - } \ - /* Value. */ \ - APPEND_S(s, slen); \ - /* Right padding. */ \ - if (left_justify && pad_len != 0) { \ - size_t j; \ - for (j = 0; j < pad_len; j++) \ - APPEND_C(' '); \ - } \ -} while (0) -#define GET_ARG_NUMERIC(val, len) do { \ - switch (len) { \ - case '?': \ - val = va_arg(ap, int); \ - break; \ - case '?' | 0x80: \ - val = va_arg(ap, unsigned int); \ - break; \ - case 'l': \ - val = va_arg(ap, long); \ - break; \ - case 'l' | 0x80: \ - val = va_arg(ap, unsigned long); \ - break; \ - case 'q': \ - val = va_arg(ap, long long); \ - break; \ - case 'q' | 0x80: \ - val = va_arg(ap, unsigned long long); \ - break; \ - case 'j': \ - val = va_arg(ap, intmax_t); \ - break; \ - case 't': \ - val = va_arg(ap, ptrdiff_t); \ - break; \ - case 'z': \ - val = va_arg(ap, ssize_t); \ - break; \ - case 'z' | 0x80: \ - val = va_arg(ap, size_t); \ - break; \ - case 'p': /* Synthetic; used for %p. */ \ - val = va_arg(ap, uintptr_t); \ - break; \ - default: not_reached(); \ - } \ -} while (0) - - i = 0; - f = format; - while (true) { - switch (*f) { - case '\0': goto label_out; - case '%': { - bool alt_form = false; - bool left_justify = false; - bool plus_space = false; - bool plus_plus = false; - int prec = -1; - int width = -1; - unsigned char len = '?'; - - f++; - if (*f == '%') { - /* %% */ - APPEND_C(*f); - break; - } - /* Flags. */ - while (true) { - switch (*f) { - case '#': - assert(alt_form == false); - alt_form = true; - break; - case '-': - assert(left_justify == false); - left_justify = true; - break; - case ' ': - assert(plus_space == false); - plus_space = true; - break; - case '+': - assert(plus_plus == false); - plus_plus = true; - break; - default: goto label_width; - } - f++; - } - /* Width. 
*/ - label_width: - switch (*f) { - case '*': - width = va_arg(ap, int); - f++; - break; - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': { - uintmax_t uwidth; - set_errno(0); - uwidth = malloc_strtoumax(f, (char **)&f, 10); - assert(uwidth != UINTMAX_MAX || get_errno() != - ERANGE); - width = (int)uwidth; - if (*f == '.') { - f++; - goto label_precision; - } else - goto label_length; - break; - } case '.': - f++; - goto label_precision; - default: goto label_length; - } - /* Precision. */ - label_precision: - switch (*f) { - case '*': - prec = va_arg(ap, int); - f++; - break; - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': { - uintmax_t uprec; - set_errno(0); - uprec = malloc_strtoumax(f, (char **)&f, 10); - assert(uprec != UINTMAX_MAX || get_errno() != - ERANGE); - prec = (int)uprec; - break; - } - default: break; - } - /* Length. */ - label_length: - switch (*f) { - case 'l': - f++; - if (*f == 'l') { - len = 'q'; - f++; - } else - len = 'l'; - break; - case 'j': - len = 'j'; - f++; - break; - case 't': - len = 't'; - f++; - break; - case 'z': - len = 'z'; - f++; - break; - default: break; - } - /* Conversion specifier. */ - switch (*f) { - char *s; - size_t slen; - case 'd': case 'i': { - intmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[D2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len); - s = d2s(val, (plus_plus ? '+' : (plus_space ? - ' ' : '-')), buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'o': { - uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[O2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len | 0x80); - s = o2s(val, alt_form, buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'u': { - uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[U2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len | 0x80); - s = u2s(val, 10, false, buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'x': case 'X': { - uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); - char buf[X2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, len | 0x80); - s = x2s(val, alt_form, *f == 'X', buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } case 'c': { - unsigned char val; - char buf[2]; - - assert(len == '?' || len == 'l'); - assert_not_implemented(len != 'l'); - val = va_arg(ap, int); - buf[0] = val; - buf[1] = '\0'; - APPEND_PADDED_S(buf, 1, width, left_justify); - f++; - break; - } case 's': - assert(len == '?' || len == 'l'); - assert_not_implemented(len != 'l'); - s = va_arg(ap, char *); - slen = (prec == -1) ? strlen(s) : prec; - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - case 'p': { - uintmax_t val; - char buf[X2S_BUFSIZE]; - - GET_ARG_NUMERIC(val, 'p'); - s = x2s(val, true, false, buf, &slen); - APPEND_PADDED_S(s, slen, width, left_justify); - f++; - break; - } - default: not_implemented(); - } - break; - } default: { - APPEND_C(*f); - f++; - break; - }} - } - label_out: - if (i < size) - str[i] = '\0'; - else - str[size - 1] = '\0'; - ret = i; - -#undef APPEND_C -#undef APPEND_S -#undef APPEND_PADDED_S -#undef GET_ARG_NUMERIC - return (ret); -} - -JEMALLOC_ATTR(format(printf, 3, 4)) -int -malloc_snprintf(char *str, size_t size, const char *format, ...) 
-{ - int ret; - va_list ap; - - va_start(ap, format); - ret = malloc_vsnprintf(str, size, format, ap); - va_end(ap); - - return (ret); -} - -void -malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap) -{ - char buf[MALLOC_PRINTF_BUFSIZE]; - - if (write_cb == NULL) { - /* - * The caller did not provide an alternate write_cb callback - * function, so use the default one. malloc_write() is an - * inline function, so use malloc_message() directly here. - */ - write_cb = (je_malloc_message != NULL) ? je_malloc_message : - wrtmessage; - cbopaque = NULL; - } - - malloc_vsnprintf(buf, sizeof(buf), format, ap); - write_cb(cbopaque, buf); -} - -/* - * Print to a callback function in such a way as to (hopefully) avoid memory - * allocation. - */ -JEMALLOC_ATTR(format(printf, 3, 4)) -void -malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - malloc_vcprintf(write_cb, cbopaque, format, ap); - va_end(ap); -} - -/* Print to stderr in such a way as to avoid memory allocation. */ -JEMALLOC_ATTR(format(printf, 1, 2)) -void -malloc_printf(const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - malloc_vcprintf(NULL, NULL, format, ap); - va_end(ap); -} diff --git a/extra/jemalloc/src/zone.c b/extra/jemalloc/src/zone.c deleted file mode 100644 index c62c183f65e..00000000000 --- a/extra/jemalloc/src/zone.c +++ /dev/null @@ -1,258 +0,0 @@ -#include "jemalloc/internal/jemalloc_internal.h" -#ifndef JEMALLOC_ZONE -# error "This source file is for zones on Darwin (OS X)." -#endif - -/* - * The malloc_default_purgeable_zone function is only available on >= 10.6. - * We need to check whether it is present at runtime, thus the weak_import. - */ -extern malloc_zone_t *malloc_default_purgeable_zone(void) -JEMALLOC_ATTR(weak_import); - -/******************************************************************************/ -/* Data. */ - -static malloc_zone_t zone; -static struct malloc_introspection_t zone_introspect; - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static size_t zone_size(malloc_zone_t *zone, void *ptr); -static void *zone_malloc(malloc_zone_t *zone, size_t size); -static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); -static void *zone_valloc(malloc_zone_t *zone, size_t size); -static void zone_free(malloc_zone_t *zone, void *ptr); -static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); -#if (JEMALLOC_ZONE_VERSION >= 5) -static void *zone_memalign(malloc_zone_t *zone, size_t alignment, -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) - size_t size); -static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, - size_t size); -#endif -static void *zone_destroy(malloc_zone_t *zone); -static size_t zone_good_size(malloc_zone_t *zone, size_t size); -static void zone_force_lock(malloc_zone_t *zone); -static void zone_force_unlock(malloc_zone_t *zone); - -/******************************************************************************/ -/* - * Functions. - */ - -static size_t -zone_size(malloc_zone_t *zone, void *ptr) -{ - - /* - * There appear to be places within Darwin (such as setenv(3)) that - * cause calls to this function with pointers that *no* zone owns. 
If - * we knew that all pointers were owned by *some* zone, we could split - * our zone into two parts, and use one as the default allocator and - * the other as the default deallocator/reallocator. Since that will - * not work in practice, we must check all pointers to assure that they - * reside within a mapped chunk before determining size. - */ - return (ivsalloc(ptr, config_prof)); -} - -static void * -zone_malloc(malloc_zone_t *zone, size_t size) -{ - - return (je_malloc(size)); -} - -static void * -zone_calloc(malloc_zone_t *zone, size_t num, size_t size) -{ - - return (je_calloc(num, size)); -} - -static void * -zone_valloc(malloc_zone_t *zone, size_t size) -{ - void *ret = NULL; /* Assignment avoids useless compiler warning. */ - - je_posix_memalign(&ret, PAGE, size); - - return (ret); -} - -static void -zone_free(malloc_zone_t *zone, void *ptr) -{ - - if (ivsalloc(ptr, config_prof) != 0) { - je_free(ptr); - return; - } - - free(ptr); -} - -static void * -zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) -{ - - if (ivsalloc(ptr, config_prof) != 0) - return (je_realloc(ptr, size)); - - return (realloc(ptr, size)); -} - -#if (JEMALLOC_ZONE_VERSION >= 5) -static void * -zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) -{ - void *ret = NULL; /* Assignment avoids useless compiler warning. */ - - je_posix_memalign(&ret, alignment, size); - - return (ret); -} -#endif - -#if (JEMALLOC_ZONE_VERSION >= 6) -static void -zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) -{ - - if (ivsalloc(ptr, config_prof) != 0) { - assert(ivsalloc(ptr, config_prof) == size); - je_free(ptr); - return; - } - - free(ptr); -} -#endif - -static void * -zone_destroy(malloc_zone_t *zone) -{ - - /* This function should never be called. */ - assert(false); - return (NULL); -} - -static size_t -zone_good_size(malloc_zone_t *zone, size_t size) -{ - - if (size == 0) - size = 1; - return (s2u(size)); -} - -static void -zone_force_lock(malloc_zone_t *zone) -{ - - if (isthreaded) - jemalloc_prefork(); -} - -static void -zone_force_unlock(malloc_zone_t *zone) -{ - - if (isthreaded) - jemalloc_postfork_parent(); -} - -JEMALLOC_ATTR(constructor) -void -register_zone(void) -{ - - /* - * If something else replaced the system default zone allocator, don't - * register jemalloc's. 
- */ - malloc_zone_t *default_zone = malloc_default_zone(); - if (!default_zone->zone_name || - strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { - return; - } - - zone.size = (void *)zone_size; - zone.malloc = (void *)zone_malloc; - zone.calloc = (void *)zone_calloc; - zone.valloc = (void *)zone_valloc; - zone.free = (void *)zone_free; - zone.realloc = (void *)zone_realloc; - zone.destroy = (void *)zone_destroy; - zone.zone_name = "jemalloc_zone"; - zone.batch_malloc = NULL; - zone.batch_free = NULL; - zone.introspect = &zone_introspect; - zone.version = JEMALLOC_ZONE_VERSION; -#if (JEMALLOC_ZONE_VERSION >= 5) - zone.memalign = zone_memalign; -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) - zone.free_definite_size = zone_free_definite_size; -#endif -#if (JEMALLOC_ZONE_VERSION >= 8) - zone.pressure_relief = NULL; -#endif - - zone_introspect.enumerator = NULL; - zone_introspect.good_size = (void *)zone_good_size; - zone_introspect.check = NULL; - zone_introspect.print = NULL; - zone_introspect.log = NULL; - zone_introspect.force_lock = (void *)zone_force_lock; - zone_introspect.force_unlock = (void *)zone_force_unlock; - zone_introspect.statistics = NULL; -#if (JEMALLOC_ZONE_VERSION >= 6) - zone_introspect.zone_locked = NULL; -#endif -#if (JEMALLOC_ZONE_VERSION >= 7) - zone_introspect.enable_discharge_checking = NULL; - zone_introspect.disable_discharge_checking = NULL; - zone_introspect.discharge = NULL; -#ifdef __BLOCKS__ - zone_introspect.enumerate_discharged_pointers = NULL; -#else - zone_introspect.enumerate_unavailable_without_blocks = NULL; -#endif -#endif - - /* - * The default purgeable zone is created lazily by OSX's libc. It uses - * the default zone when it is created for "small" allocations - * (< 15 KiB), but assumes the default zone is a scalable_zone. This - * obviously fails when the default zone is the jemalloc zone, so - * malloc_default_purgeable_zone is called beforehand so that the - * default purgeable zone is created when the default zone is still - * a scalable_zone. As purgeable zones only exist on >= 10.6, we need - * to check for the existence of malloc_default_purgeable_zone() at - * run time. - */ - if (malloc_default_purgeable_zone != NULL) - malloc_default_purgeable_zone(); - - /* Register the custom zone. At this point it won't be the default. */ - malloc_zone_register(&zone); - - /* - * Unregister and reregister the default zone. On OSX >= 10.6, - * unregistering takes the last registered zone and places it at the - * location of the specified zone. Unregistering the default zone thus - * makes the last registered one the default. On OSX < 10.6, - * unregistering shifts all registered zones. The first registered zone - * then becomes the default. 
- */ - do { - default_zone = malloc_default_zone(); - malloc_zone_unregister(default_zone); - malloc_zone_register(default_zone); - } while (malloc_default_zone() != &zone); -} diff --git a/extra/jemalloc/test/ALLOCM_ARENA.c b/extra/jemalloc/test/ALLOCM_ARENA.c deleted file mode 100644 index 2c52485e890..00000000000 --- a/extra/jemalloc/test/ALLOCM_ARENA.c +++ /dev/null @@ -1,67 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -#define NTHREADS 10 - -void * -je_thread_start(void *arg) -{ - unsigned thread_ind = (unsigned)(uintptr_t)arg; - unsigned arena_ind; - int r; - void *p; - size_t rsz, sz; - - sz = sizeof(arena_ind); - if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) - != 0) { - malloc_printf("Error in arenas.extend\n"); - abort(); - } - - if (thread_ind % 4 != 3) { - size_t mib[3]; - size_t miblen = sizeof(mib) / sizeof(size_t); - const char *dss_precs[] = {"disabled", "primary", "secondary"}; - const char *dss = dss_precs[thread_ind % 4]; - if (mallctlnametomib("arena.0.dss", mib, &miblen) != 0) { - malloc_printf("Error in mallctlnametomib()\n"); - abort(); - } - mib[1] = arena_ind; - if (mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, - sizeof(const char *))) { - malloc_printf("Error in mallctlbymib()\n"); - abort(); - } - } - - r = allocm(&p, &rsz, 1, ALLOCM_ARENA(arena_ind)); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected allocm() error\n"); - abort(); - } - dallocm(p, 0); - - return (NULL); -} - -int -main(void) -{ - je_thread_t threads[NTHREADS]; - unsigned i; - - malloc_printf("Test begin\n"); - - for (i = 0; i < NTHREADS; i++) { - je_thread_create(&threads[i], je_thread_start, - (void *)(uintptr_t)i); - } - - for (i = 0; i < NTHREADS; i++) - je_thread_join(threads[i], NULL); - - malloc_printf("Test end\n"); - return (0); -} diff --git a/extra/jemalloc/test/ALLOCM_ARENA.exp b/extra/jemalloc/test/ALLOCM_ARENA.exp deleted file mode 100644 index 369a88dd240..00000000000 --- a/extra/jemalloc/test/ALLOCM_ARENA.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/extra/jemalloc/test/aligned_alloc.c b/extra/jemalloc/test/aligned_alloc.c deleted file mode 100644 index 5a9b0caea78..00000000000 --- a/extra/jemalloc/test/aligned_alloc.c +++ /dev/null @@ -1,119 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -#define CHUNK 0x400000 -/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 - -int -main(void) -{ - size_t alignment, size, total; - unsigned i; - void *p, *ps[NITER]; - - malloc_printf("Test begin\n"); - - /* Test error conditions. 
*/ - alignment = 0; - set_errno(0); - p = aligned_alloc(alignment, 1); - if (p != NULL || get_errno() != EINVAL) { - malloc_printf( - "Expected error for invalid alignment %zu\n", alignment); - } - - for (alignment = sizeof(size_t); alignment < MAXALIGN; - alignment <<= 1) { - set_errno(0); - p = aligned_alloc(alignment + 1, 1); - if (p != NULL || get_errno() != EINVAL) { - malloc_printf( - "Expected error for invalid alignment %zu\n", - alignment + 1); - } - } - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x8000000000000000); - size = UINT64_C(0x8000000000000000); -#else - alignment = 0x80000000LU; - size = 0x80000000LU; -#endif - set_errno(0); - p = aligned_alloc(alignment, size); - if (p != NULL || get_errno() != ENOMEM) { - malloc_printf( - "Expected error for aligned_alloc(%zu, %zu)\n", - alignment, size); - } - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x4000000000000000); - size = UINT64_C(0x8400000000000001); -#else - alignment = 0x40000000LU; - size = 0x84000001LU; -#endif - set_errno(0); - p = aligned_alloc(alignment, size); - if (p != NULL || get_errno() != ENOMEM) { - malloc_printf( - "Expected error for aligned_alloc(%zu, %zu)\n", - alignment, size); - } - - alignment = 0x10LU; -#if LG_SIZEOF_PTR == 3 - size = UINT64_C(0xfffffffffffffff0); -#else - size = 0xfffffff0LU; -#endif - set_errno(0); - p = aligned_alloc(alignment, size); - if (p != NULL || get_errno() != ENOMEM) { - malloc_printf( - "Expected error for aligned_alloc(&p, %zu, %zu)\n", - alignment, size); - } - - for (i = 0; i < NITER; i++) - ps[i] = NULL; - - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { - total = 0; - malloc_printf("Alignment: %zu\n", alignment); - for (size = 1; - size < 3 * alignment && size < (1U << 31); - size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { - for (i = 0; i < NITER; i++) { - ps[i] = aligned_alloc(alignment, size); - if (ps[i] == NULL) { - char buf[BUFERROR_BUF]; - - buferror(buf, sizeof(buf)); - malloc_printf( - "Error for size %zu (%#zx): %s\n", - size, size, buf); - exit(1); - } - total += malloc_usable_size(ps[i]); - if (total >= (MAXALIGN << 1)) - break; - } - for (i = 0; i < NITER; i++) { - if (ps[i] != NULL) { - free(ps[i]); - ps[i] = NULL; - } - } - } - } - - malloc_printf("Test end\n"); - return (0); -} diff --git a/extra/jemalloc/test/aligned_alloc.exp b/extra/jemalloc/test/aligned_alloc.exp deleted file mode 100644 index b5061c7277e..00000000000 --- a/extra/jemalloc/test/aligned_alloc.exp +++ /dev/null @@ -1,25 +0,0 @@ -Test begin -Alignment: 8 -Alignment: 16 -Alignment: 32 -Alignment: 64 -Alignment: 128 -Alignment: 256 -Alignment: 512 -Alignment: 1024 -Alignment: 2048 -Alignment: 4096 -Alignment: 8192 -Alignment: 16384 -Alignment: 32768 -Alignment: 65536 -Alignment: 131072 -Alignment: 262144 -Alignment: 524288 -Alignment: 1048576 -Alignment: 2097152 -Alignment: 4194304 -Alignment: 8388608 -Alignment: 16777216 -Alignment: 33554432 -Test end diff --git a/extra/jemalloc/test/allocated.c b/extra/jemalloc/test/allocated.c deleted file mode 100644 index 9884905d810..00000000000 --- a/extra/jemalloc/test/allocated.c +++ /dev/null @@ -1,118 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -void * -je_thread_start(void *arg) -{ - int err; - void *p; - uint64_t a0, a1, d0, d1; - uint64_t *ap0, *ap1, *dp0, *dp1; - size_t sz, usize; - - sz = sizeof(a0); - if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) { - if (err == ENOENT) { -#ifdef JEMALLOC_STATS - assert(false); -#endif - goto label_return; - } - malloc_printf("%s(): Error in 
mallctl(): %s\n", __func__, - strerror(err)); - exit(1); - } - sz = sizeof(ap0); - if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) { - if (err == ENOENT) { -#ifdef JEMALLOC_STATS - assert(false); -#endif - goto label_return; - } - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - exit(1); - } - assert(*ap0 == a0); - - sz = sizeof(d0); - if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) { - if (err == ENOENT) { -#ifdef JEMALLOC_STATS - assert(false); -#endif - goto label_return; - } - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - exit(1); - } - sz = sizeof(dp0); - if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) { - if (err == ENOENT) { -#ifdef JEMALLOC_STATS - assert(false); -#endif - goto label_return; - } - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - exit(1); - } - assert(*dp0 == d0); - - p = malloc(1); - if (p == NULL) { - malloc_printf("%s(): Error in malloc()\n", __func__); - exit(1); - } - - sz = sizeof(a1); - mallctl("thread.allocated", &a1, &sz, NULL, 0); - sz = sizeof(ap1); - mallctl("thread.allocatedp", &ap1, &sz, NULL, 0); - assert(*ap1 == a1); - assert(ap0 == ap1); - - usize = malloc_usable_size(p); - assert(a0 + usize <= a1); - - free(p); - - sz = sizeof(d1); - mallctl("thread.deallocated", &d1, &sz, NULL, 0); - sz = sizeof(dp1); - mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0); - assert(*dp1 == d1); - assert(dp0 == dp1); - - assert(d0 + usize <= d1); - -label_return: - return (NULL); -} - -int -main(void) -{ - int ret = 0; - je_thread_t thread; - - malloc_printf("Test begin\n"); - - je_thread_start(NULL); - - je_thread_create(&thread, je_thread_start, NULL); - je_thread_join(thread, (void *)&ret); - - je_thread_start(NULL); - - je_thread_create(&thread, je_thread_start, NULL); - je_thread_join(thread, (void *)&ret); - - je_thread_start(NULL); - - malloc_printf("Test end\n"); - return (ret); -} diff --git a/extra/jemalloc/test/allocated.exp b/extra/jemalloc/test/allocated.exp deleted file mode 100644 index 369a88dd240..00000000000 --- a/extra/jemalloc/test/allocated.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/extra/jemalloc/test/allocm.c b/extra/jemalloc/test/allocm.c deleted file mode 100644 index 80be673b8fd..00000000000 --- a/extra/jemalloc/test/allocm.c +++ /dev/null @@ -1,194 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -#define CHUNK 0x400000 -/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 - -int -main(void) -{ - int r; - void *p; - size_t nsz, rsz, sz, alignment, total; - unsigned i; - void *ps[NITER]; - - malloc_printf("Test begin\n"); - - sz = 42; - nsz = 0; - r = nallocm(&nsz, sz, 0); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected nallocm() error\n"); - abort(); - } - rsz = 0; - r = allocm(&p, &rsz, sz, 0); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected allocm() error\n"); - abort(); - } - if (rsz < sz) - malloc_printf("Real size smaller than expected\n"); - if (nsz != rsz) - malloc_printf("nallocm()/allocm() rsize mismatch\n"); - if (dallocm(p, 0) != ALLOCM_SUCCESS) - malloc_printf("Unexpected dallocm() error\n"); - - r = allocm(&p, NULL, sz, 0); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected allocm() error\n"); - abort(); - } - if (dallocm(p, 0) != ALLOCM_SUCCESS) - malloc_printf("Unexpected dallocm() error\n"); - - nsz = 0; - r = nallocm(&nsz, sz, ALLOCM_ZERO); - if (r != ALLOCM_SUCCESS) { 
- malloc_printf("Unexpected nallocm() error\n"); - abort(); - } - rsz = 0; - r = allocm(&p, &rsz, sz, ALLOCM_ZERO); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected allocm() error\n"); - abort(); - } - if (nsz != rsz) - malloc_printf("nallocm()/allocm() rsize mismatch\n"); - if (dallocm(p, 0) != ALLOCM_SUCCESS) - malloc_printf("Unexpected dallocm() error\n"); - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x8000000000000000); - sz = UINT64_C(0x8000000000000000); -#else - alignment = 0x80000000LU; - sz = 0x80000000LU; -#endif - nsz = 0; - r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)); - if (r == ALLOCM_SUCCESS) { - malloc_printf( - "Expected error for nallocm(&nsz, %zu, %#x)\n", - sz, ALLOCM_ALIGN(alignment)); - } - rsz = 0; - r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)); - if (r == ALLOCM_SUCCESS) { - malloc_printf( - "Expected error for allocm(&p, %zu, %#x)\n", - sz, ALLOCM_ALIGN(alignment)); - } - if (nsz != rsz) - malloc_printf("nallocm()/allocm() rsize mismatch\n"); - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x4000000000000000); - sz = UINT64_C(0x8400000000000001); -#else - alignment = 0x40000000LU; - sz = 0x84000001LU; -#endif - nsz = 0; - r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected nallocm() error\n"); - rsz = 0; - r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)); - if (r == ALLOCM_SUCCESS) { - malloc_printf( - "Expected error for allocm(&p, %zu, %#x)\n", - sz, ALLOCM_ALIGN(alignment)); - } - - alignment = 0x10LU; -#if LG_SIZEOF_PTR == 3 - sz = UINT64_C(0xfffffffffffffff0); -#else - sz = 0xfffffff0LU; -#endif - nsz = 0; - r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment)); - if (r == ALLOCM_SUCCESS) { - malloc_printf( - "Expected error for nallocm(&nsz, %zu, %#x)\n", - sz, ALLOCM_ALIGN(alignment)); - } - rsz = 0; - r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment)); - if (r == ALLOCM_SUCCESS) { - malloc_printf( - "Expected error for allocm(&p, %zu, %#x)\n", - sz, ALLOCM_ALIGN(alignment)); - } - if (nsz != rsz) - malloc_printf("nallocm()/allocm() rsize mismatch\n"); - - for (i = 0; i < NITER; i++) - ps[i] = NULL; - - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { - total = 0; - malloc_printf("Alignment: %zu\n", alignment); - for (sz = 1; - sz < 3 * alignment && sz < (1U << 31); - sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { - for (i = 0; i < NITER; i++) { - nsz = 0; - r = nallocm(&nsz, sz, - ALLOCM_ALIGN(alignment) | ALLOCM_ZERO); - if (r != ALLOCM_SUCCESS) { - malloc_printf( - "nallocm() error for size %zu" - " (%#zx): %d\n", - sz, sz, r); - exit(1); - } - rsz = 0; - r = allocm(&ps[i], &rsz, sz, - ALLOCM_ALIGN(alignment) | ALLOCM_ZERO); - if (r != ALLOCM_SUCCESS) { - malloc_printf( - "allocm() error for size %zu" - " (%#zx): %d\n", - sz, sz, r); - exit(1); - } - if (rsz < sz) { - malloc_printf( - "Real size smaller than" - " expected\n"); - } - if (nsz != rsz) { - malloc_printf( - "nallocm()/allocm() rsize" - " mismatch\n"); - } - if ((uintptr_t)p & (alignment-1)) { - malloc_printf( - "%p inadequately aligned for" - " alignment: %zu\n", p, alignment); - } - sallocm(ps[i], &rsz, 0); - total += rsz; - if (total >= (MAXALIGN << 1)) - break; - } - for (i = 0; i < NITER; i++) { - if (ps[i] != NULL) { - dallocm(ps[i], 0); - ps[i] = NULL; - } - } - } - } - - malloc_printf("Test end\n"); - return (0); -} diff --git a/extra/jemalloc/test/allocm.exp b/extra/jemalloc/test/allocm.exp deleted file mode 100644 index b5061c7277e..00000000000 --- a/extra/jemalloc/test/allocm.exp +++ /dev/null @@ -1,25 
+0,0 @@ -Test begin -Alignment: 8 -Alignment: 16 -Alignment: 32 -Alignment: 64 -Alignment: 128 -Alignment: 256 -Alignment: 512 -Alignment: 1024 -Alignment: 2048 -Alignment: 4096 -Alignment: 8192 -Alignment: 16384 -Alignment: 32768 -Alignment: 65536 -Alignment: 131072 -Alignment: 262144 -Alignment: 524288 -Alignment: 1048576 -Alignment: 2097152 -Alignment: 4194304 -Alignment: 8388608 -Alignment: 16777216 -Alignment: 33554432 -Test end diff --git a/extra/jemalloc/test/bitmap.c b/extra/jemalloc/test/bitmap.c deleted file mode 100644 index b2cb63004bc..00000000000 --- a/extra/jemalloc/test/bitmap.c +++ /dev/null @@ -1,153 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -#if (LG_BITMAP_MAXBITS > 12) -# define MAXBITS 4500 -#else -# define MAXBITS (1U << LG_BITMAP_MAXBITS) -#endif - -static void -test_bitmap_size(void) -{ - size_t i, prev_size; - - prev_size = 0; - for (i = 1; i <= MAXBITS; i++) { - size_t size = bitmap_size(i); - assert(size >= prev_size); - prev_size = size; - } -} - -static void -test_bitmap_init(void) -{ - size_t i; - - for (i = 1; i <= MAXBITS; i++) { - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) - assert(bitmap_get(bitmap, &binfo, j) == false); - free(bitmap); - - } - } -} - -static void -test_bitmap_set(void) -{ - size_t i; - - for (i = 1; i <= MAXBITS; i++) { - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) - bitmap_set(bitmap, &binfo, j); - assert(bitmap_full(bitmap, &binfo)); - free(bitmap); - } - } -} - -static void -test_bitmap_unset(void) -{ - size_t i; - - for (i = 1; i <= MAXBITS; i++) { - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) - bitmap_set(bitmap, &binfo, j); - assert(bitmap_full(bitmap, &binfo)); - for (j = 0; j < i; j++) - bitmap_unset(bitmap, &binfo, j); - for (j = 0; j < i; j++) - bitmap_set(bitmap, &binfo, j); - assert(bitmap_full(bitmap, &binfo)); - free(bitmap); - } - } -} - -static void -test_bitmap_sfu(void) -{ - size_t i; - - for (i = 1; i <= MAXBITS; i++) { - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - ssize_t j; - bitmap_t *bitmap = malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - /* Iteratively set bits starting at the beginning. */ - for (j = 0; j < i; j++) - assert(bitmap_sfu(bitmap, &binfo) == j); - assert(bitmap_full(bitmap, &binfo)); - - /* - * Iteratively unset bits starting at the end, and - * verify that bitmap_sfu() reaches the unset bits. - */ - for (j = i - 1; j >= 0; j--) { - bitmap_unset(bitmap, &binfo, j); - assert(bitmap_sfu(bitmap, &binfo) == j); - bitmap_unset(bitmap, &binfo, j); - } - assert(bitmap_get(bitmap, &binfo, 0) == false); - - /* - * Iteratively set bits starting at the beginning, and - * verify that bitmap_sfu() looks past them. 
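A note on the deleted bitmap.c test above: its central check is bitmap_sfu(), jemalloc's "set first unset", which is expected to return the index of the lowest clear bit and mark it set. The following is a minimal sketch of that invariant over a single 64-bit word; sfu64() is a hypothetical helper for illustration, not jemalloc's real multi-word implementation, and ffsll() is a glibc extension.

#include <assert.h>
#include <stdint.h>
#include <strings.h>                    /* ffsll(), glibc extension */

/* Toy "set first unset": find the lowest clear bit, set it, return its index.
   jemalloc's bitmap_sfu() does the same over groups described by bitmap_info_t. */
static unsigned
sfu64(uint64_t *w)
{
        unsigned bit = (unsigned)ffsll((long long)~*w) - 1;

        *w |= (uint64_t)1 << bit;
        return (bit);
}

int
main(void)
{
        uint64_t w = 0;
        unsigned j;

        for (j = 0; j < 64; j++)
                assert(sfu64(&w) == j); /* mirrors the first loop of test_bitmap_sfu() */
        return (0);
}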
- */ - for (j = 1; j < i; j++) { - bitmap_set(bitmap, &binfo, j - 1); - assert(bitmap_sfu(bitmap, &binfo) == j); - bitmap_unset(bitmap, &binfo, j); - } - assert(bitmap_sfu(bitmap, &binfo) == i - 1); - assert(bitmap_full(bitmap, &binfo)); - free(bitmap); - } - } -} - -int -main(void) -{ - malloc_printf("Test begin\n"); - - test_bitmap_size(); - test_bitmap_init(); - test_bitmap_set(); - test_bitmap_unset(); - test_bitmap_sfu(); - - malloc_printf("Test end\n"); - return (0); -} diff --git a/extra/jemalloc/test/bitmap.exp b/extra/jemalloc/test/bitmap.exp deleted file mode 100644 index 369a88dd240..00000000000 --- a/extra/jemalloc/test/bitmap.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/extra/jemalloc/test/jemalloc_test.h.in b/extra/jemalloc/test/jemalloc_test.h.in deleted file mode 100644 index e38b48efa41..00000000000 --- a/extra/jemalloc/test/jemalloc_test.h.in +++ /dev/null @@ -1,53 +0,0 @@ -/* - * This header should be included by tests, rather than directly including - * jemalloc/jemalloc.h, because --with-install-suffix may cause the header to - * have a different name. - */ -#include "jemalloc/jemalloc@install_suffix@.h" -#include "jemalloc/internal/jemalloc_internal.h" - -/* Abstraction layer for threading in tests */ -#ifdef _WIN32 -#include <windows.h> - -typedef HANDLE je_thread_t; - -void -je_thread_create(je_thread_t *thread, void *(*proc)(void *), void *arg) -{ - LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; - *thread = CreateThread(NULL, 0, routine, arg, 0, NULL); - if (*thread == NULL) { - malloc_printf("Error in CreateThread()\n"); - exit(1); - } -} - -void -je_thread_join(je_thread_t thread, void **ret) -{ - WaitForSingleObject(thread, INFINITE); -} - -#else -#include <pthread.h> - -typedef pthread_t je_thread_t; - -void -je_thread_create(je_thread_t *thread, void *(*proc)(void *), void *arg) -{ - - if (pthread_create(thread, NULL, proc, arg) != 0) { - malloc_printf("Error in pthread_create()\n"); - exit(1); - } -} - -void -je_thread_join(je_thread_t thread, void **ret) -{ - - pthread_join(thread, ret); -} -#endif diff --git a/extra/jemalloc/test/mremap.c b/extra/jemalloc/test/mremap.c deleted file mode 100644 index 47efa7c415b..00000000000 --- a/extra/jemalloc/test/mremap.c +++ /dev/null @@ -1,60 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -int -main(void) -{ - int ret, err; - size_t sz, lg_chunk, chunksize, i; - char *p, *q; - - malloc_printf("Test begin\n"); - - sz = sizeof(lg_chunk); - if ((err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0))) { - assert(err != ENOENT); - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - ret = 1; - goto label_return; - } - chunksize = ((size_t)1U) << lg_chunk; - - p = (char *)malloc(chunksize); - if (p == NULL) { - malloc_printf("malloc(%zu) --> %p\n", chunksize, p); - ret = 1; - goto label_return; - } - memset(p, 'a', chunksize); - - q = (char *)realloc(p, chunksize * 2); - if (q == NULL) { - malloc_printf("realloc(%p, %zu) --> %p\n", p, chunksize * 2, - q); - ret = 1; - goto label_return; - } - for (i = 0; i < chunksize; i++) { - assert(q[i] == 'a'); - } - - p = q; - - q = (char *)realloc(p, chunksize); - if (q == NULL) { - malloc_printf("realloc(%p, %zu) --> %p\n", p, chunksize, q); - ret = 1; - goto label_return; - } - for (i = 0; i < chunksize; i++) { - assert(q[i] == 'a'); - } - - free(q); - - ret = 0; -label_return: - malloc_printf("Test end\n"); - return (ret); -} diff --git a/extra/jemalloc/test/mremap.exp b/extra/jemalloc/test/mremap.exp 
deleted file mode 100644 index 369a88dd240..00000000000 --- a/extra/jemalloc/test/mremap.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/extra/jemalloc/test/posix_memalign.c b/extra/jemalloc/test/posix_memalign.c deleted file mode 100644 index 2185bcf762a..00000000000 --- a/extra/jemalloc/test/posix_memalign.c +++ /dev/null @@ -1,115 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -#define CHUNK 0x400000 -/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 - -int -main(void) -{ - size_t alignment, size, total; - unsigned i; - int err; - void *p, *ps[NITER]; - - malloc_printf("Test begin\n"); - - /* Test error conditions. */ - for (alignment = 0; alignment < sizeof(void *); alignment++) { - err = posix_memalign(&p, alignment, 1); - if (err != EINVAL) { - malloc_printf( - "Expected error for invalid alignment %zu\n", - alignment); - } - } - - for (alignment = sizeof(size_t); alignment < MAXALIGN; - alignment <<= 1) { - err = posix_memalign(&p, alignment + 1, 1); - if (err == 0) { - malloc_printf( - "Expected error for invalid alignment %zu\n", - alignment + 1); - } - } - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x8000000000000000); - size = UINT64_C(0x8000000000000000); -#else - alignment = 0x80000000LU; - size = 0x80000000LU; -#endif - err = posix_memalign(&p, alignment, size); - if (err == 0) { - malloc_printf( - "Expected error for posix_memalign(&p, %zu, %zu)\n", - alignment, size); - } - -#if LG_SIZEOF_PTR == 3 - alignment = UINT64_C(0x4000000000000000); - size = UINT64_C(0x8400000000000001); -#else - alignment = 0x40000000LU; - size = 0x84000001LU; -#endif - err = posix_memalign(&p, alignment, size); - if (err == 0) { - malloc_printf( - "Expected error for posix_memalign(&p, %zu, %zu)\n", - alignment, size); - } - - alignment = 0x10LU; -#if LG_SIZEOF_PTR == 3 - size = UINT64_C(0xfffffffffffffff0); -#else - size = 0xfffffff0LU; -#endif - err = posix_memalign(&p, alignment, size); - if (err == 0) { - malloc_printf( - "Expected error for posix_memalign(&p, %zu, %zu)\n", - alignment, size); - } - - for (i = 0; i < NITER; i++) - ps[i] = NULL; - - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { - total = 0; - malloc_printf("Alignment: %zu\n", alignment); - for (size = 1; - size < 3 * alignment && size < (1U << 31); - size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { - for (i = 0; i < NITER; i++) { - err = posix_memalign(&ps[i], - alignment, size); - if (err) { - malloc_printf( - "Error for size %zu (%#zx): %s\n", - size, size, strerror(err)); - exit(1); - } - total += malloc_usable_size(ps[i]); - if (total >= (MAXALIGN << 1)) - break; - } - for (i = 0; i < NITER; i++) { - if (ps[i] != NULL) { - free(ps[i]); - ps[i] = NULL; - } - } - } - } - - malloc_printf("Test end\n"); - return (0); -} diff --git a/extra/jemalloc/test/posix_memalign.exp b/extra/jemalloc/test/posix_memalign.exp deleted file mode 100644 index b5061c7277e..00000000000 --- a/extra/jemalloc/test/posix_memalign.exp +++ /dev/null @@ -1,25 +0,0 @@ -Test begin -Alignment: 8 -Alignment: 16 -Alignment: 32 -Alignment: 64 -Alignment: 128 -Alignment: 256 -Alignment: 512 -Alignment: 1024 -Alignment: 2048 -Alignment: 4096 -Alignment: 8192 -Alignment: 16384 -Alignment: 32768 -Alignment: 65536 -Alignment: 131072 -Alignment: 262144 -Alignment: 524288 -Alignment: 1048576 -Alignment: 2097152 -Alignment: 4194304 -Alignment: 8388608 -Alignment: 16777216 -Alignment: 33554432 -Test end diff --git a/extra/jemalloc/test/rallocm.c 
b/extra/jemalloc/test/rallocm.c deleted file mode 100644 index c5dedf48d7b..00000000000 --- a/extra/jemalloc/test/rallocm.c +++ /dev/null @@ -1,127 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -int -main(void) -{ - size_t pagesize; - void *p, *q; - size_t sz, tsz; - int r; - - malloc_printf("Test begin\n"); - - /* Get page size. */ - { -#ifdef _WIN32 - SYSTEM_INFO si; - GetSystemInfo(&si); - pagesize = (size_t)si.dwPageSize; -#else - long result = sysconf(_SC_PAGESIZE); - assert(result != -1); - pagesize = (size_t)result; -#endif - } - - r = allocm(&p, &sz, 42, 0); - if (r != ALLOCM_SUCCESS) { - malloc_printf("Unexpected allocm() error\n"); - abort(); - } - - q = p; - r = rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q != p) - malloc_printf("Unexpected object move\n"); - if (tsz != sz) { - malloc_printf("Unexpected size change: %zu --> %zu\n", - sz, tsz); - } - - q = p; - r = rallocm(&q, &tsz, sz, 5, ALLOCM_NO_MOVE); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q != p) - malloc_printf("Unexpected object move\n"); - if (tsz != sz) { - malloc_printf("Unexpected size change: %zu --> %zu\n", - sz, tsz); - } - - q = p; - r = rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE); - if (r != ALLOCM_ERR_NOT_MOVED) - malloc_printf("Unexpected rallocm() result\n"); - if (q != p) - malloc_printf("Unexpected object move\n"); - if (tsz != sz) { - malloc_printf("Unexpected size change: %zu --> %zu\n", - sz, tsz); - } - - q = p; - r = rallocm(&q, &tsz, sz + 5, 0, 0); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q == p) - malloc_printf("Expected object move\n"); - if (tsz == sz) { - malloc_printf("Expected size change: %zu --> %zu\n", - sz, tsz); - } - p = q; - sz = tsz; - - r = rallocm(&q, &tsz, pagesize*2, 0, 0); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q == p) - malloc_printf("Expected object move\n"); - if (tsz == sz) { - malloc_printf("Expected size change: %zu --> %zu\n", - sz, tsz); - } - p = q; - sz = tsz; - - r = rallocm(&q, &tsz, pagesize*4, 0, 0); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (tsz == sz) { - malloc_printf("Expected size change: %zu --> %zu\n", - sz, tsz); - } - p = q; - sz = tsz; - - r = rallocm(&q, &tsz, pagesize*2, 0, ALLOCM_NO_MOVE); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q != p) - malloc_printf("Unexpected object move\n"); - if (tsz == sz) { - malloc_printf("Expected size change: %zu --> %zu\n", - sz, tsz); - } - sz = tsz; - - r = rallocm(&q, &tsz, pagesize*4, 0, ALLOCM_NO_MOVE); - if (r != ALLOCM_SUCCESS) - malloc_printf("Unexpected rallocm() error\n"); - if (q != p) - malloc_printf("Unexpected object move\n"); - if (tsz == sz) { - malloc_printf("Expected size change: %zu --> %zu\n", - sz, tsz); - } - sz = tsz; - - dallocm(p, 0); - - malloc_printf("Test end\n"); - return (0); -} diff --git a/extra/jemalloc/test/rallocm.exp b/extra/jemalloc/test/rallocm.exp deleted file mode 100644 index 369a88dd240..00000000000 --- a/extra/jemalloc/test/rallocm.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/extra/jemalloc/test/thread_arena.c b/extra/jemalloc/test/thread_arena.c deleted file mode 100644 index c5a21fa0c70..00000000000 --- a/extra/jemalloc/test/thread_arena.c +++ /dev/null @@ -1,81 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -#define NTHREADS 10 - -void * 
-je_thread_start(void *arg) -{ - unsigned main_arena_ind = *(unsigned *)arg; - void *p; - unsigned arena_ind; - size_t size; - int err; - - p = malloc(1); - if (p == NULL) { - malloc_printf("%s(): Error in malloc()\n", __func__); - return (void *)1; - } - free(p); - - size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind, - sizeof(main_arena_ind)))) { - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - return (void *)1; - } - - size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, - 0))) { - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - return (void *)1; - } - assert(arena_ind == main_arena_ind); - - return (NULL); -} - -int -main(void) -{ - int ret = 0; - void *p; - unsigned arena_ind; - size_t size; - int err; - je_thread_t threads[NTHREADS]; - unsigned i; - - malloc_printf("Test begin\n"); - - p = malloc(1); - if (p == NULL) { - malloc_printf("%s(): Error in malloc()\n", __func__); - ret = 1; - goto label_return; - } - - size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { - malloc_printf("%s(): Error in mallctl(): %s\n", __func__, - strerror(err)); - ret = 1; - goto label_return; - } - - for (i = 0; i < NTHREADS; i++) { - je_thread_create(&threads[i], je_thread_start, - (void *)&arena_ind); - } - - for (i = 0; i < NTHREADS; i++) - je_thread_join(threads[i], (void *)&ret); - -label_return: - malloc_printf("Test end\n"); - return (ret); -} diff --git a/extra/jemalloc/test/thread_arena.exp b/extra/jemalloc/test/thread_arena.exp deleted file mode 100644 index 369a88dd240..00000000000 --- a/extra/jemalloc/test/thread_arena.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/extra/jemalloc/test/thread_tcache_enabled.c b/extra/jemalloc/test/thread_tcache_enabled.c deleted file mode 100644 index 2061b7bbaff..00000000000 --- a/extra/jemalloc/test/thread_tcache_enabled.c +++ /dev/null @@ -1,91 +0,0 @@ -#define JEMALLOC_MANGLE -#include "jemalloc_test.h" - -void * -je_thread_start(void *arg) -{ - int err; - size_t sz; - bool e0, e1; - - sz = sizeof(bool); - if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) { - if (err == ENOENT) { -#ifdef JEMALLOC_TCACHE - assert(false); -#endif - } - goto label_return; - } - - if (e0) { - e1 = false; - assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) - == 0); - assert(e0); - } - - e1 = true; - assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); - assert(e0 == false); - - e1 = true; - assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); - assert(e0); - - e1 = false; - assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); - assert(e0); - - e1 = false; - assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); - assert(e0 == false); - - free(malloc(1)); - e1 = true; - assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); - assert(e0 == false); - - free(malloc(1)); - e1 = true; - assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); - assert(e0); - - free(malloc(1)); - e1 = false; - assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); - assert(e0); - - free(malloc(1)); - e1 = false; - assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0); - assert(e0 == false); - - free(malloc(1)); -label_return: - return (NULL); -} - -int -main(void) -{ - int ret = 0; - je_thread_t thread; - - malloc_printf("Test begin\n"); - - je_thread_start(NULL); - - 
je_thread_create(&thread, je_thread_start, NULL); - je_thread_join(thread, (void *)&ret); - - je_thread_start(NULL); - - je_thread_create(&thread, je_thread_start, NULL); - je_thread_join(thread, (void *)&ret); - - je_thread_start(NULL); - - malloc_printf("Test end\n"); - return (ret); -} diff --git a/extra/jemalloc/test/thread_tcache_enabled.exp b/extra/jemalloc/test/thread_tcache_enabled.exp deleted file mode 100644 index 369a88dd240..00000000000 --- a/extra/jemalloc/test/thread_tcache_enabled.exp +++ /dev/null @@ -1,2 +0,0 @@ -Test begin -Test end diff --git a/extra/readline/CMakeLists.txt b/extra/readline/CMakeLists.txt index e245a2cd435..bdecdd1fcce 100644 --- a/extra/readline/CMakeLists.txt +++ b/extra/readline/CMakeLists.txt @@ -13,8 +13,7 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include - ${CMAKE_SOURCE_DIR}/extra) +INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_CURRENT_SOURCE_DIR}) ADD_DEFINITIONS(-DHAVE_CONFIG_H -DNO_KILL_INTR) diff --git a/extra/readline/history.h b/extra/readline/history.h index 59aad6a4e20..c196b0361e3 100644 --- a/extra/readline/history.h +++ b/extra/readline/history.h @@ -32,8 +32,8 @@ extern "C" { # include "rlstdc.h" # include "rltypedefs.h" #else -# include <readline/rlstdc.h> -# include <readline/rltypedefs.h> +# include <rlstdc.h> +# include <rltypedefs.h> #endif #ifdef __STDC__ diff --git a/extra/readline/keymaps.h b/extra/readline/keymaps.h index eb28a8ecc33..1de567ddc1e 100644 --- a/extra/readline/keymaps.h +++ b/extra/readline/keymaps.h @@ -32,9 +32,9 @@ extern "C" { # include "chardefs.h" # include "rltypedefs.h" #else -# include <readline/rlstdc.h> -# include <readline/chardefs.h> -# include <readline/rltypedefs.h> +# include <rlstdc.h> +# include <chardefs.h> +# include <rltypedefs.h> #endif /* A keymap contains one entry for each key in the ASCII set. diff --git a/extra/readline/readline.h b/extra/readline/readline.h index 8ed1b84172e..867b2e71641 100644 --- a/extra/readline/readline.h +++ b/extra/readline/readline.h @@ -33,10 +33,10 @@ extern "C" { # include "keymaps.h" # include "tilde.h" #else -# include <readline/rlstdc.h> -# include <readline/rltypedefs.h> -# include <readline/keymaps.h> -# include <readline/tilde.h> +# include <rlstdc.h> +# include <rltypedefs.h> +# include <keymaps.h> +# include <tilde.h> #endif /* Hex-encoded Readline version number. */ diff --git a/extra/readline/xmalloc.h b/extra/readline/xmalloc.h index fafb44fcffd..58b17f39f3d 100644 --- a/extra/readline/xmalloc.h +++ b/extra/readline/xmalloc.h @@ -26,7 +26,7 @@ #if defined (READLINE_LIBRARY) # include "rlstdc.h" #else -# include <readline/rlstdc.h> +# include <rlstdc.h> #endif #ifndef PTR_T diff --git a/extra/yassl/CMakeLists.txt b/extra/yassl/CMakeLists.txt index 08e0f49d8a2..23404a661d6 100644 --- a/extra/yassl/CMakeLists.txt +++ b/extra/yassl/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. 
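A note on the removed thread_arena.c and thread_tcache_enabled.c tests above: both lean on mallctl()'s combined read/write form, where the previous value is read out and the new value written in one call. A hedged sketch of that calling convention, assuming a stock, unprefixed jemalloc with the usual mallctl(name, oldp, oldlenp, newp, newlen) entry point:

#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>          /* assumes an unprefixed jemalloc install */

/* Flip the calling thread's tcache on or off and report the previous state.
   Returns 0 on success, an errno-style value otherwise. */
static int
toggle_thread_tcache(bool enable, bool *was_enabled)
{
        size_t sz = sizeof(*was_enabled);

        return (mallctl("thread.tcache.enabled", was_enabled, &sz,
            &enable, sizeof(enable)));
}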
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -33,7 +33,6 @@ SET(YASSL_SOURCES src/buffer.cpp src/cert_wrapper.cpp src/crypto_wrapper.cpp sr ADD_CONVENIENCE_LIBRARY(yassl ${YASSL_SOURCES}) RESTRICT_SYMBOL_EXPORTS(yassl) -INSTALL_DEBUG_SYMBOLS(yassl) IF(MSVC) INSTALL_DEBUG_TARGET(yassl DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() diff --git a/extra/yassl/src/ssl.cpp b/extra/yassl/src/ssl.cpp index 9352423de2a..356b310037e 100644 --- a/extra/yassl/src/ssl.cpp +++ b/extra/yassl/src/ssl.cpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005, 2012, Oracle and/or its affiliates. + Copyright (c) 2005, 2014, Oracle and/or its affiliates. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -791,7 +791,10 @@ int SSL_CTX_load_verify_locations(SSL_CTX* ctx, const char* file, strncpy(name, path, MAX_PATH - 1 - HALF_PATH); strncat(name, "/", 1); strncat(name, entry->d_name, HALF_PATH); - if (stat(name, &buf) < 0) return SSL_BAD_STAT; + if (stat(name, &buf) < 0) { + closedir(dir); + return SSL_BAD_STAT; + } if (S_ISREG(buf.st_mode)) ret = read_file(ctx, name, SSL_FILETYPE_PEM, CA); diff --git a/extra/yassl/taocrypt/CMakeLists.txt b/extra/yassl/taocrypt/CMakeLists.txt index 84f1fc186e4..eeed35fd6f4 100644 --- a/extra/yassl/taocrypt/CMakeLists.txt +++ b/extra/yassl/taocrypt/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -32,7 +32,6 @@ SET(TAOCRYPT_SOURCES src/aes.cpp src/aestables.cpp src/algebra.cpp src/arc4.cpp ADD_CONVENIENCE_LIBRARY(taocrypt ${TAOCRYPT_SOURCES}) RESTRICT_SYMBOL_EXPORTS(taocrypt) -INSTALL_DEBUG_SYMBOLS(taocrypt) IF(MSVC) INSTALL_DEBUG_TARGET(taocrypt DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() diff --git a/extra/yassl/taocrypt/include/asn.hpp b/extra/yassl/taocrypt/include/asn.hpp index c58c7579ccf..b826bf54f8d 100644 --- a/extra/yassl/taocrypt/include/asn.hpp +++ b/extra/yassl/taocrypt/include/asn.hpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. 
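A note on the ssl.cpp hunk above: the change adds closedir(dir) before the early return in SSL_CTX_load_verify_locations(), so a failing stat() no longer leaks the open directory handle. A simplified sketch of the corrected pattern follows; it is illustrative only (the real code builds the full path from the directory name and entry->d_name), not the yaSSL source.

#include <dirent.h>
#include <sys/stat.h>

/* Every exit path taken while the DIR* is open must close it first. */
static int
scan_ca_dir(const char *path)
{
        DIR *dir = opendir(path);
        struct dirent *entry;
        struct stat buf;

        if (dir == NULL)
                return (-1);
        while ((entry = readdir(dir)) != NULL) {
                if (stat(entry->d_name, &buf) < 0) {
                        closedir(dir);          /* the added cleanup */
                        return (-1);
                }
                /* ... load the entry if S_ISREG(buf.st_mode) ... */
        }
        closedir(dir);
        return (0);
}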
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -296,11 +296,11 @@ private: byte* signature_; char issuer_[ASN_NAME_MAX]; // Names char subject_[ASN_NAME_MAX]; // Names - char beforeDate_[MAX_DATE_SZ]; // valid before date + char beforeDate_[MAX_DATE_SZ+1]; // valid before date, +null term byte beforeDateType_; // beforeDate time type - char afterDate_[MAX_DATE_SZ]; // valid after date + char afterDate_[MAX_DATE_SZ+1]; // valid after date, +null term byte afterDateType_; // afterDate time type - bool verify_; // Default to yes, but could be off + bool verify_; // Default to yes, but could be off void ReadHeader(); void Decode(SignerList*, CertType); diff --git a/include/atomic/rwlock.h b/include/atomic/rwlock.h index f6bf7d0b76f..2ffdd384cc5 100644 --- a/include/atomic/rwlock.h +++ b/include/atomic/rwlock.h @@ -41,13 +41,6 @@ typedef char my_atomic_rwlock_t; typedef struct {pthread_mutex_t rw;} my_atomic_rwlock_t; -#ifndef SAFE_MUTEX - -/* - we're using read-write lock macros but map them to mutex locks, and they're - faster. Still, having semantically rich API we can change the - underlying implementation, if necessary. -*/ #define my_atomic_rwlock_destroy(name) pthread_mutex_destroy(& (name)->rw) #define my_atomic_rwlock_init(name) pthread_mutex_init(& (name)->rw, 0) #define my_atomic_rwlock_rdlock(name) pthread_mutex_lock(& (name)->rw) @@ -55,37 +48,6 @@ typedef struct {pthread_mutex_t rw;} my_atomic_rwlock_t; #define my_atomic_rwlock_rdunlock(name) pthread_mutex_unlock(& (name)->rw) #define my_atomic_rwlock_wrunlock(name) pthread_mutex_unlock(& (name)->rw) -#else /* SAFE_MUTEX */ - -/* - SAFE_MUTEX pollutes the compiling name space with macros - that alter pthread_mutex_t, pthread_mutex_init, etc. - Atomic operations should never use the safe mutex wrappers. - Unfortunately, there is no way to have both: - - safe mutex macros expanding pthread_mutex_lock to safe_mutex_lock - - my_atomic macros expanding to unmodified pthread_mutex_lock - inlined in the same compilation unit. - So, in case of SAFE_MUTEX, a function call is required. - Given that SAFE_MUTEX is a debugging facility, - this extra function call is not a performance concern for - production builds. 
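A note on the rwlock.h hunk: the branch being deleted existed because SAFE_MUTEX builds redefine the pthread mutex calls through macros, so any macro in this header that expands to pthread_mutex_lock() silently picks up the wrapper. The fragment below is purely illustrative, with a hypothetical wrapper name, to show the effect being described.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for a SAFE_MUTEX-style debugging wrapper. */
static int
safe_mutex_lock_wrapper(pthread_mutex_t *m, const char *f, int l)
{
        fprintf(stderr, "lock at %s:%d\n", f, l);
        return pthread_mutex_lock(m);
}

/* SAFE_MUTEX-style redefinition, installed after the real declarations: */
#define pthread_mutex_lock(M) safe_mutex_lock_wrapper((M), __FILE__, __LINE__)

/* Any header macro expanding to pthread_mutex_lock() now goes through the
   wrapper, which is why the removed branch routed the my_atomic_rwlock_*
   macros through out-of-line plain_pthread_mutex_*() functions instead. */
#define my_atomic_rwlock_rdlock(name) pthread_mutex_lock(&(name)->rw)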
-*/ -C_MODE_START -extern void plain_pthread_mutex_init(safe_mutex_t *); -extern void plain_pthread_mutex_destroy(safe_mutex_t *); -extern void plain_pthread_mutex_lock(safe_mutex_t *); -extern void plain_pthread_mutex_unlock(safe_mutex_t *); -C_MODE_END - -#define my_atomic_rwlock_destroy(name) plain_pthread_mutex_destroy(&(name)->rw) -#define my_atomic_rwlock_init(name) plain_pthread_mutex_init(&(name)->rw) -#define my_atomic_rwlock_rdlock(name) plain_pthread_mutex_lock(&(name)->rw) -#define my_atomic_rwlock_wrlock(name) plain_pthread_mutex_lock(&(name)->rw) -#define my_atomic_rwlock_rdunlock(name) plain_pthread_mutex_unlock(&(name)->rw) -#define my_atomic_rwlock_wrunlock(name) plain_pthread_mutex_unlock(&(name)->rw) - -#endif /* SAFE_MUTEX */ - #define MY_ATOMIC_MODE "mutex" #ifndef MY_ATOMIC_MODE_RWLOCKS #define MY_ATOMIC_MODE_RWLOCKS 1 diff --git a/include/keycache.h b/include/keycache.h index 8fa9bf1cd18..85937ebefb9 100644 --- a/include/keycache.h +++ b/include/keycache.h @@ -67,11 +67,13 @@ typedef enum key_cache_type typedef int (*INIT_KEY_CACHE) (void *, uint key_cache_block_size, - size_t use_mem, uint division_limit, uint age_threshold); + size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size); typedef int (*RESIZE_KEY_CACHE) (void *, uint key_cache_block_size, - size_t use_mem, uint division_limit, uint age_threshold); + size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size); typedef void (*CHANGE_KEY_CACHE_PARAM) (void *keycache_cb, @@ -146,6 +148,7 @@ typedef struct st_key_cache ulonglong param_division_limit;/* min. percentage of warm blocks */ ulonglong param_age_threshold; /* determines when hot block is downgraded */ ulonglong param_partitions; /* number of the key cache partitions */ + ulonglong changed_blocks_hash_size; /* number of hash buckets for changed files */ my_bool key_cache_inited; /* <=> key cache has been created */ my_bool can_be_used; /* usage of cache for read/write is allowed */ my_bool in_init; /* set to 1 in MySQL during init/resize */ @@ -160,10 +163,11 @@ extern KEY_CACHE dflt_key_cache_var, *dflt_key_cache; extern int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold, uint partitions); + uint age_threshold, uint changed_blocks_hash_size, + uint partitions); extern int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold); + uint age_threshold, uint changed_blocks_hash_size); extern void change_key_cache_param(KEY_CACHE *keycache, uint division_limit, uint age_threshold); extern uchar *key_cache_read(KEY_CACHE *keycache, @@ -202,6 +206,7 @@ extern int repartition_key_cache(KEY_CACHE *keycache, size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size, uint partitions); C_MODE_END #endif /* _keycache_h */ diff --git a/include/m_ctype.h b/include/m_ctype.h index 6f372002ebb..5994816cbfc 100644 --- a/include/m_ctype.h +++ b/include/m_ctype.h @@ -735,6 +735,14 @@ my_bool my_propagate_simple(CHARSET_INFO *cs, const uchar *str, size_t len); my_bool my_propagate_complex(CHARSET_INFO *cs, const uchar *str, size_t len); +typedef struct +{ + size_t char_length; + uint repertoire; +} MY_STRING_METADATA; + +void my_string_metadata_get(MY_STRING_METADATA *metadata, + CHARSET_INFO *cs, const char *str, size_t len); uint my_string_repertoire(CHARSET_INFO *cs, const char *str, ulong len); my_bool 
my_charset_is_ascii_based(CHARSET_INFO *cs); my_bool my_charset_is_8bit_pure_ascii(CHARSET_INFO *cs); diff --git a/include/my_cpu.h b/include/my_cpu.h new file mode 100644 index 00000000000..026b92c1b74 --- /dev/null +++ b/include/my_cpu.h @@ -0,0 +1,44 @@ +/* Copyright (c) 2013, MariaDB foundation Ab and SkySQL + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA +*/ + +/* instructions for specific cpu's */ + +/* + Macros for adjusting thread priority (hardware multi-threading) + The defines are the same ones used by the linux kernel +*/ + +#if defined(__powerpc__) +/* Very low priority */ +#define HMT_very_low() asm volatile("or 31,31,31") +/* Low priority */ +#define HMT_low() asm volatile("or 1,1,1") +/* Medium low priority */ +#define HMT_medium_low() asm volatile("or 6,6,6") +/* Medium priority */ +#define HMT_medium() asm volatile("or 2,2,2") +/* Medium high priority */ +#define HMT_medium_high() asm volatile("or 5,5,5") +/* High priority */ +#define HMT_high() asm volatile("or 3,3,3") +#else +#define HMT_very_low() +#define HMT_low() +#define HMT_medium_low() +#define HMT_medium() +#define HMT_medium_high() +#define HMT_high() +#endif diff --git a/include/my_sys.h b/include/my_sys.h index 4b4e5b7a22f..9913ee8c79b 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -242,6 +242,11 @@ extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *default_charset_info; extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *all_charsets[MY_ALL_CHARSETS_SIZE]; extern struct charset_info_st compiled_charsets[]; +/* Collation properties and use statistics */ +extern my_bool my_collation_is_known_id(uint id); +extern ulonglong my_collation_statistics_get_use_count(uint id); +extern const char *my_collation_get_tailoring(uint id); + /* statistics */ extern ulong my_file_opened,my_stream_opened, my_tmp_file_created; extern ulong my_file_total_opened; diff --git a/include/myisam.h b/include/myisam.h index 853fac20ae4..88ce401fabc 100644 --- a/include/myisam.h +++ b/include/myisam.h @@ -41,6 +41,12 @@ extern "C" { #endif #define MI_MAX_POSSIBLE_KEY_BUFF HA_MAX_POSSIBLE_KEY_BUFF +/* + The following defines can be increased if necessary. + But beware the dependency of MI_MAX_POSSIBLE_KEY_BUFF and MI_MAX_KEY_LENGTH. 
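A note on the new include/my_cpu.h above: the HMT_*() macros emit the PowerPC "or N,N,N" priority-hint instructions and compile to nothing on other architectures, so callers can use them unconditionally. A hypothetical spin-wait showing the intended usage pattern; the function and flag are illustrations, not MariaDB code, and assume the header is on the include path.

#include "my_cpu.h"

/* Drop hardware-thread (SMT) priority while polling, restore it before
   returning to real work.  On non-PowerPC builds both macros are no-ops. */
static void
spin_until_set(volatile int *flag)
{
        HMT_low();
        while (*flag == 0)
                /* busy wait at reduced thread priority */;
        HMT_medium();
}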
+*/ +#define MI_MAX_KEY_LENGTH 1000 /* Max length in bytes */ +#define MI_MAX_KEY_SEG 16 /* Max segments for key */ #define MI_NAME_IEXT ".MYI" #define MI_NAME_DEXT ".MYD" diff --git a/include/mysql.h.pp b/include/mysql.h.pp index ca5b1ac05bf..6b60389acc3 100644 --- a/include/mysql.h.pp +++ b/include/mysql.h.pp @@ -49,9 +49,9 @@ enum enum_field_types { MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY, MYSQL_TYPE_DATETIME, MYSQL_TYPE_YEAR, MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, MYSQL_TYPE_BIT, - MYSQL_TYPE_TIMESTAMP2, - MYSQL_TYPE_DATETIME2, - MYSQL_TYPE_TIME2, + MYSQL_TYPE_TIMESTAMP2, + MYSQL_TYPE_DATETIME2, + MYSQL_TYPE_TIME2, MYSQL_TYPE_NEWDECIMAL=246, MYSQL_TYPE_ENUM=247, MYSQL_TYPE_SET=248, @@ -94,7 +94,7 @@ my_bool net_write_command(NET *net,unsigned char command, const unsigned char *header, size_t head_len, const unsigned char *packet, size_t len); int net_real_write(NET *net,const unsigned char *packet, size_t len); -unsigned long my_net_read(NET *net); +unsigned long my_net_read_packet(NET *net, my_bool read_from_server); struct sockaddr; int my_connect(my_socket s, const struct sockaddr *name, unsigned int namelen, unsigned int timeout); @@ -553,7 +553,7 @@ int mysql_list_processes_cont(MYSQL_RES **ret, MYSQL *mysql, int mysql_options(MYSQL *mysql,enum mysql_option option, const void *arg); int mysql_options4(MYSQL *mysql,enum mysql_option option, - const void *arg1, const void *arg2); + const void *arg1, const void *arg2); void mysql_free_result(MYSQL_RES *result); int mysql_free_result_start(MYSQL_RES *result); int mysql_free_result_cont(MYSQL_RES *result, int status); diff --git a/include/mysql_com.h b/include/mysql_com.h index 3a794b36c1e..0da24bc35a3 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -528,7 +528,8 @@ my_bool net_write_command(NET *net,unsigned char command, const unsigned char *header, size_t head_len, const unsigned char *packet, size_t len); int net_real_write(NET *net,const unsigned char *packet, size_t len); -unsigned long my_net_read(NET *net); +unsigned long my_net_read_packet(NET *net, my_bool read_from_server); +#define my_net_read(A) my_net_read_packet((A), 0) #ifdef MY_GLOBAL_INCLUDED void my_net_set_write_timeout(NET *net, uint timeout); diff --git a/include/thread_pool_priv.h b/include/thread_pool_priv.h index 449c8ded66b..4270c32c826 100644 --- a/include/thread_pool_priv.h +++ b/include/thread_pool_priv.h @@ -1,6 +1,6 @@ #error don't use /* - Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/libmysql/CMakeLists.txt b/libmysql/CMakeLists.txt index 6897031b1f9..d432d499d47 100644 --- a/libmysql/CMakeLists.txt +++ b/libmysql/CMakeLists.txt @@ -257,6 +257,9 @@ mariadb_dyncol_unpack_free mariadb_dyncol_column_cmp_named mariadb_dyncol_column_count mariadb_dyncol_prepare_decimal + +# Added in MariaDB-10.0 to stay compatible with MySQL-5.6, yuck! 
+mysql_options4 ) SET(CLIENT_API_FUNCTIONS @@ -406,7 +409,6 @@ SET(LIBS clientlib dbug strings vio mysys mysys_ssl ${ZLIB_LIBRARY} ${SSL_LIBRAR MERGE_LIBRARIES(mysqlclient STATIC ${LIBS} COMPONENT Development) # Visual Studio users need debug static library for debug projects -INSTALL_DEBUG_SYMBOLS(clientlib) IF(MSVC) INSTALL_DEBUG_TARGET(mysqlclient DESTINATION ${INSTALL_LIBDIR}/debug) INSTALL_DEBUG_TARGET(clientlib DESTINATION ${INSTALL_LIBDIR}/debug) @@ -447,6 +449,10 @@ IF(NOT DISABLE_SHARED) SOVERSION "${SHARED_LIB_MAJOR_VERSION}") IF(LINK_FLAG_NO_UNDEFINED OR VERSION_SCRIPT_LINK_FLAGS) GET_TARGET_PROPERTY(libmysql_link_flags libmysql LINK_FLAGS) + IF(NOT libmysql_link_flags) + # Avoid libmysql_link_flags-NOTFOUND + SET(libmysql_link_flags) + ENDIF() SET_TARGET_PROPERTIES(libmysql PROPERTIES LINK_FLAGS "${libmysql_link_flags} ${LINK_FLAG_NO_UNDEFINED} ${VERSION_SCRIPT_LINK_FLAGS}") ENDIF() diff --git a/man/CMakeLists.txt b/man/CMakeLists.txt index f2842959c3f..c4383b31a17 100644 --- a/man/CMakeLists.txt +++ b/man/CMakeLists.txt @@ -14,6 +14,7 @@ # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA SET(MAN1_SERVER innochecksum.1 my_print_defaults.1 myisam_ftdump.1 myisamchk.1 + aria_chk.1 aria_dump_log.1 aria_ftdump.1 aria_pack.1 aria_read_log.1 myisamlog.1 myisampack.1 mysql.server.1 mysql_convert_table_format.1 mysql_fix_extensions.1 mysql_install_db.1 diff --git a/man/aria_pack.1 b/man/aria_pack.1 index 9cec33a3818..ee47f5ff3bb 100644 --- a/man/aria_pack.1 +++ b/man/aria_pack.1 @@ -1,6 +1,6 @@ .TH ARIA_PACK "1" "May 2014" "aria_pack Ver 1.0" "User Commands" .SH NAME -aria_pack \- manual page for aria_pack Ver 1.0 +aria_pack \- generate compressed, read\-only Aria tables .SH SYNOPSIS .B aria_pack [\fIOPTIONS\fR] \fIfilename\fR... diff --git a/man/mysqlbinlog.1 b/man/mysqlbinlog.1 index cc0f62485b5..5e9bc6c2f43 100644 --- a/man/mysqlbinlog.1 +++ b/man/mysqlbinlog.1 @@ -1255,7 +1255,7 @@ indicates a FORMAT_DESCRIPTION_EVENT\&. The following table lists the possible type codes\&. .TS allbox tab(:); -l l l. +l l lx. T{ Type T}:T{ @@ -1389,6 +1389,7 @@ T} T{ 0f T}:T{ +.nf FORMAT_DESCRIPTION_EVENT T}:T{ This indicates the start of a log file written by MySQL 5 or later\&. @@ -1526,7 +1527,7 @@ Master Pos: The position of the next event in the original master log file\&. Flags: 16 flags\&. Currently, the following flags are used\&. The others are reserved for future use\&. .TS allbox tab(:); -l l l. +l l lx. T{ Flag T}:T{ @@ -1537,6 +1538,7 @@ T} T{ 01 T}:T{ +.nf LOG_EVENT_BINLOG_IN_USE_F T}:T{ Log file correctly closed\&. (Used only in @@ -1558,6 +1560,7 @@ T} T{ 04 T}:T{ +.nf LOG_EVENT_THREAD_SPECIFIC_F T}:T{ Set if the event is dependent on the connection it was executed in (for diff --git a/man/mysqldump.1 b/man/mysqldump.1 index e91e8859ec4..c3c0bf4440d 100644 --- a/man/mysqldump.1 +++ b/man/mysqldump.1 @@ -2019,7 +2019,7 @@ value, an empty string, and the string value are distinguished from one another in the output generated by this option as follows\&. .TS allbox tab(:); -l l. +l lx. T{ \fBValue\fR: T}:T{ diff --git a/mysql-test/disabled.def b/mysql-test/disabled.def index d2e839fa39a..c0aed63e8be 100644 --- a/mysql-test/disabled.def +++ b/mysql-test/disabled.def @@ -21,4 +21,5 @@ ssl_crl_clients_valid : broken upstream ssl_crl : broken upstream ssl_crl_clrpath : broken upstream innodb-wl5522-debug-zip : broken upstream -innodb_bug12902967 : broken upstream
\ No newline at end of file +innodb_bug12902967 : broken upstream +file_contents : MDEV-6526 these files are not installed anymore diff --git a/mysql-test/extra/rpl_tests/rpl_loaddata.test b/mysql-test/extra/rpl_tests/rpl_loaddata.test index 3c7aa9e9474..67eb137bdf4 100644 --- a/mysql-test/extra/rpl_tests/rpl_loaddata.test +++ b/mysql-test/extra/rpl_tests/rpl_loaddata.test @@ -18,26 +18,14 @@ source include/have_innodb.inc; CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); --enable_query_log -# MTR is not case-sensitive. -let $lower_stmt_head= load data; -let $UPPER_STMT_HEAD= LOAD DATA; -if ($lock_option) -{ - #if $lock_option is null, an extra blank is added into the statement, - #this will change the result of rpl_loaddata test case. so $lock_option - #is set only when it is not null. - let $lower_stmt_head= load data $lock_option; - let $UPPER_STMT_HEAD= LOAD DATA $lock_option; -} - select last_insert_id(); create table t1(a int not null auto_increment, b int, primary key(a) ); -eval $lower_stmt_head infile '../../std_data/rpl_loaddata.dat' into table t1; +eval load data $lock_option infile '../../std_data/rpl_loaddata.dat' into table t1; # verify that LAST_INSERT_ID() is set by LOAD DATA INFILE select last_insert_id(); create temporary table t2 (day date,id int(9),category enum('a','b','c'),name varchar(60)); -eval $lower_stmt_head infile '../../std_data/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines; +eval load data $lock_option infile '../../std_data/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines; create table t3 (day date,id int(9),category enum('a','b','c'),name varchar(60)); --disable_warnings @@ -63,7 +51,7 @@ sync_slave_with_master; insert into t1 values(1,10); connection master; -eval $lower_stmt_head infile '../../std_data/rpl_loaddata.dat' into table t1; +eval load data $lock_option infile '../../std_data/rpl_loaddata.dat' into table t1; save_master_pos; connection slave; @@ -86,7 +74,7 @@ connection master; set sql_log_bin=0; delete from t1; set sql_log_bin=1; -eval $lower_stmt_head infile '../../std_data/rpl_loaddata.dat' into table t1; +eval load data $lock_option infile '../../std_data/rpl_loaddata.dat' into table t1; save_master_pos; connection slave; # The SQL slave thread should be stopped now. @@ -111,7 +99,7 @@ connection master; set sql_log_bin=0; delete from t1; set sql_log_bin=1; -eval $lower_stmt_head infile '../../std_data/rpl_loaddata.dat' into table t1; +eval load data $lock_option infile '../../std_data/rpl_loaddata.dat' into table t1; save_master_pos; connection slave; # The SQL slave thread should be stopped now. 
@@ -131,7 +119,7 @@ reset master; eval create table t2 (day date,id int(9),category enum('a','b','c'),name varchar(60), unique(day)) engine=$engine_type; # no transactions --error ER_DUP_ENTRY -eval $lower_stmt_head infile '../../std_data/rpl_loaddata2.dat' into table t2 fields +eval load data $lock_option infile '../../std_data/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines; select * from t2; @@ -147,7 +135,7 @@ alter table t2 drop key day; connection master; delete from t2; --error ER_DUP_ENTRY -eval $lower_stmt_head infile '../../std_data/rpl_loaddata2.dat' into table t2 fields +eval load data $lock_option infile '../../std_data/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines; connection slave; @@ -167,7 +155,7 @@ drop table t1, t2; CREATE TABLE t1 (word CHAR(20) NOT NULL PRIMARY KEY) ENGINE=INNODB; --error ER_DUP_ENTRY -eval $UPPER_STMT_HEAD INFILE "../../std_data/words.dat" INTO TABLE t1; +eval LOAD DATA $lock_option INFILE "../../std_data/words.dat" INTO TABLE t1; DROP TABLE t1; @@ -196,17 +184,17 @@ DROP TABLE t1; -- echo ### assertion: works with cross-referenced database -- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval $UPPER_STMT_HEAD LOCAL INFILE '$MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE $db1.t1 +-- eval LOAD DATA $lock_option LOCAL INFILE '$MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE $db1.t1 -- eval use $db1 -- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR -- echo ### assertion: works with fully qualified name on current database -- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval $UPPER_STMT_HEAD LOCAL INFILE '$MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE $db1.t1 +-- eval LOAD DATA $lock_option LOCAL INFILE '$MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE $db1.t1 -- echo ### assertion: works without fully qualified name on current database -- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval $UPPER_STMT_HEAD LOCAL INFILE '$MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE t1 +-- eval LOAD DATA $lock_option LOCAL INFILE '$MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE t1 -- echo ### create connection without default database -- echo ### connect (conn2,localhost,root,,*NO-ONE*); @@ -214,7 +202,7 @@ connect (conn2,localhost,root,,*NO-ONE*); -- connection conn2 -- echo ### assertion: works without stating the default database -- replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --- eval $UPPER_STMT_HEAD LOCAL INFILE '$MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE $db1.t1 +-- eval LOAD DATA $lock_option LOCAL INFILE '$MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE $db1.t1 # We cannot disconnect right away because when inserting # concurrently in a MyISAM table, the server is sending an OK diff --git a/mysql-test/include/bytes.inc b/mysql-test/include/bytes.inc new file mode 100644 index 00000000000..71575992bcf --- /dev/null +++ b/mysql-test/include/bytes.inc @@ -0,0 +1,9 @@ +# +# Create a table with all byte values +# +CREATE TABLE halfs (a INT); +INSERT INTO halfs VALUES (0x00),(0x01),(0x02),(0x03),(0x04),(0x05),(0x06),(0x07); +INSERT INTO halfs VALUES (0x08),(0x09),(0x0A),(0x0B),(0x0C),(0x0D),(0x0E),(0x0F); +CREATE TEMPORARY TABLE bytes (a BINARY(1), KEY(a)) ENGINE=MyISAM; +INSERT INTO bytes SELECT CHAR((t1.a << 4) | t2.a USING BINARY) FROM halfs t1, halfs t2; +DROP TABLE 
halfs; diff --git a/mysql-test/include/bytes2.inc b/mysql-test/include/bytes2.inc new file mode 100644 index 00000000000..c151d32a3dc --- /dev/null +++ b/mysql-test/include/bytes2.inc @@ -0,0 +1,21 @@ +# +# Create a table with all 2 byte sequence values +# + +--source include/bytes.inc + +CREATE TABLE halfs (a BINARY(1)); +# "bytes" is a temporary table, hence is not allowed in joins +# Create a non-temporary copy. +INSERT INTO halfs SELECT * FROM bytes; +CREATE TEMPORARY TABLE bytes2 ( + a BINARY(2), + hi BINARY(1), + lo BINARY(1), + KEY(a), + KEY(lo) +) ENGINE=MyISAM; +INSERT INTO bytes2 +SELECT CONCAT(t1.a, t2.a), t1.a, t2.a FROM halfs t1, halfs t2 +ORDER BY t1.a, t2.a; +DROP TABLE halfs; diff --git a/mysql-test/include/ctype_E05C.inc b/mysql-test/include/ctype_E05C.inc new file mode 100644 index 00000000000..9ef35a98934 --- /dev/null +++ b/mysql-test/include/ctype_E05C.inc @@ -0,0 +1,111 @@ +--echo # Start of ctype_E05C.inc + +# +# A shared test for character sets big5, cp932, gbk, sjis +# They all can have 0x5C as the second byte in a multi-byte character. +# 0xE05C is one of such characters. +# + +# Checking that the character 0xE05C correctly understands 5C as the second byte +# rather than a stand-alone backslash, including the strings that also +# have real backslash escapes and/or separator escapes. + +SELECT HEX('à\'),HEX('à\t'); +SELECT HEX('\\à\'),HEX('\\à\t'),HEX('\\à\t\t'); +SELECT HEX('''à\'),HEX('à\'''); +SELECT HEX('\\''à\'),HEX('à\''\\'); + +SELECT HEX(BINARY('à\')),HEX(BINARY('à\t')); +SELECT HEX(BINARY('\\à\')),HEX(BINARY('\\à\t')),HEX(BINARY('\\à\t\t')); +SELECT HEX(BINARY('''à\')),HEX(BINARY('à\''')); +SELECT HEX(BINARY('\\''à\')),HEX(BINARY('à\''\\')); + +SELECT HEX(_BINARY'à\'),HEX(_BINARY'à\t'); +SELECT HEX(_BINARY'\\à\'),HEX(_BINARY'\\à\t'),HEX(_BINARY'\\à\t\t'); +SELECT HEX(_BINARY'''à\'),HEX(_BINARY'à\'''); +SELECT HEX(_BINARY'\\''à\'),HEX(_BINARY'à\''\\'); + +CREATE TABLE t1 AS SELECT REPEAT(' ',10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES ('à\'),('à\t'); +INSERT INTO t1 VALUES ('\\à\'),('\\à\t'),('\\à\t\t'); +INSERT INTO t1 VALUES ('''à\'),('à\'''); +INSERT INTO t1 VALUES ('\\''à\'),('à\''\\'); +SELECT a, HEX(a) FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES ('à\'),('à\t'); +INSERT INTO t1 VALUES ('\\à\'),('\\à\t'),('\\à\t\t'); +INSERT INTO t1 VALUES ('''à\'),('à\'''); +INSERT INTO t1 VALUES ('\\''à\'),('à\''\\'); +SELECT a, HEX(a) FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES (BINARY('à\')),(BINARY('à\t')); +INSERT INTO t1 VALUES (BINARY('\\à\')),(BINARY('\\à\t')),(BINARY('\\à\t\t')); +INSERT INTO t1 VALUES (BINARY('''à\')),(BINARY('à\''')); +INSERT INTO t1 VALUES (BINARY('\\''à\')),(BINARY('à\''\\')); +SELECT a, HEX(a) FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES (BINARY('à\')),(BINARY('à\t')); +INSERT INTO t1 VALUES (BINARY('\\à\')),(BINARY('\\à\t')),(BINARY('\\à\t\t')); +INSERT INTO t1 VALUES (BINARY('''à\')),(BINARY('à\''')); +INSERT INTO t1 VALUES (BINARY('\\''à\')),(BINARY('à\''\\')); +SELECT a, HEX(a) FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES (_BINARY'à\'),(_BINARY'à\t'); +INSERT INTO t1 VALUES (_BINARY'\\à\'),(_BINARY'\\à\t'),(_BINARY'\\à\t\t'); +INSERT INTO t1 VALUES (_BINARY'''à\'),(_BINARY'à\'''); +INSERT INTO t1 VALUES (_BINARY'\\''à\'),(_BINARY'à\''\\'); +SELECT a, HEX(a) FROM t1; +DROP TABLE t1; 
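A note on ctype_E05C.inc: the point of the 0xE05C character is that its second byte is 0x5C, the ASCII backslash, so escaping and unescaping have to be done with the connection character set in mind. A small standalone C illustration of how a charset-unaware, byte-wise escaper corrupts such a character; the code is illustrative only and not taken from the server.

#include <stdio.h>

int
main(void)
{
        /* One big5/cp932/gbk/sjis character whose trailing byte is '\\'. */
        const unsigned char in[] = { 0xE0, 0x5C };
        unsigned char out[8];
        size_t i, n = 0;

        for (i = 0; i < sizeof(in); i++) {      /* charset-unaware escaping */
                if (in[i] == 0x5C)
                        out[n++] = 0x5C;        /* doubles the mbtail byte */
                out[n++] = in[i];
        }
        printf("E05C became:");
        for (i = 0; i < n; i++)
                printf(" %02X", out[i]);
        printf("\n");                           /* E0 5C 5C: no longer one character */
        return 0;
}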
+ +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES (_BINARY'à\'),(_BINARY'à\t'); +INSERT INTO t1 VALUES (_BINARY'\\à\'),(_BINARY'\\à\t'),(_BINARY'\\à\t\t'); +INSERT INTO t1 VALUES (_BINARY'''à\'),(_BINARY'à\'''); +INSERT INTO t1 VALUES (_BINARY'\\''à\'),(_BINARY'à\''\\'); +SELECT a, HEX(a) FROM t1; +DROP TABLE t1; + +# Checking that with character_set_client=binary 0x5C in 0xE05C +# is treated as escape rather than the second byte of a multi-byte character, +# even if character_set_connection is big5/cp932/gbk/sjis. +# Note, the other 0x5C which is before 0xE05C is also treated as escape. +# +SET character_set_client=binary, character_set_results=binary; +SELECT @@character_set_client, @@character_set_connection, @@character_set_results; +SELECT HEX('à\['), HEX('\à\['); +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES ('à\['),('\à\['); +SELECT HEX(a) FROM t1; +DROP TABLE t1; + +# +# Checking the other way around: +# 0x5C in 0xE05C is treated as the second byte +# when character_set_client=big5,cp932,gbk,sjis +# and character_set_connection=binary +# +SET character_set_client=@@character_set_connection, character_set_results=@@character_set_connection; +SET character_set_connection=binary; +SELECT @@character_set_client, @@character_set_connection, @@character_set_results; +SELECT HEX('à\['), HEX('\à\['); +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES ('à\['),('\à\['); +SELECT HEX(a) FROM t1; +DROP TABLE t1; + + +--echo # Start of ctype_E05C.inc + diff --git a/mysql-test/include/ctype_unescape.inc b/mysql-test/include/ctype_unescape.inc new file mode 100644 index 00000000000..5d67bf8d189 --- /dev/null +++ b/mysql-test/include/ctype_unescape.inc @@ -0,0 +1,327 @@ +--echo # Start of ctype_unescape.inc + +# +# Testing how string literals with backslash and quote-quote are unescaped. +# The tests assume that single quote (') is used as a delimiter. +# + +# +# Make sure that the parser really works using the character set we need. +# We use binary strings to compose strings, to be able to test get malformed +# sequences, which are possible as a result of mysql_real_escape_string(). +# The important thing for this test is to make the parser unescape using +# the client character set, rather than binary. Currently it works exactly +# that way by default, so the query below should return @@character_set_client +# +SET @query=_binary'SELECT CHARSET(\'test\'),@@character_set_client,@@character_set_connection'; +PREPARE stmt FROM @query; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; + +let $CHARSET=`SELECT @@character_set_connection`; + +CREATE TABLE allbytes (a VARBINARY(10)); + +# +# Create various byte sequences to test. Testing the full banch of +# possible combinations takes about 2 minutes. So this test provides +# variants to run with: +# - the full set of possible combinations +# - a reduced test of combinations for selected bytes only +# + +# Create selected byte combinations +if ($ctype_unescape_combinations == 'selected') +{ +--echo # Using selected bytes combinations +--source include/bytes.inc +# +# Populate "selected_bytes" with bytes that have a special meaning. +# We'll use "selected_bytes" to generate byte seqeunces, +# instead of the full possible byte combinations, to reduce test time. 
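A note for reference: several of the selected bytes enumerated just below are the ones mysql_real_escape_string() rewrites (NUL, CR, LF, Ctrl-Z, quotes, backslash). A minimal C client fragment showing that call, assuming `con` is an already-connected MYSQL handle; it is a sketch, not part of this test.

#include <mysql.h>
#include <stdio.h>

/* NUL, CR, LF and Ctrl-Z come back as \0, \r, \n and \Z; quotes and
   backslash are backslash-escaped; everything else is copied as is. */
static void
show_escaping(MYSQL *con)
{
        const char from[4] = { 0x00, 0x0D, 0x0A, 0x1A };
        char to[2 * sizeof(from) + 1];
        unsigned long len = mysql_real_escape_string(con, to, from, sizeof(from));

        printf("%lu bytes: %s\n", len, to);
}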
+# +CREATE TABLE selected_bytes (a VARBINARY(10)); + +# Bytes that have a special meaning in all character sets: +# 0x00 - mysql_real_escape_string() quotes this to '\0' +# 0x0D - mysql_real_escape_string() quotes this to '\r' +# 0x0A - mysql_real_escape_string() quotes this to '\n' +# 0x1A - mysql_real_escape_string() quotes this to '\Z' +# 0x08 - mysql_real_escape_string() does not quote this, +# but '\b' is unescaped to 0x08. +# 0x09 - mysql_real_escape_string() does not quote this, +# but '\t' is unescaped to 0x09. +# 0x30 - '0', as in '\0' +# 0x5A - 'Z', as in '\Z' +# 0x62 - 'b', as in '\b' +# 0x6E - 'n', as in '\n' +# 0x72 - 't', as in '\r' +# 0x74 - 't', as in '\t' + +INSERT INTO selected_bytes (a) VALUES ('\0'),('\b'),('\t'),('\r'),('\n'),('\Z'); +INSERT INTO selected_bytes (a) VALUES ('0'),('b'),('t'),('r'),('n'),('Z'); + +# 0x22 - double quote +# 0x25 - percent sign, '\%' is preserved as is for LIKE. +# 0x27 - single quote +# 0x5C - backslash +# 0x5F - underscore, '\_' is preserved as is for LIKE. +INSERT INTO selected_bytes (a) VALUES ('\\'),('_'),('%'),(0x22),(0x27); + +# Some bytes do not have any special meaning, for example basic Latin letters. +# Let's add, one should be enough for a good enough coverage. +INSERT INTO selected_bytes (a) VALUES ('a'); + +# +# This maps summarizes bytes that have a special +# meaning in various character sets: +# +# MBHEAD MBTAIL NONASCII-8BIT BAD +# ------ ------ -------------- ---------- +# big5: [A1..F9] [40..7E,A1..FE] N/A [80..A0,FA..FF] +# cp932: [81..9F,E0..FC] [40..7E,80..FC] [A1..DF] [FD..FF] +# gbk: [81..FE] [40..7E,80..FE] N/A [FF] +# sjis: [81..9F,E0..FC] [40..7E,80..FC] [A1..DF] [FD..FF] +# swe7: N/A N/A [5B..5E,7B..7E] [80..FF] +# + +INSERT INTO selected_bytes (a) VALUES +(0x3F), # 7bit +(0x40), # 7bit mbtail +(0x7E), # 7bit mbtail nonascii-8bit +(0x7F), # 7bit nonascii-8bit +(0x80), # mbtail bad-mb +(0x81), # mbhead mbtail +(0x9F), # mbhead mbtail bad-mb +(0xA0), # mbhead mbtail bad-mb +(0xA1), # mbhead mbtail nonascii-8bit +(0xE0), # mbhead mbtai +(0xEF), # mbhead mbtail +(0xF9), # mbhead mbtail +(0xFA), # mbhead mbtail bad-mb +(0xFC), # mbhead mbtail bad-mb +(0xFD), # mbhead mbtail bad-mb +(0xFE), # mbhead mbtial bad-mb +(0xFF); # bad-mb + +# +# Now populate the test table +# + +# Use all single bytes, this is cheap, there are only 256 values. +INSERT INTO allbytes (a) SELECT a FROM bytes; + +# Add selected bytes combinations +INSERT INTO allbytes (a) SELECT CONCAT(t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,0x5C,t2.a) FROM selected_bytes t1,selected_bytes t2; +DROP TABLE selected_bytes; + +# Delete all non-single byte sequences that do not have +# backslashes or quotes at all. There is nothing special with these strings. +DELETE FROM allbytes WHERE + OCTET_LENGTH(a)>1 AND + LOCATE(0x5C,a)=0 AND + a NOT LIKE '%\'%' AND + a NOT LIKE '%"%'; + +} + +if ($ctype_unescape_combinations=='') +{ +--echo # Using full byte combinations +--source include/bytes2.inc +INSERT INTO allbytes (a) SELECT a FROM bytes; +INSERT INTO allbytes (a) SELECT CONCAT(hi,lo) FROM bytes2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,hi,lo) FROM bytes2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,hi,0x5C,lo) FROM bytes2; +} + + +DELIMITER //; + +# +# A procedure that make an SQL query using 'val' as a string literal. +# The result of the query execution is written into the table 't1'. 
+# NULL in t1.b means that the query failed due to a syntax error,
+# typically because of a mis-interpreted closing quote delimiter.
+#
+CREATE PROCEDURE p1(val VARBINARY(10))
+BEGIN
+  DECLARE EXIT HANDLER FOR SQLSTATE '42000' INSERT INTO t1 (a,b) VALUES(val,NULL);
+  SET @query=CONCAT(_binary"INSERT INTO t1 (a,b) VALUES (0x",HEX(val),",'",val,"')");
+  PREPARE stmt FROM @query;
+  EXECUTE stmt;
+  DEALLOCATE PREPARE stmt;
+END//
+
+#
+# A procedure that iterates through all records in "allbytes"
+# and runs p1() for every record.
+#
+CREATE PROCEDURE p2()
+BEGIN
+  DECLARE val VARBINARY(10);
+  DECLARE done INT DEFAULT FALSE;
+  DECLARE stmt CURSOR FOR SELECT a FROM allbytes;
+  DECLARE CONTINUE HANDLER FOR NOT FOUND SET done=TRUE;
+  OPEN stmt;
+read_loop1: LOOP
+  FETCH stmt INTO val;
+  IF done THEN
+    LEAVE read_loop1;
+  END IF;
+  CALL p1(val);
+END LOOP;
+  CLOSE stmt;
+END//
+
+
+# A function that converts the value from binary to $CHARSET
+# and checks if it has changed. CONVERT() fixes malformed strings.
+# So if the string changes in CONVERT(), it means it was not wellformed.
+--eval CREATE FUNCTION iswellformed(a VARBINARY(256)) RETURNS INT RETURN a=BINARY CONVERT(a USING $CHARSET);
+
+#
+# A function that approximately reproduces how the SQL parser
+# would unescape a binary string.
+#
+CREATE FUNCTION unescape(a VARBINARY(256)) RETURNS VARBINARY(256)
+BEGIN
+  # We need to do it in a way to avoid producing new escape sequences
+  # First, enclose all known escsape sequences to '{{xx}}'
+  # - Backslash not followed by a LIKE pattern characters _ and %
+  # - Double escapes
+  # This uses PCRE Branch Reset Groups: (?|(alt1)|(alt2)|(alt3)).
+  # So '\\1' in the last argument always means the match, no matter
+  # which alternative it came from.
+  SET a=REGEXP_REPLACE(a,'(?|(\\\\[^_%])|(\\x{27}\\x{27}))','{{\\1}}');
+  # Now unescape all enclosed standard escape sequences
+  SET a=REPLACE(a,'{{\\0}}', '\0');
+  SET a=REPLACE(a,'{{\\b}}', '\b');
+  SET a=REPLACE(a,'{{\\t}}', '\t');
+  SET a=REPLACE(a,'{{\\r}}', '\r');
+  SET a=REPLACE(a,'{{\\n}}', '\n');
+  SET a=REPLACE(a,'{{\\Z}}', '\Z');
+  SET a=REPLACE(a,'{{\\\'}}', '\'');
+  # Unescape double quotes
+  SET a=REPLACE(a,'{{\'\'}}', '\'');
+  # Unescape the rest: all other \x sequences mean just 'x'
+  SET a=REGEXP_REPLACE(a, '{{\\\\(.|\\R)}}', '\\1');
+  RETURN a;
+END//
+
+
+#
+# A function that checks what happened during unescaping.
+#
+# @param a - the value before unescaping
+# @param b - the value after unescaping
+#
+# The following return values are possible:
+# - SyntErr - b IS NULL, which means a syntax error happened in p1().
+# - Preserv - the value was not modified during unescaping.
+#             This is possible if 0x5C was treated as an mbtail,
+#             or if only LIKE escape sequences were found: '\_' and '\%'.
+# - Trivial - only 0x5C bytes were removed.
+# - Regular - the value was unescaped like a binary string.
+#             Some standard escape sequences were found.
+#             No special multi-byte handling happened.
+# - Special - Something else happened. Should not happen.
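+#
+# A few concrete examples of these classifications, as recorded in the big5
+# results added in this same commit (shown here only as an illustration):
+#   0x27   (a lone quote)        -> NULL    [SyntErr]       the literal is left unterminated
+#   0x5C61 ('\a')                -> 0x61    [Trivial]       only the backslash is dropped
+#   0x5C74 ('\t')                -> 0x09    [Regular]       a standard escape sequence
+#   0xA15C (big5 character A15C) -> 0xA15C  [Preserv][MB]   0x5C is an mbtail, so nothing is unescaped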
+#
+CREATE FUNCTION unescape_type(a VARBINARY(256),b VARBINARY(256)) RETURNS VARBINARY(256)
+BEGIN
+  RETURN CASE
+    WHEN b IS NULL THEN '[SyntErr]'
+    WHEN a=b THEN CASE
+      WHEN OCTET_LENGTH(a)=1 THEN '[Preserve]'
+      WHEN a RLIKE '\\\\[_%]' THEN '[Preserve][LIKE]'
+      WHEN a RLIKE '^[[:ascii:]]+$' THEN '[Preserve][ASCII]'
+      ELSE '[Preserv][MB]' END
+    WHEN REPLACE(a,0x5C,'')=b THEN '[Trivial]'
+    WHEN UNESCAPE(a)=b THEN '[Regular]'
+    ELSE '[Special]' END;
+END//
+
+
+#
+# Check what happened with wellformedness during unescaping
+# @param a - the value before unescaping
+# @param b - the value after unescaping
+#
+# Returned values:
+# [FIXED] - the value was malformed and became wellformed after unescaping
+# [BROKE] - the value was wellformed and became malformed after unescaping
+# [ILSEQ] - both values (before unescaping and after unescaping) are malformed
+# ''      - both values are wellformed
+#
+CREATE FUNCTION wellformedness(a VARBINARY(256), b VARBINARY(256))
+  RETURNS VARBINARY(256)
+BEGIN
+  RETURN CASE
+    WHEN b IS NULL THEN ''
+    WHEN NOT iswellformed(a) AND iswellformed(b) THEN '[FIXED]'
+    WHEN iswellformed(a) AND NOT iswellformed(b) THEN '[BROKE]'
+    WHEN NOT iswellformed(a) AND NOT iswellformed(b) THEN '[ILSEQ]'
+    ELSE ''
+  END;
+END//
+
+
+#
+# Check if the value could be generated by mysql_real_escape_string(),
+# or can only come from direct user input.
+#
+# @param a - the value before unescaping
+#
+# Returns:
+# [USER] - if the value could not be generated by mysql_real_escape_string()
+# ''     - if the value was possibly generated by mysql_real_escape_string()
+#
+#
+CREATE FUNCTION mysql_real_escape_string_generated(a VARBINARY(256))
+  RETURNS VARBINARY(256)
+BEGIN
+  DECLARE a1 BINARY(1) DEFAULT SUBSTR(a,1,1);
+  DECLARE a2 BINARY(1) DEFAULT SUBSTR(a,2,1);
+  DECLARE a3 BINARY(1) DEFAULT SUBSTR(a,3,1);
+  DECLARE a4 BINARY(1) DEFAULT SUBSTR(a,4,1);
+  DECLARE a2a4 BINARY(2) DEFAULT CONCAT(a2,a4);
+  RETURN CASE
+    WHEN (a1=0x5C) AND
+         (a3=0x5C) AND
+         (a2>0x7F) AND
+         (a4 NOT IN ('_','%','0','t','r','n','Z')) AND
+         iswellformed(a2a4) THEN '[USER]'
+    ELSE ''
+  END;
+END//
+
+DELIMITER ;//
+
+
+CREATE TABLE t1 (a VARBINARY(10),b VARBINARY(10));
+CALL p2();
+# Avoid "Invalid XXX character string" warnings;
+# we mark malformed strings in the output anyway.
+--disable_warnings
+# All records marked with '[BAD]' mean that the string was unescaped
+# in an unexpected way, which means there is a bug in UNESCAPE() above.
+SELECT HEX(a),HEX(b),
+  CONCAT(unescape_type(a,b),
+         wellformedness(a,b),
+         mysql_real_escape_string_generated(a),
+         IF(UNESCAPE(a)<>b,CONCAT('[BAD',HEX(UNESCAPE(a)),']'),'')) AS comment
+FROM t1 ORDER BY LENGTH(a),a;
+--enable_warnings
+DROP TABLE t1;
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+DROP FUNCTION unescape;
+DROP FUNCTION unescape_type;
+DROP FUNCTION wellformedness;
+DROP FUNCTION mysql_real_escape_string_generated;
+DROP FUNCTION iswellformed;
+DROP TABLE allbytes;
+
+--echo # End of ctype_unescape.inc
diff --git a/mysql-test/include/gis_debug.inc b/mysql-test/include/gis_debug.inc
new file mode 100644
index 00000000000..c81932ef90c
--- /dev/null
+++ b/mysql-test/include/gis_debug.inc
@@ -0,0 +1,161 @@
+#
+# This is a shared file included from t/gis-precise.test and t/gis-debug.test
+#
+# - gis-precise.test is executed both in debug and production builds
+#   and makes sure that the checked GIS functions return the expected results.
+#
+# - gis-debug.test is executed only in debug builds
+#   (and is skipped in production builds).
+# gis-debug.test activates tracing of the internal GIS routines. +# The trace log is printed to the client side warnings. +# So gis-debug.test makes sure not only that the correct results are returned, +# but also check *how* these results were generated - makes sure that +# the internal GIS routines went through the expected data and code flow paths. +# + +--disable_warnings +DROP TABLE IF EXISTS p1; +--enable_warnings + +DELIMITER |; +CREATE PROCEDURE p1(dist DOUBLE, geom TEXT) +BEGIN + DECLARE g GEOMETRY; + SET g=GeomFromText(geom); + SELECT geom AS `-----`; + SELECT dist, GeometryType(@buf:=ST_Buffer(g, dist)) AS `buffer`, ROUND(ST_AREA(@buf),2) AS buf_area; +END| +DELIMITER ;| + +--disable_query_log + +--echo # +--echo # Testing ST_BUFFER with positive distance +--echo # + +CALL p1(1, 'POINT(0 0))'); +CALL p1(1, 'LineString(0 1, 1 1))'); +CALL p1(1, 'LineString(9 9,8 1,1 5,0 0)'); +CALL p1(1, 'Polygon((2 2,2 8,8 8,8 2,2 2))'); +CALL p1(1, 'Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2))'); +CALL p1(1, 'Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0))'); +CALL p1(1, 'MultiPoint(9 9,8 1,1 5)'); +CALL p1(1, 'MultiLineString((0 0,2 2))'); +CALL p1(1, 'MultiLineString((0 0,2 2,0 4))'); +CALL p1(1, 'MultiLineString((0 0,2 2),(0 2,2 0))'); +CALL p1(1, 'MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14))'); +CALL p1(1, 'MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9)))'); +CALL p1(1, 'MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9)))'); +CALL p1(1, 'GeometryCollection(Point(0 0))'); +CALL p1(1, 'GeometryCollection(LineString(0 0, 2 2)))'); +CALL p1(1, 'GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2))))'); +CALL p1(1, 'GeometryCollection(MultiPoint(9 9,8 1,1 5))'); +CALL p1(1, 'GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1)))'); +CALL p1(1, 'GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6))))'); +CALL p1(1, 'GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2)))'); + + +--echo # +--echo # Testing ST_BUFFER with zero distance +--echo # + +CALL p1(0, 'POINT(0 0))'); +CALL p1(0, 'LineString(0 1, 1 1))'); +CALL p1(0, 'LineString(9 9,8 1,1 5,0 0)'); +CALL p1(0, 'Polygon((2 2,2 8,8 8,8 2,2 2))'); +CALL p1(0, 'Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2))'); +CALL p1(0, 'Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0))'); +CALL p1(0, 'MultiPoint(9 9,8 1,1 5)'); +CALL p1(0, 'MultiLineString((0 0,2 2))'); +CALL p1(0, 'MultiLineString((0 0,2 2,0 4))'); +CALL p1(0, 'MultiLineString((0 0,2 2),(0 2,2 0))'); +CALL p1(0, 'MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14))'); +CALL p1(0, 'MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9)))'); +CALL p1(0, 'MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9)))'); +CALL p1(0, 'GeometryCollection(Point(0 0))'); +CALL p1(0, 'GeometryCollection(LineString(0 0, 2 2)))'); +CALL p1(0, 'GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2))))'); +CALL p1(0, 'GeometryCollection(MultiPoint(9 9,8 1,1 5))'); +CALL p1(0, 'GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1)))'); +CALL p1(0, 'GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6))))'); +CALL p1(0, 'GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2)))'); + + +--echo # +--echo # Testing ST_BUFFER with negative distance +--echo # + +CALL p1(-1, 'POINT(0 0))'); +CALL p1(-1, 'LineString(0 1, 1 1))'); +CALL p1(-1, 'LineString(9 9,8 1,1 5,0 0)'); +CALL p1(-1, 'Polygon((2 2,2 8,8 8,8 2,2 
2))'); +# +# Wrong shape +# CALL p1(-1, 'Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2))'); +# Wrong shape +# CALL p1(-1, 'Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0))'); +# +CALL p1(-1, 'MultiPoint(9 9,8 1,1 5)'); +CALL p1(-1, 'MultiLineString((0 0,2 2))'); +CALL p1(-1, 'MultiLineString((0 0,2 2,0 4))'); +CALL p1(-1, 'MultiLineString((0 0,2 2),(0 2,2 0))'); +CALL p1(-1, 'MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14))'); +# +# Wrong shape +#CALL p1(-1, 'MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9)))'); +#CALL p1(-1, 'MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9)))'); +# +CALL p1(-1, 'GeometryCollection(Point(0 0))'); +CALL p1(-1, 'GeometryCollection(LineString(0 0, 2 2)))'); +CALL p1(-1, 'GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2))))'); +CALL p1(-1, 'GeometryCollection(MultiPoint(9 9,8 1,1 5))'); +CALL p1(-1, 'GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1)))'); +# +# Wrong shape +# CALL p1(-1, 'GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6))))'); +# +CALL p1(-1, 'GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2)))'); + + +--enable_query_log + +SELECT ST_CONTAINS( + GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), + GeomFromText('POINT(5 10)')); +SELECT AsText(ST_UNION( + GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), + GeomFromText('POINT(5 10)'))); + +DROP PROCEDURE p1; + +--echo # +--echo # Bug #13833019 ASSERTION `T1->RESULT_RANGE' FAILED IN GCALC_OPERATION_REDUCER::END_COUPLE +--echo # +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((0 0,9 4,3 3,0 0)),((2 2,2 2,8 8,2 3,2 2)))'), 3)); + +--echo # +--echo # Bug #13832749 HANDLE_FATAL_SIGNAL IN GCALC_FUNCTION::COUNT_INTERNAL +--echo # +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)), ((9 9,8 1,1 5,9 9)))'),1)); + + +--echo # +--echo # Bug#13358363 - ASSERTION: N > 0 && N < SINUSES_CALCULATED*2+1 | GET_N_SINCOS/ADD_EDGE_BUFFER +--echo # + +DO ST_BUFFER(ST_GEOMCOLLFROMTEXT('linestring(1 1,2 2)'),''); + +SELECT ST_WITHIN( + LINESTRINGFROMTEXT(' LINESTRING(3 8,9 2,3 8,3 3,7 6,4 7,4 7,8 1) '), + ST_BUFFER(MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)),((0 5,3 5,3 2,1 2,1 1,3 1,3 0,0 0,0 3,2 3,2 4,0 4,0 5))) '), + ST_NUMINTERIORRINGS(POLYGONFROMTEXT('POLYGON((3 5,2 4,2 5,3 5)) ')))); + +SELECT ST_DIMENSION(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((3 5,2 5,2 4,3 4,3 5)) '), + ST_NUMINTERIORRINGS(POLYGONFROMTEXT(' POLYGON((0 0,9 3,4 2,0 0))')))); + +SELECT ST_NUMINTERIORRINGS( + ST_ENVELOPE(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5))) '), + SRID(MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 2,4 2,1 2,2 4,2 2)) '))))); + +SELECT ASTEXT(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((9 9,5 2,4 5,9 9))'), + SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) ')))); diff --git a/mysql-test/include/gis_generic.inc b/mysql-test/include/gis_generic.inc index e4fee4448c1..59acf04dfa2 100644 --- a/mysql-test/include/gis_generic.inc +++ b/mysql-test/include/gis_generic.inc @@ -72,6 +72,19 @@ INSERT into gis_geometry SELECT * FROM gis_multi_line; INSERT into gis_geometry SELECT * FROM gis_multi_polygon; INSERT into gis_geometry SELECT * FROM gis_geometrycollection; +-- disable_query_log +-- disable_result_log +ANALYZE TABLE gis_point; +ANALYZE 
TABLE gis_line; +ANALYZE TABLE gis_polygon; +ANALYZE TABLE gis_multi_point; +ANALYZE TABLE gis_multi_line; +ANALYZE TABLE gis_multi_polygon; +ANALYZE TABLE gis_geometrycollection; +ANALYZE TABLE gis_geometry; +-- enable_result_log +-- enable_query_log + SELECT fid, AsText(g) FROM gis_point ORDER by fid; SELECT fid, AsText(g) FROM gis_line ORDER by fid; SELECT fid, AsText(g) FROM gis_polygon ORDER by fid; diff --git a/mysql-test/include/have_mysql_upgrade.inc b/mysql-test/include/have_mysql_upgrade.inc deleted file mode 100644 index 8f486176018..00000000000 --- a/mysql-test/include/have_mysql_upgrade.inc +++ /dev/null @@ -1,4 +0,0 @@ ---require r/have_mysql_upgrade.result ---disable_query_log -select LENGTH("$MYSQL_UPGRADE")>0 as have_mysql_upgrade; ---enable_query_log diff --git a/mysql-test/include/have_semisync_plugin.inc b/mysql-test/include/have_semisync_plugin.inc new file mode 100644 index 00000000000..8a1679de636 --- /dev/null +++ b/mysql-test/include/have_semisync_plugin.inc @@ -0,0 +1,15 @@ +# +# Check if server has support for loading plugins +# +if (`SELECT @@have_dynamic_loading != 'YES'`) { + --skip Requires dynamic loading +} + +# +# Check if the variable SEMISYNC_MASTER_SO is set +# +if (!$SEMISYNC_MASTER_SO) +{ + skip Need semisync plugins; +} + diff --git a/mysql-test/include/install_semisync.inc b/mysql-test/include/install_semisync.inc index 368b7b7cb4a..9cc6df2072a 100644 --- a/mysql-test/include/install_semisync.inc +++ b/mysql-test/include/install_semisync.inc @@ -14,7 +14,7 @@ if ($value == No such row) { SET sql_log_bin = 0; - eval INSTALL PLUGIN rpl_semi_sync_master SONAME '$SEMISYNC_MASTER_PLUGIN'; + install plugin rpl_semi_sync_master soname 'semisync_master'; SET GLOBAL rpl_semi_sync_master_enabled = 1; SET sql_log_bin = 1; } @@ -28,7 +28,7 @@ if ($value == No such row) if ($value == No such row) { SET sql_log_bin = 0; - eval INSTALL PLUGIN rpl_semi_sync_slave SONAME '$SEMISYNC_SLAVE_PLUGIN'; + install plugin rpl_semi_sync_slave soname 'semisync_slave'; SET GLOBAL rpl_semi_sync_slave_enabled = 1; SET sql_log_bin = 1; } diff --git a/mysql-test/include/mtr_warnings.sql b/mysql-test/include/mtr_warnings.sql index bc8d8044afb..fd59bf3c5f4 100644 --- a/mysql-test/include/mtr_warnings.sql +++ b/mysql-test/include/mtr_warnings.sql @@ -155,6 +155,7 @@ INSERT INTO global_suppressions VALUES ("InnoDB: Error: in ALTER TABLE `test`.`t[123]`"), ("InnoDB: Error: in RENAME TABLE table `test`.`t1`"), ("InnoDB: Error: table `test`.`t[123]` .*does not exist in the InnoDB internal"), + ("InnoDB: Warning: semaphore wait:"), /* BUG#32080 - Excessive warnings on Solaris: setrlimit could not diff --git a/mysql-test/include/mysql_upgrade_preparation.inc b/mysql-test/include/mysql_upgrade_preparation.inc index a3c81c4c1e7..03019ae29ff 100644 --- a/mysql-test/include/mysql_upgrade_preparation.inc +++ b/mysql-test/include/mysql_upgrade_preparation.inc @@ -2,13 +2,6 @@ # Can't run test of external client with embedded server -- source include/not_embedded.inc - -# Only run test if "mysql_upgrade" is found ---require r/have_mysql_upgrade.result ---disable_query_log -select LENGTH("$MYSQL_UPGRADE")>0 as have_mysql_upgrade; ---enable_query_log - # # Hack: # diff --git a/mysql-test/include/mysqlhotcopy.inc b/mysql-test/include/mysqlhotcopy.inc index 779ed7f36e0..f775d782b28 100644 --- a/mysql-test/include/mysqlhotcopy.inc +++ b/mysql-test/include/mysqlhotcopy.inc @@ -109,7 +109,7 @@ DROP DATABASE hotcopy_save; --replace_result $MYSQLD_DATADIR MYSQLD_DATADIR --list_files 
$MYSQLD_DATADIR/hotcopy_save --replace_result $MASTER_MYSOCK MASTER_MYSOCK ---error 9,11,2304 +--error 1 --exec $MYSQLHOTCOPY --quiet -S $MASTER_MYSOCK -u root hotcopy_test hotcopy_save --replace_result $MASTER_MYSOCK MASTER_MYSOCK --exec $MYSQLHOTCOPY --quiet --allowold -S $MASTER_MYSOCK -u root hotcopy_test hotcopy_save diff --git a/mysql-test/include/search_pattern_in_file.inc b/mysql-test/include/search_pattern_in_file.inc index c047b5bc499..0d09cdcd36e 100644 --- a/mysql-test/include/search_pattern_in_file.inc +++ b/mysql-test/include/search_pattern_in_file.inc @@ -10,6 +10,10 @@ # The environment variables SEARCH_FILE and SEARCH_PATTERN must be set # before sourcing this routine. # +# Optionally, SEARCH_RANGE can be set to the max number of bytes of the file +# to search. If negative, it will search that many bytes at the end of the +# file. The default is to search only the first 50000 bytes of the file. +# # In case of # - SEARCH_FILE and/or SEARCH_PATTERN is not set # - SEARCH_FILE cannot be opened @@ -38,6 +42,7 @@ # --error 0,1 # --remove_file $error_log # let SEARCH_FILE= $error_log; +# let SEARCH_RANGE= -50000; # # Stop the server # let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect; # --exec echo "wait" > $restart_file @@ -57,8 +62,18 @@ perl; use strict; my $search_file= $ENV{'SEARCH_FILE'} or die "SEARCH_FILE not set"; my $search_pattern= $ENV{'SEARCH_PATTERN'} or die "SEARCH_PATTERN not set"; + my $search_range= $ENV{'SEARCH_RANGE'}; + my $file_content; + $search_range= 50000 unless $search_range =~ /-?[0-9]+/; open(FILE, "$search_file") or die("Unable to open '$search_file': $!\n"); - read(FILE, my $file_content, 50000, 0); + if ($search_range >= 0) { + read(FILE, $file_content, $search_range, 0); + } else { + my $size= -s $search_file; + $search_range = -$size if $size > -$search_range; + seek(FILE, $search_range, 2); + read(FILE, $file_content, -$search_range, 0); + } close(FILE); if ( not $file_content =~ m{$search_pattern} ) { die("# ERROR: The file '$search_file' does not contain the expected pattern $search_pattern\n->$file_content<-\n"); diff --git a/mysql-test/include/show_events.inc b/mysql-test/include/show_events.inc index f7b0931c812..9a39ec67d0e 100644 --- a/mysql-test/include/show_events.inc +++ b/mysql-test/include/show_events.inc @@ -83,7 +83,7 @@ let $script= s{block_len=[0-9]+}{block_len=#}; s{Server ver:.*DOLLAR}{SERVER_VERSION, BINLOG_VERSION}; s{GTID [0-9]+-[0-9]+-[0-9]+}{GTID #-#-#}; - s{\[[0-9]-[0-9]-[0-9]+\]}{[#-#-#]}; + s{\[([0-9]-[0-9]-[0-9]+,?)+\]}{[#-#-#]}; s{cid=[0-9]+}{cid=#}; s{SQL_LOAD-[a-z,0-9,-]*.[a-z]*}{SQL_LOAD-<SERVER UUID>-<MASTER server-id>-<file-id>.<extension>}; s{rand_seed1=[0-9]*,rand_seed2=[0-9]*}{rand_seed1=<seed 1>,rand_seed2=<seed 2>}; diff --git a/mysql-test/include/stop_dump_threads.inc b/mysql-test/include/stop_dump_threads.inc new file mode 100644 index 00000000000..ae33c963d9a --- /dev/null +++ b/mysql-test/include/stop_dump_threads.inc @@ -0,0 +1,32 @@ +# ==== Purpose ==== +# +# Stop all dump threads on the server of the current connection. 
+# +# ==== Usage ==== +# +# --source include/stop_dump_threads.inc + +--let $include_filename= stop_dump_threads.inc +--source include/begin_include_file.inc + + +--let $_sdt_show_rpl_debug_info_old= $show_rpl_debug_info +--let $show_rpl_debug_info= 1 +--disable_query_log +--disable_result_log + +--let $_sdt_dump_thread_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE COMMAND = 'Binlog dump'` + +while ($_sdt_dump_thread_id != '') +{ + eval KILL $_sdt_dump_thread_id; + --let $wait_condition= SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE ID = $_sdt_dump_thread_id + --source include/wait_condition.inc + + --let $_sdt_dump_thread_id= `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE COMMAND = 'Binlog dump'` +} + +--let $show_rpl_debug_info= $_sdt_show_rpl_debug_info_old + +--let $include_filename= stop_dump_threads.inc +--source include/end_include_file.inc diff --git a/mysql-test/include/uninstall_semisync.inc b/mysql-test/include/uninstall_semisync.inc index 11668d1db97..0a4c55fa4f2 100644 --- a/mysql-test/include/uninstall_semisync.inc +++ b/mysql-test/include/uninstall_semisync.inc @@ -13,6 +13,11 @@ UNINSTALL PLUGIN rpl_semi_sync_slave; --connection master +# After BUG#17638477 fix, uninstallation of rpl_semi_sync_master +# is not allowed when there are semi sync slaves. Hence kill +# all dump threads before uninstalling it. +SET GLOBAL rpl_semi_sync_master_enabled = OFF; +--source include/stop_dump_threads.inc UNINSTALL PLUGIN rpl_semi_sync_master; --enable_warnings diff --git a/mysql-test/mtr.out-of-source b/mysql-test/mtr.out-of-source index c2809ede136..51713517ae1 100644 --- a/mysql-test/mtr.out-of-source +++ b/mysql-test/mtr.out-of-source @@ -1,5 +1,5 @@ #!/usr/bin/perl # Call mtr in out-of-source build -$ENV{MTR_BINDIR} = "@CMAKE_BINARY_DIR@"; -chdir("@CMAKE_SOURCE_DIR@/mysql-test"); -exit(system($^X, "@CMAKE_SOURCE_DIR@/mysql-test/mysql-test-run.pl", @ARGV) >> 8);
\ No newline at end of file +$ENV{MTR_BINDIR} = '@CMAKE_BINARY_DIR@'; +chdir('@CMAKE_SOURCE_DIR@/mysql-test'); +exit(system($^X, '@CMAKE_SOURCE_DIR@/mysql-test/mysql-test-run.pl', @ARGV) >> 8); diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 5e03bfea15b..e9709d875e6 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -5265,7 +5265,7 @@ sub get_extra_opts { sub stop_servers($$) { my (@servers)= @_; - mtr_report("Restarting ", started(@servers)); + mtr_report("Stopping ", started(@servers)); My::SafeProcess::shutdown($opt_shutdown_timeout, started(@servers)); diff --git a/mysql-test/r/create_or_replace.result b/mysql-test/r/create_or_replace.result index e6589807c2b..a2f06c38cb5 100644 --- a/mysql-test/r/create_or_replace.result +++ b/mysql-test/r/create_or_replace.result @@ -427,4 +427,16 @@ THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME # MDL_SHARED_READ MDL_EXPLICIT Table metadata lock test t2 drop table t1; unlock tables; +# +# MDEV-6560 +# Assertion `! is_set() ' failed in Diagnostics_area::set_ok_status +# +CREATE TABLE t1 (col_int_nokey INT) ENGINE=InnoDB; +CREATE OR REPLACE TEMPORARY TABLE tmp LIKE t1; +LOCK TABLE t1 WRITE; +CREATE OR REPLACE TABLE t1 LIKE tmp; +KILL QUERY con_id; +CREATE OR REPLACE TABLE t1 (a int); +KILL QUERY con_id; +drop table t1; DROP TABLE t2; diff --git a/mysql-test/r/ctype_big5.result b/mysql-test/r/ctype_big5.result index 48bc1dab3a7..175bbf0f09f 100644 --- a/mysql-test/r/ctype_big5.result +++ b/mysql-test/r/ctype_big5.result @@ -1307,3 +1307,3340 @@ A1A1A1A1A1A120202020202020202020202020202020202020 # # End of 5.6 tests # +# +# Start of 10.0 tests +# +# Start of ctype_unescape.inc +SET @query=_binary'SELECT CHARSET(\'test\'),@@character_set_client,@@character_set_connection'; +PREPARE stmt FROM @query; +EXECUTE stmt; +CHARSET('test') @@character_set_client @@character_set_connection +big5 big5 big5 +DEALLOCATE PREPARE stmt; +CREATE TABLE allbytes (a VARBINARY(10)); +# Using selected bytes combinations +CREATE TABLE halfs (a INT); +INSERT INTO halfs VALUES (0x00),(0x01),(0x02),(0x03),(0x04),(0x05),(0x06),(0x07); +INSERT INTO halfs VALUES (0x08),(0x09),(0x0A),(0x0B),(0x0C),(0x0D),(0x0E),(0x0F); +CREATE TEMPORARY TABLE bytes (a BINARY(1), KEY(a)) ENGINE=MyISAM; +INSERT INTO bytes SELECT CHAR((t1.a << 4) | t2.a USING BINARY) FROM halfs t1, halfs t2; +DROP TABLE halfs; +CREATE TABLE selected_bytes (a VARBINARY(10)); +INSERT INTO selected_bytes (a) VALUES ('\0'),('\b'),('\t'),('\r'),('\n'),('\Z'); +INSERT INTO selected_bytes (a) VALUES ('0'),('b'),('t'),('r'),('n'),('Z'); +INSERT INTO selected_bytes (a) VALUES ('\\'),('_'),('%'),(0x22),(0x27); +INSERT INTO selected_bytes (a) VALUES ('a'); +INSERT INTO selected_bytes (a) VALUES +(0x3F), # 7bit +(0x40), # 7bit mbtail +(0x7E), # 7bit mbtail nonascii-8bit +(0x7F), # 7bit nonascii-8bit +(0x80), # mbtail bad-mb +(0x81), # mbhead mbtail +(0x9F), # mbhead mbtail bad-mb +(0xA0), # mbhead mbtail bad-mb +(0xA1), # mbhead mbtail nonascii-8bit +(0xE0), # mbhead mbtai +(0xEF), # mbhead mbtail +(0xF9), # mbhead mbtail +(0xFA), # mbhead mbtail bad-mb +(0xFC), # mbhead mbtail bad-mb +(0xFD), # mbhead mbtail bad-mb +(0xFE), # mbhead mbtial bad-mb +(0xFF); +INSERT INTO allbytes (a) SELECT a FROM bytes; +INSERT INTO allbytes (a) SELECT CONCAT(t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,0x5C,t2.a) 
FROM selected_bytes t1,selected_bytes t2; +DROP TABLE selected_bytes; +DELETE FROM allbytes WHERE +OCTET_LENGTH(a)>1 AND +LOCATE(0x5C,a)=0 AND +a NOT LIKE '%\'%' AND + a NOT LIKE '%"%'; +CREATE PROCEDURE p1(val VARBINARY(10)) +BEGIN +DECLARE EXIT HANDLER FOR SQLSTATE '42000' INSERT INTO t1 (a,b) VALUES(val,NULL); +SET @query=CONCAT(_binary"INSERT INTO t1 (a,b) VALUES (0x",HEX(val),",'",val,"')"); +PREPARE stmt FROM @query; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; +END// +CREATE PROCEDURE p2() +BEGIN +DECLARE val VARBINARY(10); +DECLARE done INT DEFAULT FALSE; +DECLARE stmt CURSOR FOR SELECT a FROM allbytes; +DECLARE CONTINUE HANDLER FOR NOT FOUND SET done=TRUE; +OPEN stmt; +read_loop1: LOOP +FETCH stmt INTO val; +IF done THEN +LEAVE read_loop1; +END IF; +CALL p1(val); +END LOOP; +CLOSE stmt; +END// +CREATE FUNCTION iswellformed(a VARBINARY(256)) RETURNS INT RETURN a=BINARY CONVERT(a USING big5);// +CREATE FUNCTION unescape(a VARBINARY(256)) RETURNS VARBINARY(256) +BEGIN +# We need to do it in a way to avoid producing new escape sequences +# First, enclose all known escsape sequences to '{{xx}}' + # - Backslash not followed by a LIKE pattern characters _ and % +# - Double escapes +# This uses PCRE Branch Reset Groups: (?|(alt1)|(alt2)|(alt3)). +# So '\\1' in the last argument always means the match, no matter +# which alternative it came from. +SET a=REGEXP_REPLACE(a,'(?|(\\\\[^_%])|(\\x{27}\\x{27}))','{{\\1}}'); +# Now unescape all enclosed standard escape sequences +SET a=REPLACE(a,'{{\\0}}', '\0'); +SET a=REPLACE(a,'{{\\b}}', '\b'); +SET a=REPLACE(a,'{{\\t}}', '\t'); +SET a=REPLACE(a,'{{\\r}}', '\r'); +SET a=REPLACE(a,'{{\\n}}', '\n'); +SET a=REPLACE(a,'{{\\Z}}', '\Z'); +SET a=REPLACE(a,'{{\\\'}}', '\''); +# Unescape double quotes +SET a=REPLACE(a,'{{\'\'}}', '\''); + # Unescape the rest: all other \x sequences mean just 'x' + SET a=REGEXP_REPLACE(a, '{{\\\\(.|\\R)}}', '\\1'); + RETURN a; +END// +CREATE FUNCTION unescape_type(a VARBINARY(256),b VARBINARY(256)) RETURNS VARBINARY(256) +BEGIN +RETURN CASE +WHEN b IS NULL THEN '[SyntErr]' + WHEN a=b THEN CASE +WHEN OCTET_LENGTH(a)=1 THEN '[Preserve]' + WHEN a RLIKE '\\\\[_%]' THEN '[Preserve][LIKE]' + WHEN a RLIKE '^[[:ascii:]]+$' THEN '[Preserve][ASCII]' + ELSE '[Preserv][MB]' END +WHEN REPLACE(a,0x5C,'')=b THEN '[Trivial]' + WHEN UNESCAPE(a)=b THEN '[Regular]' + ELSE '[Special]' END; +END// +CREATE FUNCTION wellformedness(a VARBINARY(256), b VARBINARY(256)) +RETURNS VARBINARY(256) +BEGIN +RETURN CASE +WHEN b IS NULL THEN '' + WHEN NOT iswellformed(a) AND iswellformed(b) THEN '[FIXED]' + WHEN iswellformed(a) AND NOT iswellformed(b) THEN '[BROKE]' + WHEN NOT iswellformed(a) AND NOT iswellformed(b) THEN '[ILSEQ]' + ELSE '' + END; +END// +CREATE FUNCTION mysql_real_escape_string_generated(a VARBINARY(256)) +RETURNS VARBINARY(256) +BEGIN +DECLARE a1 BINARY(1) DEFAULT SUBSTR(a,1,1); +DECLARE a2 BINARY(1) DEFAULT SUBSTR(a,2,1); +DECLARE a3 BINARY(1) DEFAULT SUBSTR(a,3,1); +DECLARE a4 BINARY(1) DEFAULT SUBSTR(a,4,1); +DECLARE a2a4 BINARY(2) DEFAULT CONCAT(a2,a4); +RETURN CASE +WHEN (a1=0x5C) AND +(a3=0x5C) AND +(a2>0x7F) AND +(a4 NOT IN ('_','%','0','t','r','n','Z')) AND +iswellformed(a2a4) THEN '[USER]' + ELSE '' + END; +END// +CREATE TABLE t1 (a VARBINARY(10),b VARBINARY(10)); +CALL p2(); +SELECT HEX(a),HEX(b), +CONCAT(unescape_type(a,b), +wellformedness(a,b), +mysql_real_escape_string_generated(a), +IF(UNESCAPE(a)<>b,CONCAT('[BAD',HEX(UNESCAPE(a)),']'),'')) AS comment +FROM t1 ORDER BY LENGTH(a),a; +HEX(a) HEX(b) comment +00 00 [Preserve] +01 
01 [Preserve] +02 02 [Preserve] +03 03 [Preserve] +04 04 [Preserve] +05 05 [Preserve] +06 06 [Preserve] +07 07 [Preserve] +08 08 [Preserve] +09 09 [Preserve] +0A 0A [Preserve] +0B 0B [Preserve] +0C 0C [Preserve] +0D 0D [Preserve] +0E 0E [Preserve] +0F 0F [Preserve] +10 10 [Preserve] +11 11 [Preserve] +12 12 [Preserve] +13 13 [Preserve] +14 14 [Preserve] +15 15 [Preserve] +16 16 [Preserve] +17 17 [Preserve] +18 18 [Preserve] +19 19 [Preserve] +1A 1A [Preserve] +1B 1B [Preserve] +1C 1C [Preserve] +1D 1D [Preserve] +1E 1E [Preserve] +1F 1F [Preserve] +20 20 [Preserve] +21 21 [Preserve] +22 22 [Preserve] +23 23 [Preserve] +24 24 [Preserve] +25 25 [Preserve] +26 26 [Preserve] +27 NULL [SyntErr] +28 28 [Preserve] +29 29 [Preserve] +2A 2A [Preserve] +2B 2B [Preserve] +2C 2C [Preserve] +2D 2D [Preserve] +2E 2E [Preserve] +2F 2F [Preserve] +30 30 [Preserve] +31 31 [Preserve] +32 32 [Preserve] +33 33 [Preserve] +34 34 [Preserve] +35 35 [Preserve] +36 36 [Preserve] +37 37 [Preserve] +38 38 [Preserve] +39 39 [Preserve] +3A 3A [Preserve] +3B 3B [Preserve] +3C 3C [Preserve] +3D 3D [Preserve] +3E 3E [Preserve] +3F 3F [Preserve] +40 40 [Preserve] +41 41 [Preserve] +42 42 [Preserve] +43 43 [Preserve] +44 44 [Preserve] +45 45 [Preserve] +46 46 [Preserve] +47 47 [Preserve] +48 48 [Preserve] +49 49 [Preserve] +4A 4A [Preserve] +4B 4B [Preserve] +4C 4C [Preserve] +4D 4D [Preserve] +4E 4E [Preserve] +4F 4F [Preserve] +50 50 [Preserve] +51 51 [Preserve] +52 52 [Preserve] +53 53 [Preserve] +54 54 [Preserve] +55 55 [Preserve] +56 56 [Preserve] +57 57 [Preserve] +58 58 [Preserve] +59 59 [Preserve] +5A 5A [Preserve] +5B 5B [Preserve] +5C NULL [SyntErr] +5D 5D [Preserve] +5E 5E [Preserve] +5F 5F [Preserve] +60 60 [Preserve] +61 61 [Preserve] +62 62 [Preserve] +63 63 [Preserve] +64 64 [Preserve] +65 65 [Preserve] +66 66 [Preserve] +67 67 [Preserve] +68 68 [Preserve] +69 69 [Preserve] +6A 6A [Preserve] +6B 6B [Preserve] +6C 6C [Preserve] +6D 6D [Preserve] +6E 6E [Preserve] +6F 6F [Preserve] +70 70 [Preserve] +71 71 [Preserve] +72 72 [Preserve] +73 73 [Preserve] +74 74 [Preserve] +75 75 [Preserve] +76 76 [Preserve] +77 77 [Preserve] +78 78 [Preserve] +79 79 [Preserve] +7A 7A [Preserve] +7B 7B [Preserve] +7C 7C [Preserve] +7D 7D [Preserve] +7E 7E [Preserve] +7F 7F [Preserve] +80 80 [Preserve][ILSEQ] +81 81 [Preserve][ILSEQ] +82 82 [Preserve][ILSEQ] +83 83 [Preserve][ILSEQ] +84 84 [Preserve][ILSEQ] +85 85 [Preserve][ILSEQ] +86 86 [Preserve][ILSEQ] +87 87 [Preserve][ILSEQ] +88 88 [Preserve][ILSEQ] +89 89 [Preserve][ILSEQ] +8A 8A [Preserve][ILSEQ] +8B 8B [Preserve][ILSEQ] +8C 8C [Preserve][ILSEQ] +8D 8D [Preserve][ILSEQ] +8E 8E [Preserve][ILSEQ] +8F 8F [Preserve][ILSEQ] +90 90 [Preserve][ILSEQ] +91 91 [Preserve][ILSEQ] +92 92 [Preserve][ILSEQ] +93 93 [Preserve][ILSEQ] +94 94 [Preserve][ILSEQ] +95 95 [Preserve][ILSEQ] +96 96 [Preserve][ILSEQ] +97 97 [Preserve][ILSEQ] +98 98 [Preserve][ILSEQ] +99 99 [Preserve][ILSEQ] +9A 9A [Preserve][ILSEQ] +9B 9B [Preserve][ILSEQ] +9C 9C [Preserve][ILSEQ] +9D 9D [Preserve][ILSEQ] +9E 9E [Preserve][ILSEQ] +9F 9F [Preserve][ILSEQ] +A0 A0 [Preserve][ILSEQ] +A1 A1 [Preserve][ILSEQ] +A2 A2 [Preserve][ILSEQ] +A3 A3 [Preserve][ILSEQ] +A4 A4 [Preserve][ILSEQ] +A5 A5 [Preserve][ILSEQ] +A6 A6 [Preserve][ILSEQ] +A7 A7 [Preserve][ILSEQ] +A8 A8 [Preserve][ILSEQ] +A9 A9 [Preserve][ILSEQ] +AA AA [Preserve][ILSEQ] +AB AB [Preserve][ILSEQ] +AC AC [Preserve][ILSEQ] +AD AD [Preserve][ILSEQ] +AE AE [Preserve][ILSEQ] +AF AF [Preserve][ILSEQ] +B0 B0 [Preserve][ILSEQ] +B1 B1 [Preserve][ILSEQ] +B2 B2 
[Preserve][ILSEQ] +B3 B3 [Preserve][ILSEQ] +B4 B4 [Preserve][ILSEQ] +B5 B5 [Preserve][ILSEQ] +B6 B6 [Preserve][ILSEQ] +B7 B7 [Preserve][ILSEQ] +B8 B8 [Preserve][ILSEQ] +B9 B9 [Preserve][ILSEQ] +BA BA [Preserve][ILSEQ] +BB BB [Preserve][ILSEQ] +BC BC [Preserve][ILSEQ] +BD BD [Preserve][ILSEQ] +BE BE [Preserve][ILSEQ] +BF BF [Preserve][ILSEQ] +C0 C0 [Preserve][ILSEQ] +C1 C1 [Preserve][ILSEQ] +C2 C2 [Preserve][ILSEQ] +C3 C3 [Preserve][ILSEQ] +C4 C4 [Preserve][ILSEQ] +C5 C5 [Preserve][ILSEQ] +C6 C6 [Preserve][ILSEQ] +C7 C7 [Preserve][ILSEQ] +C8 C8 [Preserve][ILSEQ] +C9 C9 [Preserve][ILSEQ] +CA CA [Preserve][ILSEQ] +CB CB [Preserve][ILSEQ] +CC CC [Preserve][ILSEQ] +CD CD [Preserve][ILSEQ] +CE CE [Preserve][ILSEQ] +CF CF [Preserve][ILSEQ] +D0 D0 [Preserve][ILSEQ] +D1 D1 [Preserve][ILSEQ] +D2 D2 [Preserve][ILSEQ] +D3 D3 [Preserve][ILSEQ] +D4 D4 [Preserve][ILSEQ] +D5 D5 [Preserve][ILSEQ] +D6 D6 [Preserve][ILSEQ] +D7 D7 [Preserve][ILSEQ] +D8 D8 [Preserve][ILSEQ] +D9 D9 [Preserve][ILSEQ] +DA DA [Preserve][ILSEQ] +DB DB [Preserve][ILSEQ] +DC DC [Preserve][ILSEQ] +DD DD [Preserve][ILSEQ] +DE DE [Preserve][ILSEQ] +DF DF [Preserve][ILSEQ] +E0 E0 [Preserve][ILSEQ] +E1 E1 [Preserve][ILSEQ] +E2 E2 [Preserve][ILSEQ] +E3 E3 [Preserve][ILSEQ] +E4 E4 [Preserve][ILSEQ] +E5 E5 [Preserve][ILSEQ] +E6 E6 [Preserve][ILSEQ] +E7 E7 [Preserve][ILSEQ] +E8 E8 [Preserve][ILSEQ] +E9 E9 [Preserve][ILSEQ] +EA EA [Preserve][ILSEQ] +EB EB [Preserve][ILSEQ] +EC EC [Preserve][ILSEQ] +ED ED [Preserve][ILSEQ] +EE EE [Preserve][ILSEQ] +EF EF [Preserve][ILSEQ] +F0 F0 [Preserve][ILSEQ] +F1 F1 [Preserve][ILSEQ] +F2 F2 [Preserve][ILSEQ] +F3 F3 [Preserve][ILSEQ] +F4 F4 [Preserve][ILSEQ] +F5 F5 [Preserve][ILSEQ] +F6 F6 [Preserve][ILSEQ] +F7 F7 [Preserve][ILSEQ] +F8 F8 [Preserve][ILSEQ] +F9 F9 [Preserve][ILSEQ] +FA FA [Preserve][ILSEQ] +FB FB [Preserve][ILSEQ] +FC FC [Preserve][ILSEQ] +FD FD [Preserve][ILSEQ] +FE FE [Preserve][ILSEQ] +FF FF [Preserve][ILSEQ] +0022 0022 [Preserve][ASCII] +0027 NULL [SyntErr] +005C NULL [SyntErr] +0822 0822 [Preserve][ASCII] +0827 NULL [SyntErr] +085C NULL [SyntErr] +0922 0922 [Preserve][ASCII] +0927 NULL [SyntErr] +095C NULL [SyntErr] +0A22 0A22 [Preserve][ASCII] +0A27 NULL [SyntErr] +0A5C NULL [SyntErr] +0D22 0D22 [Preserve][ASCII] +0D27 NULL [SyntErr] +0D5C NULL [SyntErr] +1A22 1A22 [Preserve][ASCII] +1A27 NULL [SyntErr] +1A5C NULL [SyntErr] +2200 2200 [Preserve][ASCII] +2208 2208 [Preserve][ASCII] +2209 2209 [Preserve][ASCII] +220A 220A [Preserve][ASCII] +220D 220D [Preserve][ASCII] +221A 221A [Preserve][ASCII] +2222 2222 [Preserve][ASCII] +2225 2225 [Preserve][ASCII] +2227 NULL [SyntErr] +2230 2230 [Preserve][ASCII] +223F 223F [Preserve][ASCII] +2240 2240 [Preserve][ASCII] +225A 225A [Preserve][ASCII] +225C NULL [SyntErr] +225F 225F [Preserve][ASCII] +2261 2261 [Preserve][ASCII] +2262 2262 [Preserve][ASCII] +226E 226E [Preserve][ASCII] +2272 2272 [Preserve][ASCII] +2274 2274 [Preserve][ASCII] +227E 227E [Preserve][ASCII] +227F 227F [Preserve][ASCII] +2280 2280 [Preserv][MB][ILSEQ] +2281 2281 [Preserv][MB][ILSEQ] +229F 229F [Preserv][MB][ILSEQ] +22A0 22A0 [Preserv][MB][ILSEQ] +22A1 22A1 [Preserv][MB][ILSEQ] +22E0 22E0 [Preserv][MB][ILSEQ] +22EF 22EF [Preserv][MB][ILSEQ] +22F9 22F9 [Preserv][MB][ILSEQ] +22FA 22FA [Preserv][MB][ILSEQ] +22FC 22FC [Preserv][MB][ILSEQ] +22FD 22FD [Preserv][MB][ILSEQ] +22FE 22FE [Preserv][MB][ILSEQ] +22FF 22FF [Preserv][MB][ILSEQ] +2522 2522 [Preserve][ASCII] +2527 NULL [SyntErr] +255C NULL [SyntErr] +2700 NULL [SyntErr] +2708 NULL [SyntErr] +2709 NULL [SyntErr] +270A NULL 
[SyntErr] +270D NULL [SyntErr] +271A NULL [SyntErr] +2722 NULL [SyntErr] +2725 NULL [SyntErr] +2727 27 [Regular] +2730 NULL [SyntErr] +273F NULL [SyntErr] +2740 NULL [SyntErr] +275A NULL [SyntErr] +275C NULL [SyntErr] +275F NULL [SyntErr] +2761 NULL [SyntErr] +2762 NULL [SyntErr] +276E NULL [SyntErr] +2772 NULL [SyntErr] +2774 NULL [SyntErr] +277E NULL [SyntErr] +277F NULL [SyntErr] +2780 NULL [SyntErr] +2781 NULL [SyntErr] +279F NULL [SyntErr] +27A0 NULL [SyntErr] +27A1 NULL [SyntErr] +27E0 NULL [SyntErr] +27EF NULL [SyntErr] +27F9 NULL [SyntErr] +27FA NULL [SyntErr] +27FC NULL [SyntErr] +27FD NULL [SyntErr] +27FE NULL [SyntErr] +27FF NULL [SyntErr] +3022 3022 [Preserve][ASCII] +3027 NULL [SyntErr] +305C NULL [SyntErr] +3F22 3F22 [Preserve][ASCII] +3F27 NULL [SyntErr] +3F5C NULL [SyntErr] +4022 4022 [Preserve][ASCII] +4027 NULL [SyntErr] +405C NULL [SyntErr] +5A22 5A22 [Preserve][ASCII] +5A27 NULL [SyntErr] +5A5C NULL [SyntErr] +5C00 00 [Trivial] +5C08 08 [Trivial] +5C09 09 [Trivial] +5C0A 0A [Trivial] +5C0D 0D [Trivial] +5C1A 1A [Trivial] +5C22 22 [Trivial] +5C25 5C25 [Preserve][LIKE] +5C27 27 [Trivial] +5C30 00 [Regular] +5C3F 3F [Trivial] +5C40 40 [Trivial] +5C5A 1A [Regular] +5C5C 5C [Regular] +5C5F 5C5F [Preserve][LIKE] +5C61 61 [Trivial] +5C62 08 [Regular] +5C6E 0A [Regular] +5C72 0D [Regular] +5C74 09 [Regular] +5C7E 7E [Trivial] +5C7F 7F [Trivial] +5C80 80 [Trivial][ILSEQ] +5C81 81 [Trivial][ILSEQ] +5C9F 9F [Trivial][ILSEQ] +5CA0 A0 [Trivial][ILSEQ] +5CA1 A1 [Trivial][ILSEQ] +5CE0 E0 [Trivial][ILSEQ] +5CEF EF [Trivial][ILSEQ] +5CF9 F9 [Trivial][ILSEQ] +5CFA FA [Trivial][ILSEQ] +5CFC FC [Trivial][ILSEQ] +5CFD FD [Trivial][ILSEQ] +5CFE FE [Trivial][ILSEQ] +5CFF FF [Trivial][ILSEQ] +5F22 5F22 [Preserve][ASCII] +5F27 NULL [SyntErr] +5F5C NULL [SyntErr] +6122 6122 [Preserve][ASCII] +6127 NULL [SyntErr] +615C NULL [SyntErr] +6222 6222 [Preserve][ASCII] +6227 NULL [SyntErr] +625C NULL [SyntErr] +6E22 6E22 [Preserve][ASCII] +6E27 NULL [SyntErr] +6E5C NULL [SyntErr] +7222 7222 [Preserve][ASCII] +7227 NULL [SyntErr] +725C NULL [SyntErr] +7422 7422 [Preserve][ASCII] +7427 NULL [SyntErr] +745C NULL [SyntErr] +7E22 7E22 [Preserve][ASCII] +7E27 NULL [SyntErr] +7E5C NULL [SyntErr] +7F22 7F22 [Preserve][ASCII] +7F27 NULL [SyntErr] +7F5C NULL [SyntErr] +8022 8022 [Preserv][MB][ILSEQ] +8027 NULL [SyntErr] +805C NULL [SyntErr] +8122 8122 [Preserv][MB][ILSEQ] +8127 NULL [SyntErr] +815C NULL [SyntErr] +9F22 9F22 [Preserv][MB][ILSEQ] +9F27 NULL [SyntErr] +9F5C NULL [SyntErr] +A022 A022 [Preserv][MB][ILSEQ] +A027 NULL [SyntErr] +A05C NULL [SyntErr] +A122 A122 [Preserv][MB][ILSEQ] +A127 NULL [SyntErr] +A15C A15C [Preserv][MB] +E022 E022 [Preserv][MB][ILSEQ] +E027 NULL [SyntErr] +E05C E05C [Preserv][MB] +EF22 EF22 [Preserv][MB][ILSEQ] +EF27 NULL [SyntErr] +EF5C EF5C [Preserv][MB] +F922 F922 [Preserv][MB][ILSEQ] +F927 NULL [SyntErr] +F95C F95C [Preserv][MB] +FA22 FA22 [Preserv][MB][ILSEQ] +FA27 NULL [SyntErr] +FA5C NULL [SyntErr] +FC22 FC22 [Preserv][MB][ILSEQ] +FC27 NULL [SyntErr] +FC5C NULL [SyntErr] +FD22 FD22 [Preserv][MB][ILSEQ] +FD27 NULL [SyntErr] +FD5C NULL [SyntErr] +FE22 FE22 [Preserv][MB][ILSEQ] +FE27 NULL [SyntErr] +FE5C NULL [SyntErr] +FF22 FF22 [Preserv][MB][ILSEQ] +FF27 NULL [SyntErr] +FF5C NULL [SyntErr] +5C0000 0000 [Trivial] +5C0008 0008 [Trivial] +5C0009 0009 [Trivial] +5C000A 000A [Trivial] +5C000D 000D [Trivial] +5C001A 001A [Trivial] +5C0022 0022 [Trivial] +5C0025 0025 [Trivial] +5C0027 NULL [SyntErr] +5C0030 0030 [Trivial] +5C003F 003F [Trivial] +5C0040 0040 [Trivial] +5C005A 005A 
[Trivial] +5C005C NULL [SyntErr] +5C005F 005F [Trivial] +5C0061 0061 [Trivial] +5C0062 0062 [Trivial] +5C006E 006E [Trivial] +5C0072 0072 [Trivial] +5C0074 0074 [Trivial] +5C007E 007E [Trivial] +5C007F 007F [Trivial] +5C0080 0080 [Trivial][ILSEQ] +5C0081 0081 [Trivial][ILSEQ] +5C009F 009F [Trivial][ILSEQ] +5C00A0 00A0 [Trivial][ILSEQ] +5C00A1 00A1 [Trivial][ILSEQ] +5C00E0 00E0 [Trivial][ILSEQ] +5C00EF 00EF [Trivial][ILSEQ] +5C00F9 00F9 [Trivial][ILSEQ] +5C00FA 00FA [Trivial][ILSEQ] +5C00FC 00FC [Trivial][ILSEQ] +5C00FD 00FD [Trivial][ILSEQ] +5C00FE 00FE [Trivial][ILSEQ] +5C00FF 00FF [Trivial][ILSEQ] +5C0800 0800 [Trivial] +5C0808 0808 [Trivial] +5C0809 0809 [Trivial] +5C080A 080A [Trivial] +5C080D 080D [Trivial] +5C081A 081A [Trivial] +5C0822 0822 [Trivial] +5C0825 0825 [Trivial] +5C0827 NULL [SyntErr] +5C0830 0830 [Trivial] +5C083F 083F [Trivial] +5C0840 0840 [Trivial] +5C085A 085A [Trivial] +5C085C NULL [SyntErr] +5C085F 085F [Trivial] +5C0861 0861 [Trivial] +5C0862 0862 [Trivial] +5C086E 086E [Trivial] +5C0872 0872 [Trivial] +5C0874 0874 [Trivial] +5C087E 087E [Trivial] +5C087F 087F [Trivial] +5C0880 0880 [Trivial][ILSEQ] +5C0881 0881 [Trivial][ILSEQ] +5C089F 089F [Trivial][ILSEQ] +5C08A0 08A0 [Trivial][ILSEQ] +5C08A1 08A1 [Trivial][ILSEQ] +5C08E0 08E0 [Trivial][ILSEQ] +5C08EF 08EF [Trivial][ILSEQ] +5C08F9 08F9 [Trivial][ILSEQ] +5C08FA 08FA [Trivial][ILSEQ] +5C08FC 08FC [Trivial][ILSEQ] +5C08FD 08FD [Trivial][ILSEQ] +5C08FE 08FE [Trivial][ILSEQ] +5C08FF 08FF [Trivial][ILSEQ] +5C0900 0900 [Trivial] +5C0908 0908 [Trivial] +5C0909 0909 [Trivial] +5C090A 090A [Trivial] +5C090D 090D [Trivial] +5C091A 091A [Trivial] +5C0922 0922 [Trivial] +5C0925 0925 [Trivial] +5C0927 NULL [SyntErr] +5C0930 0930 [Trivial] +5C093F 093F [Trivial] +5C0940 0940 [Trivial] +5C095A 095A [Trivial] +5C095C NULL [SyntErr] +5C095F 095F [Trivial] +5C0961 0961 [Trivial] +5C0962 0962 [Trivial] +5C096E 096E [Trivial] +5C0972 0972 [Trivial] +5C0974 0974 [Trivial] +5C097E 097E [Trivial] +5C097F 097F [Trivial] +5C0980 0980 [Trivial][ILSEQ] +5C0981 0981 [Trivial][ILSEQ] +5C099F 099F [Trivial][ILSEQ] +5C09A0 09A0 [Trivial][ILSEQ] +5C09A1 09A1 [Trivial][ILSEQ] +5C09E0 09E0 [Trivial][ILSEQ] +5C09EF 09EF [Trivial][ILSEQ] +5C09F9 09F9 [Trivial][ILSEQ] +5C09FA 09FA [Trivial][ILSEQ] +5C09FC 09FC [Trivial][ILSEQ] +5C09FD 09FD [Trivial][ILSEQ] +5C09FE 09FE [Trivial][ILSEQ] +5C09FF 09FF [Trivial][ILSEQ] +5C0A00 0A00 [Trivial] +5C0A08 0A08 [Trivial] +5C0A09 0A09 [Trivial] +5C0A0A 0A0A [Trivial] +5C0A0D 0A0D [Trivial] +5C0A1A 0A1A [Trivial] +5C0A22 0A22 [Trivial] +5C0A25 0A25 [Trivial] +5C0A27 NULL [SyntErr] +5C0A30 0A30 [Trivial] +5C0A3F 0A3F [Trivial] +5C0A40 0A40 [Trivial] +5C0A5A 0A5A [Trivial] +5C0A5C NULL [SyntErr] +5C0A5F 0A5F [Trivial] +5C0A61 0A61 [Trivial] +5C0A62 0A62 [Trivial] +5C0A6E 0A6E [Trivial] +5C0A72 0A72 [Trivial] +5C0A74 0A74 [Trivial] +5C0A7E 0A7E [Trivial] +5C0A7F 0A7F [Trivial] +5C0A80 0A80 [Trivial][ILSEQ] +5C0A81 0A81 [Trivial][ILSEQ] +5C0A9F 0A9F [Trivial][ILSEQ] +5C0AA0 0AA0 [Trivial][ILSEQ] +5C0AA1 0AA1 [Trivial][ILSEQ] +5C0AE0 0AE0 [Trivial][ILSEQ] +5C0AEF 0AEF [Trivial][ILSEQ] +5C0AF9 0AF9 [Trivial][ILSEQ] +5C0AFA 0AFA [Trivial][ILSEQ] +5C0AFC 0AFC [Trivial][ILSEQ] +5C0AFD 0AFD [Trivial][ILSEQ] +5C0AFE 0AFE [Trivial][ILSEQ] +5C0AFF 0AFF [Trivial][ILSEQ] +5C0D00 0D00 [Trivial] +5C0D08 0D08 [Trivial] +5C0D09 0D09 [Trivial] +5C0D0A 0D0A [Trivial] +5C0D0D 0D0D [Trivial] +5C0D1A 0D1A [Trivial] +5C0D22 0D22 [Trivial] +5C0D25 0D25 [Trivial] +5C0D27 NULL [SyntErr] +5C0D30 0D30 [Trivial] +5C0D3F 0D3F [Trivial] 
+5C0D40 0D40 [Trivial] +5C0D5A 0D5A [Trivial] +5C0D5C NULL [SyntErr] +5C0D5F 0D5F [Trivial] +5C0D61 0D61 [Trivial] +5C0D62 0D62 [Trivial] +5C0D6E 0D6E [Trivial] +5C0D72 0D72 [Trivial] +5C0D74 0D74 [Trivial] +5C0D7E 0D7E [Trivial] +5C0D7F 0D7F [Trivial] +5C0D80 0D80 [Trivial][ILSEQ] +5C0D81 0D81 [Trivial][ILSEQ] +5C0D9F 0D9F [Trivial][ILSEQ] +5C0DA0 0DA0 [Trivial][ILSEQ] +5C0DA1 0DA1 [Trivial][ILSEQ] +5C0DE0 0DE0 [Trivial][ILSEQ] +5C0DEF 0DEF [Trivial][ILSEQ] +5C0DF9 0DF9 [Trivial][ILSEQ] +5C0DFA 0DFA [Trivial][ILSEQ] +5C0DFC 0DFC [Trivial][ILSEQ] +5C0DFD 0DFD [Trivial][ILSEQ] +5C0DFE 0DFE [Trivial][ILSEQ] +5C0DFF 0DFF [Trivial][ILSEQ] +5C1A00 1A00 [Trivial] +5C1A08 1A08 [Trivial] +5C1A09 1A09 [Trivial] +5C1A0A 1A0A [Trivial] +5C1A0D 1A0D [Trivial] +5C1A1A 1A1A [Trivial] +5C1A22 1A22 [Trivial] +5C1A25 1A25 [Trivial] +5C1A27 NULL [SyntErr] +5C1A30 1A30 [Trivial] +5C1A3F 1A3F [Trivial] +5C1A40 1A40 [Trivial] +5C1A5A 1A5A [Trivial] +5C1A5C NULL [SyntErr] +5C1A5F 1A5F [Trivial] +5C1A61 1A61 [Trivial] +5C1A62 1A62 [Trivial] +5C1A6E 1A6E [Trivial] +5C1A72 1A72 [Trivial] +5C1A74 1A74 [Trivial] +5C1A7E 1A7E [Trivial] +5C1A7F 1A7F [Trivial] +5C1A80 1A80 [Trivial][ILSEQ] +5C1A81 1A81 [Trivial][ILSEQ] +5C1A9F 1A9F [Trivial][ILSEQ] +5C1AA0 1AA0 [Trivial][ILSEQ] +5C1AA1 1AA1 [Trivial][ILSEQ] +5C1AE0 1AE0 [Trivial][ILSEQ] +5C1AEF 1AEF [Trivial][ILSEQ] +5C1AF9 1AF9 [Trivial][ILSEQ] +5C1AFA 1AFA [Trivial][ILSEQ] +5C1AFC 1AFC [Trivial][ILSEQ] +5C1AFD 1AFD [Trivial][ILSEQ] +5C1AFE 1AFE [Trivial][ILSEQ] +5C1AFF 1AFF [Trivial][ILSEQ] +5C2200 2200 [Trivial] +5C2208 2208 [Trivial] +5C2209 2209 [Trivial] +5C220A 220A [Trivial] +5C220D 220D [Trivial] +5C221A 221A [Trivial] +5C2222 2222 [Trivial] +5C2225 2225 [Trivial] +5C2227 NULL [SyntErr] +5C2230 2230 [Trivial] +5C223F 223F [Trivial] +5C2240 2240 [Trivial] +5C225A 225A [Trivial] +5C225C NULL [SyntErr] +5C225F 225F [Trivial] +5C2261 2261 [Trivial] +5C2262 2262 [Trivial] +5C226E 226E [Trivial] +5C2272 2272 [Trivial] +5C2274 2274 [Trivial] +5C227E 227E [Trivial] +5C227F 227F [Trivial] +5C2280 2280 [Trivial][ILSEQ] +5C2281 2281 [Trivial][ILSEQ] +5C229F 229F [Trivial][ILSEQ] +5C22A0 22A0 [Trivial][ILSEQ] +5C22A1 22A1 [Trivial][ILSEQ] +5C22E0 22E0 [Trivial][ILSEQ] +5C22EF 22EF [Trivial][ILSEQ] +5C22F9 22F9 [Trivial][ILSEQ] +5C22FA 22FA [Trivial][ILSEQ] +5C22FC 22FC [Trivial][ILSEQ] +5C22FD 22FD [Trivial][ILSEQ] +5C22FE 22FE [Trivial][ILSEQ] +5C22FF 22FF [Trivial][ILSEQ] +5C2500 5C2500 [Preserve][LIKE] +5C2508 5C2508 [Preserve][LIKE] +5C2509 5C2509 [Preserve][LIKE] +5C250A 5C250A [Preserve][LIKE] +5C250D 5C250D [Preserve][LIKE] +5C251A 5C251A [Preserve][LIKE] +5C2522 5C2522 [Preserve][LIKE] +5C2525 5C2525 [Preserve][LIKE] +5C2527 NULL [SyntErr] +5C2530 5C2530 [Preserve][LIKE] +5C253F 5C253F [Preserve][LIKE] +5C2540 5C2540 [Preserve][LIKE] +5C255A 5C255A [Preserve][LIKE] +5C255C NULL [SyntErr] +5C255F 5C255F [Preserve][LIKE] +5C2561 5C2561 [Preserve][LIKE] +5C2562 5C2562 [Preserve][LIKE] +5C256E 5C256E [Preserve][LIKE] +5C2572 5C2572 [Preserve][LIKE] +5C2574 5C2574 [Preserve][LIKE] +5C257E 5C257E [Preserve][LIKE] +5C257F 5C257F [Preserve][LIKE] +5C2580 5C2580 [Preserve][LIKE][ILSEQ] +5C2581 5C2581 [Preserve][LIKE][ILSEQ] +5C259F 5C259F [Preserve][LIKE][ILSEQ] +5C25A0 5C25A0 [Preserve][LIKE][ILSEQ] +5C25A1 5C25A1 [Preserve][LIKE][ILSEQ] +5C25E0 5C25E0 [Preserve][LIKE][ILSEQ] +5C25EF 5C25EF [Preserve][LIKE][ILSEQ] +5C25F9 5C25F9 [Preserve][LIKE][ILSEQ] +5C25FA 5C25FA [Preserve][LIKE][ILSEQ] +5C25FC 5C25FC [Preserve][LIKE][ILSEQ] +5C25FD 5C25FD [Preserve][LIKE][ILSEQ] 
+5C25FE 5C25FE [Preserve][LIKE][ILSEQ] +5C25FF 5C25FF [Preserve][LIKE][ILSEQ] +5C2700 2700 [Trivial] +5C2708 2708 [Trivial] +5C2709 2709 [Trivial] +5C270A 270A [Trivial] +5C270D 270D [Trivial] +5C271A 271A [Trivial] +5C2722 2722 [Trivial] +5C2725 2725 [Trivial] +5C2727 NULL [SyntErr] +5C2730 2730 [Trivial] +5C273F 273F [Trivial] +5C2740 2740 [Trivial] +5C275A 275A [Trivial] +5C275C NULL [SyntErr] +5C275F 275F [Trivial] +5C2761 2761 [Trivial] +5C2762 2762 [Trivial] +5C276E 276E [Trivial] +5C2772 2772 [Trivial] +5C2774 2774 [Trivial] +5C277E 277E [Trivial] +5C277F 277F [Trivial] +5C2780 2780 [Trivial][ILSEQ] +5C2781 2781 [Trivial][ILSEQ] +5C279F 279F [Trivial][ILSEQ] +5C27A0 27A0 [Trivial][ILSEQ] +5C27A1 27A1 [Trivial][ILSEQ] +5C27E0 27E0 [Trivial][ILSEQ] +5C27EF 27EF [Trivial][ILSEQ] +5C27F9 27F9 [Trivial][ILSEQ] +5C27FA 27FA [Trivial][ILSEQ] +5C27FC 27FC [Trivial][ILSEQ] +5C27FD 27FD [Trivial][ILSEQ] +5C27FE 27FE [Trivial][ILSEQ] +5C27FF 27FF [Trivial][ILSEQ] +5C3000 0000 [Regular] +5C3008 0008 [Regular] +5C3009 0009 [Regular] +5C300A 000A [Regular] +5C300D 000D [Regular] +5C301A 001A [Regular] +5C3022 0022 [Regular] +5C3025 0025 [Regular] +5C3027 NULL [SyntErr] +5C3030 0030 [Regular] +5C303F 003F [Regular] +5C3040 0040 [Regular] +5C305A 005A [Regular] +5C305C NULL [SyntErr] +5C305F 005F [Regular] +5C3061 0061 [Regular] +5C3062 0062 [Regular] +5C306E 006E [Regular] +5C3072 0072 [Regular] +5C3074 0074 [Regular] +5C307E 007E [Regular] +5C307F 007F [Regular] +5C3080 0080 [Regular][ILSEQ] +5C3081 0081 [Regular][ILSEQ] +5C309F 009F [Regular][ILSEQ] +5C30A0 00A0 [Regular][ILSEQ] +5C30A1 00A1 [Regular][ILSEQ] +5C30E0 00E0 [Regular][ILSEQ] +5C30EF 00EF [Regular][ILSEQ] +5C30F9 00F9 [Regular][ILSEQ] +5C30FA 00FA [Regular][ILSEQ] +5C30FC 00FC [Regular][ILSEQ] +5C30FD 00FD [Regular][ILSEQ] +5C30FE 00FE [Regular][ILSEQ] +5C30FF 00FF [Regular][ILSEQ] +5C3F00 3F00 [Trivial] +5C3F08 3F08 [Trivial] +5C3F09 3F09 [Trivial] +5C3F0A 3F0A [Trivial] +5C3F0D 3F0D [Trivial] +5C3F1A 3F1A [Trivial] +5C3F22 3F22 [Trivial] +5C3F25 3F25 [Trivial] +5C3F27 NULL [SyntErr] +5C3F30 3F30 [Trivial] +5C3F3F 3F3F [Trivial] +5C3F40 3F40 [Trivial] +5C3F5A 3F5A [Trivial] +5C3F5C NULL [SyntErr] +5C3F5F 3F5F [Trivial] +5C3F61 3F61 [Trivial] +5C3F62 3F62 [Trivial] +5C3F6E 3F6E [Trivial] +5C3F72 3F72 [Trivial] +5C3F74 3F74 [Trivial] +5C3F7E 3F7E [Trivial] +5C3F7F 3F7F [Trivial] +5C3F80 3F80 [Trivial][ILSEQ] +5C3F81 3F81 [Trivial][ILSEQ] +5C3F9F 3F9F [Trivial][ILSEQ] +5C3FA0 3FA0 [Trivial][ILSEQ] +5C3FA1 3FA1 [Trivial][ILSEQ] +5C3FE0 3FE0 [Trivial][ILSEQ] +5C3FEF 3FEF [Trivial][ILSEQ] +5C3FF9 3FF9 [Trivial][ILSEQ] +5C3FFA 3FFA [Trivial][ILSEQ] +5C3FFC 3FFC [Trivial][ILSEQ] +5C3FFD 3FFD [Trivial][ILSEQ] +5C3FFE 3FFE [Trivial][ILSEQ] +5C3FFF 3FFF [Trivial][ILSEQ] +5C4000 4000 [Trivial] +5C4008 4008 [Trivial] +5C4009 4009 [Trivial] +5C400A 400A [Trivial] +5C400D 400D [Trivial] +5C401A 401A [Trivial] +5C4022 4022 [Trivial] +5C4025 4025 [Trivial] +5C4027 NULL [SyntErr] +5C4030 4030 [Trivial] +5C403F 403F [Trivial] +5C4040 4040 [Trivial] +5C405A 405A [Trivial] +5C405C NULL [SyntErr] +5C405F 405F [Trivial] +5C4061 4061 [Trivial] +5C4062 4062 [Trivial] +5C406E 406E [Trivial] +5C4072 4072 [Trivial] +5C4074 4074 [Trivial] +5C407E 407E [Trivial] +5C407F 407F [Trivial] +5C4080 4080 [Trivial][ILSEQ] +5C4081 4081 [Trivial][ILSEQ] +5C409F 409F [Trivial][ILSEQ] +5C40A0 40A0 [Trivial][ILSEQ] +5C40A1 40A1 [Trivial][ILSEQ] +5C40E0 40E0 [Trivial][ILSEQ] +5C40EF 40EF [Trivial][ILSEQ] +5C40F9 40F9 [Trivial][ILSEQ] +5C40FA 40FA [Trivial][ILSEQ] +5C40FC 40FC 
[Trivial][ILSEQ] +5C40FD 40FD [Trivial][ILSEQ] +5C40FE 40FE [Trivial][ILSEQ] +5C40FF 40FF [Trivial][ILSEQ] +5C5A00 1A00 [Regular] +5C5A08 1A08 [Regular] +5C5A09 1A09 [Regular] +5C5A0A 1A0A [Regular] +5C5A0D 1A0D [Regular] +5C5A1A 1A1A [Regular] +5C5A22 1A22 [Regular] +5C5A25 1A25 [Regular] +5C5A27 NULL [SyntErr] +5C5A30 1A30 [Regular] +5C5A3F 1A3F [Regular] +5C5A40 1A40 [Regular] +5C5A5A 1A5A [Regular] +5C5A5C NULL [SyntErr] +5C5A5F 1A5F [Regular] +5C5A61 1A61 [Regular] +5C5A62 1A62 [Regular] +5C5A6E 1A6E [Regular] +5C5A72 1A72 [Regular] +5C5A74 1A74 [Regular] +5C5A7E 1A7E [Regular] +5C5A7F 1A7F [Regular] +5C5A80 1A80 [Regular][ILSEQ] +5C5A81 1A81 [Regular][ILSEQ] +5C5A9F 1A9F [Regular][ILSEQ] +5C5AA0 1AA0 [Regular][ILSEQ] +5C5AA1 1AA1 [Regular][ILSEQ] +5C5AE0 1AE0 [Regular][ILSEQ] +5C5AEF 1AEF [Regular][ILSEQ] +5C5AF9 1AF9 [Regular][ILSEQ] +5C5AFA 1AFA [Regular][ILSEQ] +5C5AFC 1AFC [Regular][ILSEQ] +5C5AFD 1AFD [Regular][ILSEQ] +5C5AFE 1AFE [Regular][ILSEQ] +5C5AFF 1AFF [Regular][ILSEQ] +5C5C00 5C00 [Regular] +5C5C08 5C08 [Regular] +5C5C09 5C09 [Regular] +5C5C0A 5C0A [Regular] +5C5C0D 5C0D [Regular] +5C5C1A 5C1A [Regular] +5C5C22 5C22 [Regular] +5C5C25 5C25 [Regular] +5C5C27 NULL [SyntErr] +5C5C30 5C30 [Regular] +5C5C3F 5C3F [Regular] +5C5C40 5C40 [Regular] +5C5C5A 5C5A [Regular] +5C5C5C NULL [SyntErr] +5C5C5F 5C5F [Regular] +5C5C61 5C61 [Regular] +5C5C62 5C62 [Regular] +5C5C6E 5C6E [Regular] +5C5C72 5C72 [Regular] +5C5C74 5C74 [Regular] +5C5C7E 5C7E [Regular] +5C5C7F 5C7F [Regular] +5C5C80 5C80 [Regular][ILSEQ] +5C5C81 5C81 [Regular][ILSEQ] +5C5C9F 5C9F [Regular][ILSEQ] +5C5CA0 5CA0 [Regular][ILSEQ] +5C5CA1 5CA1 [Regular][ILSEQ] +5C5CE0 5CE0 [Regular][ILSEQ] +5C5CEF 5CEF [Regular][ILSEQ] +5C5CF9 5CF9 [Regular][ILSEQ] +5C5CFA 5CFA [Regular][ILSEQ] +5C5CFC 5CFC [Regular][ILSEQ] +5C5CFD 5CFD [Regular][ILSEQ] +5C5CFE 5CFE [Regular][ILSEQ] +5C5CFF 5CFF [Regular][ILSEQ] +5C5F00 5C5F00 [Preserve][LIKE] +5C5F08 5C5F08 [Preserve][LIKE] +5C5F09 5C5F09 [Preserve][LIKE] +5C5F0A 5C5F0A [Preserve][LIKE] +5C5F0D 5C5F0D [Preserve][LIKE] +5C5F1A 5C5F1A [Preserve][LIKE] +5C5F22 5C5F22 [Preserve][LIKE] +5C5F25 5C5F25 [Preserve][LIKE] +5C5F27 NULL [SyntErr] +5C5F30 5C5F30 [Preserve][LIKE] +5C5F3F 5C5F3F [Preserve][LIKE] +5C5F40 5C5F40 [Preserve][LIKE] +5C5F5A 5C5F5A [Preserve][LIKE] +5C5F5C NULL [SyntErr] +5C5F5F 5C5F5F [Preserve][LIKE] +5C5F61 5C5F61 [Preserve][LIKE] +5C5F62 5C5F62 [Preserve][LIKE] +5C5F6E 5C5F6E [Preserve][LIKE] +5C5F72 5C5F72 [Preserve][LIKE] +5C5F74 5C5F74 [Preserve][LIKE] +5C5F7E 5C5F7E [Preserve][LIKE] +5C5F7F 5C5F7F [Preserve][LIKE] +5C5F80 5C5F80 [Preserve][LIKE][ILSEQ] +5C5F81 5C5F81 [Preserve][LIKE][ILSEQ] +5C5F9F 5C5F9F [Preserve][LIKE][ILSEQ] +5C5FA0 5C5FA0 [Preserve][LIKE][ILSEQ] +5C5FA1 5C5FA1 [Preserve][LIKE][ILSEQ] +5C5FE0 5C5FE0 [Preserve][LIKE][ILSEQ] +5C5FEF 5C5FEF [Preserve][LIKE][ILSEQ] +5C5FF9 5C5FF9 [Preserve][LIKE][ILSEQ] +5C5FFA 5C5FFA [Preserve][LIKE][ILSEQ] +5C5FFC 5C5FFC [Preserve][LIKE][ILSEQ] +5C5FFD 5C5FFD [Preserve][LIKE][ILSEQ] +5C5FFE 5C5FFE [Preserve][LIKE][ILSEQ] +5C5FFF 5C5FFF [Preserve][LIKE][ILSEQ] +5C6100 6100 [Trivial] +5C6108 6108 [Trivial] +5C6109 6109 [Trivial] +5C610A 610A [Trivial] +5C610D 610D [Trivial] +5C611A 611A [Trivial] +5C6122 6122 [Trivial] +5C6125 6125 [Trivial] +5C6127 NULL [SyntErr] +5C6130 6130 [Trivial] +5C613F 613F [Trivial] +5C6140 6140 [Trivial] +5C615A 615A [Trivial] +5C615C NULL [SyntErr] +5C615F 615F [Trivial] +5C6161 6161 [Trivial] +5C6162 6162 [Trivial] +5C616E 616E [Trivial] +5C6172 6172 [Trivial] +5C6174 6174 [Trivial] 
+5C617E 617E [Trivial] +5C617F 617F [Trivial] +5C6180 6180 [Trivial][ILSEQ] +5C6181 6181 [Trivial][ILSEQ] +5C619F 619F [Trivial][ILSEQ] +5C61A0 61A0 [Trivial][ILSEQ] +5C61A1 61A1 [Trivial][ILSEQ] +5C61E0 61E0 [Trivial][ILSEQ] +5C61EF 61EF [Trivial][ILSEQ] +5C61F9 61F9 [Trivial][ILSEQ] +5C61FA 61FA [Trivial][ILSEQ] +5C61FC 61FC [Trivial][ILSEQ] +5C61FD 61FD [Trivial][ILSEQ] +5C61FE 61FE [Trivial][ILSEQ] +5C61FF 61FF [Trivial][ILSEQ] +5C6200 0800 [Regular] +5C6208 0808 [Regular] +5C6209 0809 [Regular] +5C620A 080A [Regular] +5C620D 080D [Regular] +5C621A 081A [Regular] +5C6222 0822 [Regular] +5C6225 0825 [Regular] +5C6227 NULL [SyntErr] +5C6230 0830 [Regular] +5C623F 083F [Regular] +5C6240 0840 [Regular] +5C625A 085A [Regular] +5C625C NULL [SyntErr] +5C625F 085F [Regular] +5C6261 0861 [Regular] +5C6262 0862 [Regular] +5C626E 086E [Regular] +5C6272 0872 [Regular] +5C6274 0874 [Regular] +5C627E 087E [Regular] +5C627F 087F [Regular] +5C6280 0880 [Regular][ILSEQ] +5C6281 0881 [Regular][ILSEQ] +5C629F 089F [Regular][ILSEQ] +5C62A0 08A0 [Regular][ILSEQ] +5C62A1 08A1 [Regular][ILSEQ] +5C62E0 08E0 [Regular][ILSEQ] +5C62EF 08EF [Regular][ILSEQ] +5C62F9 08F9 [Regular][ILSEQ] +5C62FA 08FA [Regular][ILSEQ] +5C62FC 08FC [Regular][ILSEQ] +5C62FD 08FD [Regular][ILSEQ] +5C62FE 08FE [Regular][ILSEQ] +5C62FF 08FF [Regular][ILSEQ] +5C6E00 0A00 [Regular] +5C6E08 0A08 [Regular] +5C6E09 0A09 [Regular] +5C6E0A 0A0A [Regular] +5C6E0D 0A0D [Regular] +5C6E1A 0A1A [Regular] +5C6E22 0A22 [Regular] +5C6E25 0A25 [Regular] +5C6E27 NULL [SyntErr] +5C6E30 0A30 [Regular] +5C6E3F 0A3F [Regular] +5C6E40 0A40 [Regular] +5C6E5A 0A5A [Regular] +5C6E5C NULL [SyntErr] +5C6E5F 0A5F [Regular] +5C6E61 0A61 [Regular] +5C6E62 0A62 [Regular] +5C6E6E 0A6E [Regular] +5C6E72 0A72 [Regular] +5C6E74 0A74 [Regular] +5C6E7E 0A7E [Regular] +5C6E7F 0A7F [Regular] +5C6E80 0A80 [Regular][ILSEQ] +5C6E81 0A81 [Regular][ILSEQ] +5C6E9F 0A9F [Regular][ILSEQ] +5C6EA0 0AA0 [Regular][ILSEQ] +5C6EA1 0AA1 [Regular][ILSEQ] +5C6EE0 0AE0 [Regular][ILSEQ] +5C6EEF 0AEF [Regular][ILSEQ] +5C6EF9 0AF9 [Regular][ILSEQ] +5C6EFA 0AFA [Regular][ILSEQ] +5C6EFC 0AFC [Regular][ILSEQ] +5C6EFD 0AFD [Regular][ILSEQ] +5C6EFE 0AFE [Regular][ILSEQ] +5C6EFF 0AFF [Regular][ILSEQ] +5C7200 0D00 [Regular] +5C7208 0D08 [Regular] +5C7209 0D09 [Regular] +5C720A 0D0A [Regular] +5C720D 0D0D [Regular] +5C721A 0D1A [Regular] +5C7222 0D22 [Regular] +5C7225 0D25 [Regular] +5C7227 NULL [SyntErr] +5C7230 0D30 [Regular] +5C723F 0D3F [Regular] +5C7240 0D40 [Regular] +5C725A 0D5A [Regular] +5C725C NULL [SyntErr] +5C725F 0D5F [Regular] +5C7261 0D61 [Regular] +5C7262 0D62 [Regular] +5C726E 0D6E [Regular] +5C7272 0D72 [Regular] +5C7274 0D74 [Regular] +5C727E 0D7E [Regular] +5C727F 0D7F [Regular] +5C7280 0D80 [Regular][ILSEQ] +5C7281 0D81 [Regular][ILSEQ] +5C729F 0D9F [Regular][ILSEQ] +5C72A0 0DA0 [Regular][ILSEQ] +5C72A1 0DA1 [Regular][ILSEQ] +5C72E0 0DE0 [Regular][ILSEQ] +5C72EF 0DEF [Regular][ILSEQ] +5C72F9 0DF9 [Regular][ILSEQ] +5C72FA 0DFA [Regular][ILSEQ] +5C72FC 0DFC [Regular][ILSEQ] +5C72FD 0DFD [Regular][ILSEQ] +5C72FE 0DFE [Regular][ILSEQ] +5C72FF 0DFF [Regular][ILSEQ] +5C7400 0900 [Regular] +5C7408 0908 [Regular] +5C7409 0909 [Regular] +5C740A 090A [Regular] +5C740D 090D [Regular] +5C741A 091A [Regular] +5C7422 0922 [Regular] +5C7425 0925 [Regular] +5C7427 NULL [SyntErr] +5C7430 0930 [Regular] +5C743F 093F [Regular] +5C7440 0940 [Regular] +5C745A 095A [Regular] +5C745C NULL [SyntErr] +5C745F 095F [Regular] +5C7461 0961 [Regular] +5C7462 0962 [Regular] +5C746E 096E [Regular] +5C7472 0972 
[Regular] +5C7474 0974 [Regular] +5C747E 097E [Regular] +5C747F 097F [Regular] +5C7480 0980 [Regular][ILSEQ] +5C7481 0981 [Regular][ILSEQ] +5C749F 099F [Regular][ILSEQ] +5C74A0 09A0 [Regular][ILSEQ] +5C74A1 09A1 [Regular][ILSEQ] +5C74E0 09E0 [Regular][ILSEQ] +5C74EF 09EF [Regular][ILSEQ] +5C74F9 09F9 [Regular][ILSEQ] +5C74FA 09FA [Regular][ILSEQ] +5C74FC 09FC [Regular][ILSEQ] +5C74FD 09FD [Regular][ILSEQ] +5C74FE 09FE [Regular][ILSEQ] +5C74FF 09FF [Regular][ILSEQ] +5C7E00 7E00 [Trivial] +5C7E08 7E08 [Trivial] +5C7E09 7E09 [Trivial] +5C7E0A 7E0A [Trivial] +5C7E0D 7E0D [Trivial] +5C7E1A 7E1A [Trivial] +5C7E22 7E22 [Trivial] +5C7E25 7E25 [Trivial] +5C7E27 NULL [SyntErr] +5C7E30 7E30 [Trivial] +5C7E3F 7E3F [Trivial] +5C7E40 7E40 [Trivial] +5C7E5A 7E5A [Trivial] +5C7E5C NULL [SyntErr] +5C7E5F 7E5F [Trivial] +5C7E61 7E61 [Trivial] +5C7E62 7E62 [Trivial] +5C7E6E 7E6E [Trivial] +5C7E72 7E72 [Trivial] +5C7E74 7E74 [Trivial] +5C7E7E 7E7E [Trivial] +5C7E7F 7E7F [Trivial] +5C7E80 7E80 [Trivial][ILSEQ] +5C7E81 7E81 [Trivial][ILSEQ] +5C7E9F 7E9F [Trivial][ILSEQ] +5C7EA0 7EA0 [Trivial][ILSEQ] +5C7EA1 7EA1 [Trivial][ILSEQ] +5C7EE0 7EE0 [Trivial][ILSEQ] +5C7EEF 7EEF [Trivial][ILSEQ] +5C7EF9 7EF9 [Trivial][ILSEQ] +5C7EFA 7EFA [Trivial][ILSEQ] +5C7EFC 7EFC [Trivial][ILSEQ] +5C7EFD 7EFD [Trivial][ILSEQ] +5C7EFE 7EFE [Trivial][ILSEQ] +5C7EFF 7EFF [Trivial][ILSEQ] +5C7F00 7F00 [Trivial] +5C7F08 7F08 [Trivial] +5C7F09 7F09 [Trivial] +5C7F0A 7F0A [Trivial] +5C7F0D 7F0D [Trivial] +5C7F1A 7F1A [Trivial] +5C7F22 7F22 [Trivial] +5C7F25 7F25 [Trivial] +5C7F27 NULL [SyntErr] +5C7F30 7F30 [Trivial] +5C7F3F 7F3F [Trivial] +5C7F40 7F40 [Trivial] +5C7F5A 7F5A [Trivial] +5C7F5C NULL [SyntErr] +5C7F5F 7F5F [Trivial] +5C7F61 7F61 [Trivial] +5C7F62 7F62 [Trivial] +5C7F6E 7F6E [Trivial] +5C7F72 7F72 [Trivial] +5C7F74 7F74 [Trivial] +5C7F7E 7F7E [Trivial] +5C7F7F 7F7F [Trivial] +5C7F80 7F80 [Trivial][ILSEQ] +5C7F81 7F81 [Trivial][ILSEQ] +5C7F9F 7F9F [Trivial][ILSEQ] +5C7FA0 7FA0 [Trivial][ILSEQ] +5C7FA1 7FA1 [Trivial][ILSEQ] +5C7FE0 7FE0 [Trivial][ILSEQ] +5C7FEF 7FEF [Trivial][ILSEQ] +5C7FF9 7FF9 [Trivial][ILSEQ] +5C7FFA 7FFA [Trivial][ILSEQ] +5C7FFC 7FFC [Trivial][ILSEQ] +5C7FFD 7FFD [Trivial][ILSEQ] +5C7FFE 7FFE [Trivial][ILSEQ] +5C7FFF 7FFF [Trivial][ILSEQ] +5C8000 8000 [Trivial][ILSEQ] +5C8008 8008 [Trivial][ILSEQ] +5C8009 8009 [Trivial][ILSEQ] +5C800A 800A [Trivial][ILSEQ] +5C800D 800D [Trivial][ILSEQ] +5C801A 801A [Trivial][ILSEQ] +5C8022 8022 [Trivial][ILSEQ] +5C8025 8025 [Trivial][ILSEQ] +5C8027 NULL [SyntErr] +5C8030 8030 [Trivial][ILSEQ] +5C803F 803F [Trivial][ILSEQ] +5C8040 8040 [Trivial][ILSEQ] +5C805A 805A [Trivial][ILSEQ] +5C805C NULL [SyntErr] +5C805F 805F [Trivial][ILSEQ] +5C8061 8061 [Trivial][ILSEQ] +5C8062 8062 [Trivial][ILSEQ] +5C806E 806E [Trivial][ILSEQ] +5C8072 8072 [Trivial][ILSEQ] +5C8074 8074 [Trivial][ILSEQ] +5C807E 807E [Trivial][ILSEQ] +5C807F 807F [Trivial][ILSEQ] +5C8080 8080 [Trivial][ILSEQ] +5C8081 8081 [Trivial][ILSEQ] +5C809F 809F [Trivial][ILSEQ] +5C80A0 80A0 [Trivial][ILSEQ] +5C80A1 80A1 [Trivial][ILSEQ] +5C80E0 80E0 [Trivial][ILSEQ] +5C80EF 80EF [Trivial][ILSEQ] +5C80F9 80F9 [Trivial][ILSEQ] +5C80FA 80FA [Trivial][ILSEQ] +5C80FC 80FC [Trivial][ILSEQ] +5C80FD 80FD [Trivial][ILSEQ] +5C80FE 80FE [Trivial][ILSEQ] +5C80FF 80FF [Trivial][ILSEQ] +5C8100 8100 [Trivial][ILSEQ] +5C8108 8108 [Trivial][ILSEQ] +5C8109 8109 [Trivial][ILSEQ] +5C810A 810A [Trivial][ILSEQ] +5C810D 810D [Trivial][ILSEQ] +5C811A 811A [Trivial][ILSEQ] +5C8122 8122 [Trivial][ILSEQ] +5C8125 8125 [Trivial][ILSEQ] +5C8127 NULL 
[SyntErr] +5C8130 8130 [Trivial][ILSEQ] +5C813F 813F [Trivial][ILSEQ] +5C8140 8140 [Trivial][ILSEQ] +5C815A 815A [Trivial][ILSEQ] +5C815C NULL [SyntErr] +5C815F 815F [Trivial][ILSEQ] +5C8161 8161 [Trivial][ILSEQ] +5C8162 8162 [Trivial][ILSEQ] +5C816E 816E [Trivial][ILSEQ] +5C8172 8172 [Trivial][ILSEQ] +5C8174 8174 [Trivial][ILSEQ] +5C817E 817E [Trivial][ILSEQ] +5C817F 817F [Trivial][ILSEQ] +5C8180 8180 [Trivial][ILSEQ] +5C8181 8181 [Trivial][ILSEQ] +5C819F 819F [Trivial][ILSEQ] +5C81A0 81A0 [Trivial][ILSEQ] +5C81A1 81A1 [Trivial][ILSEQ] +5C81E0 81E0 [Trivial][ILSEQ] +5C81EF 81EF [Trivial][ILSEQ] +5C81F9 81F9 [Trivial][ILSEQ] +5C81FA 81FA [Trivial][ILSEQ] +5C81FC 81FC [Trivial][ILSEQ] +5C81FD 81FD [Trivial][ILSEQ] +5C81FE 81FE [Trivial][ILSEQ] +5C81FF 81FF [Trivial][ILSEQ] +5C9F00 9F00 [Trivial][ILSEQ] +5C9F08 9F08 [Trivial][ILSEQ] +5C9F09 9F09 [Trivial][ILSEQ] +5C9F0A 9F0A [Trivial][ILSEQ] +5C9F0D 9F0D [Trivial][ILSEQ] +5C9F1A 9F1A [Trivial][ILSEQ] +5C9F22 9F22 [Trivial][ILSEQ] +5C9F25 9F25 [Trivial][ILSEQ] +5C9F27 NULL [SyntErr] +5C9F30 9F30 [Trivial][ILSEQ] +5C9F3F 9F3F [Trivial][ILSEQ] +5C9F40 9F40 [Trivial][ILSEQ] +5C9F5A 9F5A [Trivial][ILSEQ] +5C9F5C NULL [SyntErr] +5C9F5F 9F5F [Trivial][ILSEQ] +5C9F61 9F61 [Trivial][ILSEQ] +5C9F62 9F62 [Trivial][ILSEQ] +5C9F6E 9F6E [Trivial][ILSEQ] +5C9F72 9F72 [Trivial][ILSEQ] +5C9F74 9F74 [Trivial][ILSEQ] +5C9F7E 9F7E [Trivial][ILSEQ] +5C9F7F 9F7F [Trivial][ILSEQ] +5C9F80 9F80 [Trivial][ILSEQ] +5C9F81 9F81 [Trivial][ILSEQ] +5C9F9F 9F9F [Trivial][ILSEQ] +5C9FA0 9FA0 [Trivial][ILSEQ] +5C9FA1 9FA1 [Trivial][ILSEQ] +5C9FE0 9FE0 [Trivial][ILSEQ] +5C9FEF 9FEF [Trivial][ILSEQ] +5C9FF9 9FF9 [Trivial][ILSEQ] +5C9FFA 9FFA [Trivial][ILSEQ] +5C9FFC 9FFC [Trivial][ILSEQ] +5C9FFD 9FFD [Trivial][ILSEQ] +5C9FFE 9FFE [Trivial][ILSEQ] +5C9FFF 9FFF [Trivial][ILSEQ] +5CA000 A000 [Trivial][ILSEQ] +5CA008 A008 [Trivial][ILSEQ] +5CA009 A009 [Trivial][ILSEQ] +5CA00A A00A [Trivial][ILSEQ] +5CA00D A00D [Trivial][ILSEQ] +5CA01A A01A [Trivial][ILSEQ] +5CA022 A022 [Trivial][ILSEQ] +5CA025 A025 [Trivial][ILSEQ] +5CA027 NULL [SyntErr] +5CA030 A030 [Trivial][ILSEQ] +5CA03F A03F [Trivial][ILSEQ] +5CA040 A040 [Trivial][ILSEQ] +5CA05A A05A [Trivial][ILSEQ] +5CA05C NULL [SyntErr] +5CA05F A05F [Trivial][ILSEQ] +5CA061 A061 [Trivial][ILSEQ] +5CA062 A062 [Trivial][ILSEQ] +5CA06E A06E [Trivial][ILSEQ] +5CA072 A072 [Trivial][ILSEQ] +5CA074 A074 [Trivial][ILSEQ] +5CA07E A07E [Trivial][ILSEQ] +5CA07F A07F [Trivial][ILSEQ] +5CA080 A080 [Trivial][ILSEQ] +5CA081 A081 [Trivial][ILSEQ] +5CA09F A09F [Trivial][ILSEQ] +5CA0A0 A0A0 [Trivial][ILSEQ] +5CA0A1 A0A1 [Trivial][ILSEQ] +5CA0E0 A0E0 [Trivial][ILSEQ] +5CA0EF A0EF [Trivial][ILSEQ] +5CA0F9 A0F9 [Trivial][ILSEQ] +5CA0FA A0FA [Trivial][ILSEQ] +5CA0FC A0FC [Trivial][ILSEQ] +5CA0FD A0FD [Trivial][ILSEQ] +5CA0FE A0FE [Trivial][ILSEQ] +5CA0FF A0FF [Trivial][ILSEQ] +5CA100 A100 [Trivial][ILSEQ] +5CA108 A108 [Trivial][ILSEQ] +5CA109 A109 [Trivial][ILSEQ] +5CA10A A10A [Trivial][ILSEQ] +5CA10D A10D [Trivial][ILSEQ] +5CA11A A11A [Trivial][ILSEQ] +5CA122 A122 [Trivial][ILSEQ] +5CA125 A125 [Trivial][ILSEQ] +5CA127 NULL [SyntErr] +5CA130 A130 [Trivial][ILSEQ] +5CA13F A13F [Trivial][ILSEQ] +5CA140 A140 [Trivial] +5CA15A A15A [Trivial] +5CA15C NULL [SyntErr] +5CA15F A15F [Trivial] +5CA161 A161 [Trivial] +5CA162 A162 [Trivial] +5CA16E A16E [Trivial] +5CA172 A172 [Trivial] +5CA174 A174 [Trivial] +5CA17E A17E [Trivial] +5CA17F A17F [Trivial][ILSEQ] +5CA180 A180 [Trivial][ILSEQ] +5CA181 A181 [Trivial][ILSEQ] +5CA19F A19F [Trivial][ILSEQ] +5CA1A0 A1A0 
[Trivial][ILSEQ] +5CA1A1 A1A1 [Trivial] +5CA1E0 A1E0 [Trivial] +5CA1EF A1EF [Trivial] +5CA1F9 A1F9 [Trivial] +5CA1FA A1FA [Trivial] +5CA1FC A1FC [Trivial] +5CA1FD A1FD [Trivial] +5CA1FE A1FE [Trivial] +5CA1FF A1FF [Trivial][ILSEQ] +5CE000 E000 [Trivial][ILSEQ] +5CE008 E008 [Trivial][ILSEQ] +5CE009 E009 [Trivial][ILSEQ] +5CE00A E00A [Trivial][ILSEQ] +5CE00D E00D [Trivial][ILSEQ] +5CE01A E01A [Trivial][ILSEQ] +5CE022 E022 [Trivial][ILSEQ] +5CE025 E025 [Trivial][ILSEQ] +5CE027 NULL [SyntErr] +5CE030 E030 [Trivial][ILSEQ] +5CE03F E03F [Trivial][ILSEQ] +5CE040 E040 [Trivial] +5CE05A E05A [Trivial] +5CE05C NULL [SyntErr] +5CE05F E05F [Trivial] +5CE061 E061 [Trivial] +5CE062 E062 [Trivial] +5CE06E E06E [Trivial] +5CE072 E072 [Trivial] +5CE074 E074 [Trivial] +5CE07E E07E [Trivial] +5CE07F E07F [Trivial][ILSEQ] +5CE080 E080 [Trivial][ILSEQ] +5CE081 E081 [Trivial][ILSEQ] +5CE09F E09F [Trivial][ILSEQ] +5CE0A0 E0A0 [Trivial][ILSEQ] +5CE0A1 E0A1 [Trivial] +5CE0E0 E0E0 [Trivial] +5CE0EF E0EF [Trivial] +5CE0F9 E0F9 [Trivial] +5CE0FA E0FA [Trivial] +5CE0FC E0FC [Trivial] +5CE0FD E0FD [Trivial] +5CE0FE E0FE [Trivial] +5CE0FF E0FF [Trivial][ILSEQ] +5CEF00 EF00 [Trivial][ILSEQ] +5CEF08 EF08 [Trivial][ILSEQ] +5CEF09 EF09 [Trivial][ILSEQ] +5CEF0A EF0A [Trivial][ILSEQ] +5CEF0D EF0D [Trivial][ILSEQ] +5CEF1A EF1A [Trivial][ILSEQ] +5CEF22 EF22 [Trivial][ILSEQ] +5CEF25 EF25 [Trivial][ILSEQ] +5CEF27 NULL [SyntErr] +5CEF30 EF30 [Trivial][ILSEQ] +5CEF3F EF3F [Trivial][ILSEQ] +5CEF40 EF40 [Trivial] +5CEF5A EF5A [Trivial] +5CEF5C NULL [SyntErr] +5CEF5F EF5F [Trivial] +5CEF61 EF61 [Trivial] +5CEF62 EF62 [Trivial] +5CEF6E EF6E [Trivial] +5CEF72 EF72 [Trivial] +5CEF74 EF74 [Trivial] +5CEF7E EF7E [Trivial] +5CEF7F EF7F [Trivial][ILSEQ] +5CEF80 EF80 [Trivial][ILSEQ] +5CEF81 EF81 [Trivial][ILSEQ] +5CEF9F EF9F [Trivial][ILSEQ] +5CEFA0 EFA0 [Trivial][ILSEQ] +5CEFA1 EFA1 [Trivial] +5CEFE0 EFE0 [Trivial] +5CEFEF EFEF [Trivial] +5CEFF9 EFF9 [Trivial] +5CEFFA EFFA [Trivial] +5CEFFC EFFC [Trivial] +5CEFFD EFFD [Trivial] +5CEFFE EFFE [Trivial] +5CEFFF EFFF [Trivial][ILSEQ] +5CF900 F900 [Trivial][ILSEQ] +5CF908 F908 [Trivial][ILSEQ] +5CF909 F909 [Trivial][ILSEQ] +5CF90A F90A [Trivial][ILSEQ] +5CF90D F90D [Trivial][ILSEQ] +5CF91A F91A [Trivial][ILSEQ] +5CF922 F922 [Trivial][ILSEQ] +5CF925 F925 [Trivial][ILSEQ] +5CF927 NULL [SyntErr] +5CF930 F930 [Trivial][ILSEQ] +5CF93F F93F [Trivial][ILSEQ] +5CF940 F940 [Trivial] +5CF95A F95A [Trivial] +5CF95C NULL [SyntErr] +5CF95F F95F [Trivial] +5CF961 F961 [Trivial] +5CF962 F962 [Trivial] +5CF96E F96E [Trivial] +5CF972 F972 [Trivial] +5CF974 F974 [Trivial] +5CF97E F97E [Trivial] +5CF97F F97F [Trivial][ILSEQ] +5CF980 F980 [Trivial][ILSEQ] +5CF981 F981 [Trivial][ILSEQ] +5CF99F F99F [Trivial][ILSEQ] +5CF9A0 F9A0 [Trivial][ILSEQ] +5CF9A1 F9A1 [Trivial] +5CF9E0 F9E0 [Trivial] +5CF9EF F9EF [Trivial] +5CF9F9 F9F9 [Trivial] +5CF9FA F9FA [Trivial] +5CF9FC F9FC [Trivial] +5CF9FD F9FD [Trivial] +5CF9FE F9FE [Trivial] +5CF9FF F9FF [Trivial][ILSEQ] +5CFA00 FA00 [Trivial][ILSEQ] +5CFA08 FA08 [Trivial][ILSEQ] +5CFA09 FA09 [Trivial][ILSEQ] +5CFA0A FA0A [Trivial][ILSEQ] +5CFA0D FA0D [Trivial][ILSEQ] +5CFA1A FA1A [Trivial][ILSEQ] +5CFA22 FA22 [Trivial][ILSEQ] +5CFA25 FA25 [Trivial][ILSEQ] +5CFA27 NULL [SyntErr] +5CFA30 FA30 [Trivial][ILSEQ] +5CFA3F FA3F [Trivial][ILSEQ] +5CFA40 FA40 [Trivial][ILSEQ] +5CFA5A FA5A [Trivial][ILSEQ] +5CFA5C NULL [SyntErr] +5CFA5F FA5F [Trivial][ILSEQ] +5CFA61 FA61 [Trivial][ILSEQ] +5CFA62 FA62 [Trivial][ILSEQ] +5CFA6E FA6E [Trivial][ILSEQ] +5CFA72 FA72 [Trivial][ILSEQ] +5CFA74 FA74 
[Trivial][ILSEQ] +5CFA7E FA7E [Trivial][ILSEQ] +5CFA7F FA7F [Trivial][ILSEQ] +5CFA80 FA80 [Trivial][ILSEQ] +5CFA81 FA81 [Trivial][ILSEQ] +5CFA9F FA9F [Trivial][ILSEQ] +5CFAA0 FAA0 [Trivial][ILSEQ] +5CFAA1 FAA1 [Trivial][ILSEQ] +5CFAE0 FAE0 [Trivial][ILSEQ] +5CFAEF FAEF [Trivial][ILSEQ] +5CFAF9 FAF9 [Trivial][ILSEQ] +5CFAFA FAFA [Trivial][ILSEQ] +5CFAFC FAFC [Trivial][ILSEQ] +5CFAFD FAFD [Trivial][ILSEQ] +5CFAFE FAFE [Trivial][ILSEQ] +5CFAFF FAFF [Trivial][ILSEQ] +5CFC00 FC00 [Trivial][ILSEQ] +5CFC08 FC08 [Trivial][ILSEQ] +5CFC09 FC09 [Trivial][ILSEQ] +5CFC0A FC0A [Trivial][ILSEQ] +5CFC0D FC0D [Trivial][ILSEQ] +5CFC1A FC1A [Trivial][ILSEQ] +5CFC22 FC22 [Trivial][ILSEQ] +5CFC25 FC25 [Trivial][ILSEQ] +5CFC27 NULL [SyntErr] +5CFC30 FC30 [Trivial][ILSEQ] +5CFC3F FC3F [Trivial][ILSEQ] +5CFC40 FC40 [Trivial][ILSEQ] +5CFC5A FC5A [Trivial][ILSEQ] +5CFC5C NULL [SyntErr] +5CFC5F FC5F [Trivial][ILSEQ] +5CFC61 FC61 [Trivial][ILSEQ] +5CFC62 FC62 [Trivial][ILSEQ] +5CFC6E FC6E [Trivial][ILSEQ] +5CFC72 FC72 [Trivial][ILSEQ] +5CFC74 FC74 [Trivial][ILSEQ] +5CFC7E FC7E [Trivial][ILSEQ] +5CFC7F FC7F [Trivial][ILSEQ] +5CFC80 FC80 [Trivial][ILSEQ] +5CFC81 FC81 [Trivial][ILSEQ] +5CFC9F FC9F [Trivial][ILSEQ] +5CFCA0 FCA0 [Trivial][ILSEQ] +5CFCA1 FCA1 [Trivial][ILSEQ] +5CFCE0 FCE0 [Trivial][ILSEQ] +5CFCEF FCEF [Trivial][ILSEQ] +5CFCF9 FCF9 [Trivial][ILSEQ] +5CFCFA FCFA [Trivial][ILSEQ] +5CFCFC FCFC [Trivial][ILSEQ] +5CFCFD FCFD [Trivial][ILSEQ] +5CFCFE FCFE [Trivial][ILSEQ] +5CFCFF FCFF [Trivial][ILSEQ] +5CFD00 FD00 [Trivial][ILSEQ] +5CFD08 FD08 [Trivial][ILSEQ] +5CFD09 FD09 [Trivial][ILSEQ] +5CFD0A FD0A [Trivial][ILSEQ] +5CFD0D FD0D [Trivial][ILSEQ] +5CFD1A FD1A [Trivial][ILSEQ] +5CFD22 FD22 [Trivial][ILSEQ] +5CFD25 FD25 [Trivial][ILSEQ] +5CFD27 NULL [SyntErr] +5CFD30 FD30 [Trivial][ILSEQ] +5CFD3F FD3F [Trivial][ILSEQ] +5CFD40 FD40 [Trivial][ILSEQ] +5CFD5A FD5A [Trivial][ILSEQ] +5CFD5C NULL [SyntErr] +5CFD5F FD5F [Trivial][ILSEQ] +5CFD61 FD61 [Trivial][ILSEQ] +5CFD62 FD62 [Trivial][ILSEQ] +5CFD6E FD6E [Trivial][ILSEQ] +5CFD72 FD72 [Trivial][ILSEQ] +5CFD74 FD74 [Trivial][ILSEQ] +5CFD7E FD7E [Trivial][ILSEQ] +5CFD7F FD7F [Trivial][ILSEQ] +5CFD80 FD80 [Trivial][ILSEQ] +5CFD81 FD81 [Trivial][ILSEQ] +5CFD9F FD9F [Trivial][ILSEQ] +5CFDA0 FDA0 [Trivial][ILSEQ] +5CFDA1 FDA1 [Trivial][ILSEQ] +5CFDE0 FDE0 [Trivial][ILSEQ] +5CFDEF FDEF [Trivial][ILSEQ] +5CFDF9 FDF9 [Trivial][ILSEQ] +5CFDFA FDFA [Trivial][ILSEQ] +5CFDFC FDFC [Trivial][ILSEQ] +5CFDFD FDFD [Trivial][ILSEQ] +5CFDFE FDFE [Trivial][ILSEQ] +5CFDFF FDFF [Trivial][ILSEQ] +5CFE00 FE00 [Trivial][ILSEQ] +5CFE08 FE08 [Trivial][ILSEQ] +5CFE09 FE09 [Trivial][ILSEQ] +5CFE0A FE0A [Trivial][ILSEQ] +5CFE0D FE0D [Trivial][ILSEQ] +5CFE1A FE1A [Trivial][ILSEQ] +5CFE22 FE22 [Trivial][ILSEQ] +5CFE25 FE25 [Trivial][ILSEQ] +5CFE27 NULL [SyntErr] +5CFE30 FE30 [Trivial][ILSEQ] +5CFE3F FE3F [Trivial][ILSEQ] +5CFE40 FE40 [Trivial][ILSEQ] +5CFE5A FE5A [Trivial][ILSEQ] +5CFE5C NULL [SyntErr] +5CFE5F FE5F [Trivial][ILSEQ] +5CFE61 FE61 [Trivial][ILSEQ] +5CFE62 FE62 [Trivial][ILSEQ] +5CFE6E FE6E [Trivial][ILSEQ] +5CFE72 FE72 [Trivial][ILSEQ] +5CFE74 FE74 [Trivial][ILSEQ] +5CFE7E FE7E [Trivial][ILSEQ] +5CFE7F FE7F [Trivial][ILSEQ] +5CFE80 FE80 [Trivial][ILSEQ] +5CFE81 FE81 [Trivial][ILSEQ] +5CFE9F FE9F [Trivial][ILSEQ] +5CFEA0 FEA0 [Trivial][ILSEQ] +5CFEA1 FEA1 [Trivial][ILSEQ] +5CFEE0 FEE0 [Trivial][ILSEQ] +5CFEEF FEEF [Trivial][ILSEQ] +5CFEF9 FEF9 [Trivial][ILSEQ] +5CFEFA FEFA [Trivial][ILSEQ] +5CFEFC FEFC [Trivial][ILSEQ] +5CFEFD FEFD [Trivial][ILSEQ] +5CFEFE FEFE [Trivial][ILSEQ] +5CFEFF 
FEFF [Trivial][ILSEQ] +5CFF00 FF00 [Trivial][ILSEQ] +5CFF08 FF08 [Trivial][ILSEQ] +5CFF09 FF09 [Trivial][ILSEQ] +5CFF0A FF0A [Trivial][ILSEQ] +5CFF0D FF0D [Trivial][ILSEQ] +5CFF1A FF1A [Trivial][ILSEQ] +5CFF22 FF22 [Trivial][ILSEQ] +5CFF25 FF25 [Trivial][ILSEQ] +5CFF27 NULL [SyntErr] +5CFF30 FF30 [Trivial][ILSEQ] +5CFF3F FF3F [Trivial][ILSEQ] +5CFF40 FF40 [Trivial][ILSEQ] +5CFF5A FF5A [Trivial][ILSEQ] +5CFF5C NULL [SyntErr] +5CFF5F FF5F [Trivial][ILSEQ] +5CFF61 FF61 [Trivial][ILSEQ] +5CFF62 FF62 [Trivial][ILSEQ] +5CFF6E FF6E [Trivial][ILSEQ] +5CFF72 FF72 [Trivial][ILSEQ] +5CFF74 FF74 [Trivial][ILSEQ] +5CFF7E FF7E [Trivial][ILSEQ] +5CFF7F FF7F [Trivial][ILSEQ] +5CFF80 FF80 [Trivial][ILSEQ] +5CFF81 FF81 [Trivial][ILSEQ] +5CFF9F FF9F [Trivial][ILSEQ] +5CFFA0 FFA0 [Trivial][ILSEQ] +5CFFA1 FFA1 [Trivial][ILSEQ] +5CFFE0 FFE0 [Trivial][ILSEQ] +5CFFEF FFEF [Trivial][ILSEQ] +5CFFF9 FFF9 [Trivial][ILSEQ] +5CFFFA FFFA [Trivial][ILSEQ] +5CFFFC FFFC [Trivial][ILSEQ] +5CFFFD FFFD [Trivial][ILSEQ] +5CFFFE FFFE [Trivial][ILSEQ] +5CFFFF FFFF [Trivial][ILSEQ] +5C005C00 0000 [Trivial] +5C005C08 0008 [Trivial] +5C005C09 0009 [Trivial] +5C005C0A 000A [Trivial] +5C005C0D 000D [Trivial] +5C005C1A 001A [Trivial] +5C005C22 0022 [Trivial] +5C005C25 005C25 [Regular] +5C005C27 0027 [Trivial] +5C005C30 0000 [Regular] +5C005C3F 003F [Trivial] +5C005C40 0040 [Trivial] +5C005C5A 001A [Regular] +5C005C5C 005C [Regular] +5C005C5F 005C5F [Regular] +5C005C61 0061 [Trivial] +5C005C62 0008 [Regular] +5C005C6E 000A [Regular] +5C005C72 000D [Regular] +5C005C74 0009 [Regular] +5C005C7E 007E [Trivial] +5C005C7F 007F [Trivial] +5C005C80 0080 [Trivial][ILSEQ] +5C005C81 0081 [Trivial][ILSEQ] +5C005C9F 009F [Trivial][ILSEQ] +5C005CA0 00A0 [Trivial][ILSEQ] +5C005CA1 00A1 [Trivial][ILSEQ] +5C005CE0 00E0 [Trivial][ILSEQ] +5C005CEF 00EF [Trivial][ILSEQ] +5C005CF9 00F9 [Trivial][ILSEQ] +5C005CFA 00FA [Trivial][ILSEQ] +5C005CFC 00FC [Trivial][ILSEQ] +5C005CFD 00FD [Trivial][ILSEQ] +5C005CFE 00FE [Trivial][ILSEQ] +5C005CFF 00FF [Trivial][ILSEQ] +5C085C00 0800 [Trivial] +5C085C08 0808 [Trivial] +5C085C09 0809 [Trivial] +5C085C0A 080A [Trivial] +5C085C0D 080D [Trivial] +5C085C1A 081A [Trivial] +5C085C22 0822 [Trivial] +5C085C25 085C25 [Regular] +5C085C27 0827 [Trivial] +5C085C30 0800 [Regular] +5C085C3F 083F [Trivial] +5C085C40 0840 [Trivial] +5C085C5A 081A [Regular] +5C085C5C 085C [Regular] +5C085C5F 085C5F [Regular] +5C085C61 0861 [Trivial] +5C085C62 0808 [Regular] +5C085C6E 080A [Regular] +5C085C72 080D [Regular] +5C085C74 0809 [Regular] +5C085C7E 087E [Trivial] +5C085C7F 087F [Trivial] +5C085C80 0880 [Trivial][ILSEQ] +5C085C81 0881 [Trivial][ILSEQ] +5C085C9F 089F [Trivial][ILSEQ] +5C085CA0 08A0 [Trivial][ILSEQ] +5C085CA1 08A1 [Trivial][ILSEQ] +5C085CE0 08E0 [Trivial][ILSEQ] +5C085CEF 08EF [Trivial][ILSEQ] +5C085CF9 08F9 [Trivial][ILSEQ] +5C085CFA 08FA [Trivial][ILSEQ] +5C085CFC 08FC [Trivial][ILSEQ] +5C085CFD 08FD [Trivial][ILSEQ] +5C085CFE 08FE [Trivial][ILSEQ] +5C085CFF 08FF [Trivial][ILSEQ] +5C095C00 0900 [Trivial] +5C095C08 0908 [Trivial] +5C095C09 0909 [Trivial] +5C095C0A 090A [Trivial] +5C095C0D 090D [Trivial] +5C095C1A 091A [Trivial] +5C095C22 0922 [Trivial] +5C095C25 095C25 [Regular] +5C095C27 0927 [Trivial] +5C095C30 0900 [Regular] +5C095C3F 093F [Trivial] +5C095C40 0940 [Trivial] +5C095C5A 091A [Regular] +5C095C5C 095C [Regular] +5C095C5F 095C5F [Regular] +5C095C61 0961 [Trivial] +5C095C62 0908 [Regular] +5C095C6E 090A [Regular] +5C095C72 090D [Regular] +5C095C74 0909 [Regular] +5C095C7E 097E [Trivial] +5C095C7F 097F [Trivial] 
+5C095C80 0980 [Trivial][ILSEQ] +5C095C81 0981 [Trivial][ILSEQ] +5C095C9F 099F [Trivial][ILSEQ] +5C095CA0 09A0 [Trivial][ILSEQ] +5C095CA1 09A1 [Trivial][ILSEQ] +5C095CE0 09E0 [Trivial][ILSEQ] +5C095CEF 09EF [Trivial][ILSEQ] +5C095CF9 09F9 [Trivial][ILSEQ] +5C095CFA 09FA [Trivial][ILSEQ] +5C095CFC 09FC [Trivial][ILSEQ] +5C095CFD 09FD [Trivial][ILSEQ] +5C095CFE 09FE [Trivial][ILSEQ] +5C095CFF 09FF [Trivial][ILSEQ] +5C0A5C00 0A00 [Trivial] +5C0A5C08 0A08 [Trivial] +5C0A5C09 0A09 [Trivial] +5C0A5C0A 0A0A [Trivial] +5C0A5C0D 0A0D [Trivial] +5C0A5C1A 0A1A [Trivial] +5C0A5C22 0A22 [Trivial] +5C0A5C25 0A5C25 [Regular] +5C0A5C27 0A27 [Trivial] +5C0A5C30 0A00 [Regular] +5C0A5C3F 0A3F [Trivial] +5C0A5C40 0A40 [Trivial] +5C0A5C5A 0A1A [Regular] +5C0A5C5C 0A5C [Regular] +5C0A5C5F 0A5C5F [Regular] +5C0A5C61 0A61 [Trivial] +5C0A5C62 0A08 [Regular] +5C0A5C6E 0A0A [Regular] +5C0A5C72 0A0D [Regular] +5C0A5C74 0A09 [Regular] +5C0A5C7E 0A7E [Trivial] +5C0A5C7F 0A7F [Trivial] +5C0A5C80 0A80 [Trivial][ILSEQ] +5C0A5C81 0A81 [Trivial][ILSEQ] +5C0A5C9F 0A9F [Trivial][ILSEQ] +5C0A5CA0 0AA0 [Trivial][ILSEQ] +5C0A5CA1 0AA1 [Trivial][ILSEQ] +5C0A5CE0 0AE0 [Trivial][ILSEQ] +5C0A5CEF 0AEF [Trivial][ILSEQ] +5C0A5CF9 0AF9 [Trivial][ILSEQ] +5C0A5CFA 0AFA [Trivial][ILSEQ] +5C0A5CFC 0AFC [Trivial][ILSEQ] +5C0A5CFD 0AFD [Trivial][ILSEQ] +5C0A5CFE 0AFE [Trivial][ILSEQ] +5C0A5CFF 0AFF [Trivial][ILSEQ] +5C0D5C00 0D00 [Trivial] +5C0D5C08 0D08 [Trivial] +5C0D5C09 0D09 [Trivial] +5C0D5C0A 0D0A [Trivial] +5C0D5C0D 0D0D [Trivial] +5C0D5C1A 0D1A [Trivial] +5C0D5C22 0D22 [Trivial] +5C0D5C25 0D5C25 [Regular] +5C0D5C27 0D27 [Trivial] +5C0D5C30 0D00 [Regular] +5C0D5C3F 0D3F [Trivial] +5C0D5C40 0D40 [Trivial] +5C0D5C5A 0D1A [Regular] +5C0D5C5C 0D5C [Regular] +5C0D5C5F 0D5C5F [Regular] +5C0D5C61 0D61 [Trivial] +5C0D5C62 0D08 [Regular] +5C0D5C6E 0D0A [Regular] +5C0D5C72 0D0D [Regular] +5C0D5C74 0D09 [Regular] +5C0D5C7E 0D7E [Trivial] +5C0D5C7F 0D7F [Trivial] +5C0D5C80 0D80 [Trivial][ILSEQ] +5C0D5C81 0D81 [Trivial][ILSEQ] +5C0D5C9F 0D9F [Trivial][ILSEQ] +5C0D5CA0 0DA0 [Trivial][ILSEQ] +5C0D5CA1 0DA1 [Trivial][ILSEQ] +5C0D5CE0 0DE0 [Trivial][ILSEQ] +5C0D5CEF 0DEF [Trivial][ILSEQ] +5C0D5CF9 0DF9 [Trivial][ILSEQ] +5C0D5CFA 0DFA [Trivial][ILSEQ] +5C0D5CFC 0DFC [Trivial][ILSEQ] +5C0D5CFD 0DFD [Trivial][ILSEQ] +5C0D5CFE 0DFE [Trivial][ILSEQ] +5C0D5CFF 0DFF [Trivial][ILSEQ] +5C1A5C00 1A00 [Trivial] +5C1A5C08 1A08 [Trivial] +5C1A5C09 1A09 [Trivial] +5C1A5C0A 1A0A [Trivial] +5C1A5C0D 1A0D [Trivial] +5C1A5C1A 1A1A [Trivial] +5C1A5C22 1A22 [Trivial] +5C1A5C25 1A5C25 [Regular] +5C1A5C27 1A27 [Trivial] +5C1A5C30 1A00 [Regular] +5C1A5C3F 1A3F [Trivial] +5C1A5C40 1A40 [Trivial] +5C1A5C5A 1A1A [Regular] +5C1A5C5C 1A5C [Regular] +5C1A5C5F 1A5C5F [Regular] +5C1A5C61 1A61 [Trivial] +5C1A5C62 1A08 [Regular] +5C1A5C6E 1A0A [Regular] +5C1A5C72 1A0D [Regular] +5C1A5C74 1A09 [Regular] +5C1A5C7E 1A7E [Trivial] +5C1A5C7F 1A7F [Trivial] +5C1A5C80 1A80 [Trivial][ILSEQ] +5C1A5C81 1A81 [Trivial][ILSEQ] +5C1A5C9F 1A9F [Trivial][ILSEQ] +5C1A5CA0 1AA0 [Trivial][ILSEQ] +5C1A5CA1 1AA1 [Trivial][ILSEQ] +5C1A5CE0 1AE0 [Trivial][ILSEQ] +5C1A5CEF 1AEF [Trivial][ILSEQ] +5C1A5CF9 1AF9 [Trivial][ILSEQ] +5C1A5CFA 1AFA [Trivial][ILSEQ] +5C1A5CFC 1AFC [Trivial][ILSEQ] +5C1A5CFD 1AFD [Trivial][ILSEQ] +5C1A5CFE 1AFE [Trivial][ILSEQ] +5C1A5CFF 1AFF [Trivial][ILSEQ] +5C225C00 2200 [Trivial] +5C225C08 2208 [Trivial] +5C225C09 2209 [Trivial] +5C225C0A 220A [Trivial] +5C225C0D 220D [Trivial] +5C225C1A 221A [Trivial] +5C225C22 2222 [Trivial] +5C225C25 225C25 [Regular] +5C225C27 2227 [Trivial] 
+5C225C30 2200 [Regular] +5C225C3F 223F [Trivial] +5C225C40 2240 [Trivial] +5C225C5A 221A [Regular] +5C225C5C 225C [Regular] +5C225C5F 225C5F [Regular] +5C225C61 2261 [Trivial] +5C225C62 2208 [Regular] +5C225C6E 220A [Regular] +5C225C72 220D [Regular] +5C225C74 2209 [Regular] +5C225C7E 227E [Trivial] +5C225C7F 227F [Trivial] +5C225C80 2280 [Trivial][ILSEQ] +5C225C81 2281 [Trivial][ILSEQ] +5C225C9F 229F [Trivial][ILSEQ] +5C225CA0 22A0 [Trivial][ILSEQ] +5C225CA1 22A1 [Trivial][ILSEQ] +5C225CE0 22E0 [Trivial][ILSEQ] +5C225CEF 22EF [Trivial][ILSEQ] +5C225CF9 22F9 [Trivial][ILSEQ] +5C225CFA 22FA [Trivial][ILSEQ] +5C225CFC 22FC [Trivial][ILSEQ] +5C225CFD 22FD [Trivial][ILSEQ] +5C225CFE 22FE [Trivial][ILSEQ] +5C225CFF 22FF [Trivial][ILSEQ] +5C255C00 5C2500 [Regular] +5C255C08 5C2508 [Regular] +5C255C09 5C2509 [Regular] +5C255C0A 5C250A [Regular] +5C255C0D 5C250D [Regular] +5C255C1A 5C251A [Regular] +5C255C22 5C2522 [Regular] +5C255C25 5C255C25 [Preserve][LIKE] +5C255C27 5C2527 [Regular] +5C255C30 5C2500 [Regular] +5C255C3F 5C253F [Regular] +5C255C40 5C2540 [Regular] +5C255C5A 5C251A [Regular] +5C255C5C 5C255C [Regular] +5C255C5F 5C255C5F [Preserve][LIKE] +5C255C61 5C2561 [Regular] +5C255C62 5C2508 [Regular] +5C255C6E 5C250A [Regular] +5C255C72 5C250D [Regular] +5C255C74 5C2509 [Regular] +5C255C7E 5C257E [Regular] +5C255C7F 5C257F [Regular] +5C255C80 5C2580 [Regular][ILSEQ] +5C255C81 5C2581 [Regular][ILSEQ] +5C255C9F 5C259F [Regular][ILSEQ] +5C255CA0 5C25A0 [Regular][ILSEQ] +5C255CA1 5C25A1 [Regular][ILSEQ] +5C255CE0 5C25E0 [Regular][ILSEQ] +5C255CEF 5C25EF [Regular][ILSEQ] +5C255CF9 5C25F9 [Regular][ILSEQ] +5C255CFA 5C25FA [Regular][ILSEQ] +5C255CFC 5C25FC [Regular][ILSEQ] +5C255CFD 5C25FD [Regular][ILSEQ] +5C255CFE 5C25FE [Regular][ILSEQ] +5C255CFF 5C25FF [Regular][ILSEQ] +5C275C00 2700 [Trivial] +5C275C08 2708 [Trivial] +5C275C09 2709 [Trivial] +5C275C0A 270A [Trivial] +5C275C0D 270D [Trivial] +5C275C1A 271A [Trivial] +5C275C22 2722 [Trivial] +5C275C25 275C25 [Regular] +5C275C27 2727 [Trivial] +5C275C30 2700 [Regular] +5C275C3F 273F [Trivial] +5C275C40 2740 [Trivial] +5C275C5A 271A [Regular] +5C275C5C 275C [Regular] +5C275C5F 275C5F [Regular] +5C275C61 2761 [Trivial] +5C275C62 2708 [Regular] +5C275C6E 270A [Regular] +5C275C72 270D [Regular] +5C275C74 2709 [Regular] +5C275C7E 277E [Trivial] +5C275C7F 277F [Trivial] +5C275C80 2780 [Trivial][ILSEQ] +5C275C81 2781 [Trivial][ILSEQ] +5C275C9F 279F [Trivial][ILSEQ] +5C275CA0 27A0 [Trivial][ILSEQ] +5C275CA1 27A1 [Trivial][ILSEQ] +5C275CE0 27E0 [Trivial][ILSEQ] +5C275CEF 27EF [Trivial][ILSEQ] +5C275CF9 27F9 [Trivial][ILSEQ] +5C275CFA 27FA [Trivial][ILSEQ] +5C275CFC 27FC [Trivial][ILSEQ] +5C275CFD 27FD [Trivial][ILSEQ] +5C275CFE 27FE [Trivial][ILSEQ] +5C275CFF 27FF [Trivial][ILSEQ] +5C305C00 0000 [Regular] +5C305C08 0008 [Regular] +5C305C09 0009 [Regular] +5C305C0A 000A [Regular] +5C305C0D 000D [Regular] +5C305C1A 001A [Regular] +5C305C22 0022 [Regular] +5C305C25 005C25 [Regular] +5C305C27 0027 [Regular] +5C305C30 0000 [Regular] +5C305C3F 003F [Regular] +5C305C40 0040 [Regular] +5C305C5A 001A [Regular] +5C305C5C 005C [Regular] +5C305C5F 005C5F [Regular] +5C305C61 0061 [Regular] +5C305C62 0008 [Regular] +5C305C6E 000A [Regular] +5C305C72 000D [Regular] +5C305C74 0009 [Regular] +5C305C7E 007E [Regular] +5C305C7F 007F [Regular] +5C305C80 0080 [Regular][ILSEQ] +5C305C81 0081 [Regular][ILSEQ] +5C305C9F 009F [Regular][ILSEQ] +5C305CA0 00A0 [Regular][ILSEQ] +5C305CA1 00A1 [Regular][ILSEQ] +5C305CE0 00E0 [Regular][ILSEQ] +5C305CEF 00EF [Regular][ILSEQ] +5C305CF9 
00F9 [Regular][ILSEQ] +5C305CFA 00FA [Regular][ILSEQ] +5C305CFC 00FC [Regular][ILSEQ] +5C305CFD 00FD [Regular][ILSEQ] +5C305CFE 00FE [Regular][ILSEQ] +5C305CFF 00FF [Regular][ILSEQ] +5C3F5C00 3F00 [Trivial] +5C3F5C08 3F08 [Trivial] +5C3F5C09 3F09 [Trivial] +5C3F5C0A 3F0A [Trivial] +5C3F5C0D 3F0D [Trivial] +5C3F5C1A 3F1A [Trivial] +5C3F5C22 3F22 [Trivial] +5C3F5C25 3F5C25 [Regular] +5C3F5C27 3F27 [Trivial] +5C3F5C30 3F00 [Regular] +5C3F5C3F 3F3F [Trivial] +5C3F5C40 3F40 [Trivial] +5C3F5C5A 3F1A [Regular] +5C3F5C5C 3F5C [Regular] +5C3F5C5F 3F5C5F [Regular] +5C3F5C61 3F61 [Trivial] +5C3F5C62 3F08 [Regular] +5C3F5C6E 3F0A [Regular] +5C3F5C72 3F0D [Regular] +5C3F5C74 3F09 [Regular] +5C3F5C7E 3F7E [Trivial] +5C3F5C7F 3F7F [Trivial] +5C3F5C80 3F80 [Trivial][ILSEQ] +5C3F5C81 3F81 [Trivial][ILSEQ] +5C3F5C9F 3F9F [Trivial][ILSEQ] +5C3F5CA0 3FA0 [Trivial][ILSEQ] +5C3F5CA1 3FA1 [Trivial][ILSEQ] +5C3F5CE0 3FE0 [Trivial][ILSEQ] +5C3F5CEF 3FEF [Trivial][ILSEQ] +5C3F5CF9 3FF9 [Trivial][ILSEQ] +5C3F5CFA 3FFA [Trivial][ILSEQ] +5C3F5CFC 3FFC [Trivial][ILSEQ] +5C3F5CFD 3FFD [Trivial][ILSEQ] +5C3F5CFE 3FFE [Trivial][ILSEQ] +5C3F5CFF 3FFF [Trivial][ILSEQ] +5C405C00 4000 [Trivial] +5C405C08 4008 [Trivial] +5C405C09 4009 [Trivial] +5C405C0A 400A [Trivial] +5C405C0D 400D [Trivial] +5C405C1A 401A [Trivial] +5C405C22 4022 [Trivial] +5C405C25 405C25 [Regular] +5C405C27 4027 [Trivial] +5C405C30 4000 [Regular] +5C405C3F 403F [Trivial] +5C405C40 4040 [Trivial] +5C405C5A 401A [Regular] +5C405C5C 405C [Regular] +5C405C5F 405C5F [Regular] +5C405C61 4061 [Trivial] +5C405C62 4008 [Regular] +5C405C6E 400A [Regular] +5C405C72 400D [Regular] +5C405C74 4009 [Regular] +5C405C7E 407E [Trivial] +5C405C7F 407F [Trivial] +5C405C80 4080 [Trivial][ILSEQ] +5C405C81 4081 [Trivial][ILSEQ] +5C405C9F 409F [Trivial][ILSEQ] +5C405CA0 40A0 [Trivial][ILSEQ] +5C405CA1 40A1 [Trivial][ILSEQ] +5C405CE0 40E0 [Trivial][ILSEQ] +5C405CEF 40EF [Trivial][ILSEQ] +5C405CF9 40F9 [Trivial][ILSEQ] +5C405CFA 40FA [Trivial][ILSEQ] +5C405CFC 40FC [Trivial][ILSEQ] +5C405CFD 40FD [Trivial][ILSEQ] +5C405CFE 40FE [Trivial][ILSEQ] +5C405CFF 40FF [Trivial][ILSEQ] +5C5A5C00 1A00 [Regular] +5C5A5C08 1A08 [Regular] +5C5A5C09 1A09 [Regular] +5C5A5C0A 1A0A [Regular] +5C5A5C0D 1A0D [Regular] +5C5A5C1A 1A1A [Regular] +5C5A5C22 1A22 [Regular] +5C5A5C25 1A5C25 [Regular] +5C5A5C27 1A27 [Regular] +5C5A5C30 1A00 [Regular] +5C5A5C3F 1A3F [Regular] +5C5A5C40 1A40 [Regular] +5C5A5C5A 1A1A [Regular] +5C5A5C5C 1A5C [Regular] +5C5A5C5F 1A5C5F [Regular] +5C5A5C61 1A61 [Regular] +5C5A5C62 1A08 [Regular] +5C5A5C6E 1A0A [Regular] +5C5A5C72 1A0D [Regular] +5C5A5C74 1A09 [Regular] +5C5A5C7E 1A7E [Regular] +5C5A5C7F 1A7F [Regular] +5C5A5C80 1A80 [Regular][ILSEQ] +5C5A5C81 1A81 [Regular][ILSEQ] +5C5A5C9F 1A9F [Regular][ILSEQ] +5C5A5CA0 1AA0 [Regular][ILSEQ] +5C5A5CA1 1AA1 [Regular][ILSEQ] +5C5A5CE0 1AE0 [Regular][ILSEQ] +5C5A5CEF 1AEF [Regular][ILSEQ] +5C5A5CF9 1AF9 [Regular][ILSEQ] +5C5A5CFA 1AFA [Regular][ILSEQ] +5C5A5CFC 1AFC [Regular][ILSEQ] +5C5A5CFD 1AFD [Regular][ILSEQ] +5C5A5CFE 1AFE [Regular][ILSEQ] +5C5A5CFF 1AFF [Regular][ILSEQ] +5C5C5C00 5C00 [Regular] +5C5C5C08 5C08 [Regular] +5C5C5C09 5C09 [Regular] +5C5C5C0A 5C0A [Regular] +5C5C5C0D 5C0D [Regular] +5C5C5C1A 5C1A [Regular] +5C5C5C22 5C22 [Regular] +5C5C5C25 5C5C25 [Regular] +5C5C5C27 5C27 [Regular] +5C5C5C30 5C00 [Regular] +5C5C5C3F 5C3F [Regular] +5C5C5C40 5C40 [Regular] +5C5C5C5A 5C1A [Regular] +5C5C5C5C 5C5C [Regular] +5C5C5C5F 5C5C5F [Regular] +5C5C5C61 5C61 [Regular] +5C5C5C62 5C08 [Regular] +5C5C5C6E 5C0A [Regular] 
+5C5C5C72 5C0D [Regular] +5C5C5C74 5C09 [Regular] +5C5C5C7E 5C7E [Regular] +5C5C5C7F 5C7F [Regular] +5C5C5C80 5C80 [Regular][ILSEQ] +5C5C5C81 5C81 [Regular][ILSEQ] +5C5C5C9F 5C9F [Regular][ILSEQ] +5C5C5CA0 5CA0 [Regular][ILSEQ] +5C5C5CA1 5CA1 [Regular][ILSEQ] +5C5C5CE0 5CE0 [Regular][ILSEQ] +5C5C5CEF 5CEF [Regular][ILSEQ] +5C5C5CF9 5CF9 [Regular][ILSEQ] +5C5C5CFA 5CFA [Regular][ILSEQ] +5C5C5CFC 5CFC [Regular][ILSEQ] +5C5C5CFD 5CFD [Regular][ILSEQ] +5C5C5CFE 5CFE [Regular][ILSEQ] +5C5C5CFF 5CFF [Regular][ILSEQ] +5C5F5C00 5C5F00 [Regular] +5C5F5C08 5C5F08 [Regular] +5C5F5C09 5C5F09 [Regular] +5C5F5C0A 5C5F0A [Regular] +5C5F5C0D 5C5F0D [Regular] +5C5F5C1A 5C5F1A [Regular] +5C5F5C22 5C5F22 [Regular] +5C5F5C25 5C5F5C25 [Preserve][LIKE] +5C5F5C27 5C5F27 [Regular] +5C5F5C30 5C5F00 [Regular] +5C5F5C3F 5C5F3F [Regular] +5C5F5C40 5C5F40 [Regular] +5C5F5C5A 5C5F1A [Regular] +5C5F5C5C 5C5F5C [Regular] +5C5F5C5F 5C5F5C5F [Preserve][LIKE] +5C5F5C61 5C5F61 [Regular] +5C5F5C62 5C5F08 [Regular] +5C5F5C6E 5C5F0A [Regular] +5C5F5C72 5C5F0D [Regular] +5C5F5C74 5C5F09 [Regular] +5C5F5C7E 5C5F7E [Regular] +5C5F5C7F 5C5F7F [Regular] +5C5F5C80 5C5F80 [Regular][ILSEQ] +5C5F5C81 5C5F81 [Regular][ILSEQ] +5C5F5C9F 5C5F9F [Regular][ILSEQ] +5C5F5CA0 5C5FA0 [Regular][ILSEQ] +5C5F5CA1 5C5FA1 [Regular][ILSEQ] +5C5F5CE0 5C5FE0 [Regular][ILSEQ] +5C5F5CEF 5C5FEF [Regular][ILSEQ] +5C5F5CF9 5C5FF9 [Regular][ILSEQ] +5C5F5CFA 5C5FFA [Regular][ILSEQ] +5C5F5CFC 5C5FFC [Regular][ILSEQ] +5C5F5CFD 5C5FFD [Regular][ILSEQ] +5C5F5CFE 5C5FFE [Regular][ILSEQ] +5C5F5CFF 5C5FFF [Regular][ILSEQ] +5C615C00 6100 [Trivial] +5C615C08 6108 [Trivial] +5C615C09 6109 [Trivial] +5C615C0A 610A [Trivial] +5C615C0D 610D [Trivial] +5C615C1A 611A [Trivial] +5C615C22 6122 [Trivial] +5C615C25 615C25 [Regular] +5C615C27 6127 [Trivial] +5C615C30 6100 [Regular] +5C615C3F 613F [Trivial] +5C615C40 6140 [Trivial] +5C615C5A 611A [Regular] +5C615C5C 615C [Regular] +5C615C5F 615C5F [Regular] +5C615C61 6161 [Trivial] +5C615C62 6108 [Regular] +5C615C6E 610A [Regular] +5C615C72 610D [Regular] +5C615C74 6109 [Regular] +5C615C7E 617E [Trivial] +5C615C7F 617F [Trivial] +5C615C80 6180 [Trivial][ILSEQ] +5C615C81 6181 [Trivial][ILSEQ] +5C615C9F 619F [Trivial][ILSEQ] +5C615CA0 61A0 [Trivial][ILSEQ] +5C615CA1 61A1 [Trivial][ILSEQ] +5C615CE0 61E0 [Trivial][ILSEQ] +5C615CEF 61EF [Trivial][ILSEQ] +5C615CF9 61F9 [Trivial][ILSEQ] +5C615CFA 61FA [Trivial][ILSEQ] +5C615CFC 61FC [Trivial][ILSEQ] +5C615CFD 61FD [Trivial][ILSEQ] +5C615CFE 61FE [Trivial][ILSEQ] +5C615CFF 61FF [Trivial][ILSEQ] +5C625C00 0800 [Regular] +5C625C08 0808 [Regular] +5C625C09 0809 [Regular] +5C625C0A 080A [Regular] +5C625C0D 080D [Regular] +5C625C1A 081A [Regular] +5C625C22 0822 [Regular] +5C625C25 085C25 [Regular] +5C625C27 0827 [Regular] +5C625C30 0800 [Regular] +5C625C3F 083F [Regular] +5C625C40 0840 [Regular] +5C625C5A 081A [Regular] +5C625C5C 085C [Regular] +5C625C5F 085C5F [Regular] +5C625C61 0861 [Regular] +5C625C62 0808 [Regular] +5C625C6E 080A [Regular] +5C625C72 080D [Regular] +5C625C74 0809 [Regular] +5C625C7E 087E [Regular] +5C625C7F 087F [Regular] +5C625C80 0880 [Regular][ILSEQ] +5C625C81 0881 [Regular][ILSEQ] +5C625C9F 089F [Regular][ILSEQ] +5C625CA0 08A0 [Regular][ILSEQ] +5C625CA1 08A1 [Regular][ILSEQ] +5C625CE0 08E0 [Regular][ILSEQ] +5C625CEF 08EF [Regular][ILSEQ] +5C625CF9 08F9 [Regular][ILSEQ] +5C625CFA 08FA [Regular][ILSEQ] +5C625CFC 08FC [Regular][ILSEQ] +5C625CFD 08FD [Regular][ILSEQ] +5C625CFE 08FE [Regular][ILSEQ] +5C625CFF 08FF [Regular][ILSEQ] +5C6E5C00 0A00 [Regular] +5C6E5C08 0A08 
[Regular] +5C6E5C09 0A09 [Regular] +5C6E5C0A 0A0A [Regular] +5C6E5C0D 0A0D [Regular] +5C6E5C1A 0A1A [Regular] +5C6E5C22 0A22 [Regular] +5C6E5C25 0A5C25 [Regular] +5C6E5C27 0A27 [Regular] +5C6E5C30 0A00 [Regular] +5C6E5C3F 0A3F [Regular] +5C6E5C40 0A40 [Regular] +5C6E5C5A 0A1A [Regular] +5C6E5C5C 0A5C [Regular] +5C6E5C5F 0A5C5F [Regular] +5C6E5C61 0A61 [Regular] +5C6E5C62 0A08 [Regular] +5C6E5C6E 0A0A [Regular] +5C6E5C72 0A0D [Regular] +5C6E5C74 0A09 [Regular] +5C6E5C7E 0A7E [Regular] +5C6E5C7F 0A7F [Regular] +5C6E5C80 0A80 [Regular][ILSEQ] +5C6E5C81 0A81 [Regular][ILSEQ] +5C6E5C9F 0A9F [Regular][ILSEQ] +5C6E5CA0 0AA0 [Regular][ILSEQ] +5C6E5CA1 0AA1 [Regular][ILSEQ] +5C6E5CE0 0AE0 [Regular][ILSEQ] +5C6E5CEF 0AEF [Regular][ILSEQ] +5C6E5CF9 0AF9 [Regular][ILSEQ] +5C6E5CFA 0AFA [Regular][ILSEQ] +5C6E5CFC 0AFC [Regular][ILSEQ] +5C6E5CFD 0AFD [Regular][ILSEQ] +5C6E5CFE 0AFE [Regular][ILSEQ] +5C6E5CFF 0AFF [Regular][ILSEQ] +5C725C00 0D00 [Regular] +5C725C08 0D08 [Regular] +5C725C09 0D09 [Regular] +5C725C0A 0D0A [Regular] +5C725C0D 0D0D [Regular] +5C725C1A 0D1A [Regular] +5C725C22 0D22 [Regular] +5C725C25 0D5C25 [Regular] +5C725C27 0D27 [Regular] +5C725C30 0D00 [Regular] +5C725C3F 0D3F [Regular] +5C725C40 0D40 [Regular] +5C725C5A 0D1A [Regular] +5C725C5C 0D5C [Regular] +5C725C5F 0D5C5F [Regular] +5C725C61 0D61 [Regular] +5C725C62 0D08 [Regular] +5C725C6E 0D0A [Regular] +5C725C72 0D0D [Regular] +5C725C74 0D09 [Regular] +5C725C7E 0D7E [Regular] +5C725C7F 0D7F [Regular] +5C725C80 0D80 [Regular][ILSEQ] +5C725C81 0D81 [Regular][ILSEQ] +5C725C9F 0D9F [Regular][ILSEQ] +5C725CA0 0DA0 [Regular][ILSEQ] +5C725CA1 0DA1 [Regular][ILSEQ] +5C725CE0 0DE0 [Regular][ILSEQ] +5C725CEF 0DEF [Regular][ILSEQ] +5C725CF9 0DF9 [Regular][ILSEQ] +5C725CFA 0DFA [Regular][ILSEQ] +5C725CFC 0DFC [Regular][ILSEQ] +5C725CFD 0DFD [Regular][ILSEQ] +5C725CFE 0DFE [Regular][ILSEQ] +5C725CFF 0DFF [Regular][ILSEQ] +5C745C00 0900 [Regular] +5C745C08 0908 [Regular] +5C745C09 0909 [Regular] +5C745C0A 090A [Regular] +5C745C0D 090D [Regular] +5C745C1A 091A [Regular] +5C745C22 0922 [Regular] +5C745C25 095C25 [Regular] +5C745C27 0927 [Regular] +5C745C30 0900 [Regular] +5C745C3F 093F [Regular] +5C745C40 0940 [Regular] +5C745C5A 091A [Regular] +5C745C5C 095C [Regular] +5C745C5F 095C5F [Regular] +5C745C61 0961 [Regular] +5C745C62 0908 [Regular] +5C745C6E 090A [Regular] +5C745C72 090D [Regular] +5C745C74 0909 [Regular] +5C745C7E 097E [Regular] +5C745C7F 097F [Regular] +5C745C80 0980 [Regular][ILSEQ] +5C745C81 0981 [Regular][ILSEQ] +5C745C9F 099F [Regular][ILSEQ] +5C745CA0 09A0 [Regular][ILSEQ] +5C745CA1 09A1 [Regular][ILSEQ] +5C745CE0 09E0 [Regular][ILSEQ] +5C745CEF 09EF [Regular][ILSEQ] +5C745CF9 09F9 [Regular][ILSEQ] +5C745CFA 09FA [Regular][ILSEQ] +5C745CFC 09FC [Regular][ILSEQ] +5C745CFD 09FD [Regular][ILSEQ] +5C745CFE 09FE [Regular][ILSEQ] +5C745CFF 09FF [Regular][ILSEQ] +5C7E5C00 7E00 [Trivial] +5C7E5C08 7E08 [Trivial] +5C7E5C09 7E09 [Trivial] +5C7E5C0A 7E0A [Trivial] +5C7E5C0D 7E0D [Trivial] +5C7E5C1A 7E1A [Trivial] +5C7E5C22 7E22 [Trivial] +5C7E5C25 7E5C25 [Regular] +5C7E5C27 7E27 [Trivial] +5C7E5C30 7E00 [Regular] +5C7E5C3F 7E3F [Trivial] +5C7E5C40 7E40 [Trivial] +5C7E5C5A 7E1A [Regular] +5C7E5C5C 7E5C [Regular] +5C7E5C5F 7E5C5F [Regular] +5C7E5C61 7E61 [Trivial] +5C7E5C62 7E08 [Regular] +5C7E5C6E 7E0A [Regular] +5C7E5C72 7E0D [Regular] +5C7E5C74 7E09 [Regular] +5C7E5C7E 7E7E [Trivial] +5C7E5C7F 7E7F [Trivial] +5C7E5C80 7E80 [Trivial][ILSEQ] +5C7E5C81 7E81 [Trivial][ILSEQ] +5C7E5C9F 7E9F [Trivial][ILSEQ] +5C7E5CA0 7EA0 [Trivial][ILSEQ] 
+5C7E5CA1 7EA1 [Trivial][ILSEQ] +5C7E5CE0 7EE0 [Trivial][ILSEQ] +5C7E5CEF 7EEF [Trivial][ILSEQ] +5C7E5CF9 7EF9 [Trivial][ILSEQ] +5C7E5CFA 7EFA [Trivial][ILSEQ] +5C7E5CFC 7EFC [Trivial][ILSEQ] +5C7E5CFD 7EFD [Trivial][ILSEQ] +5C7E5CFE 7EFE [Trivial][ILSEQ] +5C7E5CFF 7EFF [Trivial][ILSEQ] +5C7F5C00 7F00 [Trivial] +5C7F5C08 7F08 [Trivial] +5C7F5C09 7F09 [Trivial] +5C7F5C0A 7F0A [Trivial] +5C7F5C0D 7F0D [Trivial] +5C7F5C1A 7F1A [Trivial] +5C7F5C22 7F22 [Trivial] +5C7F5C25 7F5C25 [Regular] +5C7F5C27 7F27 [Trivial] +5C7F5C30 7F00 [Regular] +5C7F5C3F 7F3F [Trivial] +5C7F5C40 7F40 [Trivial] +5C7F5C5A 7F1A [Regular] +5C7F5C5C 7F5C [Regular] +5C7F5C5F 7F5C5F [Regular] +5C7F5C61 7F61 [Trivial] +5C7F5C62 7F08 [Regular] +5C7F5C6E 7F0A [Regular] +5C7F5C72 7F0D [Regular] +5C7F5C74 7F09 [Regular] +5C7F5C7E 7F7E [Trivial] +5C7F5C7F 7F7F [Trivial] +5C7F5C80 7F80 [Trivial][ILSEQ] +5C7F5C81 7F81 [Trivial][ILSEQ] +5C7F5C9F 7F9F [Trivial][ILSEQ] +5C7F5CA0 7FA0 [Trivial][ILSEQ] +5C7F5CA1 7FA1 [Trivial][ILSEQ] +5C7F5CE0 7FE0 [Trivial][ILSEQ] +5C7F5CEF 7FEF [Trivial][ILSEQ] +5C7F5CF9 7FF9 [Trivial][ILSEQ] +5C7F5CFA 7FFA [Trivial][ILSEQ] +5C7F5CFC 7FFC [Trivial][ILSEQ] +5C7F5CFD 7FFD [Trivial][ILSEQ] +5C7F5CFE 7FFE [Trivial][ILSEQ] +5C7F5CFF 7FFF [Trivial][ILSEQ] +5C805C00 8000 [Trivial][ILSEQ] +5C805C08 8008 [Trivial][ILSEQ] +5C805C09 8009 [Trivial][ILSEQ] +5C805C0A 800A [Trivial][ILSEQ] +5C805C0D 800D [Trivial][ILSEQ] +5C805C1A 801A [Trivial][ILSEQ] +5C805C22 8022 [Trivial][ILSEQ] +5C805C25 805C25 [Regular][ILSEQ] +5C805C27 8027 [Trivial][ILSEQ] +5C805C30 8000 [Regular][ILSEQ] +5C805C3F 803F [Trivial][ILSEQ] +5C805C40 8040 [Trivial][ILSEQ] +5C805C5A 801A [Regular][ILSEQ] +5C805C5C 805C [Regular][ILSEQ] +5C805C5F 805C5F [Regular][ILSEQ] +5C805C61 8061 [Trivial][ILSEQ] +5C805C62 8008 [Regular][ILSEQ] +5C805C6E 800A [Regular][ILSEQ] +5C805C72 800D [Regular][ILSEQ] +5C805C74 8009 [Regular][ILSEQ] +5C805C7E 807E [Trivial][ILSEQ] +5C805C7F 807F [Trivial][ILSEQ] +5C805C80 8080 [Trivial][ILSEQ] +5C805C81 8081 [Trivial][ILSEQ] +5C805C9F 809F [Trivial][ILSEQ] +5C805CA0 80A0 [Trivial][ILSEQ] +5C805CA1 80A1 [Trivial][ILSEQ] +5C805CE0 80E0 [Trivial][ILSEQ] +5C805CEF 80EF [Trivial][ILSEQ] +5C805CF9 80F9 [Trivial][ILSEQ] +5C805CFA 80FA [Trivial][ILSEQ] +5C805CFC 80FC [Trivial][ILSEQ] +5C805CFD 80FD [Trivial][ILSEQ] +5C805CFE 80FE [Trivial][ILSEQ] +5C805CFF 80FF [Trivial][ILSEQ] +5C815C00 8100 [Trivial][ILSEQ] +5C815C08 8108 [Trivial][ILSEQ] +5C815C09 8109 [Trivial][ILSEQ] +5C815C0A 810A [Trivial][ILSEQ] +5C815C0D 810D [Trivial][ILSEQ] +5C815C1A 811A [Trivial][ILSEQ] +5C815C22 8122 [Trivial][ILSEQ] +5C815C25 815C25 [Regular][ILSEQ] +5C815C27 8127 [Trivial][ILSEQ] +5C815C30 8100 [Regular][ILSEQ] +5C815C3F 813F [Trivial][ILSEQ] +5C815C40 8140 [Trivial][ILSEQ] +5C815C5A 811A [Regular][ILSEQ] +5C815C5C 815C [Regular][ILSEQ] +5C815C5F 815C5F [Regular][ILSEQ] +5C815C61 8161 [Trivial][ILSEQ] +5C815C62 8108 [Regular][ILSEQ] +5C815C6E 810A [Regular][ILSEQ] +5C815C72 810D [Regular][ILSEQ] +5C815C74 8109 [Regular][ILSEQ] +5C815C7E 817E [Trivial][ILSEQ] +5C815C7F 817F [Trivial][ILSEQ] +5C815C80 8180 [Trivial][ILSEQ] +5C815C81 8181 [Trivial][ILSEQ] +5C815C9F 819F [Trivial][ILSEQ] +5C815CA0 81A0 [Trivial][ILSEQ] +5C815CA1 81A1 [Trivial][ILSEQ] +5C815CE0 81E0 [Trivial][ILSEQ] +5C815CEF 81EF [Trivial][ILSEQ] +5C815CF9 81F9 [Trivial][ILSEQ] +5C815CFA 81FA [Trivial][ILSEQ] +5C815CFC 81FC [Trivial][ILSEQ] +5C815CFD 81FD [Trivial][ILSEQ] +5C815CFE 81FE [Trivial][ILSEQ] +5C815CFF 81FF [Trivial][ILSEQ] +5C9F5C00 9F00 [Trivial][ILSEQ] +5C9F5C08 9F08 
[Trivial][ILSEQ] +5C9F5C09 9F09 [Trivial][ILSEQ] +5C9F5C0A 9F0A [Trivial][ILSEQ] +5C9F5C0D 9F0D [Trivial][ILSEQ] +5C9F5C1A 9F1A [Trivial][ILSEQ] +5C9F5C22 9F22 [Trivial][ILSEQ] +5C9F5C25 9F5C25 [Regular][ILSEQ] +5C9F5C27 9F27 [Trivial][ILSEQ] +5C9F5C30 9F00 [Regular][ILSEQ] +5C9F5C3F 9F3F [Trivial][ILSEQ] +5C9F5C40 9F40 [Trivial][ILSEQ] +5C9F5C5A 9F1A [Regular][ILSEQ] +5C9F5C5C 9F5C [Regular][ILSEQ] +5C9F5C5F 9F5C5F [Regular][ILSEQ] +5C9F5C61 9F61 [Trivial][ILSEQ] +5C9F5C62 9F08 [Regular][ILSEQ] +5C9F5C6E 9F0A [Regular][ILSEQ] +5C9F5C72 9F0D [Regular][ILSEQ] +5C9F5C74 9F09 [Regular][ILSEQ] +5C9F5C7E 9F7E [Trivial][ILSEQ] +5C9F5C7F 9F7F [Trivial][ILSEQ] +5C9F5C80 9F80 [Trivial][ILSEQ] +5C9F5C81 9F81 [Trivial][ILSEQ] +5C9F5C9F 9F9F [Trivial][ILSEQ] +5C9F5CA0 9FA0 [Trivial][ILSEQ] +5C9F5CA1 9FA1 [Trivial][ILSEQ] +5C9F5CE0 9FE0 [Trivial][ILSEQ] +5C9F5CEF 9FEF [Trivial][ILSEQ] +5C9F5CF9 9FF9 [Trivial][ILSEQ] +5C9F5CFA 9FFA [Trivial][ILSEQ] +5C9F5CFC 9FFC [Trivial][ILSEQ] +5C9F5CFD 9FFD [Trivial][ILSEQ] +5C9F5CFE 9FFE [Trivial][ILSEQ] +5C9F5CFF 9FFF [Trivial][ILSEQ] +5CA05C00 A000 [Trivial][ILSEQ] +5CA05C08 A008 [Trivial][ILSEQ] +5CA05C09 A009 [Trivial][ILSEQ] +5CA05C0A A00A [Trivial][ILSEQ] +5CA05C0D A00D [Trivial][ILSEQ] +5CA05C1A A01A [Trivial][ILSEQ] +5CA05C22 A022 [Trivial][ILSEQ] +5CA05C25 A05C25 [Regular][ILSEQ] +5CA05C27 A027 [Trivial][ILSEQ] +5CA05C30 A000 [Regular][ILSEQ] +5CA05C3F A03F [Trivial][ILSEQ] +5CA05C40 A040 [Trivial][ILSEQ] +5CA05C5A A01A [Regular][ILSEQ] +5CA05C5C A05C [Regular][ILSEQ] +5CA05C5F A05C5F [Regular][ILSEQ] +5CA05C61 A061 [Trivial][ILSEQ] +5CA05C62 A008 [Regular][ILSEQ] +5CA05C6E A00A [Regular][ILSEQ] +5CA05C72 A00D [Regular][ILSEQ] +5CA05C74 A009 [Regular][ILSEQ] +5CA05C7E A07E [Trivial][ILSEQ] +5CA05C7F A07F [Trivial][ILSEQ] +5CA05C80 A080 [Trivial][ILSEQ] +5CA05C81 A081 [Trivial][ILSEQ] +5CA05C9F A09F [Trivial][ILSEQ] +5CA05CA0 A0A0 [Trivial][ILSEQ] +5CA05CA1 A0A1 [Trivial][ILSEQ] +5CA05CE0 A0E0 [Trivial][ILSEQ] +5CA05CEF A0EF [Trivial][ILSEQ] +5CA05CF9 A0F9 [Trivial][ILSEQ] +5CA05CFA A0FA [Trivial][ILSEQ] +5CA05CFC A0FC [Trivial][ILSEQ] +5CA05CFD A0FD [Trivial][ILSEQ] +5CA05CFE A0FE [Trivial][ILSEQ] +5CA05CFF A0FF [Trivial][ILSEQ] +5CA15C00 A100 [Trivial][BROKE] +5CA15C08 A108 [Trivial][BROKE] +5CA15C09 A109 [Trivial][BROKE] +5CA15C0A A10A [Trivial][BROKE] +5CA15C0D A10D [Trivial][BROKE] +5CA15C1A A11A [Trivial][BROKE] +5CA15C22 A122 [Trivial][BROKE] +5CA15C25 A15C25 [Regular] +5CA15C27 A127 [Trivial][BROKE] +5CA15C30 A100 [Regular][BROKE] +5CA15C3F A13F [Trivial][BROKE] +5CA15C40 A140 [Trivial][USER] +5CA15C5A A11A [Regular][BROKE] +5CA15C5C A15C [Regular][USER] +5CA15C5F A15C5F [Regular] +5CA15C61 A161 [Trivial][USER] +5CA15C62 A108 [Regular][BROKE][USER] +5CA15C6E A10A [Regular][BROKE] +5CA15C72 A10D [Regular][BROKE] +5CA15C74 A109 [Regular][BROKE] +5CA15C7E A17E [Trivial][USER] +5CA15C7F A17F [Trivial][BROKE] +5CA15C80 A180 [Trivial][ILSEQ] +5CA15C81 A181 [Trivial][ILSEQ] +5CA15C9F A19F [Trivial][ILSEQ] +5CA15CA0 A1A0 [Trivial][ILSEQ] +5CA15CA1 A1A1 [Trivial][FIXED][USER] +5CA15CE0 A1E0 [Trivial][FIXED][USER] +5CA15CEF A1EF [Trivial][FIXED][USER] +5CA15CF9 A1F9 [Trivial][FIXED][USER] +5CA15CFA A1FA [Trivial][FIXED][USER] +5CA15CFC A1FC [Trivial][FIXED][USER] +5CA15CFD A1FD [Trivial][FIXED][USER] +5CA15CFE A1FE [Trivial][FIXED][USER] +5CA15CFF A1FF [Trivial][ILSEQ] +5CE05C00 E000 [Trivial][BROKE] +5CE05C08 E008 [Trivial][BROKE] +5CE05C09 E009 [Trivial][BROKE] +5CE05C0A E00A [Trivial][BROKE] +5CE05C0D E00D [Trivial][BROKE] +5CE05C1A E01A [Trivial][BROKE] 
+5CE05C22 E022 [Trivial][BROKE] +5CE05C25 E05C25 [Regular] +5CE05C27 E027 [Trivial][BROKE] +5CE05C30 E000 [Regular][BROKE] +5CE05C3F E03F [Trivial][BROKE] +5CE05C40 E040 [Trivial][USER] +5CE05C5A E01A [Regular][BROKE] +5CE05C5C E05C [Regular][USER] +5CE05C5F E05C5F [Regular] +5CE05C61 E061 [Trivial][USER] +5CE05C62 E008 [Regular][BROKE][USER] +5CE05C6E E00A [Regular][BROKE] +5CE05C72 E00D [Regular][BROKE] +5CE05C74 E009 [Regular][BROKE] +5CE05C7E E07E [Trivial][USER] +5CE05C7F E07F [Trivial][BROKE] +5CE05C80 E080 [Trivial][ILSEQ] +5CE05C81 E081 [Trivial][ILSEQ] +5CE05C9F E09F [Trivial][ILSEQ] +5CE05CA0 E0A0 [Trivial][ILSEQ] +5CE05CA1 E0A1 [Trivial][FIXED][USER] +5CE05CE0 E0E0 [Trivial][FIXED][USER] +5CE05CEF E0EF [Trivial][FIXED][USER] +5CE05CF9 E0F9 [Trivial][FIXED][USER] +5CE05CFA E0FA [Trivial][FIXED][USER] +5CE05CFC E0FC [Trivial][FIXED][USER] +5CE05CFD E0FD [Trivial][FIXED][USER] +5CE05CFE E0FE [Trivial][FIXED][USER] +5CE05CFF E0FF [Trivial][ILSEQ] +5CEF5C00 EF00 [Trivial][BROKE] +5CEF5C08 EF08 [Trivial][BROKE] +5CEF5C09 EF09 [Trivial][BROKE] +5CEF5C0A EF0A [Trivial][BROKE] +5CEF5C0D EF0D [Trivial][BROKE] +5CEF5C1A EF1A [Trivial][BROKE] +5CEF5C22 EF22 [Trivial][BROKE] +5CEF5C25 EF5C25 [Regular] +5CEF5C27 EF27 [Trivial][BROKE] +5CEF5C30 EF00 [Regular][BROKE] +5CEF5C3F EF3F [Trivial][BROKE] +5CEF5C40 EF40 [Trivial][USER] +5CEF5C5A EF1A [Regular][BROKE] +5CEF5C5C EF5C [Regular][USER] +5CEF5C5F EF5C5F [Regular] +5CEF5C61 EF61 [Trivial][USER] +5CEF5C62 EF08 [Regular][BROKE][USER] +5CEF5C6E EF0A [Regular][BROKE] +5CEF5C72 EF0D [Regular][BROKE] +5CEF5C74 EF09 [Regular][BROKE] +5CEF5C7E EF7E [Trivial][USER] +5CEF5C7F EF7F [Trivial][BROKE] +5CEF5C80 EF80 [Trivial][ILSEQ] +5CEF5C81 EF81 [Trivial][ILSEQ] +5CEF5C9F EF9F [Trivial][ILSEQ] +5CEF5CA0 EFA0 [Trivial][ILSEQ] +5CEF5CA1 EFA1 [Trivial][FIXED][USER] +5CEF5CE0 EFE0 [Trivial][FIXED][USER] +5CEF5CEF EFEF [Trivial][FIXED][USER] +5CEF5CF9 EFF9 [Trivial][FIXED][USER] +5CEF5CFA EFFA [Trivial][FIXED][USER] +5CEF5CFC EFFC [Trivial][FIXED][USER] +5CEF5CFD EFFD [Trivial][FIXED][USER] +5CEF5CFE EFFE [Trivial][FIXED][USER] +5CEF5CFF EFFF [Trivial][ILSEQ] +5CF95C00 F900 [Trivial][BROKE] +5CF95C08 F908 [Trivial][BROKE] +5CF95C09 F909 [Trivial][BROKE] +5CF95C0A F90A [Trivial][BROKE] +5CF95C0D F90D [Trivial][BROKE] +5CF95C1A F91A [Trivial][BROKE] +5CF95C22 F922 [Trivial][BROKE] +5CF95C25 F95C25 [Regular] +5CF95C27 F927 [Trivial][BROKE] +5CF95C30 F900 [Regular][BROKE] +5CF95C3F F93F [Trivial][BROKE] +5CF95C40 F940 [Trivial][USER] +5CF95C5A F91A [Regular][BROKE] +5CF95C5C F95C [Regular][USER] +5CF95C5F F95C5F [Regular] +5CF95C61 F961 [Trivial][USER] +5CF95C62 F908 [Regular][BROKE][USER] +5CF95C6E F90A [Regular][BROKE] +5CF95C72 F90D [Regular][BROKE] +5CF95C74 F909 [Regular][BROKE] +5CF95C7E F97E [Trivial][USER] +5CF95C7F F97F [Trivial][BROKE] +5CF95C80 F980 [Trivial][ILSEQ] +5CF95C81 F981 [Trivial][ILSEQ] +5CF95C9F F99F [Trivial][ILSEQ] +5CF95CA0 F9A0 [Trivial][ILSEQ] +5CF95CA1 F9A1 [Trivial][FIXED][USER] +5CF95CE0 F9E0 [Trivial][FIXED][USER] +5CF95CEF F9EF [Trivial][FIXED][USER] +5CF95CF9 F9F9 [Trivial][FIXED][USER] +5CF95CFA F9FA [Trivial][FIXED][USER] +5CF95CFC F9FC [Trivial][FIXED][USER] +5CF95CFD F9FD [Trivial][FIXED][USER] +5CF95CFE F9FE [Trivial][FIXED][USER] +5CF95CFF F9FF [Trivial][ILSEQ] +5CFA5C00 FA00 [Trivial][ILSEQ] +5CFA5C08 FA08 [Trivial][ILSEQ] +5CFA5C09 FA09 [Trivial][ILSEQ] +5CFA5C0A FA0A [Trivial][ILSEQ] +5CFA5C0D FA0D [Trivial][ILSEQ] +5CFA5C1A FA1A [Trivial][ILSEQ] +5CFA5C22 FA22 [Trivial][ILSEQ] +5CFA5C25 FA5C25 [Regular][ILSEQ] 
+5CFA5C27 FA27 [Trivial][ILSEQ] +5CFA5C30 FA00 [Regular][ILSEQ] +5CFA5C3F FA3F [Trivial][ILSEQ] +5CFA5C40 FA40 [Trivial][ILSEQ] +5CFA5C5A FA1A [Regular][ILSEQ] +5CFA5C5C FA5C [Regular][ILSEQ] +5CFA5C5F FA5C5F [Regular][ILSEQ] +5CFA5C61 FA61 [Trivial][ILSEQ] +5CFA5C62 FA08 [Regular][ILSEQ] +5CFA5C6E FA0A [Regular][ILSEQ] +5CFA5C72 FA0D [Regular][ILSEQ] +5CFA5C74 FA09 [Regular][ILSEQ] +5CFA5C7E FA7E [Trivial][ILSEQ] +5CFA5C7F FA7F [Trivial][ILSEQ] +5CFA5C80 FA80 [Trivial][ILSEQ] +5CFA5C81 FA81 [Trivial][ILSEQ] +5CFA5C9F FA9F [Trivial][ILSEQ] +5CFA5CA0 FAA0 [Trivial][ILSEQ] +5CFA5CA1 FAA1 [Trivial][ILSEQ] +5CFA5CE0 FAE0 [Trivial][ILSEQ] +5CFA5CEF FAEF [Trivial][ILSEQ] +5CFA5CF9 FAF9 [Trivial][ILSEQ] +5CFA5CFA FAFA [Trivial][ILSEQ] +5CFA5CFC FAFC [Trivial][ILSEQ] +5CFA5CFD FAFD [Trivial][ILSEQ] +5CFA5CFE FAFE [Trivial][ILSEQ] +5CFA5CFF FAFF [Trivial][ILSEQ] +5CFC5C00 FC00 [Trivial][ILSEQ] +5CFC5C08 FC08 [Trivial][ILSEQ] +5CFC5C09 FC09 [Trivial][ILSEQ] +5CFC5C0A FC0A [Trivial][ILSEQ] +5CFC5C0D FC0D [Trivial][ILSEQ] +5CFC5C1A FC1A [Trivial][ILSEQ] +5CFC5C22 FC22 [Trivial][ILSEQ] +5CFC5C25 FC5C25 [Regular][ILSEQ] +5CFC5C27 FC27 [Trivial][ILSEQ] +5CFC5C30 FC00 [Regular][ILSEQ] +5CFC5C3F FC3F [Trivial][ILSEQ] +5CFC5C40 FC40 [Trivial][ILSEQ] +5CFC5C5A FC1A [Regular][ILSEQ] +5CFC5C5C FC5C [Regular][ILSEQ] +5CFC5C5F FC5C5F [Regular][ILSEQ] +5CFC5C61 FC61 [Trivial][ILSEQ] +5CFC5C62 FC08 [Regular][ILSEQ] +5CFC5C6E FC0A [Regular][ILSEQ] +5CFC5C72 FC0D [Regular][ILSEQ] +5CFC5C74 FC09 [Regular][ILSEQ] +5CFC5C7E FC7E [Trivial][ILSEQ] +5CFC5C7F FC7F [Trivial][ILSEQ] +5CFC5C80 FC80 [Trivial][ILSEQ] +5CFC5C81 FC81 [Trivial][ILSEQ] +5CFC5C9F FC9F [Trivial][ILSEQ] +5CFC5CA0 FCA0 [Trivial][ILSEQ] +5CFC5CA1 FCA1 [Trivial][ILSEQ] +5CFC5CE0 FCE0 [Trivial][ILSEQ] +5CFC5CEF FCEF [Trivial][ILSEQ] +5CFC5CF9 FCF9 [Trivial][ILSEQ] +5CFC5CFA FCFA [Trivial][ILSEQ] +5CFC5CFC FCFC [Trivial][ILSEQ] +5CFC5CFD FCFD [Trivial][ILSEQ] +5CFC5CFE FCFE [Trivial][ILSEQ] +5CFC5CFF FCFF [Trivial][ILSEQ] +5CFD5C00 FD00 [Trivial][ILSEQ] +5CFD5C08 FD08 [Trivial][ILSEQ] +5CFD5C09 FD09 [Trivial][ILSEQ] +5CFD5C0A FD0A [Trivial][ILSEQ] +5CFD5C0D FD0D [Trivial][ILSEQ] +5CFD5C1A FD1A [Trivial][ILSEQ] +5CFD5C22 FD22 [Trivial][ILSEQ] +5CFD5C25 FD5C25 [Regular][ILSEQ] +5CFD5C27 FD27 [Trivial][ILSEQ] +5CFD5C30 FD00 [Regular][ILSEQ] +5CFD5C3F FD3F [Trivial][ILSEQ] +5CFD5C40 FD40 [Trivial][ILSEQ] +5CFD5C5A FD1A [Regular][ILSEQ] +5CFD5C5C FD5C [Regular][ILSEQ] +5CFD5C5F FD5C5F [Regular][ILSEQ] +5CFD5C61 FD61 [Trivial][ILSEQ] +5CFD5C62 FD08 [Regular][ILSEQ] +5CFD5C6E FD0A [Regular][ILSEQ] +5CFD5C72 FD0D [Regular][ILSEQ] +5CFD5C74 FD09 [Regular][ILSEQ] +5CFD5C7E FD7E [Trivial][ILSEQ] +5CFD5C7F FD7F [Trivial][ILSEQ] +5CFD5C80 FD80 [Trivial][ILSEQ] +5CFD5C81 FD81 [Trivial][ILSEQ] +5CFD5C9F FD9F [Trivial][ILSEQ] +5CFD5CA0 FDA0 [Trivial][ILSEQ] +5CFD5CA1 FDA1 [Trivial][ILSEQ] +5CFD5CE0 FDE0 [Trivial][ILSEQ] +5CFD5CEF FDEF [Trivial][ILSEQ] +5CFD5CF9 FDF9 [Trivial][ILSEQ] +5CFD5CFA FDFA [Trivial][ILSEQ] +5CFD5CFC FDFC [Trivial][ILSEQ] +5CFD5CFD FDFD [Trivial][ILSEQ] +5CFD5CFE FDFE [Trivial][ILSEQ] +5CFD5CFF FDFF [Trivial][ILSEQ] +5CFE5C00 FE00 [Trivial][ILSEQ] +5CFE5C08 FE08 [Trivial][ILSEQ] +5CFE5C09 FE09 [Trivial][ILSEQ] +5CFE5C0A FE0A [Trivial][ILSEQ] +5CFE5C0D FE0D [Trivial][ILSEQ] +5CFE5C1A FE1A [Trivial][ILSEQ] +5CFE5C22 FE22 [Trivial][ILSEQ] +5CFE5C25 FE5C25 [Regular][ILSEQ] +5CFE5C27 FE27 [Trivial][ILSEQ] +5CFE5C30 FE00 [Regular][ILSEQ] +5CFE5C3F FE3F [Trivial][ILSEQ] +5CFE5C40 FE40 [Trivial][ILSEQ] +5CFE5C5A FE1A [Regular][ILSEQ] +5CFE5C5C FE5C 
[Regular][ILSEQ] +5CFE5C5F FE5C5F [Regular][ILSEQ] +5CFE5C61 FE61 [Trivial][ILSEQ] +5CFE5C62 FE08 [Regular][ILSEQ] +5CFE5C6E FE0A [Regular][ILSEQ] +5CFE5C72 FE0D [Regular][ILSEQ] +5CFE5C74 FE09 [Regular][ILSEQ] +5CFE5C7E FE7E [Trivial][ILSEQ] +5CFE5C7F FE7F [Trivial][ILSEQ] +5CFE5C80 FE80 [Trivial][ILSEQ] +5CFE5C81 FE81 [Trivial][ILSEQ] +5CFE5C9F FE9F [Trivial][ILSEQ] +5CFE5CA0 FEA0 [Trivial][ILSEQ] +5CFE5CA1 FEA1 [Trivial][ILSEQ] +5CFE5CE0 FEE0 [Trivial][ILSEQ] +5CFE5CEF FEEF [Trivial][ILSEQ] +5CFE5CF9 FEF9 [Trivial][ILSEQ] +5CFE5CFA FEFA [Trivial][ILSEQ] +5CFE5CFC FEFC [Trivial][ILSEQ] +5CFE5CFD FEFD [Trivial][ILSEQ] +5CFE5CFE FEFE [Trivial][ILSEQ] +5CFE5CFF FEFF [Trivial][ILSEQ] +5CFF5C00 FF00 [Trivial][ILSEQ] +5CFF5C08 FF08 [Trivial][ILSEQ] +5CFF5C09 FF09 [Trivial][ILSEQ] +5CFF5C0A FF0A [Trivial][ILSEQ] +5CFF5C0D FF0D [Trivial][ILSEQ] +5CFF5C1A FF1A [Trivial][ILSEQ] +5CFF5C22 FF22 [Trivial][ILSEQ] +5CFF5C25 FF5C25 [Regular][ILSEQ] +5CFF5C27 FF27 [Trivial][ILSEQ] +5CFF5C30 FF00 [Regular][ILSEQ] +5CFF5C3F FF3F [Trivial][ILSEQ] +5CFF5C40 FF40 [Trivial][ILSEQ] +5CFF5C5A FF1A [Regular][ILSEQ] +5CFF5C5C FF5C [Regular][ILSEQ] +5CFF5C5F FF5C5F [Regular][ILSEQ] +5CFF5C61 FF61 [Trivial][ILSEQ] +5CFF5C62 FF08 [Regular][ILSEQ] +5CFF5C6E FF0A [Regular][ILSEQ] +5CFF5C72 FF0D [Regular][ILSEQ] +5CFF5C74 FF09 [Regular][ILSEQ] +5CFF5C7E FF7E [Trivial][ILSEQ] +5CFF5C7F FF7F [Trivial][ILSEQ] +5CFF5C80 FF80 [Trivial][ILSEQ] +5CFF5C81 FF81 [Trivial][ILSEQ] +5CFF5C9F FF9F [Trivial][ILSEQ] +5CFF5CA0 FFA0 [Trivial][ILSEQ] +5CFF5CA1 FFA1 [Trivial][ILSEQ] +5CFF5CE0 FFE0 [Trivial][ILSEQ] +5CFF5CEF FFEF [Trivial][ILSEQ] +5CFF5CF9 FFF9 [Trivial][ILSEQ] +5CFF5CFA FFFA [Trivial][ILSEQ] +5CFF5CFC FFFC [Trivial][ILSEQ] +5CFF5CFD FFFD [Trivial][ILSEQ] +5CFF5CFE FFFE [Trivial][ILSEQ] +5CFF5CFF FFFF [Trivial][ILSEQ] +DROP TABLE t1; +DROP PROCEDURE p1; +DROP PROCEDURE p2; +DROP FUNCTION unescape; +DROP FUNCTION unescape_type; +DROP FUNCTION wellformedness; +DROP FUNCTION mysql_real_escape_string_generated; +DROP FUNCTION iswellformed; +DROP TABLE allbytes; +# End of ctype_backslash.inc +SET NAMES big5; +# Start of ctype_E05C.inc +SELECT HEX('à\'),HEX('à\t'); +HEX('à\') HEX('à\t') +E05C E05C74 +SELECT HEX('\\à\'),HEX('\\à\t'),HEX('\\à\t\t'); +HEX('\\à\') HEX('\\à\t') HEX('\\à\t\t') +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX('''à\'),HEX('à\'''); +HEX('''à\') HEX('à\''') +27E05C E05C27 +SELECT HEX('\\''à\'),HEX('à\''\\'); +HEX('\\''à\') HEX('à\''\\') +5C27E05C E05C275C +SELECT HEX(BINARY('à\')),HEX(BINARY('à\t')); +HEX(BINARY('à\')) HEX(BINARY('à\t')) +E05C E05C74 +SELECT HEX(BINARY('\\à\')),HEX(BINARY('\\à\t')),HEX(BINARY('\\à\t\t')); +HEX(BINARY('\\à\')) HEX(BINARY('\\à\t')) HEX(BINARY('\\à\t\t')) +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX(BINARY('''à\')),HEX(BINARY('à\''')); +HEX(BINARY('''à\')) HEX(BINARY('à\''')) +27E05C E05C27 +SELECT HEX(BINARY('\\''à\')),HEX(BINARY('à\''\\')); +HEX(BINARY('\\''à\')) HEX(BINARY('à\''\\')) +5C27E05C E05C275C +SELECT HEX(_BINARY'à\'),HEX(_BINARY'à\t'); +HEX(_BINARY'à\') HEX(_BINARY'à\t') +E05C E05C74 +SELECT HEX(_BINARY'\\à\'),HEX(_BINARY'\\à\t'),HEX(_BINARY'\\à\t\t'); +HEX(_BINARY'\\à\') HEX(_BINARY'\\à\t') HEX(_BINARY'\\à\t\t') +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX(_BINARY'''à\'),HEX(_BINARY'à\'''); +HEX(_BINARY'''à\') HEX(_BINARY'à\''') +27E05C E05C27 +SELECT HEX(_BINARY'\\''à\'),HEX(_BINARY'à\''\\'); +HEX(_BINARY'\\''à\') HEX(_BINARY'à\''\\') +5C27E05C E05C275C +CREATE TABLE t1 AS SELECT REPEAT(' ',10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` 
varchar(10) CHARACTER SET big5 NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('à\'),('à\t'); +INSERT INTO t1 VALUES ('\\à\'),('\\à\t'),('\\à\t\t'); +INSERT INTO t1 VALUES ('''à\'),('à\'''); +INSERT INTO t1 VALUES ('\\''à\'),('à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES ('à\'),('à\t'); +INSERT INTO t1 VALUES ('\\à\'),('\\à\t'),('\\à\t\t'); +INSERT INTO t1 VALUES ('''à\'),('à\'''); +INSERT INTO t1 VALUES ('\\''à\'),('à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET big5 NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (BINARY('à\')),(BINARY('à\t')); +INSERT INTO t1 VALUES (BINARY('\\à\')),(BINARY('\\à\t')),(BINARY('\\à\t\t')); +INSERT INTO t1 VALUES (BINARY('''à\')),(BINARY('à\''')); +INSERT INTO t1 VALUES (BINARY('\\''à\')),(BINARY('à\''\\')); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES (BINARY('à\')),(BINARY('à\t')); +INSERT INTO t1 VALUES (BINARY('\\à\')),(BINARY('\\à\t')),(BINARY('\\à\t\t')); +INSERT INTO t1 VALUES (BINARY('''à\')),(BINARY('à\''')); +INSERT INTO t1 VALUES (BINARY('\\''à\')),(BINARY('à\''\\')); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET big5 NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (_BINARY'à\'),(_BINARY'à\t'); +INSERT INTO t1 VALUES (_BINARY'\\à\'),(_BINARY'\\à\t'),(_BINARY'\\à\t\t'); +INSERT INTO t1 VALUES (_BINARY'''à\'),(_BINARY'à\'''); +INSERT INTO t1 VALUES (_BINARY'\\''à\'),(_BINARY'à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES (_BINARY'à\'),(_BINARY'à\t'); +INSERT INTO t1 VALUES (_BINARY'\\à\'),(_BINARY'\\à\t'),(_BINARY'\\à\t\t'); +INSERT INTO t1 VALUES (_BINARY'''à\'),(_BINARY'à\'''); +INSERT INTO t1 VALUES (_BINARY'\\''à\'),(_BINARY'à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +SET character_set_client=binary, character_set_results=binary; +SELECT @@character_set_client, @@character_set_connection, @@character_set_results; +@@character_set_client @@character_set_connection @@character_set_results +binary big5 binary +SELECT HEX('à\['), HEX('\à\['); +HEX('à\[') HEX('\à\[') +E05B E05B +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET big5 NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO 
t1 VALUES ('à\['),('\à\['); +SELECT HEX(a) FROM t1; +HEX(a) +E05B +E05B +DROP TABLE t1; +SET character_set_client=@@character_set_connection, character_set_results=@@character_set_connection; +SET character_set_connection=binary; +SELECT @@character_set_client, @@character_set_connection, @@character_set_results; +@@character_set_client @@character_set_connection @@character_set_results +big5 binary big5 +SELECT HEX('à\['), HEX('\à\['); +HEX('à\[') HEX('\à\[') +E05C5B E05B +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(10) NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('à\['),('\à\['); +SELECT HEX(a) FROM t1; +HEX(a) +E05C5B +E05B +DROP TABLE t1; +# Start of ctype_E05C.inc +SET NAMES big5; +CREATE TABLE t1 (a ENUM('È@') CHARACTER SET big5); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` enum('?') CHARACTER SET big5 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('È@'); +INSERT INTO t1 VALUES (_big5 0xC840); +INSERT INTO t1 VALUES (0xC840); +SELECT HEX(a),a FROM t1; +HEX(a) a +C840 È@ +C840 È@ +C840 È@ +DROP TABLE t1; +SET NAMES binary; +CREATE TABLE t1 (a ENUM('È@') CHARACTER SET big5); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` enum('?') CHARACTER SET big5 DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('È@'); +INSERT INTO t1 VALUES (_big5 0xC840); +INSERT INTO t1 VALUES (0xC840); +SELECT HEX(a),a FROM t1; +HEX(a) a +C840 È@ +C840 È@ +C840 È@ +DROP TABLE t1; +SET NAMES big5; +CREATE TABLE t1 ( +c1 CHAR(10) CHARACTER SET big5, +c2 VARCHAR(10) CHARACTER SET big5, +c3 TEXT CHARACTER SET big5 +); +INSERT INTO t1 VALUES ('È@','È@','È@'); +INSERT INTO t1 VALUES (_big5 0xC840,_big5 0xC840,_big5 0xC840); +INSERT INTO t1 VALUES (0xC840,0xC840,0xC840); +SELECT HEX(c1),HEX(c2),HEX(c3) FROM t1; +HEX(c1) HEX(c2) HEX(c3) +C840 C840 C840 +C840 C840 C840 +C840 C840 C840 +DROP TABLE t1; +SET NAMES binary; +CREATE TABLE t1 ( +c1 CHAR(10) CHARACTER SET big5, +c2 VARCHAR(10) CHARACTER SET big5, +c3 TEXT CHARACTER SET big5 +); +INSERT INTO t1 VALUES ('È@','È@','È@'); +INSERT INTO t1 VALUES (_big5 0xC840,_big5 0xC840,_big5 0xC840); +INSERT INTO t1 VALUES (0xC840,0xC840,0xC840); +SELECT HEX(c1),HEX(c2),HEX(c3) FROM t1; +HEX(c1) HEX(c2) HEX(c3) +C840 C840 C840 +C840 C840 C840 +C840 C840 C840 +DROP TABLE t1; +SET NAMES binary; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET big5, KEY(a)); +INSERT INTO t1 VALUES (0xC840),(0xC841),(0xC842); +SELECT HEX(a) FROM t1 WHERE a='È@'; +HEX(a) +C840 +SELECT HEX(a) FROM t1 IGNORE KEY(a) WHERE a='È@'; +HEX(a) +C840 +DROP TABLE t1; +# +# End of 10.0 tests +# diff --git a/mysql-test/r/ctype_cp932_binlog_stm.result b/mysql-test/r/ctype_cp932_binlog_stm.result index 37631f9f4bb..0e6ae25a395 100644 --- a/mysql-test/r/ctype_cp932_binlog_stm.result +++ b/mysql-test/r/ctype_cp932_binlog_stm.result @@ -20060,3 +20060,204 @@ hex(weight_string(cast(0x814081408140 as char),25, 4, 0xC0)) # # End of 5.6 tests # +# +# Start of 10.0 tests +# +SET NAMES cp932; +# Start of ctype_E05C.inc +SELECT HEX('à\'),HEX('à\t'); +HEX('à\') HEX('à\t') +E05C E05C74 +SELECT HEX('\\à\'),HEX('\\à\t'),HEX('\\à\t\t'); +HEX('\\à\') HEX('\\à\t') HEX('\\à\t\t') +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX('''à\'),HEX('à\'''); +HEX('''à\') HEX('à\''') +27E05C E05C27 +SELECT HEX('\\''à\'),HEX('à\''\\'); +HEX('\\''à\') HEX('à\''\\') +5C27E05C E05C275C +SELECT 
HEX(BINARY('à\')),HEX(BINARY('à\t')); +HEX(BINARY('à\')) HEX(BINARY('à\t')) +E05C E05C74 +SELECT HEX(BINARY('\\à\')),HEX(BINARY('\\à\t')),HEX(BINARY('\\à\t\t')); +HEX(BINARY('\\à\')) HEX(BINARY('\\à\t')) HEX(BINARY('\\à\t\t')) +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX(BINARY('''à\')),HEX(BINARY('à\''')); +HEX(BINARY('''à\')) HEX(BINARY('à\''')) +27E05C E05C27 +SELECT HEX(BINARY('\\''à\')),HEX(BINARY('à\''\\')); +HEX(BINARY('\\''à\')) HEX(BINARY('à\''\\')) +5C27E05C E05C275C +SELECT HEX(_BINARY'à\'),HEX(_BINARY'à\t'); +HEX(_BINARY'à\') HEX(_BINARY'à\t') +E05C E05C74 +SELECT HEX(_BINARY'\\à\'),HEX(_BINARY'\\à\t'),HEX(_BINARY'\\à\t\t'); +HEX(_BINARY'\\à\') HEX(_BINARY'\\à\t') HEX(_BINARY'\\à\t\t') +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX(_BINARY'''à\'),HEX(_BINARY'à\'''); +HEX(_BINARY'''à\') HEX(_BINARY'à\''') +27E05C E05C27 +SELECT HEX(_BINARY'\\''à\'),HEX(_BINARY'à\''\\'); +HEX(_BINARY'\\''à\') HEX(_BINARY'à\''\\') +5C27E05C E05C275C +CREATE TABLE t1 AS SELECT REPEAT(' ',10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET cp932 NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('à\'),('à\t'); +INSERT INTO t1 VALUES ('\\à\'),('\\à\t'),('\\à\t\t'); +INSERT INTO t1 VALUES ('''à\'),('à\'''); +INSERT INTO t1 VALUES ('\\''à\'),('à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES ('à\'),('à\t'); +INSERT INTO t1 VALUES ('\\à\'),('\\à\t'),('\\à\t\t'); +INSERT INTO t1 VALUES ('''à\'),('à\'''); +INSERT INTO t1 VALUES ('\\''à\'),('à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET cp932 NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (BINARY('à\')),(BINARY('à\t')); +INSERT INTO t1 VALUES (BINARY('\\à\')),(BINARY('\\à\t')),(BINARY('\\à\t\t')); +INSERT INTO t1 VALUES (BINARY('''à\')),(BINARY('à\''')); +INSERT INTO t1 VALUES (BINARY('\\''à\')),(BINARY('à\''\\')); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES (BINARY('à\')),(BINARY('à\t')); +INSERT INTO t1 VALUES (BINARY('\\à\')),(BINARY('\\à\t')),(BINARY('\\à\t\t')); +INSERT INTO t1 VALUES (BINARY('''à\')),(BINARY('à\''')); +INSERT INTO t1 VALUES (BINARY('\\''à\')),(BINARY('à\''\\')); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET cp932 NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (_BINARY'à\'),(_BINARY'à\t'); +INSERT INTO t1 VALUES (_BINARY'\\à\'),(_BINARY'\\à\t'),(_BINARY'\\à\t\t'); +INSERT INTO t1 VALUES (_BINARY'''à\'),(_BINARY'à\'''); +INSERT INTO t1 VALUES (_BINARY'\\''à\'),(_BINARY'à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 
+\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES (_BINARY'à\'),(_BINARY'à\t'); +INSERT INTO t1 VALUES (_BINARY'\\à\'),(_BINARY'\\à\t'),(_BINARY'\\à\t\t'); +INSERT INTO t1 VALUES (_BINARY'''à\'),(_BINARY'à\'''); +INSERT INTO t1 VALUES (_BINARY'\\''à\'),(_BINARY'à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +SET character_set_client=binary, character_set_results=binary; +SELECT @@character_set_client, @@character_set_connection, @@character_set_results; +@@character_set_client @@character_set_connection @@character_set_results +binary cp932 binary +SELECT HEX('à\['), HEX('\à\['); +HEX('à\[') HEX('\à\[') +E05B E05B +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET cp932 NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('à\['),('\à\['); +SELECT HEX(a) FROM t1; +HEX(a) +E05B +E05B +DROP TABLE t1; +SET character_set_client=@@character_set_connection, character_set_results=@@character_set_connection; +SET character_set_connection=binary; +SELECT @@character_set_client, @@character_set_connection, @@character_set_results; +@@character_set_client @@character_set_connection @@character_set_results +cp932 binary cp932 +SELECT HEX('à\['), HEX('\à\['); +HEX('à\[') HEX('\à\[') +E05C5B E05B +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(10) NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('à\['),('\à\['); +SELECT HEX(a) FROM t1; +HEX(a) +E05C5B +E05B +DROP TABLE t1; +# Start of ctype_E05C.inc +# +# End of 10.0 tests +# diff --git a/mysql-test/r/ctype_eucjpms.result b/mysql-test/r/ctype_eucjpms.result index 813eae74cab..a1232c115e9 100644 --- a/mysql-test/r/ctype_eucjpms.result +++ b/mysql-test/r/ctype_eucjpms.result @@ -33622,3 +33622,20 @@ hex(weight_string(cast(0x8FA2C38FA2C38FA2C3 as char),25, 4, 0xC0)) # # End of 5.6 tests # +# +# Start of 10.0 tests +# +# +# MDEV-6776 ujis and eucjmps erroneously accept 0x8EA0 as a valid byte sequence +# +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET eucjpms); +INSERT INTO t1 VALUES (0x8EA0); +SELECT HEX(a), CHAR_LENGTH(a) FROM t1; +HEX(a) CHAR_LENGTH(a) + 0 +DROP TABLE t1; +SELECT _eucjpms 0x8EA0; +ERROR HY000: Invalid eucjpms character string: '8EA0' +# +# End of 10.0 tests +# diff --git a/mysql-test/r/ctype_gb2312.result b/mysql-test/r/ctype_gb2312.result index 1ab177e72c7..5db6e2d3035 100644 --- a/mysql-test/r/ctype_gb2312.result +++ b/mysql-test/r/ctype_gb2312.result @@ -1301,3 +1301,3073 @@ A1A1A1A1A1A120202020202020202020202020202020202020 # # End of 5.6 tests # +# +# Start of 10.0 tests +# +# Start of ctype_unescape.inc +SET @query=_binary'SELECT CHARSET(\'test\'),@@character_set_client,@@character_set_connection'; +PREPARE stmt FROM @query; +EXECUTE stmt; +CHARSET('test') @@character_set_client @@character_set_connection +gb2312 gb2312 gb2312 +DEALLOCATE PREPARE stmt; +CREATE TABLE allbytes (a VARBINARY(10)); +# Using selected bytes combinations +CREATE TABLE halfs (a INT); +INSERT INTO halfs VALUES (0x00),(0x01),(0x02),(0x03),(0x04),(0x05),(0x06),(0x07); +INSERT INTO halfs VALUES (0x08),(0x09),(0x0A),(0x0B),(0x0C),(0x0D),(0x0E),(0x0F); +CREATE TEMPORARY TABLE bytes (a 
BINARY(1), KEY(a)) ENGINE=MyISAM; +INSERT INTO bytes SELECT CHAR((t1.a << 4) | t2.a USING BINARY) FROM halfs t1, halfs t2; +DROP TABLE halfs; +CREATE TABLE selected_bytes (a VARBINARY(10)); +INSERT INTO selected_bytes (a) VALUES ('\0'),('\b'),('\t'),('\r'),('\n'),('\Z'); +INSERT INTO selected_bytes (a) VALUES ('0'),('b'),('t'),('r'),('n'),('Z'); +INSERT INTO selected_bytes (a) VALUES ('\\'),('_'),('%'),(0x22),(0x27); +INSERT INTO selected_bytes (a) VALUES ('a'); +INSERT INTO selected_bytes (a) VALUES +(0x3F), # 7bit +(0x40), # 7bit mbtail +(0x7E), # 7bit mbtail nonascii-8bit +(0x7F), # 7bit nonascii-8bit +(0x80), # mbtail bad-mb +(0x81), # mbhead mbtail +(0x9F), # mbhead mbtail bad-mb +(0xA0), # mbhead mbtail bad-mb +(0xA1), # mbhead mbtail nonascii-8bit +(0xE0), # mbhead mbtail +(0xEF), # mbhead mbtail +(0xF9), # mbhead mbtail +(0xFA), # mbhead mbtail bad-mb +(0xFC), # mbhead mbtail bad-mb +(0xFD), # mbhead mbtail bad-mb +(0xFE), # mbhead mbtail bad-mb +(0xFF); +INSERT INTO allbytes (a) SELECT a FROM bytes; +INSERT INTO allbytes (a) SELECT CONCAT(t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,0x5C,t2.a) FROM selected_bytes t1,selected_bytes t2; +DROP TABLE selected_bytes; +DELETE FROM allbytes WHERE +OCTET_LENGTH(a)>1 AND +LOCATE(0x5C,a)=0 AND +a NOT LIKE '%\'%' AND + a NOT LIKE '%"%'; +CREATE PROCEDURE p1(val VARBINARY(10)) +BEGIN +DECLARE EXIT HANDLER FOR SQLSTATE '42000' INSERT INTO t1 (a,b) VALUES(val,NULL); +SET @query=CONCAT(_binary"INSERT INTO t1 (a,b) VALUES (0x",HEX(val),",'",val,"')"); +PREPARE stmt FROM @query; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; +END// +CREATE PROCEDURE p2() +BEGIN +DECLARE val VARBINARY(10); +DECLARE done INT DEFAULT FALSE; +DECLARE stmt CURSOR FOR SELECT a FROM allbytes; +DECLARE CONTINUE HANDLER FOR NOT FOUND SET done=TRUE; +OPEN stmt; +read_loop1: LOOP +FETCH stmt INTO val; +IF done THEN +LEAVE read_loop1; +END IF; +CALL p1(val); +END LOOP; +CLOSE stmt; +END// +CREATE FUNCTION iswellformed(a VARBINARY(256)) RETURNS INT RETURN a=BINARY CONVERT(a USING gb2312);// +CREATE FUNCTION unescape(a VARBINARY(256)) RETURNS VARBINARY(256) +BEGIN +# We need to do it in a way to avoid producing new escape sequences +# First, enclose all known escape sequences in '{{xx}}' + # - Backslash not followed by the LIKE pattern characters _ and % +# - Double escapes +# This uses PCRE Branch Reset Groups: (?|(alt1)|(alt2)|(alt3)). +# So '\\1' in the last argument always means the match, no matter +# which alternative it came from. 
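+# Illustrative aside (an editorial sketch, not part of the recorded test output),
+# assuming MariaDB 10.0's PCRE-based REGEXP_REPLACE: with the branch reset group
+# used in the next statement, a call such as
+#   SELECT REGEXP_REPLACE('\\a''''x', '(?|(\\\\[^_%])|(\\x{27}\\x{27}))', '{{\\1}}');
+# should return '{{\a}}{{''}}x': the backslash escape and the doubled quote are
+# both wrapped, and \1 picks up whichever alternative actually matched.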
+SET a=REGEXP_REPLACE(a,'(?|(\\\\[^_%])|(\\x{27}\\x{27}))','{{\\1}}'); +# Now unescape all enclosed standard escape sequences +SET a=REPLACE(a,'{{\\0}}', '\0'); +SET a=REPLACE(a,'{{\\b}}', '\b'); +SET a=REPLACE(a,'{{\\t}}', '\t'); +SET a=REPLACE(a,'{{\\r}}', '\r'); +SET a=REPLACE(a,'{{\\n}}', '\n'); +SET a=REPLACE(a,'{{\\Z}}', '\Z'); +SET a=REPLACE(a,'{{\\\'}}', '\''); +# Unescape double quotes +SET a=REPLACE(a,'{{\'\'}}', '\''); + # Unescape the rest: all other \x sequences mean just 'x' + SET a=REGEXP_REPLACE(a, '{{\\\\(.|\\R)}}', '\\1'); + RETURN a; +END// +CREATE FUNCTION unescape_type(a VARBINARY(256),b VARBINARY(256)) RETURNS VARBINARY(256) +BEGIN +RETURN CASE +WHEN b IS NULL THEN '[SyntErr]' + WHEN a=b THEN CASE +WHEN OCTET_LENGTH(a)=1 THEN '[Preserve]' + WHEN a RLIKE '\\\\[_%]' THEN '[Preserve][LIKE]' + WHEN a RLIKE '^[[:ascii:]]+$' THEN '[Preserve][ASCII]' + ELSE '[Preserv][MB]' END +WHEN REPLACE(a,0x5C,'')=b THEN '[Trivial]' + WHEN UNESCAPE(a)=b THEN '[Regular]' + ELSE '[Special]' END; +END// +CREATE FUNCTION wellformedness(a VARBINARY(256), b VARBINARY(256)) +RETURNS VARBINARY(256) +BEGIN +RETURN CASE +WHEN b IS NULL THEN '' + WHEN NOT iswellformed(a) AND iswellformed(b) THEN '[FIXED]' + WHEN iswellformed(a) AND NOT iswellformed(b) THEN '[BROKE]' + WHEN NOT iswellformed(a) AND NOT iswellformed(b) THEN '[ILSEQ]' + ELSE '' + END; +END// +CREATE FUNCTION mysql_real_escape_string_generated(a VARBINARY(256)) +RETURNS VARBINARY(256) +BEGIN +DECLARE a1 BINARY(1) DEFAULT SUBSTR(a,1,1); +DECLARE a2 BINARY(1) DEFAULT SUBSTR(a,2,1); +DECLARE a3 BINARY(1) DEFAULT SUBSTR(a,3,1); +DECLARE a4 BINARY(1) DEFAULT SUBSTR(a,4,1); +DECLARE a2a4 BINARY(2) DEFAULT CONCAT(a2,a4); +RETURN CASE +WHEN (a1=0x5C) AND +(a3=0x5C) AND +(a2>0x7F) AND +(a4 NOT IN ('_','%','0','t','r','n','Z')) AND +iswellformed(a2a4) THEN '[USER]' + ELSE '' + END; +END// +CREATE TABLE t1 (a VARBINARY(10),b VARBINARY(10)); +CALL p2(); +SELECT HEX(a),HEX(b), +CONCAT(unescape_type(a,b), +wellformedness(a,b), +mysql_real_escape_string_generated(a), +IF(UNESCAPE(a)<>b,CONCAT('[BAD',HEX(UNESCAPE(a)),']'),'')) AS comment +FROM t1 ORDER BY LENGTH(a),a; +HEX(a) HEX(b) comment +00 00 [Preserve] +01 01 [Preserve] +02 02 [Preserve] +03 03 [Preserve] +04 04 [Preserve] +05 05 [Preserve] +06 06 [Preserve] +07 07 [Preserve] +08 08 [Preserve] +09 09 [Preserve] +0A 0A [Preserve] +0B 0B [Preserve] +0C 0C [Preserve] +0D 0D [Preserve] +0E 0E [Preserve] +0F 0F [Preserve] +10 10 [Preserve] +11 11 [Preserve] +12 12 [Preserve] +13 13 [Preserve] +14 14 [Preserve] +15 15 [Preserve] +16 16 [Preserve] +17 17 [Preserve] +18 18 [Preserve] +19 19 [Preserve] +1A 1A [Preserve] +1B 1B [Preserve] +1C 1C [Preserve] +1D 1D [Preserve] +1E 1E [Preserve] +1F 1F [Preserve] +20 20 [Preserve] +21 21 [Preserve] +22 22 [Preserve] +23 23 [Preserve] +24 24 [Preserve] +25 25 [Preserve] +26 26 [Preserve] +27 NULL [SyntErr] +28 28 [Preserve] +29 29 [Preserve] +2A 2A [Preserve] +2B 2B [Preserve] +2C 2C [Preserve] +2D 2D [Preserve] +2E 2E [Preserve] +2F 2F [Preserve] +30 30 [Preserve] +31 31 [Preserve] +32 32 [Preserve] +33 33 [Preserve] +34 34 [Preserve] +35 35 [Preserve] +36 36 [Preserve] +37 37 [Preserve] +38 38 [Preserve] +39 39 [Preserve] +3A 3A [Preserve] +3B 3B [Preserve] +3C 3C [Preserve] +3D 3D [Preserve] +3E 3E [Preserve] +3F 3F [Preserve] +40 40 [Preserve] +41 41 [Preserve] +42 42 [Preserve] +43 43 [Preserve] +44 44 [Preserve] +45 45 [Preserve] +46 46 [Preserve] +47 47 [Preserve] +48 48 [Preserve] +49 49 [Preserve] +4A 4A [Preserve] +4B 4B [Preserve] +4C 4C [Preserve] 
+4D 4D [Preserve] +4E 4E [Preserve] +4F 4F [Preserve] +50 50 [Preserve] +51 51 [Preserve] +52 52 [Preserve] +53 53 [Preserve] +54 54 [Preserve] +55 55 [Preserve] +56 56 [Preserve] +57 57 [Preserve] +58 58 [Preserve] +59 59 [Preserve] +5A 5A [Preserve] +5B 5B [Preserve] +5C NULL [SyntErr] +5D 5D [Preserve] +5E 5E [Preserve] +5F 5F [Preserve] +60 60 [Preserve] +61 61 [Preserve] +62 62 [Preserve] +63 63 [Preserve] +64 64 [Preserve] +65 65 [Preserve] +66 66 [Preserve] +67 67 [Preserve] +68 68 [Preserve] +69 69 [Preserve] +6A 6A [Preserve] +6B 6B [Preserve] +6C 6C [Preserve] +6D 6D [Preserve] +6E 6E [Preserve] +6F 6F [Preserve] +70 70 [Preserve] +71 71 [Preserve] +72 72 [Preserve] +73 73 [Preserve] +74 74 [Preserve] +75 75 [Preserve] +76 76 [Preserve] +77 77 [Preserve] +78 78 [Preserve] +79 79 [Preserve] +7A 7A [Preserve] +7B 7B [Preserve] +7C 7C [Preserve] +7D 7D [Preserve] +7E 7E [Preserve] +7F 7F [Preserve] +80 80 [Preserve][ILSEQ] +81 81 [Preserve][ILSEQ] +82 82 [Preserve][ILSEQ] +83 83 [Preserve][ILSEQ] +84 84 [Preserve][ILSEQ] +85 85 [Preserve][ILSEQ] +86 86 [Preserve][ILSEQ] +87 87 [Preserve][ILSEQ] +88 88 [Preserve][ILSEQ] +89 89 [Preserve][ILSEQ] +8A 8A [Preserve][ILSEQ] +8B 8B [Preserve][ILSEQ] +8C 8C [Preserve][ILSEQ] +8D 8D [Preserve][ILSEQ] +8E 8E [Preserve][ILSEQ] +8F 8F [Preserve][ILSEQ] +90 90 [Preserve][ILSEQ] +91 91 [Preserve][ILSEQ] +92 92 [Preserve][ILSEQ] +93 93 [Preserve][ILSEQ] +94 94 [Preserve][ILSEQ] +95 95 [Preserve][ILSEQ] +96 96 [Preserve][ILSEQ] +97 97 [Preserve][ILSEQ] +98 98 [Preserve][ILSEQ] +99 99 [Preserve][ILSEQ] +9A 9A [Preserve][ILSEQ] +9B 9B [Preserve][ILSEQ] +9C 9C [Preserve][ILSEQ] +9D 9D [Preserve][ILSEQ] +9E 9E [Preserve][ILSEQ] +9F 9F [Preserve][ILSEQ] +A0 A0 [Preserve][ILSEQ] +A1 A1 [Preserve][ILSEQ] +A2 A2 [Preserve][ILSEQ] +A3 A3 [Preserve][ILSEQ] +A4 A4 [Preserve][ILSEQ] +A5 A5 [Preserve][ILSEQ] +A6 A6 [Preserve][ILSEQ] +A7 A7 [Preserve][ILSEQ] +A8 A8 [Preserve][ILSEQ] +A9 A9 [Preserve][ILSEQ] +AA AA [Preserve][ILSEQ] +AB AB [Preserve][ILSEQ] +AC AC [Preserve][ILSEQ] +AD AD [Preserve][ILSEQ] +AE AE [Preserve][ILSEQ] +AF AF [Preserve][ILSEQ] +B0 B0 [Preserve][ILSEQ] +B1 B1 [Preserve][ILSEQ] +B2 B2 [Preserve][ILSEQ] +B3 B3 [Preserve][ILSEQ] +B4 B4 [Preserve][ILSEQ] +B5 B5 [Preserve][ILSEQ] +B6 B6 [Preserve][ILSEQ] +B7 B7 [Preserve][ILSEQ] +B8 B8 [Preserve][ILSEQ] +B9 B9 [Preserve][ILSEQ] +BA BA [Preserve][ILSEQ] +BB BB [Preserve][ILSEQ] +BC BC [Preserve][ILSEQ] +BD BD [Preserve][ILSEQ] +BE BE [Preserve][ILSEQ] +BF BF [Preserve][ILSEQ] +C0 C0 [Preserve][ILSEQ] +C1 C1 [Preserve][ILSEQ] +C2 C2 [Preserve][ILSEQ] +C3 C3 [Preserve][ILSEQ] +C4 C4 [Preserve][ILSEQ] +C5 C5 [Preserve][ILSEQ] +C6 C6 [Preserve][ILSEQ] +C7 C7 [Preserve][ILSEQ] +C8 C8 [Preserve][ILSEQ] +C9 C9 [Preserve][ILSEQ] +CA CA [Preserve][ILSEQ] +CB CB [Preserve][ILSEQ] +CC CC [Preserve][ILSEQ] +CD CD [Preserve][ILSEQ] +CE CE [Preserve][ILSEQ] +CF CF [Preserve][ILSEQ] +D0 D0 [Preserve][ILSEQ] +D1 D1 [Preserve][ILSEQ] +D2 D2 [Preserve][ILSEQ] +D3 D3 [Preserve][ILSEQ] +D4 D4 [Preserve][ILSEQ] +D5 D5 [Preserve][ILSEQ] +D6 D6 [Preserve][ILSEQ] +D7 D7 [Preserve][ILSEQ] +D8 D8 [Preserve][ILSEQ] +D9 D9 [Preserve][ILSEQ] +DA DA [Preserve][ILSEQ] +DB DB [Preserve][ILSEQ] +DC DC [Preserve][ILSEQ] +DD DD [Preserve][ILSEQ] +DE DE [Preserve][ILSEQ] +DF DF [Preserve][ILSEQ] +E0 E0 [Preserve][ILSEQ] +E1 E1 [Preserve][ILSEQ] +E2 E2 [Preserve][ILSEQ] +E3 E3 [Preserve][ILSEQ] +E4 E4 [Preserve][ILSEQ] +E5 E5 [Preserve][ILSEQ] +E6 E6 [Preserve][ILSEQ] +E7 E7 [Preserve][ILSEQ] +E8 E8 [Preserve][ILSEQ] +E9 E9 
[Preserve][ILSEQ] +EA EA [Preserve][ILSEQ] +EB EB [Preserve][ILSEQ] +EC EC [Preserve][ILSEQ] +ED ED [Preserve][ILSEQ] +EE EE [Preserve][ILSEQ] +EF EF [Preserve][ILSEQ] +F0 F0 [Preserve][ILSEQ] +F1 F1 [Preserve][ILSEQ] +F2 F2 [Preserve][ILSEQ] +F3 F3 [Preserve][ILSEQ] +F4 F4 [Preserve][ILSEQ] +F5 F5 [Preserve][ILSEQ] +F6 F6 [Preserve][ILSEQ] +F7 F7 [Preserve][ILSEQ] +F8 F8 [Preserve][ILSEQ] +F9 F9 [Preserve][ILSEQ] +FA FA [Preserve][ILSEQ] +FB FB [Preserve][ILSEQ] +FC FC [Preserve][ILSEQ] +FD FD [Preserve][ILSEQ] +FE FE [Preserve][ILSEQ] +FF FF [Preserve][ILSEQ] +0022 0022 [Preserve][ASCII] +0027 NULL [SyntErr] +005C NULL [SyntErr] +0822 0822 [Preserve][ASCII] +0827 NULL [SyntErr] +085C NULL [SyntErr] +0922 0922 [Preserve][ASCII] +0927 NULL [SyntErr] +095C NULL [SyntErr] +0A22 0A22 [Preserve][ASCII] +0A27 NULL [SyntErr] +0A5C NULL [SyntErr] +0D22 0D22 [Preserve][ASCII] +0D27 NULL [SyntErr] +0D5C NULL [SyntErr] +1A22 1A22 [Preserve][ASCII] +1A27 NULL [SyntErr] +1A5C NULL [SyntErr] +2200 2200 [Preserve][ASCII] +2208 2208 [Preserve][ASCII] +2209 2209 [Preserve][ASCII] +220A 220A [Preserve][ASCII] +220D 220D [Preserve][ASCII] +221A 221A [Preserve][ASCII] +2222 2222 [Preserve][ASCII] +2225 2225 [Preserve][ASCII] +2227 NULL [SyntErr] +2230 2230 [Preserve][ASCII] +223F 223F [Preserve][ASCII] +2240 2240 [Preserve][ASCII] +225A 225A [Preserve][ASCII] +225C NULL [SyntErr] +225F 225F [Preserve][ASCII] +2261 2261 [Preserve][ASCII] +2262 2262 [Preserve][ASCII] +226E 226E [Preserve][ASCII] +2272 2272 [Preserve][ASCII] +2274 2274 [Preserve][ASCII] +227E 227E [Preserve][ASCII] +227F 227F [Preserve][ASCII] +2280 2280 [Preserv][MB][ILSEQ] +2281 2281 [Preserv][MB][ILSEQ] +229F 229F [Preserv][MB][ILSEQ] +22A0 22A0 [Preserv][MB][ILSEQ] +22A1 22A1 [Preserv][MB][ILSEQ] +22E0 22E0 [Preserv][MB][ILSEQ] +22EF 22EF [Preserv][MB][ILSEQ] +22F9 22F9 [Preserv][MB][ILSEQ] +22FA 22FA [Preserv][MB][ILSEQ] +22FC 22FC [Preserv][MB][ILSEQ] +22FD 22FD [Preserv][MB][ILSEQ] +22FE 22FE [Preserv][MB][ILSEQ] +22FF 22FF [Preserv][MB][ILSEQ] +2522 2522 [Preserve][ASCII] +2527 NULL [SyntErr] +255C NULL [SyntErr] +2700 NULL [SyntErr] +2708 NULL [SyntErr] +2709 NULL [SyntErr] +270A NULL [SyntErr] +270D NULL [SyntErr] +271A NULL [SyntErr] +2722 NULL [SyntErr] +2725 NULL [SyntErr] +2727 27 [Regular] +2730 NULL [SyntErr] +273F NULL [SyntErr] +2740 NULL [SyntErr] +275A NULL [SyntErr] +275C NULL [SyntErr] +275F NULL [SyntErr] +2761 NULL [SyntErr] +2762 NULL [SyntErr] +276E NULL [SyntErr] +2772 NULL [SyntErr] +2774 NULL [SyntErr] +277E NULL [SyntErr] +277F NULL [SyntErr] +2780 NULL [SyntErr] +2781 NULL [SyntErr] +279F NULL [SyntErr] +27A0 NULL [SyntErr] +27A1 NULL [SyntErr] +27E0 NULL [SyntErr] +27EF NULL [SyntErr] +27F9 NULL [SyntErr] +27FA NULL [SyntErr] +27FC NULL [SyntErr] +27FD NULL [SyntErr] +27FE NULL [SyntErr] +27FF NULL [SyntErr] +3022 3022 [Preserve][ASCII] +3027 NULL [SyntErr] +305C NULL [SyntErr] +3F22 3F22 [Preserve][ASCII] +3F27 NULL [SyntErr] +3F5C NULL [SyntErr] +4022 4022 [Preserve][ASCII] +4027 NULL [SyntErr] +405C NULL [SyntErr] +5A22 5A22 [Preserve][ASCII] +5A27 NULL [SyntErr] +5A5C NULL [SyntErr] +5C00 00 [Trivial] +5C08 08 [Trivial] +5C09 09 [Trivial] +5C0A 0A [Trivial] +5C0D 0D [Trivial] +5C1A 1A [Trivial] +5C22 22 [Trivial] +5C25 5C25 [Preserve][LIKE] +5C27 27 [Trivial] +5C30 00 [Regular] +5C3F 3F [Trivial] +5C40 40 [Trivial] +5C5A 1A [Regular] +5C5C 5C [Regular] +5C5F 5C5F [Preserve][LIKE] +5C61 61 [Trivial] +5C62 08 [Regular] +5C6E 0A [Regular] +5C72 0D [Regular] +5C74 09 [Regular] +5C7E 7E [Trivial] +5C7F 7F 
[Trivial] +5C80 80 [Trivial][ILSEQ] +5C81 81 [Trivial][ILSEQ] +5C9F 9F [Trivial][ILSEQ] +5CA0 A0 [Trivial][ILSEQ] +5CA1 A1 [Trivial][ILSEQ] +5CE0 E0 [Trivial][ILSEQ] +5CEF EF [Trivial][ILSEQ] +5CF9 F9 [Trivial][ILSEQ] +5CFA FA [Trivial][ILSEQ] +5CFC FC [Trivial][ILSEQ] +5CFD FD [Trivial][ILSEQ] +5CFE FE [Trivial][ILSEQ] +5CFF FF [Trivial][ILSEQ] +5F22 5F22 [Preserve][ASCII] +5F27 NULL [SyntErr] +5F5C NULL [SyntErr] +6122 6122 [Preserve][ASCII] +6127 NULL [SyntErr] +615C NULL [SyntErr] +6222 6222 [Preserve][ASCII] +6227 NULL [SyntErr] +625C NULL [SyntErr] +6E22 6E22 [Preserve][ASCII] +6E27 NULL [SyntErr] +6E5C NULL [SyntErr] +7222 7222 [Preserve][ASCII] +7227 NULL [SyntErr] +725C NULL [SyntErr] +7422 7422 [Preserve][ASCII] +7427 NULL [SyntErr] +745C NULL [SyntErr] +7E22 7E22 [Preserve][ASCII] +7E27 NULL [SyntErr] +7E5C NULL [SyntErr] +7F22 7F22 [Preserve][ASCII] +7F27 NULL [SyntErr] +7F5C NULL [SyntErr] +8022 8022 [Preserv][MB][ILSEQ] +8027 NULL [SyntErr] +805C NULL [SyntErr] +8122 8122 [Preserv][MB][ILSEQ] +8127 NULL [SyntErr] +815C NULL [SyntErr] +9F22 9F22 [Preserv][MB][ILSEQ] +9F27 NULL [SyntErr] +9F5C NULL [SyntErr] +A022 A022 [Preserv][MB][ILSEQ] +A027 NULL [SyntErr] +A05C NULL [SyntErr] +A122 A122 [Preserv][MB][ILSEQ] +A127 NULL [SyntErr] +A15C NULL [SyntErr] +E022 E022 [Preserv][MB][ILSEQ] +E027 NULL [SyntErr] +E05C NULL [SyntErr] +EF22 EF22 [Preserv][MB][ILSEQ] +EF27 NULL [SyntErr] +EF5C NULL [SyntErr] +F922 F922 [Preserv][MB][ILSEQ] +F927 NULL [SyntErr] +F95C NULL [SyntErr] +FA22 FA22 [Preserv][MB][ILSEQ] +FA27 NULL [SyntErr] +FA5C NULL [SyntErr] +FC22 FC22 [Preserv][MB][ILSEQ] +FC27 NULL [SyntErr] +FC5C NULL [SyntErr] +FD22 FD22 [Preserv][MB][ILSEQ] +FD27 NULL [SyntErr] +FD5C NULL [SyntErr] +FE22 FE22 [Preserv][MB][ILSEQ] +FE27 NULL [SyntErr] +FE5C NULL [SyntErr] +FF22 FF22 [Preserv][MB][ILSEQ] +FF27 NULL [SyntErr] +FF5C NULL [SyntErr] +5C0000 0000 [Trivial] +5C0008 0008 [Trivial] +5C0009 0009 [Trivial] +5C000A 000A [Trivial] +5C000D 000D [Trivial] +5C001A 001A [Trivial] +5C0022 0022 [Trivial] +5C0025 0025 [Trivial] +5C0027 NULL [SyntErr] +5C0030 0030 [Trivial] +5C003F 003F [Trivial] +5C0040 0040 [Trivial] +5C005A 005A [Trivial] +5C005C NULL [SyntErr] +5C005F 005F [Trivial] +5C0061 0061 [Trivial] +5C0062 0062 [Trivial] +5C006E 006E [Trivial] +5C0072 0072 [Trivial] +5C0074 0074 [Trivial] +5C007E 007E [Trivial] +5C007F 007F [Trivial] +5C0080 0080 [Trivial][ILSEQ] +5C0081 0081 [Trivial][ILSEQ] +5C009F 009F [Trivial][ILSEQ] +5C00A0 00A0 [Trivial][ILSEQ] +5C00A1 00A1 [Trivial][ILSEQ] +5C00E0 00E0 [Trivial][ILSEQ] +5C00EF 00EF [Trivial][ILSEQ] +5C00F9 00F9 [Trivial][ILSEQ] +5C00FA 00FA [Trivial][ILSEQ] +5C00FC 00FC [Trivial][ILSEQ] +5C00FD 00FD [Trivial][ILSEQ] +5C00FE 00FE [Trivial][ILSEQ] +5C00FF 00FF [Trivial][ILSEQ] +5C0800 0800 [Trivial] +5C0808 0808 [Trivial] +5C0809 0809 [Trivial] +5C080A 080A [Trivial] +5C080D 080D [Trivial] +5C081A 081A [Trivial] +5C0822 0822 [Trivial] +5C0825 0825 [Trivial] +5C0827 NULL [SyntErr] +5C0830 0830 [Trivial] +5C083F 083F [Trivial] +5C0840 0840 [Trivial] +5C085A 085A [Trivial] +5C085C NULL [SyntErr] +5C085F 085F [Trivial] +5C0861 0861 [Trivial] +5C0862 0862 [Trivial] +5C086E 086E [Trivial] +5C0872 0872 [Trivial] +5C0874 0874 [Trivial] +5C087E 087E [Trivial] +5C087F 087F [Trivial] +5C0880 0880 [Trivial][ILSEQ] +5C0881 0881 [Trivial][ILSEQ] +5C089F 089F [Trivial][ILSEQ] +5C08A0 08A0 [Trivial][ILSEQ] +5C08A1 08A1 [Trivial][ILSEQ] +5C08E0 08E0 [Trivial][ILSEQ] +5C08EF 08EF [Trivial][ILSEQ] +5C08F9 08F9 [Trivial][ILSEQ] +5C08FA 08FA [Trivial][ILSEQ] 
+5C08FC 08FC [Trivial][ILSEQ] +5C08FD 08FD [Trivial][ILSEQ] +5C08FE 08FE [Trivial][ILSEQ] +5C08FF 08FF [Trivial][ILSEQ] +5C0900 0900 [Trivial] +5C0908 0908 [Trivial] +5C0909 0909 [Trivial] +5C090A 090A [Trivial] +5C090D 090D [Trivial] +5C091A 091A [Trivial] +5C0922 0922 [Trivial] +5C0925 0925 [Trivial] +5C0927 NULL [SyntErr] +5C0930 0930 [Trivial] +5C093F 093F [Trivial] +5C0940 0940 [Trivial] +5C095A 095A [Trivial] +5C095C NULL [SyntErr] +5C095F 095F [Trivial] +5C0961 0961 [Trivial] +5C0962 0962 [Trivial] +5C096E 096E [Trivial] +5C0972 0972 [Trivial] +5C0974 0974 [Trivial] +5C097E 097E [Trivial] +5C097F 097F [Trivial] +5C0980 0980 [Trivial][ILSEQ] +5C0981 0981 [Trivial][ILSEQ] +5C099F 099F [Trivial][ILSEQ] +5C09A0 09A0 [Trivial][ILSEQ] +5C09A1 09A1 [Trivial][ILSEQ] +5C09E0 09E0 [Trivial][ILSEQ] +5C09EF 09EF [Trivial][ILSEQ] +5C09F9 09F9 [Trivial][ILSEQ] +5C09FA 09FA [Trivial][ILSEQ] +5C09FC 09FC [Trivial][ILSEQ] +5C09FD 09FD [Trivial][ILSEQ] +5C09FE 09FE [Trivial][ILSEQ] +5C09FF 09FF [Trivial][ILSEQ] +5C0A00 0A00 [Trivial] +5C0A08 0A08 [Trivial] +5C0A09 0A09 [Trivial] +5C0A0A 0A0A [Trivial] +5C0A0D 0A0D [Trivial] +5C0A1A 0A1A [Trivial] +5C0A22 0A22 [Trivial] +5C0A25 0A25 [Trivial] +5C0A27 NULL [SyntErr] +5C0A30 0A30 [Trivial] +5C0A3F 0A3F [Trivial] +5C0A40 0A40 [Trivial] +5C0A5A 0A5A [Trivial] +5C0A5C NULL [SyntErr] +5C0A5F 0A5F [Trivial] +5C0A61 0A61 [Trivial] +5C0A62 0A62 [Trivial] +5C0A6E 0A6E [Trivial] +5C0A72 0A72 [Trivial] +5C0A74 0A74 [Trivial] +5C0A7E 0A7E [Trivial] +5C0A7F 0A7F [Trivial] +5C0A80 0A80 [Trivial][ILSEQ] +5C0A81 0A81 [Trivial][ILSEQ] +5C0A9F 0A9F [Trivial][ILSEQ] +5C0AA0 0AA0 [Trivial][ILSEQ] +5C0AA1 0AA1 [Trivial][ILSEQ] +5C0AE0 0AE0 [Trivial][ILSEQ] +5C0AEF 0AEF [Trivial][ILSEQ] +5C0AF9 0AF9 [Trivial][ILSEQ] +5C0AFA 0AFA [Trivial][ILSEQ] +5C0AFC 0AFC [Trivial][ILSEQ] +5C0AFD 0AFD [Trivial][ILSEQ] +5C0AFE 0AFE [Trivial][ILSEQ] +5C0AFF 0AFF [Trivial][ILSEQ] +5C0D00 0D00 [Trivial] +5C0D08 0D08 [Trivial] +5C0D09 0D09 [Trivial] +5C0D0A 0D0A [Trivial] +5C0D0D 0D0D [Trivial] +5C0D1A 0D1A [Trivial] +5C0D22 0D22 [Trivial] +5C0D25 0D25 [Trivial] +5C0D27 NULL [SyntErr] +5C0D30 0D30 [Trivial] +5C0D3F 0D3F [Trivial] +5C0D40 0D40 [Trivial] +5C0D5A 0D5A [Trivial] +5C0D5C NULL [SyntErr] +5C0D5F 0D5F [Trivial] +5C0D61 0D61 [Trivial] +5C0D62 0D62 [Trivial] +5C0D6E 0D6E [Trivial] +5C0D72 0D72 [Trivial] +5C0D74 0D74 [Trivial] +5C0D7E 0D7E [Trivial] +5C0D7F 0D7F [Trivial] +5C0D80 0D80 [Trivial][ILSEQ] +5C0D81 0D81 [Trivial][ILSEQ] +5C0D9F 0D9F [Trivial][ILSEQ] +5C0DA0 0DA0 [Trivial][ILSEQ] +5C0DA1 0DA1 [Trivial][ILSEQ] +5C0DE0 0DE0 [Trivial][ILSEQ] +5C0DEF 0DEF [Trivial][ILSEQ] +5C0DF9 0DF9 [Trivial][ILSEQ] +5C0DFA 0DFA [Trivial][ILSEQ] +5C0DFC 0DFC [Trivial][ILSEQ] +5C0DFD 0DFD [Trivial][ILSEQ] +5C0DFE 0DFE [Trivial][ILSEQ] +5C0DFF 0DFF [Trivial][ILSEQ] +5C1A00 1A00 [Trivial] +5C1A08 1A08 [Trivial] +5C1A09 1A09 [Trivial] +5C1A0A 1A0A [Trivial] +5C1A0D 1A0D [Trivial] +5C1A1A 1A1A [Trivial] +5C1A22 1A22 [Trivial] +5C1A25 1A25 [Trivial] +5C1A27 NULL [SyntErr] +5C1A30 1A30 [Trivial] +5C1A3F 1A3F [Trivial] +5C1A40 1A40 [Trivial] +5C1A5A 1A5A [Trivial] +5C1A5C NULL [SyntErr] +5C1A5F 1A5F [Trivial] +5C1A61 1A61 [Trivial] +5C1A62 1A62 [Trivial] +5C1A6E 1A6E [Trivial] +5C1A72 1A72 [Trivial] +5C1A74 1A74 [Trivial] +5C1A7E 1A7E [Trivial] +5C1A7F 1A7F [Trivial] +5C1A80 1A80 [Trivial][ILSEQ] +5C1A81 1A81 [Trivial][ILSEQ] +5C1A9F 1A9F [Trivial][ILSEQ] +5C1AA0 1AA0 [Trivial][ILSEQ] +5C1AA1 1AA1 [Trivial][ILSEQ] +5C1AE0 1AE0 [Trivial][ILSEQ] +5C1AEF 1AEF [Trivial][ILSEQ] +5C1AF9 1AF9 [Trivial][ILSEQ] 
+5C1AFA 1AFA [Trivial][ILSEQ] +5C1AFC 1AFC [Trivial][ILSEQ] +5C1AFD 1AFD [Trivial][ILSEQ] +5C1AFE 1AFE [Trivial][ILSEQ] +5C1AFF 1AFF [Trivial][ILSEQ] +5C2200 2200 [Trivial] +5C2208 2208 [Trivial] +5C2209 2209 [Trivial] +5C220A 220A [Trivial] +5C220D 220D [Trivial] +5C221A 221A [Trivial] +5C2222 2222 [Trivial] +5C2225 2225 [Trivial] +5C2227 NULL [SyntErr] +5C2230 2230 [Trivial] +5C223F 223F [Trivial] +5C2240 2240 [Trivial] +5C225A 225A [Trivial] +5C225C NULL [SyntErr] +5C225F 225F [Trivial] +5C2261 2261 [Trivial] +5C2262 2262 [Trivial] +5C226E 226E [Trivial] +5C2272 2272 [Trivial] +5C2274 2274 [Trivial] +5C227E 227E [Trivial] +5C227F 227F [Trivial] +5C2280 2280 [Trivial][ILSEQ] +5C2281 2281 [Trivial][ILSEQ] +5C229F 229F [Trivial][ILSEQ] +5C22A0 22A0 [Trivial][ILSEQ] +5C22A1 22A1 [Trivial][ILSEQ] +5C22E0 22E0 [Trivial][ILSEQ] +5C22EF 22EF [Trivial][ILSEQ] +5C22F9 22F9 [Trivial][ILSEQ] +5C22FA 22FA [Trivial][ILSEQ] +5C22FC 22FC [Trivial][ILSEQ] +5C22FD 22FD [Trivial][ILSEQ] +5C22FE 22FE [Trivial][ILSEQ] +5C22FF 22FF [Trivial][ILSEQ] +5C2500 5C2500 [Preserve][LIKE] +5C2508 5C2508 [Preserve][LIKE] +5C2509 5C2509 [Preserve][LIKE] +5C250A 5C250A [Preserve][LIKE] +5C250D 5C250D [Preserve][LIKE] +5C251A 5C251A [Preserve][LIKE] +5C2522 5C2522 [Preserve][LIKE] +5C2525 5C2525 [Preserve][LIKE] +5C2527 NULL [SyntErr] +5C2530 5C2530 [Preserve][LIKE] +5C253F 5C253F [Preserve][LIKE] +5C2540 5C2540 [Preserve][LIKE] +5C255A 5C255A [Preserve][LIKE] +5C255C NULL [SyntErr] +5C255F 5C255F [Preserve][LIKE] +5C2561 5C2561 [Preserve][LIKE] +5C2562 5C2562 [Preserve][LIKE] +5C256E 5C256E [Preserve][LIKE] +5C2572 5C2572 [Preserve][LIKE] +5C2574 5C2574 [Preserve][LIKE] +5C257E 5C257E [Preserve][LIKE] +5C257F 5C257F [Preserve][LIKE] +5C2580 5C2580 [Preserve][LIKE][ILSEQ] +5C2581 5C2581 [Preserve][LIKE][ILSEQ] +5C259F 5C259F [Preserve][LIKE][ILSEQ] +5C25A0 5C25A0 [Preserve][LIKE][ILSEQ] +5C25A1 5C25A1 [Preserve][LIKE][ILSEQ] +5C25E0 5C25E0 [Preserve][LIKE][ILSEQ] +5C25EF 5C25EF [Preserve][LIKE][ILSEQ] +5C25F9 5C25F9 [Preserve][LIKE][ILSEQ] +5C25FA 5C25FA [Preserve][LIKE][ILSEQ] +5C25FC 5C25FC [Preserve][LIKE][ILSEQ] +5C25FD 5C25FD [Preserve][LIKE][ILSEQ] +5C25FE 5C25FE [Preserve][LIKE][ILSEQ] +5C25FF 5C25FF [Preserve][LIKE][ILSEQ] +5C2700 2700 [Trivial] +5C2708 2708 [Trivial] +5C2709 2709 [Trivial] +5C270A 270A [Trivial] +5C270D 270D [Trivial] +5C271A 271A [Trivial] +5C2722 2722 [Trivial] +5C2725 2725 [Trivial] +5C2727 NULL [SyntErr] +5C2730 2730 [Trivial] +5C273F 273F [Trivial] +5C2740 2740 [Trivial] +5C275A 275A [Trivial] +5C275C NULL [SyntErr] +5C275F 275F [Trivial] +5C2761 2761 [Trivial] +5C2762 2762 [Trivial] +5C276E 276E [Trivial] +5C2772 2772 [Trivial] +5C2774 2774 [Trivial] +5C277E 277E [Trivial] +5C277F 277F [Trivial] +5C2780 2780 [Trivial][ILSEQ] +5C2781 2781 [Trivial][ILSEQ] +5C279F 279F [Trivial][ILSEQ] +5C27A0 27A0 [Trivial][ILSEQ] +5C27A1 27A1 [Trivial][ILSEQ] +5C27E0 27E0 [Trivial][ILSEQ] +5C27EF 27EF [Trivial][ILSEQ] +5C27F9 27F9 [Trivial][ILSEQ] +5C27FA 27FA [Trivial][ILSEQ] +5C27FC 27FC [Trivial][ILSEQ] +5C27FD 27FD [Trivial][ILSEQ] +5C27FE 27FE [Trivial][ILSEQ] +5C27FF 27FF [Trivial][ILSEQ] +5C3000 0000 [Regular] +5C3008 0008 [Regular] +5C3009 0009 [Regular] +5C300A 000A [Regular] +5C300D 000D [Regular] +5C301A 001A [Regular] +5C3022 0022 [Regular] +5C3025 0025 [Regular] +5C3027 NULL [SyntErr] +5C3030 0030 [Regular] +5C303F 003F [Regular] +5C3040 0040 [Regular] +5C305A 005A [Regular] +5C305C NULL [SyntErr] +5C305F 005F [Regular] +5C3061 0061 [Regular] +5C3062 0062 [Regular] +5C306E 006E [Regular] 
+5C3072 0072 [Regular] +5C3074 0074 [Regular] +5C307E 007E [Regular] +5C307F 007F [Regular] +5C3080 0080 [Regular][ILSEQ] +5C3081 0081 [Regular][ILSEQ] +5C309F 009F [Regular][ILSEQ] +5C30A0 00A0 [Regular][ILSEQ] +5C30A1 00A1 [Regular][ILSEQ] +5C30E0 00E0 [Regular][ILSEQ] +5C30EF 00EF [Regular][ILSEQ] +5C30F9 00F9 [Regular][ILSEQ] +5C30FA 00FA [Regular][ILSEQ] +5C30FC 00FC [Regular][ILSEQ] +5C30FD 00FD [Regular][ILSEQ] +5C30FE 00FE [Regular][ILSEQ] +5C30FF 00FF [Regular][ILSEQ] +5C3F00 3F00 [Trivial] +5C3F08 3F08 [Trivial] +5C3F09 3F09 [Trivial] +5C3F0A 3F0A [Trivial] +5C3F0D 3F0D [Trivial] +5C3F1A 3F1A [Trivial] +5C3F22 3F22 [Trivial] +5C3F25 3F25 [Trivial] +5C3F27 NULL [SyntErr] +5C3F30 3F30 [Trivial] +5C3F3F 3F3F [Trivial] +5C3F40 3F40 [Trivial] +5C3F5A 3F5A [Trivial] +5C3F5C NULL [SyntErr] +5C3F5F 3F5F [Trivial] +5C3F61 3F61 [Trivial] +5C3F62 3F62 [Trivial] +5C3F6E 3F6E [Trivial] +5C3F72 3F72 [Trivial] +5C3F74 3F74 [Trivial] +5C3F7E 3F7E [Trivial] +5C3F7F 3F7F [Trivial] +5C3F80 3F80 [Trivial][ILSEQ] +5C3F81 3F81 [Trivial][ILSEQ] +5C3F9F 3F9F [Trivial][ILSEQ] +5C3FA0 3FA0 [Trivial][ILSEQ] +5C3FA1 3FA1 [Trivial][ILSEQ] +5C3FE0 3FE0 [Trivial][ILSEQ] +5C3FEF 3FEF [Trivial][ILSEQ] +5C3FF9 3FF9 [Trivial][ILSEQ] +5C3FFA 3FFA [Trivial][ILSEQ] +5C3FFC 3FFC [Trivial][ILSEQ] +5C3FFD 3FFD [Trivial][ILSEQ] +5C3FFE 3FFE [Trivial][ILSEQ] +5C3FFF 3FFF [Trivial][ILSEQ] +5C4000 4000 [Trivial] +5C4008 4008 [Trivial] +5C4009 4009 [Trivial] +5C400A 400A [Trivial] +5C400D 400D [Trivial] +5C401A 401A [Trivial] +5C4022 4022 [Trivial] +5C4025 4025 [Trivial] +5C4027 NULL [SyntErr] +5C4030 4030 [Trivial] +5C403F 403F [Trivial] +5C4040 4040 [Trivial] +5C405A 405A [Trivial] +5C405C NULL [SyntErr] +5C405F 405F [Trivial] +5C4061 4061 [Trivial] +5C4062 4062 [Trivial] +5C406E 406E [Trivial] +5C4072 4072 [Trivial] +5C4074 4074 [Trivial] +5C407E 407E [Trivial] +5C407F 407F [Trivial] +5C4080 4080 [Trivial][ILSEQ] +5C4081 4081 [Trivial][ILSEQ] +5C409F 409F [Trivial][ILSEQ] +5C40A0 40A0 [Trivial][ILSEQ] +5C40A1 40A1 [Trivial][ILSEQ] +5C40E0 40E0 [Trivial][ILSEQ] +5C40EF 40EF [Trivial][ILSEQ] +5C40F9 40F9 [Trivial][ILSEQ] +5C40FA 40FA [Trivial][ILSEQ] +5C40FC 40FC [Trivial][ILSEQ] +5C40FD 40FD [Trivial][ILSEQ] +5C40FE 40FE [Trivial][ILSEQ] +5C40FF 40FF [Trivial][ILSEQ] +5C5A00 1A00 [Regular] +5C5A08 1A08 [Regular] +5C5A09 1A09 [Regular] +5C5A0A 1A0A [Regular] +5C5A0D 1A0D [Regular] +5C5A1A 1A1A [Regular] +5C5A22 1A22 [Regular] +5C5A25 1A25 [Regular] +5C5A27 NULL [SyntErr] +5C5A30 1A30 [Regular] +5C5A3F 1A3F [Regular] +5C5A40 1A40 [Regular] +5C5A5A 1A5A [Regular] +5C5A5C NULL [SyntErr] +5C5A5F 1A5F [Regular] +5C5A61 1A61 [Regular] +5C5A62 1A62 [Regular] +5C5A6E 1A6E [Regular] +5C5A72 1A72 [Regular] +5C5A74 1A74 [Regular] +5C5A7E 1A7E [Regular] +5C5A7F 1A7F [Regular] +5C5A80 1A80 [Regular][ILSEQ] +5C5A81 1A81 [Regular][ILSEQ] +5C5A9F 1A9F [Regular][ILSEQ] +5C5AA0 1AA0 [Regular][ILSEQ] +5C5AA1 1AA1 [Regular][ILSEQ] +5C5AE0 1AE0 [Regular][ILSEQ] +5C5AEF 1AEF [Regular][ILSEQ] +5C5AF9 1AF9 [Regular][ILSEQ] +5C5AFA 1AFA [Regular][ILSEQ] +5C5AFC 1AFC [Regular][ILSEQ] +5C5AFD 1AFD [Regular][ILSEQ] +5C5AFE 1AFE [Regular][ILSEQ] +5C5AFF 1AFF [Regular][ILSEQ] +5C5C00 5C00 [Regular] +5C5C08 5C08 [Regular] +5C5C09 5C09 [Regular] +5C5C0A 5C0A [Regular] +5C5C0D 5C0D [Regular] +5C5C1A 5C1A [Regular] +5C5C22 5C22 [Regular] +5C5C25 5C25 [Regular] +5C5C27 NULL [SyntErr] +5C5C30 5C30 [Regular] +5C5C3F 5C3F [Regular] +5C5C40 5C40 [Regular] +5C5C5A 5C5A [Regular] +5C5C5C NULL [SyntErr] +5C5C5F 5C5F [Regular] +5C5C61 5C61 [Regular] +5C5C62 5C62 
[Regular] +5C5C6E 5C6E [Regular] +5C5C72 5C72 [Regular] +5C5C74 5C74 [Regular] +5C5C7E 5C7E [Regular] +5C5C7F 5C7F [Regular] +5C5C80 5C80 [Regular][ILSEQ] +5C5C81 5C81 [Regular][ILSEQ] +5C5C9F 5C9F [Regular][ILSEQ] +5C5CA0 5CA0 [Regular][ILSEQ] +5C5CA1 5CA1 [Regular][ILSEQ] +5C5CE0 5CE0 [Regular][ILSEQ] +5C5CEF 5CEF [Regular][ILSEQ] +5C5CF9 5CF9 [Regular][ILSEQ] +5C5CFA 5CFA [Regular][ILSEQ] +5C5CFC 5CFC [Regular][ILSEQ] +5C5CFD 5CFD [Regular][ILSEQ] +5C5CFE 5CFE [Regular][ILSEQ] +5C5CFF 5CFF [Regular][ILSEQ] +5C5F00 5C5F00 [Preserve][LIKE] +5C5F08 5C5F08 [Preserve][LIKE] +5C5F09 5C5F09 [Preserve][LIKE] +5C5F0A 5C5F0A [Preserve][LIKE] +5C5F0D 5C5F0D [Preserve][LIKE] +5C5F1A 5C5F1A [Preserve][LIKE] +5C5F22 5C5F22 [Preserve][LIKE] +5C5F25 5C5F25 [Preserve][LIKE] +5C5F27 NULL [SyntErr] +5C5F30 5C5F30 [Preserve][LIKE] +5C5F3F 5C5F3F [Preserve][LIKE] +5C5F40 5C5F40 [Preserve][LIKE] +5C5F5A 5C5F5A [Preserve][LIKE] +5C5F5C NULL [SyntErr] +5C5F5F 5C5F5F [Preserve][LIKE] +5C5F61 5C5F61 [Preserve][LIKE] +5C5F62 5C5F62 [Preserve][LIKE] +5C5F6E 5C5F6E [Preserve][LIKE] +5C5F72 5C5F72 [Preserve][LIKE] +5C5F74 5C5F74 [Preserve][LIKE] +5C5F7E 5C5F7E [Preserve][LIKE] +5C5F7F 5C5F7F [Preserve][LIKE] +5C5F80 5C5F80 [Preserve][LIKE][ILSEQ] +5C5F81 5C5F81 [Preserve][LIKE][ILSEQ] +5C5F9F 5C5F9F [Preserve][LIKE][ILSEQ] +5C5FA0 5C5FA0 [Preserve][LIKE][ILSEQ] +5C5FA1 5C5FA1 [Preserve][LIKE][ILSEQ] +5C5FE0 5C5FE0 [Preserve][LIKE][ILSEQ] +5C5FEF 5C5FEF [Preserve][LIKE][ILSEQ] +5C5FF9 5C5FF9 [Preserve][LIKE][ILSEQ] +5C5FFA 5C5FFA [Preserve][LIKE][ILSEQ] +5C5FFC 5C5FFC [Preserve][LIKE][ILSEQ] +5C5FFD 5C5FFD [Preserve][LIKE][ILSEQ] +5C5FFE 5C5FFE [Preserve][LIKE][ILSEQ] +5C5FFF 5C5FFF [Preserve][LIKE][ILSEQ] +5C6100 6100 [Trivial] +5C6108 6108 [Trivial] +5C6109 6109 [Trivial] +5C610A 610A [Trivial] +5C610D 610D [Trivial] +5C611A 611A [Trivial] +5C6122 6122 [Trivial] +5C6125 6125 [Trivial] +5C6127 NULL [SyntErr] +5C6130 6130 [Trivial] +5C613F 613F [Trivial] +5C6140 6140 [Trivial] +5C615A 615A [Trivial] +5C615C NULL [SyntErr] +5C615F 615F [Trivial] +5C6161 6161 [Trivial] +5C6162 6162 [Trivial] +5C616E 616E [Trivial] +5C6172 6172 [Trivial] +5C6174 6174 [Trivial] +5C617E 617E [Trivial] +5C617F 617F [Trivial] +5C6180 6180 [Trivial][ILSEQ] +5C6181 6181 [Trivial][ILSEQ] +5C619F 619F [Trivial][ILSEQ] +5C61A0 61A0 [Trivial][ILSEQ] +5C61A1 61A1 [Trivial][ILSEQ] +5C61E0 61E0 [Trivial][ILSEQ] +5C61EF 61EF [Trivial][ILSEQ] +5C61F9 61F9 [Trivial][ILSEQ] +5C61FA 61FA [Trivial][ILSEQ] +5C61FC 61FC [Trivial][ILSEQ] +5C61FD 61FD [Trivial][ILSEQ] +5C61FE 61FE [Trivial][ILSEQ] +5C61FF 61FF [Trivial][ILSEQ] +5C6200 0800 [Regular] +5C6208 0808 [Regular] +5C6209 0809 [Regular] +5C620A 080A [Regular] +5C620D 080D [Regular] +5C621A 081A [Regular] +5C6222 0822 [Regular] +5C6225 0825 [Regular] +5C6227 NULL [SyntErr] +5C6230 0830 [Regular] +5C623F 083F [Regular] +5C6240 0840 [Regular] +5C625A 085A [Regular] +5C625C NULL [SyntErr] +5C625F 085F [Regular] +5C6261 0861 [Regular] +5C6262 0862 [Regular] +5C626E 086E [Regular] +5C6272 0872 [Regular] +5C6274 0874 [Regular] +5C627E 087E [Regular] +5C627F 087F [Regular] +5C6280 0880 [Regular][ILSEQ] +5C6281 0881 [Regular][ILSEQ] +5C629F 089F [Regular][ILSEQ] +5C62A0 08A0 [Regular][ILSEQ] +5C62A1 08A1 [Regular][ILSEQ] +5C62E0 08E0 [Regular][ILSEQ] +5C62EF 08EF [Regular][ILSEQ] +5C62F9 08F9 [Regular][ILSEQ] +5C62FA 08FA [Regular][ILSEQ] +5C62FC 08FC [Regular][ILSEQ] +5C62FD 08FD [Regular][ILSEQ] +5C62FE 08FE [Regular][ILSEQ] +5C62FF 08FF [Regular][ILSEQ] +5C6E00 0A00 [Regular] +5C6E08 0A08 [Regular] +5C6E09 
0A09 [Regular] +5C6E0A 0A0A [Regular] +5C6E0D 0A0D [Regular] +5C6E1A 0A1A [Regular] +5C6E22 0A22 [Regular] +5C6E25 0A25 [Regular] +5C6E27 NULL [SyntErr] +5C6E30 0A30 [Regular] +5C6E3F 0A3F [Regular] +5C6E40 0A40 [Regular] +5C6E5A 0A5A [Regular] +5C6E5C NULL [SyntErr] +5C6E5F 0A5F [Regular] +5C6E61 0A61 [Regular] +5C6E62 0A62 [Regular] +5C6E6E 0A6E [Regular] +5C6E72 0A72 [Regular] +5C6E74 0A74 [Regular] +5C6E7E 0A7E [Regular] +5C6E7F 0A7F [Regular] +5C6E80 0A80 [Regular][ILSEQ] +5C6E81 0A81 [Regular][ILSEQ] +5C6E9F 0A9F [Regular][ILSEQ] +5C6EA0 0AA0 [Regular][ILSEQ] +5C6EA1 0AA1 [Regular][ILSEQ] +5C6EE0 0AE0 [Regular][ILSEQ] +5C6EEF 0AEF [Regular][ILSEQ] +5C6EF9 0AF9 [Regular][ILSEQ] +5C6EFA 0AFA [Regular][ILSEQ] +5C6EFC 0AFC [Regular][ILSEQ] +5C6EFD 0AFD [Regular][ILSEQ] +5C6EFE 0AFE [Regular][ILSEQ] +5C6EFF 0AFF [Regular][ILSEQ] +5C7200 0D00 [Regular] +5C7208 0D08 [Regular] +5C7209 0D09 [Regular] +5C720A 0D0A [Regular] +5C720D 0D0D [Regular] +5C721A 0D1A [Regular] +5C7222 0D22 [Regular] +5C7225 0D25 [Regular] +5C7227 NULL [SyntErr] +5C7230 0D30 [Regular] +5C723F 0D3F [Regular] +5C7240 0D40 [Regular] +5C725A 0D5A [Regular] +5C725C NULL [SyntErr] +5C725F 0D5F [Regular] +5C7261 0D61 [Regular] +5C7262 0D62 [Regular] +5C726E 0D6E [Regular] +5C7272 0D72 [Regular] +5C7274 0D74 [Regular] +5C727E 0D7E [Regular] +5C727F 0D7F [Regular] +5C7280 0D80 [Regular][ILSEQ] +5C7281 0D81 [Regular][ILSEQ] +5C729F 0D9F [Regular][ILSEQ] +5C72A0 0DA0 [Regular][ILSEQ] +5C72A1 0DA1 [Regular][ILSEQ] +5C72E0 0DE0 [Regular][ILSEQ] +5C72EF 0DEF [Regular][ILSEQ] +5C72F9 0DF9 [Regular][ILSEQ] +5C72FA 0DFA [Regular][ILSEQ] +5C72FC 0DFC [Regular][ILSEQ] +5C72FD 0DFD [Regular][ILSEQ] +5C72FE 0DFE [Regular][ILSEQ] +5C72FF 0DFF [Regular][ILSEQ] +5C7400 0900 [Regular] +5C7408 0908 [Regular] +5C7409 0909 [Regular] +5C740A 090A [Regular] +5C740D 090D [Regular] +5C741A 091A [Regular] +5C7422 0922 [Regular] +5C7425 0925 [Regular] +5C7427 NULL [SyntErr] +5C7430 0930 [Regular] +5C743F 093F [Regular] +5C7440 0940 [Regular] +5C745A 095A [Regular] +5C745C NULL [SyntErr] +5C745F 095F [Regular] +5C7461 0961 [Regular] +5C7462 0962 [Regular] +5C746E 096E [Regular] +5C7472 0972 [Regular] +5C7474 0974 [Regular] +5C747E 097E [Regular] +5C747F 097F [Regular] +5C7480 0980 [Regular][ILSEQ] +5C7481 0981 [Regular][ILSEQ] +5C749F 099F [Regular][ILSEQ] +5C74A0 09A0 [Regular][ILSEQ] +5C74A1 09A1 [Regular][ILSEQ] +5C74E0 09E0 [Regular][ILSEQ] +5C74EF 09EF [Regular][ILSEQ] +5C74F9 09F9 [Regular][ILSEQ] +5C74FA 09FA [Regular][ILSEQ] +5C74FC 09FC [Regular][ILSEQ] +5C74FD 09FD [Regular][ILSEQ] +5C74FE 09FE [Regular][ILSEQ] +5C74FF 09FF [Regular][ILSEQ] +5C7E00 7E00 [Trivial] +5C7E08 7E08 [Trivial] +5C7E09 7E09 [Trivial] +5C7E0A 7E0A [Trivial] +5C7E0D 7E0D [Trivial] +5C7E1A 7E1A [Trivial] +5C7E22 7E22 [Trivial] +5C7E25 7E25 [Trivial] +5C7E27 NULL [SyntErr] +5C7E30 7E30 [Trivial] +5C7E3F 7E3F [Trivial] +5C7E40 7E40 [Trivial] +5C7E5A 7E5A [Trivial] +5C7E5C NULL [SyntErr] +5C7E5F 7E5F [Trivial] +5C7E61 7E61 [Trivial] +5C7E62 7E62 [Trivial] +5C7E6E 7E6E [Trivial] +5C7E72 7E72 [Trivial] +5C7E74 7E74 [Trivial] +5C7E7E 7E7E [Trivial] +5C7E7F 7E7F [Trivial] +5C7E80 7E80 [Trivial][ILSEQ] +5C7E81 7E81 [Trivial][ILSEQ] +5C7E9F 7E9F [Trivial][ILSEQ] +5C7EA0 7EA0 [Trivial][ILSEQ] +5C7EA1 7EA1 [Trivial][ILSEQ] +5C7EE0 7EE0 [Trivial][ILSEQ] +5C7EEF 7EEF [Trivial][ILSEQ] +5C7EF9 7EF9 [Trivial][ILSEQ] +5C7EFA 7EFA [Trivial][ILSEQ] +5C7EFC 7EFC [Trivial][ILSEQ] +5C7EFD 7EFD [Trivial][ILSEQ] +5C7EFE 7EFE [Trivial][ILSEQ] +5C7EFF 7EFF [Trivial][ILSEQ] +5C7F00 7F00 [Trivial] 
+5C7F08 7F08 [Trivial] +5C7F09 7F09 [Trivial] +5C7F0A 7F0A [Trivial] +5C7F0D 7F0D [Trivial] +5C7F1A 7F1A [Trivial] +5C7F22 7F22 [Trivial] +5C7F25 7F25 [Trivial] +5C7F27 NULL [SyntErr] +5C7F30 7F30 [Trivial] +5C7F3F 7F3F [Trivial] +5C7F40 7F40 [Trivial] +5C7F5A 7F5A [Trivial] +5C7F5C NULL [SyntErr] +5C7F5F 7F5F [Trivial] +5C7F61 7F61 [Trivial] +5C7F62 7F62 [Trivial] +5C7F6E 7F6E [Trivial] +5C7F72 7F72 [Trivial] +5C7F74 7F74 [Trivial] +5C7F7E 7F7E [Trivial] +5C7F7F 7F7F [Trivial] +5C7F80 7F80 [Trivial][ILSEQ] +5C7F81 7F81 [Trivial][ILSEQ] +5C7F9F 7F9F [Trivial][ILSEQ] +5C7FA0 7FA0 [Trivial][ILSEQ] +5C7FA1 7FA1 [Trivial][ILSEQ] +5C7FE0 7FE0 [Trivial][ILSEQ] +5C7FEF 7FEF [Trivial][ILSEQ] +5C7FF9 7FF9 [Trivial][ILSEQ] +5C7FFA 7FFA [Trivial][ILSEQ] +5C7FFC 7FFC [Trivial][ILSEQ] +5C7FFD 7FFD [Trivial][ILSEQ] +5C7FFE 7FFE [Trivial][ILSEQ] +5C7FFF 7FFF [Trivial][ILSEQ] +5C8000 8000 [Trivial][ILSEQ] +5C8008 8008 [Trivial][ILSEQ] +5C8009 8009 [Trivial][ILSEQ] +5C800A 800A [Trivial][ILSEQ] +5C800D 800D [Trivial][ILSEQ] +5C801A 801A [Trivial][ILSEQ] +5C8022 8022 [Trivial][ILSEQ] +5C8025 8025 [Trivial][ILSEQ] +5C8027 NULL [SyntErr] +5C8030 8030 [Trivial][ILSEQ] +5C803F 803F [Trivial][ILSEQ] +5C8040 8040 [Trivial][ILSEQ] +5C805A 805A [Trivial][ILSEQ] +5C805C NULL [SyntErr] +5C805F 805F [Trivial][ILSEQ] +5C8061 8061 [Trivial][ILSEQ] +5C8062 8062 [Trivial][ILSEQ] +5C806E 806E [Trivial][ILSEQ] +5C8072 8072 [Trivial][ILSEQ] +5C8074 8074 [Trivial][ILSEQ] +5C807E 807E [Trivial][ILSEQ] +5C807F 807F [Trivial][ILSEQ] +5C8080 8080 [Trivial][ILSEQ] +5C8081 8081 [Trivial][ILSEQ] +5C809F 809F [Trivial][ILSEQ] +5C80A0 80A0 [Trivial][ILSEQ] +5C80A1 80A1 [Trivial][ILSEQ] +5C80E0 80E0 [Trivial][ILSEQ] +5C80EF 80EF [Trivial][ILSEQ] +5C80F9 80F9 [Trivial][ILSEQ] +5C80FA 80FA [Trivial][ILSEQ] +5C80FC 80FC [Trivial][ILSEQ] +5C80FD 80FD [Trivial][ILSEQ] +5C80FE 80FE [Trivial][ILSEQ] +5C80FF 80FF [Trivial][ILSEQ] +5C8100 8100 [Trivial][ILSEQ] +5C8108 8108 [Trivial][ILSEQ] +5C8109 8109 [Trivial][ILSEQ] +5C810A 810A [Trivial][ILSEQ] +5C810D 810D [Trivial][ILSEQ] +5C811A 811A [Trivial][ILSEQ] +5C8122 8122 [Trivial][ILSEQ] +5C8125 8125 [Trivial][ILSEQ] +5C8127 NULL [SyntErr] +5C8130 8130 [Trivial][ILSEQ] +5C813F 813F [Trivial][ILSEQ] +5C8140 8140 [Trivial][ILSEQ] +5C815A 815A [Trivial][ILSEQ] +5C815C NULL [SyntErr] +5C815F 815F [Trivial][ILSEQ] +5C8161 8161 [Trivial][ILSEQ] +5C8162 8162 [Trivial][ILSEQ] +5C816E 816E [Trivial][ILSEQ] +5C8172 8172 [Trivial][ILSEQ] +5C8174 8174 [Trivial][ILSEQ] +5C817E 817E [Trivial][ILSEQ] +5C817F 817F [Trivial][ILSEQ] +5C8180 8180 [Trivial][ILSEQ] +5C8181 8181 [Trivial][ILSEQ] +5C819F 819F [Trivial][ILSEQ] +5C81A0 81A0 [Trivial][ILSEQ] +5C81A1 81A1 [Trivial][ILSEQ] +5C81E0 81E0 [Trivial][ILSEQ] +5C81EF 81EF [Trivial][ILSEQ] +5C81F9 81F9 [Trivial][ILSEQ] +5C81FA 81FA [Trivial][ILSEQ] +5C81FC 81FC [Trivial][ILSEQ] +5C81FD 81FD [Trivial][ILSEQ] +5C81FE 81FE [Trivial][ILSEQ] +5C81FF 81FF [Trivial][ILSEQ] +5C9F00 9F00 [Trivial][ILSEQ] +5C9F08 9F08 [Trivial][ILSEQ] +5C9F09 9F09 [Trivial][ILSEQ] +5C9F0A 9F0A [Trivial][ILSEQ] +5C9F0D 9F0D [Trivial][ILSEQ] +5C9F1A 9F1A [Trivial][ILSEQ] +5C9F22 9F22 [Trivial][ILSEQ] +5C9F25 9F25 [Trivial][ILSEQ] +5C9F27 NULL [SyntErr] +5C9F30 9F30 [Trivial][ILSEQ] +5C9F3F 9F3F [Trivial][ILSEQ] +5C9F40 9F40 [Trivial][ILSEQ] +5C9F5A 9F5A [Trivial][ILSEQ] +5C9F5C NULL [SyntErr] +5C9F5F 9F5F [Trivial][ILSEQ] +5C9F61 9F61 [Trivial][ILSEQ] +5C9F62 9F62 [Trivial][ILSEQ] +5C9F6E 9F6E [Trivial][ILSEQ] +5C9F72 9F72 [Trivial][ILSEQ] +5C9F74 9F74 [Trivial][ILSEQ] +5C9F7E 9F7E 
[Trivial][ILSEQ] +5C9F7F 9F7F [Trivial][ILSEQ] +5C9F80 9F80 [Trivial][ILSEQ] +5C9F81 9F81 [Trivial][ILSEQ] +5C9F9F 9F9F [Trivial][ILSEQ] +5C9FA0 9FA0 [Trivial][ILSEQ] +5C9FA1 9FA1 [Trivial][ILSEQ] +5C9FE0 9FE0 [Trivial][ILSEQ] +5C9FEF 9FEF [Trivial][ILSEQ] +5C9FF9 9FF9 [Trivial][ILSEQ] +5C9FFA 9FFA [Trivial][ILSEQ] +5C9FFC 9FFC [Trivial][ILSEQ] +5C9FFD 9FFD [Trivial][ILSEQ] +5C9FFE 9FFE [Trivial][ILSEQ] +5C9FFF 9FFF [Trivial][ILSEQ] +5CA000 A000 [Trivial][ILSEQ] +5CA008 A008 [Trivial][ILSEQ] +5CA009 A009 [Trivial][ILSEQ] +5CA00A A00A [Trivial][ILSEQ] +5CA00D A00D [Trivial][ILSEQ] +5CA01A A01A [Trivial][ILSEQ] +5CA022 A022 [Trivial][ILSEQ] +5CA025 A025 [Trivial][ILSEQ] +5CA027 NULL [SyntErr] +5CA030 A030 [Trivial][ILSEQ] +5CA03F A03F [Trivial][ILSEQ] +5CA040 A040 [Trivial][ILSEQ] +5CA05A A05A [Trivial][ILSEQ] +5CA05C NULL [SyntErr] +5CA05F A05F [Trivial][ILSEQ] +5CA061 A061 [Trivial][ILSEQ] +5CA062 A062 [Trivial][ILSEQ] +5CA06E A06E [Trivial][ILSEQ] +5CA072 A072 [Trivial][ILSEQ] +5CA074 A074 [Trivial][ILSEQ] +5CA07E A07E [Trivial][ILSEQ] +5CA07F A07F [Trivial][ILSEQ] +5CA080 A080 [Trivial][ILSEQ] +5CA081 A081 [Trivial][ILSEQ] +5CA09F A09F [Trivial][ILSEQ] +5CA0A0 A0A0 [Trivial][ILSEQ] +5CA0A1 A0A1 [Trivial][ILSEQ] +5CA0E0 A0E0 [Trivial][ILSEQ] +5CA0EF A0EF [Trivial][ILSEQ] +5CA0F9 A0F9 [Trivial][ILSEQ] +5CA0FA A0FA [Trivial][ILSEQ] +5CA0FC A0FC [Trivial][ILSEQ] +5CA0FD A0FD [Trivial][ILSEQ] +5CA0FE A0FE [Trivial][ILSEQ] +5CA0FF A0FF [Trivial][ILSEQ] +5CA100 A100 [Trivial][ILSEQ] +5CA108 A108 [Trivial][ILSEQ] +5CA109 A109 [Trivial][ILSEQ] +5CA10A A10A [Trivial][ILSEQ] +5CA10D A10D [Trivial][ILSEQ] +5CA11A A11A [Trivial][ILSEQ] +5CA122 A122 [Trivial][ILSEQ] +5CA125 A125 [Trivial][ILSEQ] +5CA127 NULL [SyntErr] +5CA130 A130 [Trivial][ILSEQ] +5CA13F A13F [Trivial][ILSEQ] +5CA140 A140 [Trivial][ILSEQ] +5CA15A A15A [Trivial][ILSEQ] +5CA15C NULL [SyntErr] +5CA15F A15F [Trivial][ILSEQ] +5CA161 A161 [Trivial][ILSEQ] +5CA162 A162 [Trivial][ILSEQ] +5CA16E A16E [Trivial][ILSEQ] +5CA172 A172 [Trivial][ILSEQ] +5CA174 A174 [Trivial][ILSEQ] +5CA17E A17E [Trivial][ILSEQ] +5CA17F A17F [Trivial][ILSEQ] +5CA180 A180 [Trivial][ILSEQ] +5CA181 A181 [Trivial][ILSEQ] +5CA19F A19F [Trivial][ILSEQ] +5CA1A0 A1A0 [Trivial][ILSEQ] +5CA1A1 A1A1 [Trivial] +5CA1E0 A1E0 [Trivial] +5CA1EF A1EF [Trivial] +5CA1F9 A1F9 [Trivial] +5CA1FA A1FA [Trivial] +5CA1FC A1FC [Trivial] +5CA1FD A1FD [Trivial] +5CA1FE A1FE [Trivial] +5CA1FF A1FF [Trivial][ILSEQ] +5CE000 E000 [Trivial][ILSEQ] +5CE008 E008 [Trivial][ILSEQ] +5CE009 E009 [Trivial][ILSEQ] +5CE00A E00A [Trivial][ILSEQ] +5CE00D E00D [Trivial][ILSEQ] +5CE01A E01A [Trivial][ILSEQ] +5CE022 E022 [Trivial][ILSEQ] +5CE025 E025 [Trivial][ILSEQ] +5CE027 NULL [SyntErr] +5CE030 E030 [Trivial][ILSEQ] +5CE03F E03F [Trivial][ILSEQ] +5CE040 E040 [Trivial][ILSEQ] +5CE05A E05A [Trivial][ILSEQ] +5CE05C NULL [SyntErr] +5CE05F E05F [Trivial][ILSEQ] +5CE061 E061 [Trivial][ILSEQ] +5CE062 E062 [Trivial][ILSEQ] +5CE06E E06E [Trivial][ILSEQ] +5CE072 E072 [Trivial][ILSEQ] +5CE074 E074 [Trivial][ILSEQ] +5CE07E E07E [Trivial][ILSEQ] +5CE07F E07F [Trivial][ILSEQ] +5CE080 E080 [Trivial][ILSEQ] +5CE081 E081 [Trivial][ILSEQ] +5CE09F E09F [Trivial][ILSEQ] +5CE0A0 E0A0 [Trivial][ILSEQ] +5CE0A1 E0A1 [Trivial] +5CE0E0 E0E0 [Trivial] +5CE0EF E0EF [Trivial] +5CE0F9 E0F9 [Trivial] +5CE0FA E0FA [Trivial] +5CE0FC E0FC [Trivial] +5CE0FD E0FD [Trivial] +5CE0FE E0FE [Trivial] +5CE0FF E0FF [Trivial][ILSEQ] +5CEF00 EF00 [Trivial][ILSEQ] +5CEF08 EF08 [Trivial][ILSEQ] +5CEF09 EF09 [Trivial][ILSEQ] +5CEF0A EF0A [Trivial][ILSEQ] 
+5CEF0D EF0D [Trivial][ILSEQ] +5CEF1A EF1A [Trivial][ILSEQ] +5CEF22 EF22 [Trivial][ILSEQ] +5CEF25 EF25 [Trivial][ILSEQ] +5CEF27 NULL [SyntErr] +5CEF30 EF30 [Trivial][ILSEQ] +5CEF3F EF3F [Trivial][ILSEQ] +5CEF40 EF40 [Trivial][ILSEQ] +5CEF5A EF5A [Trivial][ILSEQ] +5CEF5C NULL [SyntErr] +5CEF5F EF5F [Trivial][ILSEQ] +5CEF61 EF61 [Trivial][ILSEQ] +5CEF62 EF62 [Trivial][ILSEQ] +5CEF6E EF6E [Trivial][ILSEQ] +5CEF72 EF72 [Trivial][ILSEQ] +5CEF74 EF74 [Trivial][ILSEQ] +5CEF7E EF7E [Trivial][ILSEQ] +5CEF7F EF7F [Trivial][ILSEQ] +5CEF80 EF80 [Trivial][ILSEQ] +5CEF81 EF81 [Trivial][ILSEQ] +5CEF9F EF9F [Trivial][ILSEQ] +5CEFA0 EFA0 [Trivial][ILSEQ] +5CEFA1 EFA1 [Trivial] +5CEFE0 EFE0 [Trivial] +5CEFEF EFEF [Trivial] +5CEFF9 EFF9 [Trivial] +5CEFFA EFFA [Trivial] +5CEFFC EFFC [Trivial] +5CEFFD EFFD [Trivial] +5CEFFE EFFE [Trivial] +5CEFFF EFFF [Trivial][ILSEQ] +5CF900 F900 [Trivial][ILSEQ] +5CF908 F908 [Trivial][ILSEQ] +5CF909 F909 [Trivial][ILSEQ] +5CF90A F90A [Trivial][ILSEQ] +5CF90D F90D [Trivial][ILSEQ] +5CF91A F91A [Trivial][ILSEQ] +5CF922 F922 [Trivial][ILSEQ] +5CF925 F925 [Trivial][ILSEQ] +5CF927 NULL [SyntErr] +5CF930 F930 [Trivial][ILSEQ] +5CF93F F93F [Trivial][ILSEQ] +5CF940 F940 [Trivial][ILSEQ] +5CF95A F95A [Trivial][ILSEQ] +5CF95C NULL [SyntErr] +5CF95F F95F [Trivial][ILSEQ] +5CF961 F961 [Trivial][ILSEQ] +5CF962 F962 [Trivial][ILSEQ] +5CF96E F96E [Trivial][ILSEQ] +5CF972 F972 [Trivial][ILSEQ] +5CF974 F974 [Trivial][ILSEQ] +5CF97E F97E [Trivial][ILSEQ] +5CF97F F97F [Trivial][ILSEQ] +5CF980 F980 [Trivial][ILSEQ] +5CF981 F981 [Trivial][ILSEQ] +5CF99F F99F [Trivial][ILSEQ] +5CF9A0 F9A0 [Trivial][ILSEQ] +5CF9A1 F9A1 [Trivial][ILSEQ] +5CF9E0 F9E0 [Trivial][ILSEQ] +5CF9EF F9EF [Trivial][ILSEQ] +5CF9F9 F9F9 [Trivial][ILSEQ] +5CF9FA F9FA [Trivial][ILSEQ] +5CF9FC F9FC [Trivial][ILSEQ] +5CF9FD F9FD [Trivial][ILSEQ] +5CF9FE F9FE [Trivial][ILSEQ] +5CF9FF F9FF [Trivial][ILSEQ] +5CFA00 FA00 [Trivial][ILSEQ] +5CFA08 FA08 [Trivial][ILSEQ] +5CFA09 FA09 [Trivial][ILSEQ] +5CFA0A FA0A [Trivial][ILSEQ] +5CFA0D FA0D [Trivial][ILSEQ] +5CFA1A FA1A [Trivial][ILSEQ] +5CFA22 FA22 [Trivial][ILSEQ] +5CFA25 FA25 [Trivial][ILSEQ] +5CFA27 NULL [SyntErr] +5CFA30 FA30 [Trivial][ILSEQ] +5CFA3F FA3F [Trivial][ILSEQ] +5CFA40 FA40 [Trivial][ILSEQ] +5CFA5A FA5A [Trivial][ILSEQ] +5CFA5C NULL [SyntErr] +5CFA5F FA5F [Trivial][ILSEQ] +5CFA61 FA61 [Trivial][ILSEQ] +5CFA62 FA62 [Trivial][ILSEQ] +5CFA6E FA6E [Trivial][ILSEQ] +5CFA72 FA72 [Trivial][ILSEQ] +5CFA74 FA74 [Trivial][ILSEQ] +5CFA7E FA7E [Trivial][ILSEQ] +5CFA7F FA7F [Trivial][ILSEQ] +5CFA80 FA80 [Trivial][ILSEQ] +5CFA81 FA81 [Trivial][ILSEQ] +5CFA9F FA9F [Trivial][ILSEQ] +5CFAA0 FAA0 [Trivial][ILSEQ] +5CFAA1 FAA1 [Trivial][ILSEQ] +5CFAE0 FAE0 [Trivial][ILSEQ] +5CFAEF FAEF [Trivial][ILSEQ] +5CFAF9 FAF9 [Trivial][ILSEQ] +5CFAFA FAFA [Trivial][ILSEQ] +5CFAFC FAFC [Trivial][ILSEQ] +5CFAFD FAFD [Trivial][ILSEQ] +5CFAFE FAFE [Trivial][ILSEQ] +5CFAFF FAFF [Trivial][ILSEQ] +5CFC00 FC00 [Trivial][ILSEQ] +5CFC08 FC08 [Trivial][ILSEQ] +5CFC09 FC09 [Trivial][ILSEQ] +5CFC0A FC0A [Trivial][ILSEQ] +5CFC0D FC0D [Trivial][ILSEQ] +5CFC1A FC1A [Trivial][ILSEQ] +5CFC22 FC22 [Trivial][ILSEQ] +5CFC25 FC25 [Trivial][ILSEQ] +5CFC27 NULL [SyntErr] +5CFC30 FC30 [Trivial][ILSEQ] +5CFC3F FC3F [Trivial][ILSEQ] +5CFC40 FC40 [Trivial][ILSEQ] +5CFC5A FC5A [Trivial][ILSEQ] +5CFC5C NULL [SyntErr] +5CFC5F FC5F [Trivial][ILSEQ] +5CFC61 FC61 [Trivial][ILSEQ] +5CFC62 FC62 [Trivial][ILSEQ] +5CFC6E FC6E [Trivial][ILSEQ] +5CFC72 FC72 [Trivial][ILSEQ] +5CFC74 FC74 [Trivial][ILSEQ] +5CFC7E FC7E [Trivial][ILSEQ] 
+5CFC7F FC7F [Trivial][ILSEQ] +5CFC80 FC80 [Trivial][ILSEQ] +5CFC81 FC81 [Trivial][ILSEQ] +5CFC9F FC9F [Trivial][ILSEQ] +5CFCA0 FCA0 [Trivial][ILSEQ] +5CFCA1 FCA1 [Trivial][ILSEQ] +5CFCE0 FCE0 [Trivial][ILSEQ] +5CFCEF FCEF [Trivial][ILSEQ] +5CFCF9 FCF9 [Trivial][ILSEQ] +5CFCFA FCFA [Trivial][ILSEQ] +5CFCFC FCFC [Trivial][ILSEQ] +5CFCFD FCFD [Trivial][ILSEQ] +5CFCFE FCFE [Trivial][ILSEQ] +5CFCFF FCFF [Trivial][ILSEQ] +5CFD00 FD00 [Trivial][ILSEQ] +5CFD08 FD08 [Trivial][ILSEQ] +5CFD09 FD09 [Trivial][ILSEQ] +5CFD0A FD0A [Trivial][ILSEQ] +5CFD0D FD0D [Trivial][ILSEQ] +5CFD1A FD1A [Trivial][ILSEQ] +5CFD22 FD22 [Trivial][ILSEQ] +5CFD25 FD25 [Trivial][ILSEQ] +5CFD27 NULL [SyntErr] +5CFD30 FD30 [Trivial][ILSEQ] +5CFD3F FD3F [Trivial][ILSEQ] +5CFD40 FD40 [Trivial][ILSEQ] +5CFD5A FD5A [Trivial][ILSEQ] +5CFD5C NULL [SyntErr] +5CFD5F FD5F [Trivial][ILSEQ] +5CFD61 FD61 [Trivial][ILSEQ] +5CFD62 FD62 [Trivial][ILSEQ] +5CFD6E FD6E [Trivial][ILSEQ] +5CFD72 FD72 [Trivial][ILSEQ] +5CFD74 FD74 [Trivial][ILSEQ] +5CFD7E FD7E [Trivial][ILSEQ] +5CFD7F FD7F [Trivial][ILSEQ] +5CFD80 FD80 [Trivial][ILSEQ] +5CFD81 FD81 [Trivial][ILSEQ] +5CFD9F FD9F [Trivial][ILSEQ] +5CFDA0 FDA0 [Trivial][ILSEQ] +5CFDA1 FDA1 [Trivial][ILSEQ] +5CFDE0 FDE0 [Trivial][ILSEQ] +5CFDEF FDEF [Trivial][ILSEQ] +5CFDF9 FDF9 [Trivial][ILSEQ] +5CFDFA FDFA [Trivial][ILSEQ] +5CFDFC FDFC [Trivial][ILSEQ] +5CFDFD FDFD [Trivial][ILSEQ] +5CFDFE FDFE [Trivial][ILSEQ] +5CFDFF FDFF [Trivial][ILSEQ] +5CFE00 FE00 [Trivial][ILSEQ] +5CFE08 FE08 [Trivial][ILSEQ] +5CFE09 FE09 [Trivial][ILSEQ] +5CFE0A FE0A [Trivial][ILSEQ] +5CFE0D FE0D [Trivial][ILSEQ] +5CFE1A FE1A [Trivial][ILSEQ] +5CFE22 FE22 [Trivial][ILSEQ] +5CFE25 FE25 [Trivial][ILSEQ] +5CFE27 NULL [SyntErr] +5CFE30 FE30 [Trivial][ILSEQ] +5CFE3F FE3F [Trivial][ILSEQ] +5CFE40 FE40 [Trivial][ILSEQ] +5CFE5A FE5A [Trivial][ILSEQ] +5CFE5C NULL [SyntErr] +5CFE5F FE5F [Trivial][ILSEQ] +5CFE61 FE61 [Trivial][ILSEQ] +5CFE62 FE62 [Trivial][ILSEQ] +5CFE6E FE6E [Trivial][ILSEQ] +5CFE72 FE72 [Trivial][ILSEQ] +5CFE74 FE74 [Trivial][ILSEQ] +5CFE7E FE7E [Trivial][ILSEQ] +5CFE7F FE7F [Trivial][ILSEQ] +5CFE80 FE80 [Trivial][ILSEQ] +5CFE81 FE81 [Trivial][ILSEQ] +5CFE9F FE9F [Trivial][ILSEQ] +5CFEA0 FEA0 [Trivial][ILSEQ] +5CFEA1 FEA1 [Trivial][ILSEQ] +5CFEE0 FEE0 [Trivial][ILSEQ] +5CFEEF FEEF [Trivial][ILSEQ] +5CFEF9 FEF9 [Trivial][ILSEQ] +5CFEFA FEFA [Trivial][ILSEQ] +5CFEFC FEFC [Trivial][ILSEQ] +5CFEFD FEFD [Trivial][ILSEQ] +5CFEFE FEFE [Trivial][ILSEQ] +5CFEFF FEFF [Trivial][ILSEQ] +5CFF00 FF00 [Trivial][ILSEQ] +5CFF08 FF08 [Trivial][ILSEQ] +5CFF09 FF09 [Trivial][ILSEQ] +5CFF0A FF0A [Trivial][ILSEQ] +5CFF0D FF0D [Trivial][ILSEQ] +5CFF1A FF1A [Trivial][ILSEQ] +5CFF22 FF22 [Trivial][ILSEQ] +5CFF25 FF25 [Trivial][ILSEQ] +5CFF27 NULL [SyntErr] +5CFF30 FF30 [Trivial][ILSEQ] +5CFF3F FF3F [Trivial][ILSEQ] +5CFF40 FF40 [Trivial][ILSEQ] +5CFF5A FF5A [Trivial][ILSEQ] +5CFF5C NULL [SyntErr] +5CFF5F FF5F [Trivial][ILSEQ] +5CFF61 FF61 [Trivial][ILSEQ] +5CFF62 FF62 [Trivial][ILSEQ] +5CFF6E FF6E [Trivial][ILSEQ] +5CFF72 FF72 [Trivial][ILSEQ] +5CFF74 FF74 [Trivial][ILSEQ] +5CFF7E FF7E [Trivial][ILSEQ] +5CFF7F FF7F [Trivial][ILSEQ] +5CFF80 FF80 [Trivial][ILSEQ] +5CFF81 FF81 [Trivial][ILSEQ] +5CFF9F FF9F [Trivial][ILSEQ] +5CFFA0 FFA0 [Trivial][ILSEQ] +5CFFA1 FFA1 [Trivial][ILSEQ] +5CFFE0 FFE0 [Trivial][ILSEQ] +5CFFEF FFEF [Trivial][ILSEQ] +5CFFF9 FFF9 [Trivial][ILSEQ] +5CFFFA FFFA [Trivial][ILSEQ] +5CFFFC FFFC [Trivial][ILSEQ] +5CFFFD FFFD [Trivial][ILSEQ] +5CFFFE FFFE [Trivial][ILSEQ] +5CFFFF FFFF [Trivial][ILSEQ] +5C005C00 0000 [Trivial] 
+5C005C08 0008 [Trivial] +5C005C09 0009 [Trivial] +5C005C0A 000A [Trivial] +5C005C0D 000D [Trivial] +5C005C1A 001A [Trivial] +5C005C22 0022 [Trivial] +5C005C25 005C25 [Regular] +5C005C27 0027 [Trivial] +5C005C30 0000 [Regular] +5C005C3F 003F [Trivial] +5C005C40 0040 [Trivial] +5C005C5A 001A [Regular] +5C005C5C 005C [Regular] +5C005C5F 005C5F [Regular] +5C005C61 0061 [Trivial] +5C005C62 0008 [Regular] +5C005C6E 000A [Regular] +5C005C72 000D [Regular] +5C005C74 0009 [Regular] +5C005C7E 007E [Trivial] +5C005C7F 007F [Trivial] +5C005C80 0080 [Trivial][ILSEQ] +5C005C81 0081 [Trivial][ILSEQ] +5C005C9F 009F [Trivial][ILSEQ] +5C005CA0 00A0 [Trivial][ILSEQ] +5C005CA1 00A1 [Trivial][ILSEQ] +5C005CE0 00E0 [Trivial][ILSEQ] +5C005CEF 00EF [Trivial][ILSEQ] +5C005CF9 00F9 [Trivial][ILSEQ] +5C005CFA 00FA [Trivial][ILSEQ] +5C005CFC 00FC [Trivial][ILSEQ] +5C005CFD 00FD [Trivial][ILSEQ] +5C005CFE 00FE [Trivial][ILSEQ] +5C005CFF 00FF [Trivial][ILSEQ] +5C085C00 0800 [Trivial] +5C085C08 0808 [Trivial] +5C085C09 0809 [Trivial] +5C085C0A 080A [Trivial] +5C085C0D 080D [Trivial] +5C085C1A 081A [Trivial] +5C085C22 0822 [Trivial] +5C085C25 085C25 [Regular] +5C085C27 0827 [Trivial] +5C085C30 0800 [Regular] +5C085C3F 083F [Trivial] +5C085C40 0840 [Trivial] +5C085C5A 081A [Regular] +5C085C5C 085C [Regular] +5C085C5F 085C5F [Regular] +5C085C61 0861 [Trivial] +5C085C62 0808 [Regular] +5C085C6E 080A [Regular] +5C085C72 080D [Regular] +5C085C74 0809 [Regular] +5C085C7E 087E [Trivial] +5C085C7F 087F [Trivial] +5C085C80 0880 [Trivial][ILSEQ] +5C085C81 0881 [Trivial][ILSEQ] +5C085C9F 089F [Trivial][ILSEQ] +5C085CA0 08A0 [Trivial][ILSEQ] +5C085CA1 08A1 [Trivial][ILSEQ] +5C085CE0 08E0 [Trivial][ILSEQ] +5C085CEF 08EF [Trivial][ILSEQ] +5C085CF9 08F9 [Trivial][ILSEQ] +5C085CFA 08FA [Trivial][ILSEQ] +5C085CFC 08FC [Trivial][ILSEQ] +5C085CFD 08FD [Trivial][ILSEQ] +5C085CFE 08FE [Trivial][ILSEQ] +5C085CFF 08FF [Trivial][ILSEQ] +5C095C00 0900 [Trivial] +5C095C08 0908 [Trivial] +5C095C09 0909 [Trivial] +5C095C0A 090A [Trivial] +5C095C0D 090D [Trivial] +5C095C1A 091A [Trivial] +5C095C22 0922 [Trivial] +5C095C25 095C25 [Regular] +5C095C27 0927 [Trivial] +5C095C30 0900 [Regular] +5C095C3F 093F [Trivial] +5C095C40 0940 [Trivial] +5C095C5A 091A [Regular] +5C095C5C 095C [Regular] +5C095C5F 095C5F [Regular] +5C095C61 0961 [Trivial] +5C095C62 0908 [Regular] +5C095C6E 090A [Regular] +5C095C72 090D [Regular] +5C095C74 0909 [Regular] +5C095C7E 097E [Trivial] +5C095C7F 097F [Trivial] +5C095C80 0980 [Trivial][ILSEQ] +5C095C81 0981 [Trivial][ILSEQ] +5C095C9F 099F [Trivial][ILSEQ] +5C095CA0 09A0 [Trivial][ILSEQ] +5C095CA1 09A1 [Trivial][ILSEQ] +5C095CE0 09E0 [Trivial][ILSEQ] +5C095CEF 09EF [Trivial][ILSEQ] +5C095CF9 09F9 [Trivial][ILSEQ] +5C095CFA 09FA [Trivial][ILSEQ] +5C095CFC 09FC [Trivial][ILSEQ] +5C095CFD 09FD [Trivial][ILSEQ] +5C095CFE 09FE [Trivial][ILSEQ] +5C095CFF 09FF [Trivial][ILSEQ] +5C0A5C00 0A00 [Trivial] +5C0A5C08 0A08 [Trivial] +5C0A5C09 0A09 [Trivial] +5C0A5C0A 0A0A [Trivial] +5C0A5C0D 0A0D [Trivial] +5C0A5C1A 0A1A [Trivial] +5C0A5C22 0A22 [Trivial] +5C0A5C25 0A5C25 [Regular] +5C0A5C27 0A27 [Trivial] +5C0A5C30 0A00 [Regular] +5C0A5C3F 0A3F [Trivial] +5C0A5C40 0A40 [Trivial] +5C0A5C5A 0A1A [Regular] +5C0A5C5C 0A5C [Regular] +5C0A5C5F 0A5C5F [Regular] +5C0A5C61 0A61 [Trivial] +5C0A5C62 0A08 [Regular] +5C0A5C6E 0A0A [Regular] +5C0A5C72 0A0D [Regular] +5C0A5C74 0A09 [Regular] +5C0A5C7E 0A7E [Trivial] +5C0A5C7F 0A7F [Trivial] +5C0A5C80 0A80 [Trivial][ILSEQ] +5C0A5C81 0A81 [Trivial][ILSEQ] +5C0A5C9F 0A9F [Trivial][ILSEQ] +5C0A5CA0 0AA0 
[Trivial][ILSEQ] +5C0A5CA1 0AA1 [Trivial][ILSEQ] +5C0A5CE0 0AE0 [Trivial][ILSEQ] +5C0A5CEF 0AEF [Trivial][ILSEQ] +5C0A5CF9 0AF9 [Trivial][ILSEQ] +5C0A5CFA 0AFA [Trivial][ILSEQ] +5C0A5CFC 0AFC [Trivial][ILSEQ] +5C0A5CFD 0AFD [Trivial][ILSEQ] +5C0A5CFE 0AFE [Trivial][ILSEQ] +5C0A5CFF 0AFF [Trivial][ILSEQ] +5C0D5C00 0D00 [Trivial] +5C0D5C08 0D08 [Trivial] +5C0D5C09 0D09 [Trivial] +5C0D5C0A 0D0A [Trivial] +5C0D5C0D 0D0D [Trivial] +5C0D5C1A 0D1A [Trivial] +5C0D5C22 0D22 [Trivial] +5C0D5C25 0D5C25 [Regular] +5C0D5C27 0D27 [Trivial] +5C0D5C30 0D00 [Regular] +5C0D5C3F 0D3F [Trivial] +5C0D5C40 0D40 [Trivial] +5C0D5C5A 0D1A [Regular] +5C0D5C5C 0D5C [Regular] +5C0D5C5F 0D5C5F [Regular] +5C0D5C61 0D61 [Trivial] +5C0D5C62 0D08 [Regular] +5C0D5C6E 0D0A [Regular] +5C0D5C72 0D0D [Regular] +5C0D5C74 0D09 [Regular] +5C0D5C7E 0D7E [Trivial] +5C0D5C7F 0D7F [Trivial] +5C0D5C80 0D80 [Trivial][ILSEQ] +5C0D5C81 0D81 [Trivial][ILSEQ] +5C0D5C9F 0D9F [Trivial][ILSEQ] +5C0D5CA0 0DA0 [Trivial][ILSEQ] +5C0D5CA1 0DA1 [Trivial][ILSEQ] +5C0D5CE0 0DE0 [Trivial][ILSEQ] +5C0D5CEF 0DEF [Trivial][ILSEQ] +5C0D5CF9 0DF9 [Trivial][ILSEQ] +5C0D5CFA 0DFA [Trivial][ILSEQ] +5C0D5CFC 0DFC [Trivial][ILSEQ] +5C0D5CFD 0DFD [Trivial][ILSEQ] +5C0D5CFE 0DFE [Trivial][ILSEQ] +5C0D5CFF 0DFF [Trivial][ILSEQ] +5C1A5C00 1A00 [Trivial] +5C1A5C08 1A08 [Trivial] +5C1A5C09 1A09 [Trivial] +5C1A5C0A 1A0A [Trivial] +5C1A5C0D 1A0D [Trivial] +5C1A5C1A 1A1A [Trivial] +5C1A5C22 1A22 [Trivial] +5C1A5C25 1A5C25 [Regular] +5C1A5C27 1A27 [Trivial] +5C1A5C30 1A00 [Regular] +5C1A5C3F 1A3F [Trivial] +5C1A5C40 1A40 [Trivial] +5C1A5C5A 1A1A [Regular] +5C1A5C5C 1A5C [Regular] +5C1A5C5F 1A5C5F [Regular] +5C1A5C61 1A61 [Trivial] +5C1A5C62 1A08 [Regular] +5C1A5C6E 1A0A [Regular] +5C1A5C72 1A0D [Regular] +5C1A5C74 1A09 [Regular] +5C1A5C7E 1A7E [Trivial] +5C1A5C7F 1A7F [Trivial] +5C1A5C80 1A80 [Trivial][ILSEQ] +5C1A5C81 1A81 [Trivial][ILSEQ] +5C1A5C9F 1A9F [Trivial][ILSEQ] +5C1A5CA0 1AA0 [Trivial][ILSEQ] +5C1A5CA1 1AA1 [Trivial][ILSEQ] +5C1A5CE0 1AE0 [Trivial][ILSEQ] +5C1A5CEF 1AEF [Trivial][ILSEQ] +5C1A5CF9 1AF9 [Trivial][ILSEQ] +5C1A5CFA 1AFA [Trivial][ILSEQ] +5C1A5CFC 1AFC [Trivial][ILSEQ] +5C1A5CFD 1AFD [Trivial][ILSEQ] +5C1A5CFE 1AFE [Trivial][ILSEQ] +5C1A5CFF 1AFF [Trivial][ILSEQ] +5C225C00 2200 [Trivial] +5C225C08 2208 [Trivial] +5C225C09 2209 [Trivial] +5C225C0A 220A [Trivial] +5C225C0D 220D [Trivial] +5C225C1A 221A [Trivial] +5C225C22 2222 [Trivial] +5C225C25 225C25 [Regular] +5C225C27 2227 [Trivial] +5C225C30 2200 [Regular] +5C225C3F 223F [Trivial] +5C225C40 2240 [Trivial] +5C225C5A 221A [Regular] +5C225C5C 225C [Regular] +5C225C5F 225C5F [Regular] +5C225C61 2261 [Trivial] +5C225C62 2208 [Regular] +5C225C6E 220A [Regular] +5C225C72 220D [Regular] +5C225C74 2209 [Regular] +5C225C7E 227E [Trivial] +5C225C7F 227F [Trivial] +5C225C80 2280 [Trivial][ILSEQ] +5C225C81 2281 [Trivial][ILSEQ] +5C225C9F 229F [Trivial][ILSEQ] +5C225CA0 22A0 [Trivial][ILSEQ] +5C225CA1 22A1 [Trivial][ILSEQ] +5C225CE0 22E0 [Trivial][ILSEQ] +5C225CEF 22EF [Trivial][ILSEQ] +5C225CF9 22F9 [Trivial][ILSEQ] +5C225CFA 22FA [Trivial][ILSEQ] +5C225CFC 22FC [Trivial][ILSEQ] +5C225CFD 22FD [Trivial][ILSEQ] +5C225CFE 22FE [Trivial][ILSEQ] +5C225CFF 22FF [Trivial][ILSEQ] +5C255C00 5C2500 [Regular] +5C255C08 5C2508 [Regular] +5C255C09 5C2509 [Regular] +5C255C0A 5C250A [Regular] +5C255C0D 5C250D [Regular] +5C255C1A 5C251A [Regular] +5C255C22 5C2522 [Regular] +5C255C25 5C255C25 [Preserve][LIKE] +5C255C27 5C2527 [Regular] +5C255C30 5C2500 [Regular] +5C255C3F 5C253F [Regular] +5C255C40 5C2540 [Regular] 
+5C255C5A 5C251A [Regular] +5C255C5C 5C255C [Regular] +5C255C5F 5C255C5F [Preserve][LIKE] +5C255C61 5C2561 [Regular] +5C255C62 5C2508 [Regular] +5C255C6E 5C250A [Regular] +5C255C72 5C250D [Regular] +5C255C74 5C2509 [Regular] +5C255C7E 5C257E [Regular] +5C255C7F 5C257F [Regular] +5C255C80 5C2580 [Regular][ILSEQ] +5C255C81 5C2581 [Regular][ILSEQ] +5C255C9F 5C259F [Regular][ILSEQ] +5C255CA0 5C25A0 [Regular][ILSEQ] +5C255CA1 5C25A1 [Regular][ILSEQ] +5C255CE0 5C25E0 [Regular][ILSEQ] +5C255CEF 5C25EF [Regular][ILSEQ] +5C255CF9 5C25F9 [Regular][ILSEQ] +5C255CFA 5C25FA [Regular][ILSEQ] +5C255CFC 5C25FC [Regular][ILSEQ] +5C255CFD 5C25FD [Regular][ILSEQ] +5C255CFE 5C25FE [Regular][ILSEQ] +5C255CFF 5C25FF [Regular][ILSEQ] +5C275C00 2700 [Trivial] +5C275C08 2708 [Trivial] +5C275C09 2709 [Trivial] +5C275C0A 270A [Trivial] +5C275C0D 270D [Trivial] +5C275C1A 271A [Trivial] +5C275C22 2722 [Trivial] +5C275C25 275C25 [Regular] +5C275C27 2727 [Trivial] +5C275C30 2700 [Regular] +5C275C3F 273F [Trivial] +5C275C40 2740 [Trivial] +5C275C5A 271A [Regular] +5C275C5C 275C [Regular] +5C275C5F 275C5F [Regular] +5C275C61 2761 [Trivial] +5C275C62 2708 [Regular] +5C275C6E 270A [Regular] +5C275C72 270D [Regular] +5C275C74 2709 [Regular] +5C275C7E 277E [Trivial] +5C275C7F 277F [Trivial] +5C275C80 2780 [Trivial][ILSEQ] +5C275C81 2781 [Trivial][ILSEQ] +5C275C9F 279F [Trivial][ILSEQ] +5C275CA0 27A0 [Trivial][ILSEQ] +5C275CA1 27A1 [Trivial][ILSEQ] +5C275CE0 27E0 [Trivial][ILSEQ] +5C275CEF 27EF [Trivial][ILSEQ] +5C275CF9 27F9 [Trivial][ILSEQ] +5C275CFA 27FA [Trivial][ILSEQ] +5C275CFC 27FC [Trivial][ILSEQ] +5C275CFD 27FD [Trivial][ILSEQ] +5C275CFE 27FE [Trivial][ILSEQ] +5C275CFF 27FF [Trivial][ILSEQ] +5C305C00 0000 [Regular] +5C305C08 0008 [Regular] +5C305C09 0009 [Regular] +5C305C0A 000A [Regular] +5C305C0D 000D [Regular] +5C305C1A 001A [Regular] +5C305C22 0022 [Regular] +5C305C25 005C25 [Regular] +5C305C27 0027 [Regular] +5C305C30 0000 [Regular] +5C305C3F 003F [Regular] +5C305C40 0040 [Regular] +5C305C5A 001A [Regular] +5C305C5C 005C [Regular] +5C305C5F 005C5F [Regular] +5C305C61 0061 [Regular] +5C305C62 0008 [Regular] +5C305C6E 000A [Regular] +5C305C72 000D [Regular] +5C305C74 0009 [Regular] +5C305C7E 007E [Regular] +5C305C7F 007F [Regular] +5C305C80 0080 [Regular][ILSEQ] +5C305C81 0081 [Regular][ILSEQ] +5C305C9F 009F [Regular][ILSEQ] +5C305CA0 00A0 [Regular][ILSEQ] +5C305CA1 00A1 [Regular][ILSEQ] +5C305CE0 00E0 [Regular][ILSEQ] +5C305CEF 00EF [Regular][ILSEQ] +5C305CF9 00F9 [Regular][ILSEQ] +5C305CFA 00FA [Regular][ILSEQ] +5C305CFC 00FC [Regular][ILSEQ] +5C305CFD 00FD [Regular][ILSEQ] +5C305CFE 00FE [Regular][ILSEQ] +5C305CFF 00FF [Regular][ILSEQ] +5C3F5C00 3F00 [Trivial] +5C3F5C08 3F08 [Trivial] +5C3F5C09 3F09 [Trivial] +5C3F5C0A 3F0A [Trivial] +5C3F5C0D 3F0D [Trivial] +5C3F5C1A 3F1A [Trivial] +5C3F5C22 3F22 [Trivial] +5C3F5C25 3F5C25 [Regular] +5C3F5C27 3F27 [Trivial] +5C3F5C30 3F00 [Regular] +5C3F5C3F 3F3F [Trivial] +5C3F5C40 3F40 [Trivial] +5C3F5C5A 3F1A [Regular] +5C3F5C5C 3F5C [Regular] +5C3F5C5F 3F5C5F [Regular] +5C3F5C61 3F61 [Trivial] +5C3F5C62 3F08 [Regular] +5C3F5C6E 3F0A [Regular] +5C3F5C72 3F0D [Regular] +5C3F5C74 3F09 [Regular] +5C3F5C7E 3F7E [Trivial] +5C3F5C7F 3F7F [Trivial] +5C3F5C80 3F80 [Trivial][ILSEQ] +5C3F5C81 3F81 [Trivial][ILSEQ] +5C3F5C9F 3F9F [Trivial][ILSEQ] +5C3F5CA0 3FA0 [Trivial][ILSEQ] +5C3F5CA1 3FA1 [Trivial][ILSEQ] +5C3F5CE0 3FE0 [Trivial][ILSEQ] +5C3F5CEF 3FEF [Trivial][ILSEQ] +5C3F5CF9 3FF9 [Trivial][ILSEQ] +5C3F5CFA 3FFA [Trivial][ILSEQ] +5C3F5CFC 3FFC [Trivial][ILSEQ] +5C3F5CFD 3FFD 
[Trivial][ILSEQ] +5C3F5CFE 3FFE [Trivial][ILSEQ] +5C3F5CFF 3FFF [Trivial][ILSEQ] +5C405C00 4000 [Trivial] +5C405C08 4008 [Trivial] +5C405C09 4009 [Trivial] +5C405C0A 400A [Trivial] +5C405C0D 400D [Trivial] +5C405C1A 401A [Trivial] +5C405C22 4022 [Trivial] +5C405C25 405C25 [Regular] +5C405C27 4027 [Trivial] +5C405C30 4000 [Regular] +5C405C3F 403F [Trivial] +5C405C40 4040 [Trivial] +5C405C5A 401A [Regular] +5C405C5C 405C [Regular] +5C405C5F 405C5F [Regular] +5C405C61 4061 [Trivial] +5C405C62 4008 [Regular] +5C405C6E 400A [Regular] +5C405C72 400D [Regular] +5C405C74 4009 [Regular] +5C405C7E 407E [Trivial] +5C405C7F 407F [Trivial] +5C405C80 4080 [Trivial][ILSEQ] +5C405C81 4081 [Trivial][ILSEQ] +5C405C9F 409F [Trivial][ILSEQ] +5C405CA0 40A0 [Trivial][ILSEQ] +5C405CA1 40A1 [Trivial][ILSEQ] +5C405CE0 40E0 [Trivial][ILSEQ] +5C405CEF 40EF [Trivial][ILSEQ] +5C405CF9 40F9 [Trivial][ILSEQ] +5C405CFA 40FA [Trivial][ILSEQ] +5C405CFC 40FC [Trivial][ILSEQ] +5C405CFD 40FD [Trivial][ILSEQ] +5C405CFE 40FE [Trivial][ILSEQ] +5C405CFF 40FF [Trivial][ILSEQ] +5C5A5C00 1A00 [Regular] +5C5A5C08 1A08 [Regular] +5C5A5C09 1A09 [Regular] +5C5A5C0A 1A0A [Regular] +5C5A5C0D 1A0D [Regular] +5C5A5C1A 1A1A [Regular] +5C5A5C22 1A22 [Regular] +5C5A5C25 1A5C25 [Regular] +5C5A5C27 1A27 [Regular] +5C5A5C30 1A00 [Regular] +5C5A5C3F 1A3F [Regular] +5C5A5C40 1A40 [Regular] +5C5A5C5A 1A1A [Regular] +5C5A5C5C 1A5C [Regular] +5C5A5C5F 1A5C5F [Regular] +5C5A5C61 1A61 [Regular] +5C5A5C62 1A08 [Regular] +5C5A5C6E 1A0A [Regular] +5C5A5C72 1A0D [Regular] +5C5A5C74 1A09 [Regular] +5C5A5C7E 1A7E [Regular] +5C5A5C7F 1A7F [Regular] +5C5A5C80 1A80 [Regular][ILSEQ] +5C5A5C81 1A81 [Regular][ILSEQ] +5C5A5C9F 1A9F [Regular][ILSEQ] +5C5A5CA0 1AA0 [Regular][ILSEQ] +5C5A5CA1 1AA1 [Regular][ILSEQ] +5C5A5CE0 1AE0 [Regular][ILSEQ] +5C5A5CEF 1AEF [Regular][ILSEQ] +5C5A5CF9 1AF9 [Regular][ILSEQ] +5C5A5CFA 1AFA [Regular][ILSEQ] +5C5A5CFC 1AFC [Regular][ILSEQ] +5C5A5CFD 1AFD [Regular][ILSEQ] +5C5A5CFE 1AFE [Regular][ILSEQ] +5C5A5CFF 1AFF [Regular][ILSEQ] +5C5C5C00 5C00 [Regular] +5C5C5C08 5C08 [Regular] +5C5C5C09 5C09 [Regular] +5C5C5C0A 5C0A [Regular] +5C5C5C0D 5C0D [Regular] +5C5C5C1A 5C1A [Regular] +5C5C5C22 5C22 [Regular] +5C5C5C25 5C5C25 [Regular] +5C5C5C27 5C27 [Regular] +5C5C5C30 5C00 [Regular] +5C5C5C3F 5C3F [Regular] +5C5C5C40 5C40 [Regular] +5C5C5C5A 5C1A [Regular] +5C5C5C5C 5C5C [Regular] +5C5C5C5F 5C5C5F [Regular] +5C5C5C61 5C61 [Regular] +5C5C5C62 5C08 [Regular] +5C5C5C6E 5C0A [Regular] +5C5C5C72 5C0D [Regular] +5C5C5C74 5C09 [Regular] +5C5C5C7E 5C7E [Regular] +5C5C5C7F 5C7F [Regular] +5C5C5C80 5C80 [Regular][ILSEQ] +5C5C5C81 5C81 [Regular][ILSEQ] +5C5C5C9F 5C9F [Regular][ILSEQ] +5C5C5CA0 5CA0 [Regular][ILSEQ] +5C5C5CA1 5CA1 [Regular][ILSEQ] +5C5C5CE0 5CE0 [Regular][ILSEQ] +5C5C5CEF 5CEF [Regular][ILSEQ] +5C5C5CF9 5CF9 [Regular][ILSEQ] +5C5C5CFA 5CFA [Regular][ILSEQ] +5C5C5CFC 5CFC [Regular][ILSEQ] +5C5C5CFD 5CFD [Regular][ILSEQ] +5C5C5CFE 5CFE [Regular][ILSEQ] +5C5C5CFF 5CFF [Regular][ILSEQ] +5C5F5C00 5C5F00 [Regular] +5C5F5C08 5C5F08 [Regular] +5C5F5C09 5C5F09 [Regular] +5C5F5C0A 5C5F0A [Regular] +5C5F5C0D 5C5F0D [Regular] +5C5F5C1A 5C5F1A [Regular] +5C5F5C22 5C5F22 [Regular] +5C5F5C25 5C5F5C25 [Preserve][LIKE] +5C5F5C27 5C5F27 [Regular] +5C5F5C30 5C5F00 [Regular] +5C5F5C3F 5C5F3F [Regular] +5C5F5C40 5C5F40 [Regular] +5C5F5C5A 5C5F1A [Regular] +5C5F5C5C 5C5F5C [Regular] +5C5F5C5F 5C5F5C5F [Preserve][LIKE] +5C5F5C61 5C5F61 [Regular] +5C5F5C62 5C5F08 [Regular] +5C5F5C6E 5C5F0A [Regular] +5C5F5C72 5C5F0D [Regular] +5C5F5C74 5C5F09 [Regular] 
+5C5F5C7E 5C5F7E [Regular] +5C5F5C7F 5C5F7F [Regular] +5C5F5C80 5C5F80 [Regular][ILSEQ] +5C5F5C81 5C5F81 [Regular][ILSEQ] +5C5F5C9F 5C5F9F [Regular][ILSEQ] +5C5F5CA0 5C5FA0 [Regular][ILSEQ] +5C5F5CA1 5C5FA1 [Regular][ILSEQ] +5C5F5CE0 5C5FE0 [Regular][ILSEQ] +5C5F5CEF 5C5FEF [Regular][ILSEQ] +5C5F5CF9 5C5FF9 [Regular][ILSEQ] +5C5F5CFA 5C5FFA [Regular][ILSEQ] +5C5F5CFC 5C5FFC [Regular][ILSEQ] +5C5F5CFD 5C5FFD [Regular][ILSEQ] +5C5F5CFE 5C5FFE [Regular][ILSEQ] +5C5F5CFF 5C5FFF [Regular][ILSEQ] +5C615C00 6100 [Trivial] +5C615C08 6108 [Trivial] +5C615C09 6109 [Trivial] +5C615C0A 610A [Trivial] +5C615C0D 610D [Trivial] +5C615C1A 611A [Trivial] +5C615C22 6122 [Trivial] +5C615C25 615C25 [Regular] +5C615C27 6127 [Trivial] +5C615C30 6100 [Regular] +5C615C3F 613F [Trivial] +5C615C40 6140 [Trivial] +5C615C5A 611A [Regular] +5C615C5C 615C [Regular] +5C615C5F 615C5F [Regular] +5C615C61 6161 [Trivial] +5C615C62 6108 [Regular] +5C615C6E 610A [Regular] +5C615C72 610D [Regular] +5C615C74 6109 [Regular] +5C615C7E 617E [Trivial] +5C615C7F 617F [Trivial] +5C615C80 6180 [Trivial][ILSEQ] +5C615C81 6181 [Trivial][ILSEQ] +5C615C9F 619F [Trivial][ILSEQ] +5C615CA0 61A0 [Trivial][ILSEQ] +5C615CA1 61A1 [Trivial][ILSEQ] +5C615CE0 61E0 [Trivial][ILSEQ] +5C615CEF 61EF [Trivial][ILSEQ] +5C615CF9 61F9 [Trivial][ILSEQ] +5C615CFA 61FA [Trivial][ILSEQ] +5C615CFC 61FC [Trivial][ILSEQ] +5C615CFD 61FD [Trivial][ILSEQ] +5C615CFE 61FE [Trivial][ILSEQ] +5C615CFF 61FF [Trivial][ILSEQ] +5C625C00 0800 [Regular] +5C625C08 0808 [Regular] +5C625C09 0809 [Regular] +5C625C0A 080A [Regular] +5C625C0D 080D [Regular] +5C625C1A 081A [Regular] +5C625C22 0822 [Regular] +5C625C25 085C25 [Regular] +5C625C27 0827 [Regular] +5C625C30 0800 [Regular] +5C625C3F 083F [Regular] +5C625C40 0840 [Regular] +5C625C5A 081A [Regular] +5C625C5C 085C [Regular] +5C625C5F 085C5F [Regular] +5C625C61 0861 [Regular] +5C625C62 0808 [Regular] +5C625C6E 080A [Regular] +5C625C72 080D [Regular] +5C625C74 0809 [Regular] +5C625C7E 087E [Regular] +5C625C7F 087F [Regular] +5C625C80 0880 [Regular][ILSEQ] +5C625C81 0881 [Regular][ILSEQ] +5C625C9F 089F [Regular][ILSEQ] +5C625CA0 08A0 [Regular][ILSEQ] +5C625CA1 08A1 [Regular][ILSEQ] +5C625CE0 08E0 [Regular][ILSEQ] +5C625CEF 08EF [Regular][ILSEQ] +5C625CF9 08F9 [Regular][ILSEQ] +5C625CFA 08FA [Regular][ILSEQ] +5C625CFC 08FC [Regular][ILSEQ] +5C625CFD 08FD [Regular][ILSEQ] +5C625CFE 08FE [Regular][ILSEQ] +5C625CFF 08FF [Regular][ILSEQ] +5C6E5C00 0A00 [Regular] +5C6E5C08 0A08 [Regular] +5C6E5C09 0A09 [Regular] +5C6E5C0A 0A0A [Regular] +5C6E5C0D 0A0D [Regular] +5C6E5C1A 0A1A [Regular] +5C6E5C22 0A22 [Regular] +5C6E5C25 0A5C25 [Regular] +5C6E5C27 0A27 [Regular] +5C6E5C30 0A00 [Regular] +5C6E5C3F 0A3F [Regular] +5C6E5C40 0A40 [Regular] +5C6E5C5A 0A1A [Regular] +5C6E5C5C 0A5C [Regular] +5C6E5C5F 0A5C5F [Regular] +5C6E5C61 0A61 [Regular] +5C6E5C62 0A08 [Regular] +5C6E5C6E 0A0A [Regular] +5C6E5C72 0A0D [Regular] +5C6E5C74 0A09 [Regular] +5C6E5C7E 0A7E [Regular] +5C6E5C7F 0A7F [Regular] +5C6E5C80 0A80 [Regular][ILSEQ] +5C6E5C81 0A81 [Regular][ILSEQ] +5C6E5C9F 0A9F [Regular][ILSEQ] +5C6E5CA0 0AA0 [Regular][ILSEQ] +5C6E5CA1 0AA1 [Regular][ILSEQ] +5C6E5CE0 0AE0 [Regular][ILSEQ] +5C6E5CEF 0AEF [Regular][ILSEQ] +5C6E5CF9 0AF9 [Regular][ILSEQ] +5C6E5CFA 0AFA [Regular][ILSEQ] +5C6E5CFC 0AFC [Regular][ILSEQ] +5C6E5CFD 0AFD [Regular][ILSEQ] +5C6E5CFE 0AFE [Regular][ILSEQ] +5C6E5CFF 0AFF [Regular][ILSEQ] +5C725C00 0D00 [Regular] +5C725C08 0D08 [Regular] +5C725C09 0D09 [Regular] +5C725C0A 0D0A [Regular] +5C725C0D 0D0D [Regular] +5C725C1A 0D1A 
[Regular] +5C725C22 0D22 [Regular] +5C725C25 0D5C25 [Regular] +5C725C27 0D27 [Regular] +5C725C30 0D00 [Regular] +5C725C3F 0D3F [Regular] +5C725C40 0D40 [Regular] +5C725C5A 0D1A [Regular] +5C725C5C 0D5C [Regular] +5C725C5F 0D5C5F [Regular] +5C725C61 0D61 [Regular] +5C725C62 0D08 [Regular] +5C725C6E 0D0A [Regular] +5C725C72 0D0D [Regular] +5C725C74 0D09 [Regular] +5C725C7E 0D7E [Regular] +5C725C7F 0D7F [Regular] +5C725C80 0D80 [Regular][ILSEQ] +5C725C81 0D81 [Regular][ILSEQ] +5C725C9F 0D9F [Regular][ILSEQ] +5C725CA0 0DA0 [Regular][ILSEQ] +5C725CA1 0DA1 [Regular][ILSEQ] +5C725CE0 0DE0 [Regular][ILSEQ] +5C725CEF 0DEF [Regular][ILSEQ] +5C725CF9 0DF9 [Regular][ILSEQ] +5C725CFA 0DFA [Regular][ILSEQ] +5C725CFC 0DFC [Regular][ILSEQ] +5C725CFD 0DFD [Regular][ILSEQ] +5C725CFE 0DFE [Regular][ILSEQ] +5C725CFF 0DFF [Regular][ILSEQ] +5C745C00 0900 [Regular] +5C745C08 0908 [Regular] +5C745C09 0909 [Regular] +5C745C0A 090A [Regular] +5C745C0D 090D [Regular] +5C745C1A 091A [Regular] +5C745C22 0922 [Regular] +5C745C25 095C25 [Regular] +5C745C27 0927 [Regular] +5C745C30 0900 [Regular] +5C745C3F 093F [Regular] +5C745C40 0940 [Regular] +5C745C5A 091A [Regular] +5C745C5C 095C [Regular] +5C745C5F 095C5F [Regular] +5C745C61 0961 [Regular] +5C745C62 0908 [Regular] +5C745C6E 090A [Regular] +5C745C72 090D [Regular] +5C745C74 0909 [Regular] +5C745C7E 097E [Regular] +5C745C7F 097F [Regular] +5C745C80 0980 [Regular][ILSEQ] +5C745C81 0981 [Regular][ILSEQ] +5C745C9F 099F [Regular][ILSEQ] +5C745CA0 09A0 [Regular][ILSEQ] +5C745CA1 09A1 [Regular][ILSEQ] +5C745CE0 09E0 [Regular][ILSEQ] +5C745CEF 09EF [Regular][ILSEQ] +5C745CF9 09F9 [Regular][ILSEQ] +5C745CFA 09FA [Regular][ILSEQ] +5C745CFC 09FC [Regular][ILSEQ] +5C745CFD 09FD [Regular][ILSEQ] +5C745CFE 09FE [Regular][ILSEQ] +5C745CFF 09FF [Regular][ILSEQ] +5C7E5C00 7E00 [Trivial] +5C7E5C08 7E08 [Trivial] +5C7E5C09 7E09 [Trivial] +5C7E5C0A 7E0A [Trivial] +5C7E5C0D 7E0D [Trivial] +5C7E5C1A 7E1A [Trivial] +5C7E5C22 7E22 [Trivial] +5C7E5C25 7E5C25 [Regular] +5C7E5C27 7E27 [Trivial] +5C7E5C30 7E00 [Regular] +5C7E5C3F 7E3F [Trivial] +5C7E5C40 7E40 [Trivial] +5C7E5C5A 7E1A [Regular] +5C7E5C5C 7E5C [Regular] +5C7E5C5F 7E5C5F [Regular] +5C7E5C61 7E61 [Trivial] +5C7E5C62 7E08 [Regular] +5C7E5C6E 7E0A [Regular] +5C7E5C72 7E0D [Regular] +5C7E5C74 7E09 [Regular] +5C7E5C7E 7E7E [Trivial] +5C7E5C7F 7E7F [Trivial] +5C7E5C80 7E80 [Trivial][ILSEQ] +5C7E5C81 7E81 [Trivial][ILSEQ] +5C7E5C9F 7E9F [Trivial][ILSEQ] +5C7E5CA0 7EA0 [Trivial][ILSEQ] +5C7E5CA1 7EA1 [Trivial][ILSEQ] +5C7E5CE0 7EE0 [Trivial][ILSEQ] +5C7E5CEF 7EEF [Trivial][ILSEQ] +5C7E5CF9 7EF9 [Trivial][ILSEQ] +5C7E5CFA 7EFA [Trivial][ILSEQ] +5C7E5CFC 7EFC [Trivial][ILSEQ] +5C7E5CFD 7EFD [Trivial][ILSEQ] +5C7E5CFE 7EFE [Trivial][ILSEQ] +5C7E5CFF 7EFF [Trivial][ILSEQ] +5C7F5C00 7F00 [Trivial] +5C7F5C08 7F08 [Trivial] +5C7F5C09 7F09 [Trivial] +5C7F5C0A 7F0A [Trivial] +5C7F5C0D 7F0D [Trivial] +5C7F5C1A 7F1A [Trivial] +5C7F5C22 7F22 [Trivial] +5C7F5C25 7F5C25 [Regular] +5C7F5C27 7F27 [Trivial] +5C7F5C30 7F00 [Regular] +5C7F5C3F 7F3F [Trivial] +5C7F5C40 7F40 [Trivial] +5C7F5C5A 7F1A [Regular] +5C7F5C5C 7F5C [Regular] +5C7F5C5F 7F5C5F [Regular] +5C7F5C61 7F61 [Trivial] +5C7F5C62 7F08 [Regular] +5C7F5C6E 7F0A [Regular] +5C7F5C72 7F0D [Regular] +5C7F5C74 7F09 [Regular] +5C7F5C7E 7F7E [Trivial] +5C7F5C7F 7F7F [Trivial] +5C7F5C80 7F80 [Trivial][ILSEQ] +5C7F5C81 7F81 [Trivial][ILSEQ] +5C7F5C9F 7F9F [Trivial][ILSEQ] +5C7F5CA0 7FA0 [Trivial][ILSEQ] +5C7F5CA1 7FA1 [Trivial][ILSEQ] +5C7F5CE0 7FE0 [Trivial][ILSEQ] +5C7F5CEF 7FEF [Trivial][ILSEQ] 
+5C7F5CF9 7FF9 [Trivial][ILSEQ] +5C7F5CFA 7FFA [Trivial][ILSEQ] +5C7F5CFC 7FFC [Trivial][ILSEQ] +5C7F5CFD 7FFD [Trivial][ILSEQ] +5C7F5CFE 7FFE [Trivial][ILSEQ] +5C7F5CFF 7FFF [Trivial][ILSEQ] +5C805C00 8000 [Trivial][ILSEQ] +5C805C08 8008 [Trivial][ILSEQ] +5C805C09 8009 [Trivial][ILSEQ] +5C805C0A 800A [Trivial][ILSEQ] +5C805C0D 800D [Trivial][ILSEQ] +5C805C1A 801A [Trivial][ILSEQ] +5C805C22 8022 [Trivial][ILSEQ] +5C805C25 805C25 [Regular][ILSEQ] +5C805C27 8027 [Trivial][ILSEQ] +5C805C30 8000 [Regular][ILSEQ] +5C805C3F 803F [Trivial][ILSEQ] +5C805C40 8040 [Trivial][ILSEQ] +5C805C5A 801A [Regular][ILSEQ] +5C805C5C 805C [Regular][ILSEQ] +5C805C5F 805C5F [Regular][ILSEQ] +5C805C61 8061 [Trivial][ILSEQ] +5C805C62 8008 [Regular][ILSEQ] +5C805C6E 800A [Regular][ILSEQ] +5C805C72 800D [Regular][ILSEQ] +5C805C74 8009 [Regular][ILSEQ] +5C805C7E 807E [Trivial][ILSEQ] +5C805C7F 807F [Trivial][ILSEQ] +5C805C80 8080 [Trivial][ILSEQ] +5C805C81 8081 [Trivial][ILSEQ] +5C805C9F 809F [Trivial][ILSEQ] +5C805CA0 80A0 [Trivial][ILSEQ] +5C805CA1 80A1 [Trivial][ILSEQ] +5C805CE0 80E0 [Trivial][ILSEQ] +5C805CEF 80EF [Trivial][ILSEQ] +5C805CF9 80F9 [Trivial][ILSEQ] +5C805CFA 80FA [Trivial][ILSEQ] +5C805CFC 80FC [Trivial][ILSEQ] +5C805CFD 80FD [Trivial][ILSEQ] +5C805CFE 80FE [Trivial][ILSEQ] +5C805CFF 80FF [Trivial][ILSEQ] +5C815C00 8100 [Trivial][ILSEQ] +5C815C08 8108 [Trivial][ILSEQ] +5C815C09 8109 [Trivial][ILSEQ] +5C815C0A 810A [Trivial][ILSEQ] +5C815C0D 810D [Trivial][ILSEQ] +5C815C1A 811A [Trivial][ILSEQ] +5C815C22 8122 [Trivial][ILSEQ] +5C815C25 815C25 [Regular][ILSEQ] +5C815C27 8127 [Trivial][ILSEQ] +5C815C30 8100 [Regular][ILSEQ] +5C815C3F 813F [Trivial][ILSEQ] +5C815C40 8140 [Trivial][ILSEQ] +5C815C5A 811A [Regular][ILSEQ] +5C815C5C 815C [Regular][ILSEQ] +5C815C5F 815C5F [Regular][ILSEQ] +5C815C61 8161 [Trivial][ILSEQ] +5C815C62 8108 [Regular][ILSEQ] +5C815C6E 810A [Regular][ILSEQ] +5C815C72 810D [Regular][ILSEQ] +5C815C74 8109 [Regular][ILSEQ] +5C815C7E 817E [Trivial][ILSEQ] +5C815C7F 817F [Trivial][ILSEQ] +5C815C80 8180 [Trivial][ILSEQ] +5C815C81 8181 [Trivial][ILSEQ] +5C815C9F 819F [Trivial][ILSEQ] +5C815CA0 81A0 [Trivial][ILSEQ] +5C815CA1 81A1 [Trivial][ILSEQ] +5C815CE0 81E0 [Trivial][ILSEQ] +5C815CEF 81EF [Trivial][ILSEQ] +5C815CF9 81F9 [Trivial][ILSEQ] +5C815CFA 81FA [Trivial][ILSEQ] +5C815CFC 81FC [Trivial][ILSEQ] +5C815CFD 81FD [Trivial][ILSEQ] +5C815CFE 81FE [Trivial][ILSEQ] +5C815CFF 81FF [Trivial][ILSEQ] +5C9F5C00 9F00 [Trivial][ILSEQ] +5C9F5C08 9F08 [Trivial][ILSEQ] +5C9F5C09 9F09 [Trivial][ILSEQ] +5C9F5C0A 9F0A [Trivial][ILSEQ] +5C9F5C0D 9F0D [Trivial][ILSEQ] +5C9F5C1A 9F1A [Trivial][ILSEQ] +5C9F5C22 9F22 [Trivial][ILSEQ] +5C9F5C25 9F5C25 [Regular][ILSEQ] +5C9F5C27 9F27 [Trivial][ILSEQ] +5C9F5C30 9F00 [Regular][ILSEQ] +5C9F5C3F 9F3F [Trivial][ILSEQ] +5C9F5C40 9F40 [Trivial][ILSEQ] +5C9F5C5A 9F1A [Regular][ILSEQ] +5C9F5C5C 9F5C [Regular][ILSEQ] +5C9F5C5F 9F5C5F [Regular][ILSEQ] +5C9F5C61 9F61 [Trivial][ILSEQ] +5C9F5C62 9F08 [Regular][ILSEQ] +5C9F5C6E 9F0A [Regular][ILSEQ] +5C9F5C72 9F0D [Regular][ILSEQ] +5C9F5C74 9F09 [Regular][ILSEQ] +5C9F5C7E 9F7E [Trivial][ILSEQ] +5C9F5C7F 9F7F [Trivial][ILSEQ] +5C9F5C80 9F80 [Trivial][ILSEQ] +5C9F5C81 9F81 [Trivial][ILSEQ] +5C9F5C9F 9F9F [Trivial][ILSEQ] +5C9F5CA0 9FA0 [Trivial][ILSEQ] +5C9F5CA1 9FA1 [Trivial][ILSEQ] +5C9F5CE0 9FE0 [Trivial][ILSEQ] +5C9F5CEF 9FEF [Trivial][ILSEQ] +5C9F5CF9 9FF9 [Trivial][ILSEQ] +5C9F5CFA 9FFA [Trivial][ILSEQ] +5C9F5CFC 9FFC [Trivial][ILSEQ] +5C9F5CFD 9FFD [Trivial][ILSEQ] +5C9F5CFE 9FFE [Trivial][ILSEQ] +5C9F5CFF 9FFF 
[Trivial][ILSEQ] +5CA05C00 A000 [Trivial][ILSEQ] +5CA05C08 A008 [Trivial][ILSEQ] +5CA05C09 A009 [Trivial][ILSEQ] +5CA05C0A A00A [Trivial][ILSEQ] +5CA05C0D A00D [Trivial][ILSEQ] +5CA05C1A A01A [Trivial][ILSEQ] +5CA05C22 A022 [Trivial][ILSEQ] +5CA05C25 A05C25 [Regular][ILSEQ] +5CA05C27 A027 [Trivial][ILSEQ] +5CA05C30 A000 [Regular][ILSEQ] +5CA05C3F A03F [Trivial][ILSEQ] +5CA05C40 A040 [Trivial][ILSEQ] +5CA05C5A A01A [Regular][ILSEQ] +5CA05C5C A05C [Regular][ILSEQ] +5CA05C5F A05C5F [Regular][ILSEQ] +5CA05C61 A061 [Trivial][ILSEQ] +5CA05C62 A008 [Regular][ILSEQ] +5CA05C6E A00A [Regular][ILSEQ] +5CA05C72 A00D [Regular][ILSEQ] +5CA05C74 A009 [Regular][ILSEQ] +5CA05C7E A07E [Trivial][ILSEQ] +5CA05C7F A07F [Trivial][ILSEQ] +5CA05C80 A080 [Trivial][ILSEQ] +5CA05C81 A081 [Trivial][ILSEQ] +5CA05C9F A09F [Trivial][ILSEQ] +5CA05CA0 A0A0 [Trivial][ILSEQ] +5CA05CA1 A0A1 [Trivial][ILSEQ] +5CA05CE0 A0E0 [Trivial][ILSEQ] +5CA05CEF A0EF [Trivial][ILSEQ] +5CA05CF9 A0F9 [Trivial][ILSEQ] +5CA05CFA A0FA [Trivial][ILSEQ] +5CA05CFC A0FC [Trivial][ILSEQ] +5CA05CFD A0FD [Trivial][ILSEQ] +5CA05CFE A0FE [Trivial][ILSEQ] +5CA05CFF A0FF [Trivial][ILSEQ] +5CA15C00 A100 [Trivial][ILSEQ] +5CA15C08 A108 [Trivial][ILSEQ] +5CA15C09 A109 [Trivial][ILSEQ] +5CA15C0A A10A [Trivial][ILSEQ] +5CA15C0D A10D [Trivial][ILSEQ] +5CA15C1A A11A [Trivial][ILSEQ] +5CA15C22 A122 [Trivial][ILSEQ] +5CA15C25 A15C25 [Regular][ILSEQ] +5CA15C27 A127 [Trivial][ILSEQ] +5CA15C30 A100 [Regular][ILSEQ] +5CA15C3F A13F [Trivial][ILSEQ] +5CA15C40 A140 [Trivial][ILSEQ] +5CA15C5A A11A [Regular][ILSEQ] +5CA15C5C A15C [Regular][ILSEQ] +5CA15C5F A15C5F [Regular][ILSEQ] +5CA15C61 A161 [Trivial][ILSEQ] +5CA15C62 A108 [Regular][ILSEQ] +5CA15C6E A10A [Regular][ILSEQ] +5CA15C72 A10D [Regular][ILSEQ] +5CA15C74 A109 [Regular][ILSEQ] +5CA15C7E A17E [Trivial][ILSEQ] +5CA15C7F A17F [Trivial][ILSEQ] +5CA15C80 A180 [Trivial][ILSEQ] +5CA15C81 A181 [Trivial][ILSEQ] +5CA15C9F A19F [Trivial][ILSEQ] +5CA15CA0 A1A0 [Trivial][ILSEQ] +5CA15CA1 A1A1 [Trivial][FIXED][USER] +5CA15CE0 A1E0 [Trivial][FIXED][USER] +5CA15CEF A1EF [Trivial][FIXED][USER] +5CA15CF9 A1F9 [Trivial][FIXED][USER] +5CA15CFA A1FA [Trivial][FIXED][USER] +5CA15CFC A1FC [Trivial][FIXED][USER] +5CA15CFD A1FD [Trivial][FIXED][USER] +5CA15CFE A1FE [Trivial][FIXED][USER] +5CA15CFF A1FF [Trivial][ILSEQ] +5CE05C00 E000 [Trivial][ILSEQ] +5CE05C08 E008 [Trivial][ILSEQ] +5CE05C09 E009 [Trivial][ILSEQ] +5CE05C0A E00A [Trivial][ILSEQ] +5CE05C0D E00D [Trivial][ILSEQ] +5CE05C1A E01A [Trivial][ILSEQ] +5CE05C22 E022 [Trivial][ILSEQ] +5CE05C25 E05C25 [Regular][ILSEQ] +5CE05C27 E027 [Trivial][ILSEQ] +5CE05C30 E000 [Regular][ILSEQ] +5CE05C3F E03F [Trivial][ILSEQ] +5CE05C40 E040 [Trivial][ILSEQ] +5CE05C5A E01A [Regular][ILSEQ] +5CE05C5C E05C [Regular][ILSEQ] +5CE05C5F E05C5F [Regular][ILSEQ] +5CE05C61 E061 [Trivial][ILSEQ] +5CE05C62 E008 [Regular][ILSEQ] +5CE05C6E E00A [Regular][ILSEQ] +5CE05C72 E00D [Regular][ILSEQ] +5CE05C74 E009 [Regular][ILSEQ] +5CE05C7E E07E [Trivial][ILSEQ] +5CE05C7F E07F [Trivial][ILSEQ] +5CE05C80 E080 [Trivial][ILSEQ] +5CE05C81 E081 [Trivial][ILSEQ] +5CE05C9F E09F [Trivial][ILSEQ] +5CE05CA0 E0A0 [Trivial][ILSEQ] +5CE05CA1 E0A1 [Trivial][FIXED][USER] +5CE05CE0 E0E0 [Trivial][FIXED][USER] +5CE05CEF E0EF [Trivial][FIXED][USER] +5CE05CF9 E0F9 [Trivial][FIXED][USER] +5CE05CFA E0FA [Trivial][FIXED][USER] +5CE05CFC E0FC [Trivial][FIXED][USER] +5CE05CFD E0FD [Trivial][FIXED][USER] +5CE05CFE E0FE [Trivial][FIXED][USER] +5CE05CFF E0FF [Trivial][ILSEQ] +5CEF5C00 EF00 [Trivial][ILSEQ] +5CEF5C08 EF08 [Trivial][ILSEQ] 
+5CEF5C09 EF09 [Trivial][ILSEQ] +5CEF5C0A EF0A [Trivial][ILSEQ] +5CEF5C0D EF0D [Trivial][ILSEQ] +5CEF5C1A EF1A [Trivial][ILSEQ] +5CEF5C22 EF22 [Trivial][ILSEQ] +5CEF5C25 EF5C25 [Regular][ILSEQ] +5CEF5C27 EF27 [Trivial][ILSEQ] +5CEF5C30 EF00 [Regular][ILSEQ] +5CEF5C3F EF3F [Trivial][ILSEQ] +5CEF5C40 EF40 [Trivial][ILSEQ] +5CEF5C5A EF1A [Regular][ILSEQ] +5CEF5C5C EF5C [Regular][ILSEQ] +5CEF5C5F EF5C5F [Regular][ILSEQ] +5CEF5C61 EF61 [Trivial][ILSEQ] +5CEF5C62 EF08 [Regular][ILSEQ] +5CEF5C6E EF0A [Regular][ILSEQ] +5CEF5C72 EF0D [Regular][ILSEQ] +5CEF5C74 EF09 [Regular][ILSEQ] +5CEF5C7E EF7E [Trivial][ILSEQ] +5CEF5C7F EF7F [Trivial][ILSEQ] +5CEF5C80 EF80 [Trivial][ILSEQ] +5CEF5C81 EF81 [Trivial][ILSEQ] +5CEF5C9F EF9F [Trivial][ILSEQ] +5CEF5CA0 EFA0 [Trivial][ILSEQ] +5CEF5CA1 EFA1 [Trivial][FIXED][USER] +5CEF5CE0 EFE0 [Trivial][FIXED][USER] +5CEF5CEF EFEF [Trivial][FIXED][USER] +5CEF5CF9 EFF9 [Trivial][FIXED][USER] +5CEF5CFA EFFA [Trivial][FIXED][USER] +5CEF5CFC EFFC [Trivial][FIXED][USER] +5CEF5CFD EFFD [Trivial][FIXED][USER] +5CEF5CFE EFFE [Trivial][FIXED][USER] +5CEF5CFF EFFF [Trivial][ILSEQ] +5CF95C00 F900 [Trivial][ILSEQ] +5CF95C08 F908 [Trivial][ILSEQ] +5CF95C09 F909 [Trivial][ILSEQ] +5CF95C0A F90A [Trivial][ILSEQ] +5CF95C0D F90D [Trivial][ILSEQ] +5CF95C1A F91A [Trivial][ILSEQ] +5CF95C22 F922 [Trivial][ILSEQ] +5CF95C25 F95C25 [Regular][ILSEQ] +5CF95C27 F927 [Trivial][ILSEQ] +5CF95C30 F900 [Regular][ILSEQ] +5CF95C3F F93F [Trivial][ILSEQ] +5CF95C40 F940 [Trivial][ILSEQ] +5CF95C5A F91A [Regular][ILSEQ] +5CF95C5C F95C [Regular][ILSEQ] +5CF95C5F F95C5F [Regular][ILSEQ] +5CF95C61 F961 [Trivial][ILSEQ] +5CF95C62 F908 [Regular][ILSEQ] +5CF95C6E F90A [Regular][ILSEQ] +5CF95C72 F90D [Regular][ILSEQ] +5CF95C74 F909 [Regular][ILSEQ] +5CF95C7E F97E [Trivial][ILSEQ] +5CF95C7F F97F [Trivial][ILSEQ] +5CF95C80 F980 [Trivial][ILSEQ] +5CF95C81 F981 [Trivial][ILSEQ] +5CF95C9F F99F [Trivial][ILSEQ] +5CF95CA0 F9A0 [Trivial][ILSEQ] +5CF95CA1 F9A1 [Trivial][ILSEQ] +5CF95CE0 F9E0 [Trivial][ILSEQ] +5CF95CEF F9EF [Trivial][ILSEQ] +5CF95CF9 F9F9 [Trivial][ILSEQ] +5CF95CFA F9FA [Trivial][ILSEQ] +5CF95CFC F9FC [Trivial][ILSEQ] +5CF95CFD F9FD [Trivial][ILSEQ] +5CF95CFE F9FE [Trivial][ILSEQ] +5CF95CFF F9FF [Trivial][ILSEQ] +5CFA5C00 FA00 [Trivial][ILSEQ] +5CFA5C08 FA08 [Trivial][ILSEQ] +5CFA5C09 FA09 [Trivial][ILSEQ] +5CFA5C0A FA0A [Trivial][ILSEQ] +5CFA5C0D FA0D [Trivial][ILSEQ] +5CFA5C1A FA1A [Trivial][ILSEQ] +5CFA5C22 FA22 [Trivial][ILSEQ] +5CFA5C25 FA5C25 [Regular][ILSEQ] +5CFA5C27 FA27 [Trivial][ILSEQ] +5CFA5C30 FA00 [Regular][ILSEQ] +5CFA5C3F FA3F [Trivial][ILSEQ] +5CFA5C40 FA40 [Trivial][ILSEQ] +5CFA5C5A FA1A [Regular][ILSEQ] +5CFA5C5C FA5C [Regular][ILSEQ] +5CFA5C5F FA5C5F [Regular][ILSEQ] +5CFA5C61 FA61 [Trivial][ILSEQ] +5CFA5C62 FA08 [Regular][ILSEQ] +5CFA5C6E FA0A [Regular][ILSEQ] +5CFA5C72 FA0D [Regular][ILSEQ] +5CFA5C74 FA09 [Regular][ILSEQ] +5CFA5C7E FA7E [Trivial][ILSEQ] +5CFA5C7F FA7F [Trivial][ILSEQ] +5CFA5C80 FA80 [Trivial][ILSEQ] +5CFA5C81 FA81 [Trivial][ILSEQ] +5CFA5C9F FA9F [Trivial][ILSEQ] +5CFA5CA0 FAA0 [Trivial][ILSEQ] +5CFA5CA1 FAA1 [Trivial][ILSEQ] +5CFA5CE0 FAE0 [Trivial][ILSEQ] +5CFA5CEF FAEF [Trivial][ILSEQ] +5CFA5CF9 FAF9 [Trivial][ILSEQ] +5CFA5CFA FAFA [Trivial][ILSEQ] +5CFA5CFC FAFC [Trivial][ILSEQ] +5CFA5CFD FAFD [Trivial][ILSEQ] +5CFA5CFE FAFE [Trivial][ILSEQ] +5CFA5CFF FAFF [Trivial][ILSEQ] +5CFC5C00 FC00 [Trivial][ILSEQ] +5CFC5C08 FC08 [Trivial][ILSEQ] +5CFC5C09 FC09 [Trivial][ILSEQ] +5CFC5C0A FC0A [Trivial][ILSEQ] +5CFC5C0D FC0D [Trivial][ILSEQ] +5CFC5C1A FC1A [Trivial][ILSEQ] 
+5CFC5C22 FC22 [Trivial][ILSEQ] +5CFC5C25 FC5C25 [Regular][ILSEQ] +5CFC5C27 FC27 [Trivial][ILSEQ] +5CFC5C30 FC00 [Regular][ILSEQ] +5CFC5C3F FC3F [Trivial][ILSEQ] +5CFC5C40 FC40 [Trivial][ILSEQ] +5CFC5C5A FC1A [Regular][ILSEQ] +5CFC5C5C FC5C [Regular][ILSEQ] +5CFC5C5F FC5C5F [Regular][ILSEQ] +5CFC5C61 FC61 [Trivial][ILSEQ] +5CFC5C62 FC08 [Regular][ILSEQ] +5CFC5C6E FC0A [Regular][ILSEQ] +5CFC5C72 FC0D [Regular][ILSEQ] +5CFC5C74 FC09 [Regular][ILSEQ] +5CFC5C7E FC7E [Trivial][ILSEQ] +5CFC5C7F FC7F [Trivial][ILSEQ] +5CFC5C80 FC80 [Trivial][ILSEQ] +5CFC5C81 FC81 [Trivial][ILSEQ] +5CFC5C9F FC9F [Trivial][ILSEQ] +5CFC5CA0 FCA0 [Trivial][ILSEQ] +5CFC5CA1 FCA1 [Trivial][ILSEQ] +5CFC5CE0 FCE0 [Trivial][ILSEQ] +5CFC5CEF FCEF [Trivial][ILSEQ] +5CFC5CF9 FCF9 [Trivial][ILSEQ] +5CFC5CFA FCFA [Trivial][ILSEQ] +5CFC5CFC FCFC [Trivial][ILSEQ] +5CFC5CFD FCFD [Trivial][ILSEQ] +5CFC5CFE FCFE [Trivial][ILSEQ] +5CFC5CFF FCFF [Trivial][ILSEQ] +5CFD5C00 FD00 [Trivial][ILSEQ] +5CFD5C08 FD08 [Trivial][ILSEQ] +5CFD5C09 FD09 [Trivial][ILSEQ] +5CFD5C0A FD0A [Trivial][ILSEQ] +5CFD5C0D FD0D [Trivial][ILSEQ] +5CFD5C1A FD1A [Trivial][ILSEQ] +5CFD5C22 FD22 [Trivial][ILSEQ] +5CFD5C25 FD5C25 [Regular][ILSEQ] +5CFD5C27 FD27 [Trivial][ILSEQ] +5CFD5C30 FD00 [Regular][ILSEQ] +5CFD5C3F FD3F [Trivial][ILSEQ] +5CFD5C40 FD40 [Trivial][ILSEQ] +5CFD5C5A FD1A [Regular][ILSEQ] +5CFD5C5C FD5C [Regular][ILSEQ] +5CFD5C5F FD5C5F [Regular][ILSEQ] +5CFD5C61 FD61 [Trivial][ILSEQ] +5CFD5C62 FD08 [Regular][ILSEQ] +5CFD5C6E FD0A [Regular][ILSEQ] +5CFD5C72 FD0D [Regular][ILSEQ] +5CFD5C74 FD09 [Regular][ILSEQ] +5CFD5C7E FD7E [Trivial][ILSEQ] +5CFD5C7F FD7F [Trivial][ILSEQ] +5CFD5C80 FD80 [Trivial][ILSEQ] +5CFD5C81 FD81 [Trivial][ILSEQ] +5CFD5C9F FD9F [Trivial][ILSEQ] +5CFD5CA0 FDA0 [Trivial][ILSEQ] +5CFD5CA1 FDA1 [Trivial][ILSEQ] +5CFD5CE0 FDE0 [Trivial][ILSEQ] +5CFD5CEF FDEF [Trivial][ILSEQ] +5CFD5CF9 FDF9 [Trivial][ILSEQ] +5CFD5CFA FDFA [Trivial][ILSEQ] +5CFD5CFC FDFC [Trivial][ILSEQ] +5CFD5CFD FDFD [Trivial][ILSEQ] +5CFD5CFE FDFE [Trivial][ILSEQ] +5CFD5CFF FDFF [Trivial][ILSEQ] +5CFE5C00 FE00 [Trivial][ILSEQ] +5CFE5C08 FE08 [Trivial][ILSEQ] +5CFE5C09 FE09 [Trivial][ILSEQ] +5CFE5C0A FE0A [Trivial][ILSEQ] +5CFE5C0D FE0D [Trivial][ILSEQ] +5CFE5C1A FE1A [Trivial][ILSEQ] +5CFE5C22 FE22 [Trivial][ILSEQ] +5CFE5C25 FE5C25 [Regular][ILSEQ] +5CFE5C27 FE27 [Trivial][ILSEQ] +5CFE5C30 FE00 [Regular][ILSEQ] +5CFE5C3F FE3F [Trivial][ILSEQ] +5CFE5C40 FE40 [Trivial][ILSEQ] +5CFE5C5A FE1A [Regular][ILSEQ] +5CFE5C5C FE5C [Regular][ILSEQ] +5CFE5C5F FE5C5F [Regular][ILSEQ] +5CFE5C61 FE61 [Trivial][ILSEQ] +5CFE5C62 FE08 [Regular][ILSEQ] +5CFE5C6E FE0A [Regular][ILSEQ] +5CFE5C72 FE0D [Regular][ILSEQ] +5CFE5C74 FE09 [Regular][ILSEQ] +5CFE5C7E FE7E [Trivial][ILSEQ] +5CFE5C7F FE7F [Trivial][ILSEQ] +5CFE5C80 FE80 [Trivial][ILSEQ] +5CFE5C81 FE81 [Trivial][ILSEQ] +5CFE5C9F FE9F [Trivial][ILSEQ] +5CFE5CA0 FEA0 [Trivial][ILSEQ] +5CFE5CA1 FEA1 [Trivial][ILSEQ] +5CFE5CE0 FEE0 [Trivial][ILSEQ] +5CFE5CEF FEEF [Trivial][ILSEQ] +5CFE5CF9 FEF9 [Trivial][ILSEQ] +5CFE5CFA FEFA [Trivial][ILSEQ] +5CFE5CFC FEFC [Trivial][ILSEQ] +5CFE5CFD FEFD [Trivial][ILSEQ] +5CFE5CFE FEFE [Trivial][ILSEQ] +5CFE5CFF FEFF [Trivial][ILSEQ] +5CFF5C00 FF00 [Trivial][ILSEQ] +5CFF5C08 FF08 [Trivial][ILSEQ] +5CFF5C09 FF09 [Trivial][ILSEQ] +5CFF5C0A FF0A [Trivial][ILSEQ] +5CFF5C0D FF0D [Trivial][ILSEQ] +5CFF5C1A FF1A [Trivial][ILSEQ] +5CFF5C22 FF22 [Trivial][ILSEQ] +5CFF5C25 FF5C25 [Regular][ILSEQ] +5CFF5C27 FF27 [Trivial][ILSEQ] +5CFF5C30 FF00 [Regular][ILSEQ] +5CFF5C3F FF3F [Trivial][ILSEQ] +5CFF5C40 FF40 
[Trivial][ILSEQ]
+5CFF5C5A FF1A [Regular][ILSEQ]
+5CFF5C5C FF5C [Regular][ILSEQ]
+5CFF5C5F FF5C5F [Regular][ILSEQ]
+5CFF5C61 FF61 [Trivial][ILSEQ]
+5CFF5C62 FF08 [Regular][ILSEQ]
+5CFF5C6E FF0A [Regular][ILSEQ]
+5CFF5C72 FF0D [Regular][ILSEQ]
+5CFF5C74 FF09 [Regular][ILSEQ]
+5CFF5C7E FF7E [Trivial][ILSEQ]
+5CFF5C7F FF7F [Trivial][ILSEQ]
+5CFF5C80 FF80 [Trivial][ILSEQ]
+5CFF5C81 FF81 [Trivial][ILSEQ]
+5CFF5C9F FF9F [Trivial][ILSEQ]
+5CFF5CA0 FFA0 [Trivial][ILSEQ]
+5CFF5CA1 FFA1 [Trivial][ILSEQ]
+5CFF5CE0 FFE0 [Trivial][ILSEQ]
+5CFF5CEF FFEF [Trivial][ILSEQ]
+5CFF5CF9 FFF9 [Trivial][ILSEQ]
+5CFF5CFA FFFA [Trivial][ILSEQ]
+5CFF5CFC FFFC [Trivial][ILSEQ]
+5CFF5CFD FFFD [Trivial][ILSEQ]
+5CFF5CFE FFFE [Trivial][ILSEQ]
+5CFF5CFF FFFF [Trivial][ILSEQ]
+DROP TABLE t1;
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+DROP FUNCTION unescape;
+DROP FUNCTION unescape_type;
+DROP FUNCTION wellformedness;
+DROP FUNCTION mysql_real_escape_string_generated;
+DROP FUNCTION iswellformed;
+DROP TABLE allbytes;
+# End of ctype_backslash.inc
+#
+# End of 10.0 tests
+#
diff --git a/mysql-test/r/ctype_gbk.result b/mysql-test/r/ctype_gbk.result
index fd4941f9146..c5d997b0213 100644
--- a/mysql-test/r/ctype_gbk.result
+++ b/mysql-test/r/ctype_gbk.result
@@ -1681,3 +1681,3268 @@ hex(weight_string(cast(0x8EA18EA18EA1 as char),25, 4, 0xC0))
#
# End of 5.6 tests
#
+#
+# Start of 10.0 tests
+#
+# Start of ctype_unescape.inc
+SET @query=_binary'SELECT CHARSET(\'test\'),@@character_set_client,@@character_set_connection';
+PREPARE stmt FROM @query;
+EXECUTE stmt;
+CHARSET('test') @@character_set_client @@character_set_connection
+gbk gbk gbk
+DEALLOCATE PREPARE stmt;
+CREATE TABLE allbytes (a VARBINARY(10));
+# Using selected bytes combinations
+CREATE TABLE halfs (a INT);
+INSERT INTO halfs VALUES (0x00),(0x01),(0x02),(0x03),(0x04),(0x05),(0x06),(0x07);
+INSERT INTO halfs VALUES (0x08),(0x09),(0x0A),(0x0B),(0x0C),(0x0D),(0x0E),(0x0F);
+CREATE TEMPORARY TABLE bytes (a BINARY(1), KEY(a)) ENGINE=MyISAM;
+INSERT INTO bytes SELECT CHAR((t1.a << 4) | t2.a USING BINARY) FROM halfs t1, halfs t2;
+DROP TABLE halfs;
+CREATE TABLE selected_bytes (a VARBINARY(10));
+INSERT INTO selected_bytes (a) VALUES ('\0'),('\b'),('\t'),('\r'),('\n'),('\Z');
+INSERT INTO selected_bytes (a) VALUES ('0'),('b'),('t'),('r'),('n'),('Z');
+INSERT INTO selected_bytes (a) VALUES ('\\'),('_'),('%'),(0x22),(0x27);
+INSERT INTO selected_bytes (a) VALUES ('a');
+INSERT INTO selected_bytes (a) VALUES
+(0x3F), # 7bit
+(0x40), # 7bit mbtail
+(0x7E), # 7bit mbtail nonascii-8bit
+(0x7F), # 7bit nonascii-8bit
+(0x80), # mbtail bad-mb
+(0x81), # mbhead mbtail
+(0x9F), # mbhead mbtail bad-mb
+(0xA0), # mbhead mbtail bad-mb
+(0xA1), # mbhead mbtail nonascii-8bit
+(0xE0), # mbhead mbtail
+(0xEF), # mbhead mbtail
+(0xF9), # mbhead mbtail
+(0xFA), # mbhead mbtail bad-mb
+(0xFC), # mbhead mbtail bad-mb
+(0xFD), # mbhead mbtail bad-mb
+(0xFE), # mbhead mbtail bad-mb
+(0xFF);
+INSERT INTO allbytes (a) SELECT a FROM bytes;
+INSERT INTO allbytes (a) SELECT CONCAT(t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2;
+INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2;
+INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,0x5C,t2.a) FROM selected_bytes t1,selected_bytes t2;
+DROP TABLE selected_bytes;
+DELETE FROM allbytes WHERE
+OCTET_LENGTH(a)>1 AND
+LOCATE(0x5C,a)=0 AND
+a NOT LIKE '%\'%' AND
+ a NOT LIKE '%"%';
+CREATE PROCEDURE p1(val VARBINARY(10))
+BEGIN
+DECLARE EXIT HANDLER FOR SQLSTATE '42000' INSERT INTO t1 (a,b) VALUES(val,NULL);
+SET @query=CONCAT(_binary"INSERT INTO t1 (a,b) VALUES (0x",HEX(val),",'",val,"')");
+PREPARE stmt FROM @query;
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+END//
+CREATE PROCEDURE p2()
+BEGIN
+DECLARE val VARBINARY(10);
+DECLARE done INT DEFAULT FALSE;
+DECLARE stmt CURSOR FOR SELECT a FROM allbytes;
+DECLARE CONTINUE HANDLER FOR NOT FOUND SET done=TRUE;
+OPEN stmt;
+read_loop1: LOOP
+FETCH stmt INTO val;
+IF done THEN
+LEAVE read_loop1;
+END IF;
+CALL p1(val);
+END LOOP;
+CLOSE stmt;
+END//
+CREATE FUNCTION iswellformed(a VARBINARY(256)) RETURNS INT RETURN a=BINARY CONVERT(a USING gbk);//
+CREATE FUNCTION unescape(a VARBINARY(256)) RETURNS VARBINARY(256)
+BEGIN
+# We need to do it in a way to avoid producing new escape sequences
+# First, enclose all known escape sequences to '{{xx}}'
+ # - Backslash not followed by a LIKE pattern characters _ and %
+# - Double escapes
+# This uses PCRE Branch Reset Groups: (?|(alt1)|(alt2)|(alt3)).
+# So '\\1' in the last argument always means the match, no matter
+# which alternative it came from.
+SET a=REGEXP_REPLACE(a,'(?|(\\\\[^_%])|(\\x{27}\\x{27}))','{{\\1}}');
+# Now unescape all enclosed standard escape sequences
+SET a=REPLACE(a,'{{\\0}}', '\0');
+SET a=REPLACE(a,'{{\\b}}', '\b');
+SET a=REPLACE(a,'{{\\t}}', '\t');
+SET a=REPLACE(a,'{{\\r}}', '\r');
+SET a=REPLACE(a,'{{\\n}}', '\n');
+SET a=REPLACE(a,'{{\\Z}}', '\Z');
+SET a=REPLACE(a,'{{\\\'}}', '\'');
+# Unescape double quotes
+SET a=REPLACE(a,'{{\'\'}}', '\'');
+ # Unescape the rest: all other \x sequences mean just 'x'
+ SET a=REGEXP_REPLACE(a, '{{\\\\(.|\\R)}}', '\\1');
+ RETURN a;
+END//
+CREATE FUNCTION unescape_type(a VARBINARY(256),b VARBINARY(256)) RETURNS VARBINARY(256)
+BEGIN
+RETURN CASE
+WHEN b IS NULL THEN '[SyntErr]'
+ WHEN a=b THEN CASE
+WHEN OCTET_LENGTH(a)=1 THEN '[Preserve]'
+ WHEN a RLIKE '\\\\[_%]' THEN '[Preserve][LIKE]'
+ WHEN a RLIKE '^[[:ascii:]]+$' THEN '[Preserve][ASCII]'
+ ELSE '[Preserv][MB]' END
+WHEN REPLACE(a,0x5C,'')=b THEN '[Trivial]'
+ WHEN UNESCAPE(a)=b THEN '[Regular]'
+ ELSE '[Special]' END;
+END//
+CREATE FUNCTION wellformedness(a VARBINARY(256), b VARBINARY(256))
+RETURNS VARBINARY(256)
+BEGIN
+RETURN CASE
+WHEN b IS NULL THEN ''
+ WHEN NOT iswellformed(a) AND iswellformed(b) THEN '[FIXED]'
+ WHEN iswellformed(a) AND NOT iswellformed(b) THEN '[BROKE]'
+ WHEN NOT iswellformed(a) AND NOT iswellformed(b) THEN '[ILSEQ]'
+ ELSE ''
+ END;
+END//
+CREATE FUNCTION mysql_real_escape_string_generated(a VARBINARY(256))
+RETURNS VARBINARY(256)
+BEGIN
+DECLARE a1 BINARY(1) DEFAULT SUBSTR(a,1,1);
+DECLARE a2 BINARY(1) DEFAULT SUBSTR(a,2,1);
+DECLARE a3 BINARY(1) DEFAULT SUBSTR(a,3,1);
+DECLARE a4 BINARY(1) DEFAULT SUBSTR(a,4,1);
+DECLARE a2a4 BINARY(2) DEFAULT CONCAT(a2,a4);
+RETURN CASE
+WHEN (a1=0x5C) AND
+(a3=0x5C) AND
+(a2>0x7F) AND
+(a4 NOT IN ('_','%','0','t','r','n','Z')) AND
+iswellformed(a2a4) THEN '[USER]'
+ ELSE ''
+ END;
+END//
+CREATE TABLE t1 (a VARBINARY(10),b VARBINARY(10));
+CALL p2();
+SELECT HEX(a),HEX(b),
+CONCAT(unescape_type(a,b),
+wellformedness(a,b),
+mysql_real_escape_string_generated(a),
+IF(UNESCAPE(a)<>b,CONCAT('[BAD',HEX(UNESCAPE(a)),']'),'')) AS comment
+FROM t1 ORDER BY LENGTH(a),a;
+HEX(a) HEX(b) comment
+00 00 [Preserve]
+01 01 [Preserve]
+02 02 [Preserve]
+03 03 [Preserve]
+04 04 [Preserve]
+05 05 [Preserve]
+06 06 [Preserve]
+07 07 [Preserve]
+08 08 [Preserve]
+09 09 [Preserve]
+0A 0A [Preserve]
+0B 0B [Preserve]
+0C 0C [Preserve]
+0D 0D [Preserve]
+0E 0E [Preserve]
+0F 0F [Preserve]
+10 10 [Preserve]
+11 11 [Preserve]
+12 12
[Preserve] +13 13 [Preserve] +14 14 [Preserve] +15 15 [Preserve] +16 16 [Preserve] +17 17 [Preserve] +18 18 [Preserve] +19 19 [Preserve] +1A 1A [Preserve] +1B 1B [Preserve] +1C 1C [Preserve] +1D 1D [Preserve] +1E 1E [Preserve] +1F 1F [Preserve] +20 20 [Preserve] +21 21 [Preserve] +22 22 [Preserve] +23 23 [Preserve] +24 24 [Preserve] +25 25 [Preserve] +26 26 [Preserve] +27 NULL [SyntErr] +28 28 [Preserve] +29 29 [Preserve] +2A 2A [Preserve] +2B 2B [Preserve] +2C 2C [Preserve] +2D 2D [Preserve] +2E 2E [Preserve] +2F 2F [Preserve] +30 30 [Preserve] +31 31 [Preserve] +32 32 [Preserve] +33 33 [Preserve] +34 34 [Preserve] +35 35 [Preserve] +36 36 [Preserve] +37 37 [Preserve] +38 38 [Preserve] +39 39 [Preserve] +3A 3A [Preserve] +3B 3B [Preserve] +3C 3C [Preserve] +3D 3D [Preserve] +3E 3E [Preserve] +3F 3F [Preserve] +40 40 [Preserve] +41 41 [Preserve] +42 42 [Preserve] +43 43 [Preserve] +44 44 [Preserve] +45 45 [Preserve] +46 46 [Preserve] +47 47 [Preserve] +48 48 [Preserve] +49 49 [Preserve] +4A 4A [Preserve] +4B 4B [Preserve] +4C 4C [Preserve] +4D 4D [Preserve] +4E 4E [Preserve] +4F 4F [Preserve] +50 50 [Preserve] +51 51 [Preserve] +52 52 [Preserve] +53 53 [Preserve] +54 54 [Preserve] +55 55 [Preserve] +56 56 [Preserve] +57 57 [Preserve] +58 58 [Preserve] +59 59 [Preserve] +5A 5A [Preserve] +5B 5B [Preserve] +5C NULL [SyntErr] +5D 5D [Preserve] +5E 5E [Preserve] +5F 5F [Preserve] +60 60 [Preserve] +61 61 [Preserve] +62 62 [Preserve] +63 63 [Preserve] +64 64 [Preserve] +65 65 [Preserve] +66 66 [Preserve] +67 67 [Preserve] +68 68 [Preserve] +69 69 [Preserve] +6A 6A [Preserve] +6B 6B [Preserve] +6C 6C [Preserve] +6D 6D [Preserve] +6E 6E [Preserve] +6F 6F [Preserve] +70 70 [Preserve] +71 71 [Preserve] +72 72 [Preserve] +73 73 [Preserve] +74 74 [Preserve] +75 75 [Preserve] +76 76 [Preserve] +77 77 [Preserve] +78 78 [Preserve] +79 79 [Preserve] +7A 7A [Preserve] +7B 7B [Preserve] +7C 7C [Preserve] +7D 7D [Preserve] +7E 7E [Preserve] +7F 7F [Preserve] +80 80 [Preserve][ILSEQ] +81 81 [Preserve][ILSEQ] +82 82 [Preserve][ILSEQ] +83 83 [Preserve][ILSEQ] +84 84 [Preserve][ILSEQ] +85 85 [Preserve][ILSEQ] +86 86 [Preserve][ILSEQ] +87 87 [Preserve][ILSEQ] +88 88 [Preserve][ILSEQ] +89 89 [Preserve][ILSEQ] +8A 8A [Preserve][ILSEQ] +8B 8B [Preserve][ILSEQ] +8C 8C [Preserve][ILSEQ] +8D 8D [Preserve][ILSEQ] +8E 8E [Preserve][ILSEQ] +8F 8F [Preserve][ILSEQ] +90 90 [Preserve][ILSEQ] +91 91 [Preserve][ILSEQ] +92 92 [Preserve][ILSEQ] +93 93 [Preserve][ILSEQ] +94 94 [Preserve][ILSEQ] +95 95 [Preserve][ILSEQ] +96 96 [Preserve][ILSEQ] +97 97 [Preserve][ILSEQ] +98 98 [Preserve][ILSEQ] +99 99 [Preserve][ILSEQ] +9A 9A [Preserve][ILSEQ] +9B 9B [Preserve][ILSEQ] +9C 9C [Preserve][ILSEQ] +9D 9D [Preserve][ILSEQ] +9E 9E [Preserve][ILSEQ] +9F 9F [Preserve][ILSEQ] +A0 A0 [Preserve][ILSEQ] +A1 A1 [Preserve][ILSEQ] +A2 A2 [Preserve][ILSEQ] +A3 A3 [Preserve][ILSEQ] +A4 A4 [Preserve][ILSEQ] +A5 A5 [Preserve][ILSEQ] +A6 A6 [Preserve][ILSEQ] +A7 A7 [Preserve][ILSEQ] +A8 A8 [Preserve][ILSEQ] +A9 A9 [Preserve][ILSEQ] +AA AA [Preserve][ILSEQ] +AB AB [Preserve][ILSEQ] +AC AC [Preserve][ILSEQ] +AD AD [Preserve][ILSEQ] +AE AE [Preserve][ILSEQ] +AF AF [Preserve][ILSEQ] +B0 B0 [Preserve][ILSEQ] +B1 B1 [Preserve][ILSEQ] +B2 B2 [Preserve][ILSEQ] +B3 B3 [Preserve][ILSEQ] +B4 B4 [Preserve][ILSEQ] +B5 B5 [Preserve][ILSEQ] +B6 B6 [Preserve][ILSEQ] +B7 B7 [Preserve][ILSEQ] +B8 B8 [Preserve][ILSEQ] +B9 B9 [Preserve][ILSEQ] +BA BA [Preserve][ILSEQ] +BB BB [Preserve][ILSEQ] +BC BC [Preserve][ILSEQ] +BD BD [Preserve][ILSEQ] +BE BE [Preserve][ILSEQ] +BF 
BF [Preserve][ILSEQ] +C0 C0 [Preserve][ILSEQ] +C1 C1 [Preserve][ILSEQ] +C2 C2 [Preserve][ILSEQ] +C3 C3 [Preserve][ILSEQ] +C4 C4 [Preserve][ILSEQ] +C5 C5 [Preserve][ILSEQ] +C6 C6 [Preserve][ILSEQ] +C7 C7 [Preserve][ILSEQ] +C8 C8 [Preserve][ILSEQ] +C9 C9 [Preserve][ILSEQ] +CA CA [Preserve][ILSEQ] +CB CB [Preserve][ILSEQ] +CC CC [Preserve][ILSEQ] +CD CD [Preserve][ILSEQ] +CE CE [Preserve][ILSEQ] +CF CF [Preserve][ILSEQ] +D0 D0 [Preserve][ILSEQ] +D1 D1 [Preserve][ILSEQ] +D2 D2 [Preserve][ILSEQ] +D3 D3 [Preserve][ILSEQ] +D4 D4 [Preserve][ILSEQ] +D5 D5 [Preserve][ILSEQ] +D6 D6 [Preserve][ILSEQ] +D7 D7 [Preserve][ILSEQ] +D8 D8 [Preserve][ILSEQ] +D9 D9 [Preserve][ILSEQ] +DA DA [Preserve][ILSEQ] +DB DB [Preserve][ILSEQ] +DC DC [Preserve][ILSEQ] +DD DD [Preserve][ILSEQ] +DE DE [Preserve][ILSEQ] +DF DF [Preserve][ILSEQ] +E0 E0 [Preserve][ILSEQ] +E1 E1 [Preserve][ILSEQ] +E2 E2 [Preserve][ILSEQ] +E3 E3 [Preserve][ILSEQ] +E4 E4 [Preserve][ILSEQ] +E5 E5 [Preserve][ILSEQ] +E6 E6 [Preserve][ILSEQ] +E7 E7 [Preserve][ILSEQ] +E8 E8 [Preserve][ILSEQ] +E9 E9 [Preserve][ILSEQ] +EA EA [Preserve][ILSEQ] +EB EB [Preserve][ILSEQ] +EC EC [Preserve][ILSEQ] +ED ED [Preserve][ILSEQ] +EE EE [Preserve][ILSEQ] +EF EF [Preserve][ILSEQ] +F0 F0 [Preserve][ILSEQ] +F1 F1 [Preserve][ILSEQ] +F2 F2 [Preserve][ILSEQ] +F3 F3 [Preserve][ILSEQ] +F4 F4 [Preserve][ILSEQ] +F5 F5 [Preserve][ILSEQ] +F6 F6 [Preserve][ILSEQ] +F7 F7 [Preserve][ILSEQ] +F8 F8 [Preserve][ILSEQ] +F9 F9 [Preserve][ILSEQ] +FA FA [Preserve][ILSEQ] +FB FB [Preserve][ILSEQ] +FC FC [Preserve][ILSEQ] +FD FD [Preserve][ILSEQ] +FE FE [Preserve][ILSEQ] +FF FF [Preserve][ILSEQ] +0022 0022 [Preserve][ASCII] +0027 NULL [SyntErr] +005C NULL [SyntErr] +0822 0822 [Preserve][ASCII] +0827 NULL [SyntErr] +085C NULL [SyntErr] +0922 0922 [Preserve][ASCII] +0927 NULL [SyntErr] +095C NULL [SyntErr] +0A22 0A22 [Preserve][ASCII] +0A27 NULL [SyntErr] +0A5C NULL [SyntErr] +0D22 0D22 [Preserve][ASCII] +0D27 NULL [SyntErr] +0D5C NULL [SyntErr] +1A22 1A22 [Preserve][ASCII] +1A27 NULL [SyntErr] +1A5C NULL [SyntErr] +2200 2200 [Preserve][ASCII] +2208 2208 [Preserve][ASCII] +2209 2209 [Preserve][ASCII] +220A 220A [Preserve][ASCII] +220D 220D [Preserve][ASCII] +221A 221A [Preserve][ASCII] +2222 2222 [Preserve][ASCII] +2225 2225 [Preserve][ASCII] +2227 NULL [SyntErr] +2230 2230 [Preserve][ASCII] +223F 223F [Preserve][ASCII] +2240 2240 [Preserve][ASCII] +225A 225A [Preserve][ASCII] +225C NULL [SyntErr] +225F 225F [Preserve][ASCII] +2261 2261 [Preserve][ASCII] +2262 2262 [Preserve][ASCII] +226E 226E [Preserve][ASCII] +2272 2272 [Preserve][ASCII] +2274 2274 [Preserve][ASCII] +227E 227E [Preserve][ASCII] +227F 227F [Preserve][ASCII] +2280 2280 [Preserv][MB][ILSEQ] +2281 2281 [Preserv][MB][ILSEQ] +229F 229F [Preserv][MB][ILSEQ] +22A0 22A0 [Preserv][MB][ILSEQ] +22A1 22A1 [Preserv][MB][ILSEQ] +22E0 22E0 [Preserv][MB][ILSEQ] +22EF 22EF [Preserv][MB][ILSEQ] +22F9 22F9 [Preserv][MB][ILSEQ] +22FA 22FA [Preserv][MB][ILSEQ] +22FC 22FC [Preserv][MB][ILSEQ] +22FD 22FD [Preserv][MB][ILSEQ] +22FE 22FE [Preserv][MB][ILSEQ] +22FF 22FF [Preserv][MB][ILSEQ] +2522 2522 [Preserve][ASCII] +2527 NULL [SyntErr] +255C NULL [SyntErr] +2700 NULL [SyntErr] +2708 NULL [SyntErr] +2709 NULL [SyntErr] +270A NULL [SyntErr] +270D NULL [SyntErr] +271A NULL [SyntErr] +2722 NULL [SyntErr] +2725 NULL [SyntErr] +2727 27 [Regular] +2730 NULL [SyntErr] +273F NULL [SyntErr] +2740 NULL [SyntErr] +275A NULL [SyntErr] +275C NULL [SyntErr] +275F NULL [SyntErr] +2761 NULL [SyntErr] +2762 NULL [SyntErr] +276E NULL [SyntErr] +2772 NULL [SyntErr] 
+2774 NULL [SyntErr] +277E NULL [SyntErr] +277F NULL [SyntErr] +2780 NULL [SyntErr] +2781 NULL [SyntErr] +279F NULL [SyntErr] +27A0 NULL [SyntErr] +27A1 NULL [SyntErr] +27E0 NULL [SyntErr] +27EF NULL [SyntErr] +27F9 NULL [SyntErr] +27FA NULL [SyntErr] +27FC NULL [SyntErr] +27FD NULL [SyntErr] +27FE NULL [SyntErr] +27FF NULL [SyntErr] +3022 3022 [Preserve][ASCII] +3027 NULL [SyntErr] +305C NULL [SyntErr] +3F22 3F22 [Preserve][ASCII] +3F27 NULL [SyntErr] +3F5C NULL [SyntErr] +4022 4022 [Preserve][ASCII] +4027 NULL [SyntErr] +405C NULL [SyntErr] +5A22 5A22 [Preserve][ASCII] +5A27 NULL [SyntErr] +5A5C NULL [SyntErr] +5C00 00 [Trivial] +5C08 08 [Trivial] +5C09 09 [Trivial] +5C0A 0A [Trivial] +5C0D 0D [Trivial] +5C1A 1A [Trivial] +5C22 22 [Trivial] +5C25 5C25 [Preserve][LIKE] +5C27 27 [Trivial] +5C30 00 [Regular] +5C3F 3F [Trivial] +5C40 40 [Trivial] +5C5A 1A [Regular] +5C5C 5C [Regular] +5C5F 5C5F [Preserve][LIKE] +5C61 61 [Trivial] +5C62 08 [Regular] +5C6E 0A [Regular] +5C72 0D [Regular] +5C74 09 [Regular] +5C7E 7E [Trivial] +5C7F 7F [Trivial] +5C80 80 [Trivial][ILSEQ] +5C81 81 [Trivial][ILSEQ] +5C9F 9F [Trivial][ILSEQ] +5CA0 A0 [Trivial][ILSEQ] +5CA1 A1 [Trivial][ILSEQ] +5CE0 E0 [Trivial][ILSEQ] +5CEF EF [Trivial][ILSEQ] +5CF9 F9 [Trivial][ILSEQ] +5CFA FA [Trivial][ILSEQ] +5CFC FC [Trivial][ILSEQ] +5CFD FD [Trivial][ILSEQ] +5CFE FE [Trivial][ILSEQ] +5CFF FF [Trivial][ILSEQ] +5F22 5F22 [Preserve][ASCII] +5F27 NULL [SyntErr] +5F5C NULL [SyntErr] +6122 6122 [Preserve][ASCII] +6127 NULL [SyntErr] +615C NULL [SyntErr] +6222 6222 [Preserve][ASCII] +6227 NULL [SyntErr] +625C NULL [SyntErr] +6E22 6E22 [Preserve][ASCII] +6E27 NULL [SyntErr] +6E5C NULL [SyntErr] +7222 7222 [Preserve][ASCII] +7227 NULL [SyntErr] +725C NULL [SyntErr] +7422 7422 [Preserve][ASCII] +7427 NULL [SyntErr] +745C NULL [SyntErr] +7E22 7E22 [Preserve][ASCII] +7E27 NULL [SyntErr] +7E5C NULL [SyntErr] +7F22 7F22 [Preserve][ASCII] +7F27 NULL [SyntErr] +7F5C NULL [SyntErr] +8022 8022 [Preserv][MB][ILSEQ] +8027 NULL [SyntErr] +805C NULL [SyntErr] +8122 8122 [Preserv][MB][ILSEQ] +8127 NULL [SyntErr] +815C 815C [Preserv][MB] +9F22 9F22 [Preserv][MB][ILSEQ] +9F27 NULL [SyntErr] +9F5C 9F5C [Preserv][MB] +A022 A022 [Preserv][MB][ILSEQ] +A027 NULL [SyntErr] +A05C A05C [Preserv][MB] +A122 A122 [Preserv][MB][ILSEQ] +A127 NULL [SyntErr] +A15C A15C [Preserv][MB] +E022 E022 [Preserv][MB][ILSEQ] +E027 NULL [SyntErr] +E05C E05C [Preserv][MB] +EF22 EF22 [Preserv][MB][ILSEQ] +EF27 NULL [SyntErr] +EF5C EF5C [Preserv][MB] +F922 F922 [Preserv][MB][ILSEQ] +F927 NULL [SyntErr] +F95C F95C [Preserv][MB] +FA22 FA22 [Preserv][MB][ILSEQ] +FA27 NULL [SyntErr] +FA5C FA5C [Preserv][MB] +FC22 FC22 [Preserv][MB][ILSEQ] +FC27 NULL [SyntErr] +FC5C FC5C [Preserv][MB] +FD22 FD22 [Preserv][MB][ILSEQ] +FD27 NULL [SyntErr] +FD5C FD5C [Preserv][MB] +FE22 FE22 [Preserv][MB][ILSEQ] +FE27 NULL [SyntErr] +FE5C FE5C [Preserv][MB] +FF22 FF22 [Preserv][MB][ILSEQ] +FF27 NULL [SyntErr] +FF5C NULL [SyntErr] +5C0000 0000 [Trivial] +5C0008 0008 [Trivial] +5C0009 0009 [Trivial] +5C000A 000A [Trivial] +5C000D 000D [Trivial] +5C001A 001A [Trivial] +5C0022 0022 [Trivial] +5C0025 0025 [Trivial] +5C0027 NULL [SyntErr] +5C0030 0030 [Trivial] +5C003F 003F [Trivial] +5C0040 0040 [Trivial] +5C005A 005A [Trivial] +5C005C NULL [SyntErr] +5C005F 005F [Trivial] +5C0061 0061 [Trivial] +5C0062 0062 [Trivial] +5C006E 006E [Trivial] +5C0072 0072 [Trivial] +5C0074 0074 [Trivial] +5C007E 007E [Trivial] +5C007F 007F [Trivial] +5C0080 0080 [Trivial][ILSEQ] +5C0081 0081 [Trivial][ILSEQ] +5C009F 009F 
[Trivial][ILSEQ] +5C00A0 00A0 [Trivial][ILSEQ] +5C00A1 00A1 [Trivial][ILSEQ] +5C00E0 00E0 [Trivial][ILSEQ] +5C00EF 00EF [Trivial][ILSEQ] +5C00F9 00F9 [Trivial][ILSEQ] +5C00FA 00FA [Trivial][ILSEQ] +5C00FC 00FC [Trivial][ILSEQ] +5C00FD 00FD [Trivial][ILSEQ] +5C00FE 00FE [Trivial][ILSEQ] +5C00FF 00FF [Trivial][ILSEQ] +5C0800 0800 [Trivial] +5C0808 0808 [Trivial] +5C0809 0809 [Trivial] +5C080A 080A [Trivial] +5C080D 080D [Trivial] +5C081A 081A [Trivial] +5C0822 0822 [Trivial] +5C0825 0825 [Trivial] +5C0827 NULL [SyntErr] +5C0830 0830 [Trivial] +5C083F 083F [Trivial] +5C0840 0840 [Trivial] +5C085A 085A [Trivial] +5C085C NULL [SyntErr] +5C085F 085F [Trivial] +5C0861 0861 [Trivial] +5C0862 0862 [Trivial] +5C086E 086E [Trivial] +5C0872 0872 [Trivial] +5C0874 0874 [Trivial] +5C087E 087E [Trivial] +5C087F 087F [Trivial] +5C0880 0880 [Trivial][ILSEQ] +5C0881 0881 [Trivial][ILSEQ] +5C089F 089F [Trivial][ILSEQ] +5C08A0 08A0 [Trivial][ILSEQ] +5C08A1 08A1 [Trivial][ILSEQ] +5C08E0 08E0 [Trivial][ILSEQ] +5C08EF 08EF [Trivial][ILSEQ] +5C08F9 08F9 [Trivial][ILSEQ] +5C08FA 08FA [Trivial][ILSEQ] +5C08FC 08FC [Trivial][ILSEQ] +5C08FD 08FD [Trivial][ILSEQ] +5C08FE 08FE [Trivial][ILSEQ] +5C08FF 08FF [Trivial][ILSEQ] +5C0900 0900 [Trivial] +5C0908 0908 [Trivial] +5C0909 0909 [Trivial] +5C090A 090A [Trivial] +5C090D 090D [Trivial] +5C091A 091A [Trivial] +5C0922 0922 [Trivial] +5C0925 0925 [Trivial] +5C0927 NULL [SyntErr] +5C0930 0930 [Trivial] +5C093F 093F [Trivial] +5C0940 0940 [Trivial] +5C095A 095A [Trivial] +5C095C NULL [SyntErr] +5C095F 095F [Trivial] +5C0961 0961 [Trivial] +5C0962 0962 [Trivial] +5C096E 096E [Trivial] +5C0972 0972 [Trivial] +5C0974 0974 [Trivial] +5C097E 097E [Trivial] +5C097F 097F [Trivial] +5C0980 0980 [Trivial][ILSEQ] +5C0981 0981 [Trivial][ILSEQ] +5C099F 099F [Trivial][ILSEQ] +5C09A0 09A0 [Trivial][ILSEQ] +5C09A1 09A1 [Trivial][ILSEQ] +5C09E0 09E0 [Trivial][ILSEQ] +5C09EF 09EF [Trivial][ILSEQ] +5C09F9 09F9 [Trivial][ILSEQ] +5C09FA 09FA [Trivial][ILSEQ] +5C09FC 09FC [Trivial][ILSEQ] +5C09FD 09FD [Trivial][ILSEQ] +5C09FE 09FE [Trivial][ILSEQ] +5C09FF 09FF [Trivial][ILSEQ] +5C0A00 0A00 [Trivial] +5C0A08 0A08 [Trivial] +5C0A09 0A09 [Trivial] +5C0A0A 0A0A [Trivial] +5C0A0D 0A0D [Trivial] +5C0A1A 0A1A [Trivial] +5C0A22 0A22 [Trivial] +5C0A25 0A25 [Trivial] +5C0A27 NULL [SyntErr] +5C0A30 0A30 [Trivial] +5C0A3F 0A3F [Trivial] +5C0A40 0A40 [Trivial] +5C0A5A 0A5A [Trivial] +5C0A5C NULL [SyntErr] +5C0A5F 0A5F [Trivial] +5C0A61 0A61 [Trivial] +5C0A62 0A62 [Trivial] +5C0A6E 0A6E [Trivial] +5C0A72 0A72 [Trivial] +5C0A74 0A74 [Trivial] +5C0A7E 0A7E [Trivial] +5C0A7F 0A7F [Trivial] +5C0A80 0A80 [Trivial][ILSEQ] +5C0A81 0A81 [Trivial][ILSEQ] +5C0A9F 0A9F [Trivial][ILSEQ] +5C0AA0 0AA0 [Trivial][ILSEQ] +5C0AA1 0AA1 [Trivial][ILSEQ] +5C0AE0 0AE0 [Trivial][ILSEQ] +5C0AEF 0AEF [Trivial][ILSEQ] +5C0AF9 0AF9 [Trivial][ILSEQ] +5C0AFA 0AFA [Trivial][ILSEQ] +5C0AFC 0AFC [Trivial][ILSEQ] +5C0AFD 0AFD [Trivial][ILSEQ] +5C0AFE 0AFE [Trivial][ILSEQ] +5C0AFF 0AFF [Trivial][ILSEQ] +5C0D00 0D00 [Trivial] +5C0D08 0D08 [Trivial] +5C0D09 0D09 [Trivial] +5C0D0A 0D0A [Trivial] +5C0D0D 0D0D [Trivial] +5C0D1A 0D1A [Trivial] +5C0D22 0D22 [Trivial] +5C0D25 0D25 [Trivial] +5C0D27 NULL [SyntErr] +5C0D30 0D30 [Trivial] +5C0D3F 0D3F [Trivial] +5C0D40 0D40 [Trivial] +5C0D5A 0D5A [Trivial] +5C0D5C NULL [SyntErr] +5C0D5F 0D5F [Trivial] +5C0D61 0D61 [Trivial] +5C0D62 0D62 [Trivial] +5C0D6E 0D6E [Trivial] +5C0D72 0D72 [Trivial] +5C0D74 0D74 [Trivial] +5C0D7E 0D7E [Trivial] +5C0D7F 0D7F [Trivial] +5C0D80 0D80 [Trivial][ILSEQ] +5C0D81 0D81 
[Trivial][ILSEQ] +5C0D9F 0D9F [Trivial][ILSEQ] +5C0DA0 0DA0 [Trivial][ILSEQ] +5C0DA1 0DA1 [Trivial][ILSEQ] +5C0DE0 0DE0 [Trivial][ILSEQ] +5C0DEF 0DEF [Trivial][ILSEQ] +5C0DF9 0DF9 [Trivial][ILSEQ] +5C0DFA 0DFA [Trivial][ILSEQ] +5C0DFC 0DFC [Trivial][ILSEQ] +5C0DFD 0DFD [Trivial][ILSEQ] +5C0DFE 0DFE [Trivial][ILSEQ] +5C0DFF 0DFF [Trivial][ILSEQ] +5C1A00 1A00 [Trivial] +5C1A08 1A08 [Trivial] +5C1A09 1A09 [Trivial] +5C1A0A 1A0A [Trivial] +5C1A0D 1A0D [Trivial] +5C1A1A 1A1A [Trivial] +5C1A22 1A22 [Trivial] +5C1A25 1A25 [Trivial] +5C1A27 NULL [SyntErr] +5C1A30 1A30 [Trivial] +5C1A3F 1A3F [Trivial] +5C1A40 1A40 [Trivial] +5C1A5A 1A5A [Trivial] +5C1A5C NULL [SyntErr] +5C1A5F 1A5F [Trivial] +5C1A61 1A61 [Trivial] +5C1A62 1A62 [Trivial] +5C1A6E 1A6E [Trivial] +5C1A72 1A72 [Trivial] +5C1A74 1A74 [Trivial] +5C1A7E 1A7E [Trivial] +5C1A7F 1A7F [Trivial] +5C1A80 1A80 [Trivial][ILSEQ] +5C1A81 1A81 [Trivial][ILSEQ] +5C1A9F 1A9F [Trivial][ILSEQ] +5C1AA0 1AA0 [Trivial][ILSEQ] +5C1AA1 1AA1 [Trivial][ILSEQ] +5C1AE0 1AE0 [Trivial][ILSEQ] +5C1AEF 1AEF [Trivial][ILSEQ] +5C1AF9 1AF9 [Trivial][ILSEQ] +5C1AFA 1AFA [Trivial][ILSEQ] +5C1AFC 1AFC [Trivial][ILSEQ] +5C1AFD 1AFD [Trivial][ILSEQ] +5C1AFE 1AFE [Trivial][ILSEQ] +5C1AFF 1AFF [Trivial][ILSEQ] +5C2200 2200 [Trivial] +5C2208 2208 [Trivial] +5C2209 2209 [Trivial] +5C220A 220A [Trivial] +5C220D 220D [Trivial] +5C221A 221A [Trivial] +5C2222 2222 [Trivial] +5C2225 2225 [Trivial] +5C2227 NULL [SyntErr] +5C2230 2230 [Trivial] +5C223F 223F [Trivial] +5C2240 2240 [Trivial] +5C225A 225A [Trivial] +5C225C NULL [SyntErr] +5C225F 225F [Trivial] +5C2261 2261 [Trivial] +5C2262 2262 [Trivial] +5C226E 226E [Trivial] +5C2272 2272 [Trivial] +5C2274 2274 [Trivial] +5C227E 227E [Trivial] +5C227F 227F [Trivial] +5C2280 2280 [Trivial][ILSEQ] +5C2281 2281 [Trivial][ILSEQ] +5C229F 229F [Trivial][ILSEQ] +5C22A0 22A0 [Trivial][ILSEQ] +5C22A1 22A1 [Trivial][ILSEQ] +5C22E0 22E0 [Trivial][ILSEQ] +5C22EF 22EF [Trivial][ILSEQ] +5C22F9 22F9 [Trivial][ILSEQ] +5C22FA 22FA [Trivial][ILSEQ] +5C22FC 22FC [Trivial][ILSEQ] +5C22FD 22FD [Trivial][ILSEQ] +5C22FE 22FE [Trivial][ILSEQ] +5C22FF 22FF [Trivial][ILSEQ] +5C2500 5C2500 [Preserve][LIKE] +5C2508 5C2508 [Preserve][LIKE] +5C2509 5C2509 [Preserve][LIKE] +5C250A 5C250A [Preserve][LIKE] +5C250D 5C250D [Preserve][LIKE] +5C251A 5C251A [Preserve][LIKE] +5C2522 5C2522 [Preserve][LIKE] +5C2525 5C2525 [Preserve][LIKE] +5C2527 NULL [SyntErr] +5C2530 5C2530 [Preserve][LIKE] +5C253F 5C253F [Preserve][LIKE] +5C2540 5C2540 [Preserve][LIKE] +5C255A 5C255A [Preserve][LIKE] +5C255C NULL [SyntErr] +5C255F 5C255F [Preserve][LIKE] +5C2561 5C2561 [Preserve][LIKE] +5C2562 5C2562 [Preserve][LIKE] +5C256E 5C256E [Preserve][LIKE] +5C2572 5C2572 [Preserve][LIKE] +5C2574 5C2574 [Preserve][LIKE] +5C257E 5C257E [Preserve][LIKE] +5C257F 5C257F [Preserve][LIKE] +5C2580 5C2580 [Preserve][LIKE][ILSEQ] +5C2581 5C2581 [Preserve][LIKE][ILSEQ] +5C259F 5C259F [Preserve][LIKE][ILSEQ] +5C25A0 5C25A0 [Preserve][LIKE][ILSEQ] +5C25A1 5C25A1 [Preserve][LIKE][ILSEQ] +5C25E0 5C25E0 [Preserve][LIKE][ILSEQ] +5C25EF 5C25EF [Preserve][LIKE][ILSEQ] +5C25F9 5C25F9 [Preserve][LIKE][ILSEQ] +5C25FA 5C25FA [Preserve][LIKE][ILSEQ] +5C25FC 5C25FC [Preserve][LIKE][ILSEQ] +5C25FD 5C25FD [Preserve][LIKE][ILSEQ] +5C25FE 5C25FE [Preserve][LIKE][ILSEQ] +5C25FF 5C25FF [Preserve][LIKE][ILSEQ] +5C2700 2700 [Trivial] +5C2708 2708 [Trivial] +5C2709 2709 [Trivial] +5C270A 270A [Trivial] +5C270D 270D [Trivial] +5C271A 271A [Trivial] +5C2722 2722 [Trivial] +5C2725 2725 [Trivial] +5C2727 NULL [SyntErr] +5C2730 2730 
[Trivial] +5C273F 273F [Trivial] +5C2740 2740 [Trivial] +5C275A 275A [Trivial] +5C275C NULL [SyntErr] +5C275F 275F [Trivial] +5C2761 2761 [Trivial] +5C2762 2762 [Trivial] +5C276E 276E [Trivial] +5C2772 2772 [Trivial] +5C2774 2774 [Trivial] +5C277E 277E [Trivial] +5C277F 277F [Trivial] +5C2780 2780 [Trivial][ILSEQ] +5C2781 2781 [Trivial][ILSEQ] +5C279F 279F [Trivial][ILSEQ] +5C27A0 27A0 [Trivial][ILSEQ] +5C27A1 27A1 [Trivial][ILSEQ] +5C27E0 27E0 [Trivial][ILSEQ] +5C27EF 27EF [Trivial][ILSEQ] +5C27F9 27F9 [Trivial][ILSEQ] +5C27FA 27FA [Trivial][ILSEQ] +5C27FC 27FC [Trivial][ILSEQ] +5C27FD 27FD [Trivial][ILSEQ] +5C27FE 27FE [Trivial][ILSEQ] +5C27FF 27FF [Trivial][ILSEQ] +5C3000 0000 [Regular] +5C3008 0008 [Regular] +5C3009 0009 [Regular] +5C300A 000A [Regular] +5C300D 000D [Regular] +5C301A 001A [Regular] +5C3022 0022 [Regular] +5C3025 0025 [Regular] +5C3027 NULL [SyntErr] +5C3030 0030 [Regular] +5C303F 003F [Regular] +5C3040 0040 [Regular] +5C305A 005A [Regular] +5C305C NULL [SyntErr] +5C305F 005F [Regular] +5C3061 0061 [Regular] +5C3062 0062 [Regular] +5C306E 006E [Regular] +5C3072 0072 [Regular] +5C3074 0074 [Regular] +5C307E 007E [Regular] +5C307F 007F [Regular] +5C3080 0080 [Regular][ILSEQ] +5C3081 0081 [Regular][ILSEQ] +5C309F 009F [Regular][ILSEQ] +5C30A0 00A0 [Regular][ILSEQ] +5C30A1 00A1 [Regular][ILSEQ] +5C30E0 00E0 [Regular][ILSEQ] +5C30EF 00EF [Regular][ILSEQ] +5C30F9 00F9 [Regular][ILSEQ] +5C30FA 00FA [Regular][ILSEQ] +5C30FC 00FC [Regular][ILSEQ] +5C30FD 00FD [Regular][ILSEQ] +5C30FE 00FE [Regular][ILSEQ] +5C30FF 00FF [Regular][ILSEQ] +5C3F00 3F00 [Trivial] +5C3F08 3F08 [Trivial] +5C3F09 3F09 [Trivial] +5C3F0A 3F0A [Trivial] +5C3F0D 3F0D [Trivial] +5C3F1A 3F1A [Trivial] +5C3F22 3F22 [Trivial] +5C3F25 3F25 [Trivial] +5C3F27 NULL [SyntErr] +5C3F30 3F30 [Trivial] +5C3F3F 3F3F [Trivial] +5C3F40 3F40 [Trivial] +5C3F5A 3F5A [Trivial] +5C3F5C NULL [SyntErr] +5C3F5F 3F5F [Trivial] +5C3F61 3F61 [Trivial] +5C3F62 3F62 [Trivial] +5C3F6E 3F6E [Trivial] +5C3F72 3F72 [Trivial] +5C3F74 3F74 [Trivial] +5C3F7E 3F7E [Trivial] +5C3F7F 3F7F [Trivial] +5C3F80 3F80 [Trivial][ILSEQ] +5C3F81 3F81 [Trivial][ILSEQ] +5C3F9F 3F9F [Trivial][ILSEQ] +5C3FA0 3FA0 [Trivial][ILSEQ] +5C3FA1 3FA1 [Trivial][ILSEQ] +5C3FE0 3FE0 [Trivial][ILSEQ] +5C3FEF 3FEF [Trivial][ILSEQ] +5C3FF9 3FF9 [Trivial][ILSEQ] +5C3FFA 3FFA [Trivial][ILSEQ] +5C3FFC 3FFC [Trivial][ILSEQ] +5C3FFD 3FFD [Trivial][ILSEQ] +5C3FFE 3FFE [Trivial][ILSEQ] +5C3FFF 3FFF [Trivial][ILSEQ] +5C4000 4000 [Trivial] +5C4008 4008 [Trivial] +5C4009 4009 [Trivial] +5C400A 400A [Trivial] +5C400D 400D [Trivial] +5C401A 401A [Trivial] +5C4022 4022 [Trivial] +5C4025 4025 [Trivial] +5C4027 NULL [SyntErr] +5C4030 4030 [Trivial] +5C403F 403F [Trivial] +5C4040 4040 [Trivial] +5C405A 405A [Trivial] +5C405C NULL [SyntErr] +5C405F 405F [Trivial] +5C4061 4061 [Trivial] +5C4062 4062 [Trivial] +5C406E 406E [Trivial] +5C4072 4072 [Trivial] +5C4074 4074 [Trivial] +5C407E 407E [Trivial] +5C407F 407F [Trivial] +5C4080 4080 [Trivial][ILSEQ] +5C4081 4081 [Trivial][ILSEQ] +5C409F 409F [Trivial][ILSEQ] +5C40A0 40A0 [Trivial][ILSEQ] +5C40A1 40A1 [Trivial][ILSEQ] +5C40E0 40E0 [Trivial][ILSEQ] +5C40EF 40EF [Trivial][ILSEQ] +5C40F9 40F9 [Trivial][ILSEQ] +5C40FA 40FA [Trivial][ILSEQ] +5C40FC 40FC [Trivial][ILSEQ] +5C40FD 40FD [Trivial][ILSEQ] +5C40FE 40FE [Trivial][ILSEQ] +5C40FF 40FF [Trivial][ILSEQ] +5C5A00 1A00 [Regular] +5C5A08 1A08 [Regular] +5C5A09 1A09 [Regular] +5C5A0A 1A0A [Regular] +5C5A0D 1A0D [Regular] +5C5A1A 1A1A [Regular] +5C5A22 1A22 [Regular] +5C5A25 1A25 [Regular] 
+5C5A27 NULL [SyntErr] +5C5A30 1A30 [Regular] +5C5A3F 1A3F [Regular] +5C5A40 1A40 [Regular] +5C5A5A 1A5A [Regular] +5C5A5C NULL [SyntErr] +5C5A5F 1A5F [Regular] +5C5A61 1A61 [Regular] +5C5A62 1A62 [Regular] +5C5A6E 1A6E [Regular] +5C5A72 1A72 [Regular] +5C5A74 1A74 [Regular] +5C5A7E 1A7E [Regular] +5C5A7F 1A7F [Regular] +5C5A80 1A80 [Regular][ILSEQ] +5C5A81 1A81 [Regular][ILSEQ] +5C5A9F 1A9F [Regular][ILSEQ] +5C5AA0 1AA0 [Regular][ILSEQ] +5C5AA1 1AA1 [Regular][ILSEQ] +5C5AE0 1AE0 [Regular][ILSEQ] +5C5AEF 1AEF [Regular][ILSEQ] +5C5AF9 1AF9 [Regular][ILSEQ] +5C5AFA 1AFA [Regular][ILSEQ] +5C5AFC 1AFC [Regular][ILSEQ] +5C5AFD 1AFD [Regular][ILSEQ] +5C5AFE 1AFE [Regular][ILSEQ] +5C5AFF 1AFF [Regular][ILSEQ] +5C5C00 5C00 [Regular] +5C5C08 5C08 [Regular] +5C5C09 5C09 [Regular] +5C5C0A 5C0A [Regular] +5C5C0D 5C0D [Regular] +5C5C1A 5C1A [Regular] +5C5C22 5C22 [Regular] +5C5C25 5C25 [Regular] +5C5C27 NULL [SyntErr] +5C5C30 5C30 [Regular] +5C5C3F 5C3F [Regular] +5C5C40 5C40 [Regular] +5C5C5A 5C5A [Regular] +5C5C5C NULL [SyntErr] +5C5C5F 5C5F [Regular] +5C5C61 5C61 [Regular] +5C5C62 5C62 [Regular] +5C5C6E 5C6E [Regular] +5C5C72 5C72 [Regular] +5C5C74 5C74 [Regular] +5C5C7E 5C7E [Regular] +5C5C7F 5C7F [Regular] +5C5C80 5C80 [Regular][ILSEQ] +5C5C81 5C81 [Regular][ILSEQ] +5C5C9F 5C9F [Regular][ILSEQ] +5C5CA0 5CA0 [Regular][ILSEQ] +5C5CA1 5CA1 [Regular][ILSEQ] +5C5CE0 5CE0 [Regular][ILSEQ] +5C5CEF 5CEF [Regular][ILSEQ] +5C5CF9 5CF9 [Regular][ILSEQ] +5C5CFA 5CFA [Regular][ILSEQ] +5C5CFC 5CFC [Regular][ILSEQ] +5C5CFD 5CFD [Regular][ILSEQ] +5C5CFE 5CFE [Regular][ILSEQ] +5C5CFF 5CFF [Regular][ILSEQ] +5C5F00 5C5F00 [Preserve][LIKE] +5C5F08 5C5F08 [Preserve][LIKE] +5C5F09 5C5F09 [Preserve][LIKE] +5C5F0A 5C5F0A [Preserve][LIKE] +5C5F0D 5C5F0D [Preserve][LIKE] +5C5F1A 5C5F1A [Preserve][LIKE] +5C5F22 5C5F22 [Preserve][LIKE] +5C5F25 5C5F25 [Preserve][LIKE] +5C5F27 NULL [SyntErr] +5C5F30 5C5F30 [Preserve][LIKE] +5C5F3F 5C5F3F [Preserve][LIKE] +5C5F40 5C5F40 [Preserve][LIKE] +5C5F5A 5C5F5A [Preserve][LIKE] +5C5F5C NULL [SyntErr] +5C5F5F 5C5F5F [Preserve][LIKE] +5C5F61 5C5F61 [Preserve][LIKE] +5C5F62 5C5F62 [Preserve][LIKE] +5C5F6E 5C5F6E [Preserve][LIKE] +5C5F72 5C5F72 [Preserve][LIKE] +5C5F74 5C5F74 [Preserve][LIKE] +5C5F7E 5C5F7E [Preserve][LIKE] +5C5F7F 5C5F7F [Preserve][LIKE] +5C5F80 5C5F80 [Preserve][LIKE][ILSEQ] +5C5F81 5C5F81 [Preserve][LIKE][ILSEQ] +5C5F9F 5C5F9F [Preserve][LIKE][ILSEQ] +5C5FA0 5C5FA0 [Preserve][LIKE][ILSEQ] +5C5FA1 5C5FA1 [Preserve][LIKE][ILSEQ] +5C5FE0 5C5FE0 [Preserve][LIKE][ILSEQ] +5C5FEF 5C5FEF [Preserve][LIKE][ILSEQ] +5C5FF9 5C5FF9 [Preserve][LIKE][ILSEQ] +5C5FFA 5C5FFA [Preserve][LIKE][ILSEQ] +5C5FFC 5C5FFC [Preserve][LIKE][ILSEQ] +5C5FFD 5C5FFD [Preserve][LIKE][ILSEQ] +5C5FFE 5C5FFE [Preserve][LIKE][ILSEQ] +5C5FFF 5C5FFF [Preserve][LIKE][ILSEQ] +5C6100 6100 [Trivial] +5C6108 6108 [Trivial] +5C6109 6109 [Trivial] +5C610A 610A [Trivial] +5C610D 610D [Trivial] +5C611A 611A [Trivial] +5C6122 6122 [Trivial] +5C6125 6125 [Trivial] +5C6127 NULL [SyntErr] +5C6130 6130 [Trivial] +5C613F 613F [Trivial] +5C6140 6140 [Trivial] +5C615A 615A [Trivial] +5C615C NULL [SyntErr] +5C615F 615F [Trivial] +5C6161 6161 [Trivial] +5C6162 6162 [Trivial] +5C616E 616E [Trivial] +5C6172 6172 [Trivial] +5C6174 6174 [Trivial] +5C617E 617E [Trivial] +5C617F 617F [Trivial] +5C6180 6180 [Trivial][ILSEQ] +5C6181 6181 [Trivial][ILSEQ] +5C619F 619F [Trivial][ILSEQ] +5C61A0 61A0 [Trivial][ILSEQ] +5C61A1 61A1 [Trivial][ILSEQ] +5C61E0 61E0 [Trivial][ILSEQ] +5C61EF 61EF [Trivial][ILSEQ] +5C61F9 61F9 [Trivial][ILSEQ] +5C61FA 
61FA [Trivial][ILSEQ] +5C61FC 61FC [Trivial][ILSEQ] +5C61FD 61FD [Trivial][ILSEQ] +5C61FE 61FE [Trivial][ILSEQ] +5C61FF 61FF [Trivial][ILSEQ] +5C6200 0800 [Regular] +5C6208 0808 [Regular] +5C6209 0809 [Regular] +5C620A 080A [Regular] +5C620D 080D [Regular] +5C621A 081A [Regular] +5C6222 0822 [Regular] +5C6225 0825 [Regular] +5C6227 NULL [SyntErr] +5C6230 0830 [Regular] +5C623F 083F [Regular] +5C6240 0840 [Regular] +5C625A 085A [Regular] +5C625C NULL [SyntErr] +5C625F 085F [Regular] +5C6261 0861 [Regular] +5C6262 0862 [Regular] +5C626E 086E [Regular] +5C6272 0872 [Regular] +5C6274 0874 [Regular] +5C627E 087E [Regular] +5C627F 087F [Regular] +5C6280 0880 [Regular][ILSEQ] +5C6281 0881 [Regular][ILSEQ] +5C629F 089F [Regular][ILSEQ] +5C62A0 08A0 [Regular][ILSEQ] +5C62A1 08A1 [Regular][ILSEQ] +5C62E0 08E0 [Regular][ILSEQ] +5C62EF 08EF [Regular][ILSEQ] +5C62F9 08F9 [Regular][ILSEQ] +5C62FA 08FA [Regular][ILSEQ] +5C62FC 08FC [Regular][ILSEQ] +5C62FD 08FD [Regular][ILSEQ] +5C62FE 08FE [Regular][ILSEQ] +5C62FF 08FF [Regular][ILSEQ] +5C6E00 0A00 [Regular] +5C6E08 0A08 [Regular] +5C6E09 0A09 [Regular] +5C6E0A 0A0A [Regular] +5C6E0D 0A0D [Regular] +5C6E1A 0A1A [Regular] +5C6E22 0A22 [Regular] +5C6E25 0A25 [Regular] +5C6E27 NULL [SyntErr] +5C6E30 0A30 [Regular] +5C6E3F 0A3F [Regular] +5C6E40 0A40 [Regular] +5C6E5A 0A5A [Regular] +5C6E5C NULL [SyntErr] +5C6E5F 0A5F [Regular] +5C6E61 0A61 [Regular] +5C6E62 0A62 [Regular] +5C6E6E 0A6E [Regular] +5C6E72 0A72 [Regular] +5C6E74 0A74 [Regular] +5C6E7E 0A7E [Regular] +5C6E7F 0A7F [Regular] +5C6E80 0A80 [Regular][ILSEQ] +5C6E81 0A81 [Regular][ILSEQ] +5C6E9F 0A9F [Regular][ILSEQ] +5C6EA0 0AA0 [Regular][ILSEQ] +5C6EA1 0AA1 [Regular][ILSEQ] +5C6EE0 0AE0 [Regular][ILSEQ] +5C6EEF 0AEF [Regular][ILSEQ] +5C6EF9 0AF9 [Regular][ILSEQ] +5C6EFA 0AFA [Regular][ILSEQ] +5C6EFC 0AFC [Regular][ILSEQ] +5C6EFD 0AFD [Regular][ILSEQ] +5C6EFE 0AFE [Regular][ILSEQ] +5C6EFF 0AFF [Regular][ILSEQ] +5C7200 0D00 [Regular] +5C7208 0D08 [Regular] +5C7209 0D09 [Regular] +5C720A 0D0A [Regular] +5C720D 0D0D [Regular] +5C721A 0D1A [Regular] +5C7222 0D22 [Regular] +5C7225 0D25 [Regular] +5C7227 NULL [SyntErr] +5C7230 0D30 [Regular] +5C723F 0D3F [Regular] +5C7240 0D40 [Regular] +5C725A 0D5A [Regular] +5C725C NULL [SyntErr] +5C725F 0D5F [Regular] +5C7261 0D61 [Regular] +5C7262 0D62 [Regular] +5C726E 0D6E [Regular] +5C7272 0D72 [Regular] +5C7274 0D74 [Regular] +5C727E 0D7E [Regular] +5C727F 0D7F [Regular] +5C7280 0D80 [Regular][ILSEQ] +5C7281 0D81 [Regular][ILSEQ] +5C729F 0D9F [Regular][ILSEQ] +5C72A0 0DA0 [Regular][ILSEQ] +5C72A1 0DA1 [Regular][ILSEQ] +5C72E0 0DE0 [Regular][ILSEQ] +5C72EF 0DEF [Regular][ILSEQ] +5C72F9 0DF9 [Regular][ILSEQ] +5C72FA 0DFA [Regular][ILSEQ] +5C72FC 0DFC [Regular][ILSEQ] +5C72FD 0DFD [Regular][ILSEQ] +5C72FE 0DFE [Regular][ILSEQ] +5C72FF 0DFF [Regular][ILSEQ] +5C7400 0900 [Regular] +5C7408 0908 [Regular] +5C7409 0909 [Regular] +5C740A 090A [Regular] +5C740D 090D [Regular] +5C741A 091A [Regular] +5C7422 0922 [Regular] +5C7425 0925 [Regular] +5C7427 NULL [SyntErr] +5C7430 0930 [Regular] +5C743F 093F [Regular] +5C7440 0940 [Regular] +5C745A 095A [Regular] +5C745C NULL [SyntErr] +5C745F 095F [Regular] +5C7461 0961 [Regular] +5C7462 0962 [Regular] +5C746E 096E [Regular] +5C7472 0972 [Regular] +5C7474 0974 [Regular] +5C747E 097E [Regular] +5C747F 097F [Regular] +5C7480 0980 [Regular][ILSEQ] +5C7481 0981 [Regular][ILSEQ] +5C749F 099F [Regular][ILSEQ] +5C74A0 09A0 [Regular][ILSEQ] +5C74A1 09A1 [Regular][ILSEQ] +5C74E0 09E0 [Regular][ILSEQ] +5C74EF 09EF [Regular][ILSEQ] +5C74F9 
09F9 [Regular][ILSEQ] +5C74FA 09FA [Regular][ILSEQ] +5C74FC 09FC [Regular][ILSEQ] +5C74FD 09FD [Regular][ILSEQ] +5C74FE 09FE [Regular][ILSEQ] +5C74FF 09FF [Regular][ILSEQ] +5C7E00 7E00 [Trivial] +5C7E08 7E08 [Trivial] +5C7E09 7E09 [Trivial] +5C7E0A 7E0A [Trivial] +5C7E0D 7E0D [Trivial] +5C7E1A 7E1A [Trivial] +5C7E22 7E22 [Trivial] +5C7E25 7E25 [Trivial] +5C7E27 NULL [SyntErr] +5C7E30 7E30 [Trivial] +5C7E3F 7E3F [Trivial] +5C7E40 7E40 [Trivial] +5C7E5A 7E5A [Trivial] +5C7E5C NULL [SyntErr] +5C7E5F 7E5F [Trivial] +5C7E61 7E61 [Trivial] +5C7E62 7E62 [Trivial] +5C7E6E 7E6E [Trivial] +5C7E72 7E72 [Trivial] +5C7E74 7E74 [Trivial] +5C7E7E 7E7E [Trivial] +5C7E7F 7E7F [Trivial] +5C7E80 7E80 [Trivial][ILSEQ] +5C7E81 7E81 [Trivial][ILSEQ] +5C7E9F 7E9F [Trivial][ILSEQ] +5C7EA0 7EA0 [Trivial][ILSEQ] +5C7EA1 7EA1 [Trivial][ILSEQ] +5C7EE0 7EE0 [Trivial][ILSEQ] +5C7EEF 7EEF [Trivial][ILSEQ] +5C7EF9 7EF9 [Trivial][ILSEQ] +5C7EFA 7EFA [Trivial][ILSEQ] +5C7EFC 7EFC [Trivial][ILSEQ] +5C7EFD 7EFD [Trivial][ILSEQ] +5C7EFE 7EFE [Trivial][ILSEQ] +5C7EFF 7EFF [Trivial][ILSEQ] +5C7F00 7F00 [Trivial] +5C7F08 7F08 [Trivial] +5C7F09 7F09 [Trivial] +5C7F0A 7F0A [Trivial] +5C7F0D 7F0D [Trivial] +5C7F1A 7F1A [Trivial] +5C7F22 7F22 [Trivial] +5C7F25 7F25 [Trivial] +5C7F27 NULL [SyntErr] +5C7F30 7F30 [Trivial] +5C7F3F 7F3F [Trivial] +5C7F40 7F40 [Trivial] +5C7F5A 7F5A [Trivial] +5C7F5C NULL [SyntErr] +5C7F5F 7F5F [Trivial] +5C7F61 7F61 [Trivial] +5C7F62 7F62 [Trivial] +5C7F6E 7F6E [Trivial] +5C7F72 7F72 [Trivial] +5C7F74 7F74 [Trivial] +5C7F7E 7F7E [Trivial] +5C7F7F 7F7F [Trivial] +5C7F80 7F80 [Trivial][ILSEQ] +5C7F81 7F81 [Trivial][ILSEQ] +5C7F9F 7F9F [Trivial][ILSEQ] +5C7FA0 7FA0 [Trivial][ILSEQ] +5C7FA1 7FA1 [Trivial][ILSEQ] +5C7FE0 7FE0 [Trivial][ILSEQ] +5C7FEF 7FEF [Trivial][ILSEQ] +5C7FF9 7FF9 [Trivial][ILSEQ] +5C7FFA 7FFA [Trivial][ILSEQ] +5C7FFC 7FFC [Trivial][ILSEQ] +5C7FFD 7FFD [Trivial][ILSEQ] +5C7FFE 7FFE [Trivial][ILSEQ] +5C7FFF 7FFF [Trivial][ILSEQ] +5C8000 8000 [Trivial][ILSEQ] +5C8008 8008 [Trivial][ILSEQ] +5C8009 8009 [Trivial][ILSEQ] +5C800A 800A [Trivial][ILSEQ] +5C800D 800D [Trivial][ILSEQ] +5C801A 801A [Trivial][ILSEQ] +5C8022 8022 [Trivial][ILSEQ] +5C8025 8025 [Trivial][ILSEQ] +5C8027 NULL [SyntErr] +5C8030 8030 [Trivial][ILSEQ] +5C803F 803F [Trivial][ILSEQ] +5C8040 8040 [Trivial][ILSEQ] +5C805A 805A [Trivial][ILSEQ] +5C805C NULL [SyntErr] +5C805F 805F [Trivial][ILSEQ] +5C8061 8061 [Trivial][ILSEQ] +5C8062 8062 [Trivial][ILSEQ] +5C806E 806E [Trivial][ILSEQ] +5C8072 8072 [Trivial][ILSEQ] +5C8074 8074 [Trivial][ILSEQ] +5C807E 807E [Trivial][ILSEQ] +5C807F 807F [Trivial][ILSEQ] +5C8080 8080 [Trivial][ILSEQ] +5C8081 8081 [Trivial][ILSEQ] +5C809F 809F [Trivial][ILSEQ] +5C80A0 80A0 [Trivial][ILSEQ] +5C80A1 80A1 [Trivial][ILSEQ] +5C80E0 80E0 [Trivial][ILSEQ] +5C80EF 80EF [Trivial][ILSEQ] +5C80F9 80F9 [Trivial][ILSEQ] +5C80FA 80FA [Trivial][ILSEQ] +5C80FC 80FC [Trivial][ILSEQ] +5C80FD 80FD [Trivial][ILSEQ] +5C80FE 80FE [Trivial][ILSEQ] +5C80FF 80FF [Trivial][ILSEQ] +5C8100 8100 [Trivial][ILSEQ] +5C8108 8108 [Trivial][ILSEQ] +5C8109 8109 [Trivial][ILSEQ] +5C810A 810A [Trivial][ILSEQ] +5C810D 810D [Trivial][ILSEQ] +5C811A 811A [Trivial][ILSEQ] +5C8122 8122 [Trivial][ILSEQ] +5C8125 8125 [Trivial][ILSEQ] +5C8127 NULL [SyntErr] +5C8130 8130 [Trivial][ILSEQ] +5C813F 813F [Trivial][ILSEQ] +5C8140 8140 [Trivial] +5C815A 815A [Trivial] +5C815C NULL [SyntErr] +5C815F 815F [Trivial] +5C8161 8161 [Trivial] +5C8162 8162 [Trivial] +5C816E 816E [Trivial] +5C8172 8172 [Trivial] +5C8174 8174 [Trivial] +5C817E 817E [Trivial] 
+5C817F 817F [Trivial][ILSEQ] +5C8180 8180 [Trivial] +5C8181 8181 [Trivial] +5C819F 819F [Trivial] +5C81A0 81A0 [Trivial] +5C81A1 81A1 [Trivial] +5C81E0 81E0 [Trivial] +5C81EF 81EF [Trivial] +5C81F9 81F9 [Trivial] +5C81FA 81FA [Trivial] +5C81FC 81FC [Trivial] +5C81FD 81FD [Trivial] +5C81FE 81FE [Trivial] +5C81FF 81FF [Trivial][ILSEQ] +5C9F00 9F00 [Trivial][ILSEQ] +5C9F08 9F08 [Trivial][ILSEQ] +5C9F09 9F09 [Trivial][ILSEQ] +5C9F0A 9F0A [Trivial][ILSEQ] +5C9F0D 9F0D [Trivial][ILSEQ] +5C9F1A 9F1A [Trivial][ILSEQ] +5C9F22 9F22 [Trivial][ILSEQ] +5C9F25 9F25 [Trivial][ILSEQ] +5C9F27 NULL [SyntErr] +5C9F30 9F30 [Trivial][ILSEQ] +5C9F3F 9F3F [Trivial][ILSEQ] +5C9F40 9F40 [Trivial] +5C9F5A 9F5A [Trivial] +5C9F5C NULL [SyntErr] +5C9F5F 9F5F [Trivial] +5C9F61 9F61 [Trivial] +5C9F62 9F62 [Trivial] +5C9F6E 9F6E [Trivial] +5C9F72 9F72 [Trivial] +5C9F74 9F74 [Trivial] +5C9F7E 9F7E [Trivial] +5C9F7F 9F7F [Trivial][ILSEQ] +5C9F80 9F80 [Trivial] +5C9F81 9F81 [Trivial] +5C9F9F 9F9F [Trivial] +5C9FA0 9FA0 [Trivial] +5C9FA1 9FA1 [Trivial] +5C9FE0 9FE0 [Trivial] +5C9FEF 9FEF [Trivial] +5C9FF9 9FF9 [Trivial] +5C9FFA 9FFA [Trivial] +5C9FFC 9FFC [Trivial] +5C9FFD 9FFD [Trivial] +5C9FFE 9FFE [Trivial] +5C9FFF 9FFF [Trivial][ILSEQ] +5CA000 A000 [Trivial][ILSEQ] +5CA008 A008 [Trivial][ILSEQ] +5CA009 A009 [Trivial][ILSEQ] +5CA00A A00A [Trivial][ILSEQ] +5CA00D A00D [Trivial][ILSEQ] +5CA01A A01A [Trivial][ILSEQ] +5CA022 A022 [Trivial][ILSEQ] +5CA025 A025 [Trivial][ILSEQ] +5CA027 NULL [SyntErr] +5CA030 A030 [Trivial][ILSEQ] +5CA03F A03F [Trivial][ILSEQ] +5CA040 A040 [Trivial] +5CA05A A05A [Trivial] +5CA05C NULL [SyntErr] +5CA05F A05F [Trivial] +5CA061 A061 [Trivial] +5CA062 A062 [Trivial] +5CA06E A06E [Trivial] +5CA072 A072 [Trivial] +5CA074 A074 [Trivial] +5CA07E A07E [Trivial] +5CA07F A07F [Trivial][ILSEQ] +5CA080 A080 [Trivial] +5CA081 A081 [Trivial] +5CA09F A09F [Trivial] +5CA0A0 A0A0 [Trivial] +5CA0A1 A0A1 [Trivial] +5CA0E0 A0E0 [Trivial] +5CA0EF A0EF [Trivial] +5CA0F9 A0F9 [Trivial] +5CA0FA A0FA [Trivial] +5CA0FC A0FC [Trivial] +5CA0FD A0FD [Trivial] +5CA0FE A0FE [Trivial] +5CA0FF A0FF [Trivial][ILSEQ] +5CA100 A100 [Trivial][ILSEQ] +5CA108 A108 [Trivial][ILSEQ] +5CA109 A109 [Trivial][ILSEQ] +5CA10A A10A [Trivial][ILSEQ] +5CA10D A10D [Trivial][ILSEQ] +5CA11A A11A [Trivial][ILSEQ] +5CA122 A122 [Trivial][ILSEQ] +5CA125 A125 [Trivial][ILSEQ] +5CA127 NULL [SyntErr] +5CA130 A130 [Trivial][ILSEQ] +5CA13F A13F [Trivial][ILSEQ] +5CA140 A140 [Trivial] +5CA15A A15A [Trivial] +5CA15C NULL [SyntErr] +5CA15F A15F [Trivial] +5CA161 A161 [Trivial] +5CA162 A162 [Trivial] +5CA16E A16E [Trivial] +5CA172 A172 [Trivial] +5CA174 A174 [Trivial] +5CA17E A17E [Trivial] +5CA17F A17F [Trivial][ILSEQ] +5CA180 A180 [Trivial] +5CA181 A181 [Trivial] +5CA19F A19F [Trivial] +5CA1A0 A1A0 [Trivial] +5CA1A1 A1A1 [Trivial] +5CA1E0 A1E0 [Trivial] +5CA1EF A1EF [Trivial] +5CA1F9 A1F9 [Trivial] +5CA1FA A1FA [Trivial] +5CA1FC A1FC [Trivial] +5CA1FD A1FD [Trivial] +5CA1FE A1FE [Trivial] +5CA1FF A1FF [Trivial][ILSEQ] +5CE000 E000 [Trivial][ILSEQ] +5CE008 E008 [Trivial][ILSEQ] +5CE009 E009 [Trivial][ILSEQ] +5CE00A E00A [Trivial][ILSEQ] +5CE00D E00D [Trivial][ILSEQ] +5CE01A E01A [Trivial][ILSEQ] +5CE022 E022 [Trivial][ILSEQ] +5CE025 E025 [Trivial][ILSEQ] +5CE027 NULL [SyntErr] +5CE030 E030 [Trivial][ILSEQ] +5CE03F E03F [Trivial][ILSEQ] +5CE040 E040 [Trivial] +5CE05A E05A [Trivial] +5CE05C NULL [SyntErr] +5CE05F E05F [Trivial] +5CE061 E061 [Trivial] +5CE062 E062 [Trivial] +5CE06E E06E [Trivial] +5CE072 E072 [Trivial] +5CE074 E074 [Trivial] +5CE07E E07E 
[Trivial] +5CE07F E07F [Trivial][ILSEQ] +5CE080 E080 [Trivial] +5CE081 E081 [Trivial] +5CE09F E09F [Trivial] +5CE0A0 E0A0 [Trivial] +5CE0A1 E0A1 [Trivial] +5CE0E0 E0E0 [Trivial] +5CE0EF E0EF [Trivial] +5CE0F9 E0F9 [Trivial] +5CE0FA E0FA [Trivial] +5CE0FC E0FC [Trivial] +5CE0FD E0FD [Trivial] +5CE0FE E0FE [Trivial] +5CE0FF E0FF [Trivial][ILSEQ] +5CEF00 EF00 [Trivial][ILSEQ] +5CEF08 EF08 [Trivial][ILSEQ] +5CEF09 EF09 [Trivial][ILSEQ] +5CEF0A EF0A [Trivial][ILSEQ] +5CEF0D EF0D [Trivial][ILSEQ] +5CEF1A EF1A [Trivial][ILSEQ] +5CEF22 EF22 [Trivial][ILSEQ] +5CEF25 EF25 [Trivial][ILSEQ] +5CEF27 NULL [SyntErr] +5CEF30 EF30 [Trivial][ILSEQ] +5CEF3F EF3F [Trivial][ILSEQ] +5CEF40 EF40 [Trivial] +5CEF5A EF5A [Trivial] +5CEF5C NULL [SyntErr] +5CEF5F EF5F [Trivial] +5CEF61 EF61 [Trivial] +5CEF62 EF62 [Trivial] +5CEF6E EF6E [Trivial] +5CEF72 EF72 [Trivial] +5CEF74 EF74 [Trivial] +5CEF7E EF7E [Trivial] +5CEF7F EF7F [Trivial][ILSEQ] +5CEF80 EF80 [Trivial] +5CEF81 EF81 [Trivial] +5CEF9F EF9F [Trivial] +5CEFA0 EFA0 [Trivial] +5CEFA1 EFA1 [Trivial] +5CEFE0 EFE0 [Trivial] +5CEFEF EFEF [Trivial] +5CEFF9 EFF9 [Trivial] +5CEFFA EFFA [Trivial] +5CEFFC EFFC [Trivial] +5CEFFD EFFD [Trivial] +5CEFFE EFFE [Trivial] +5CEFFF EFFF [Trivial][ILSEQ] +5CF900 F900 [Trivial][ILSEQ] +5CF908 F908 [Trivial][ILSEQ] +5CF909 F909 [Trivial][ILSEQ] +5CF90A F90A [Trivial][ILSEQ] +5CF90D F90D [Trivial][ILSEQ] +5CF91A F91A [Trivial][ILSEQ] +5CF922 F922 [Trivial][ILSEQ] +5CF925 F925 [Trivial][ILSEQ] +5CF927 NULL [SyntErr] +5CF930 F930 [Trivial][ILSEQ] +5CF93F F93F [Trivial][ILSEQ] +5CF940 F940 [Trivial] +5CF95A F95A [Trivial] +5CF95C NULL [SyntErr] +5CF95F F95F [Trivial] +5CF961 F961 [Trivial] +5CF962 F962 [Trivial] +5CF96E F96E [Trivial] +5CF972 F972 [Trivial] +5CF974 F974 [Trivial] +5CF97E F97E [Trivial] +5CF97F F97F [Trivial][ILSEQ] +5CF980 F980 [Trivial] +5CF981 F981 [Trivial] +5CF99F F99F [Trivial] +5CF9A0 F9A0 [Trivial] +5CF9A1 F9A1 [Trivial] +5CF9E0 F9E0 [Trivial] +5CF9EF F9EF [Trivial] +5CF9F9 F9F9 [Trivial] +5CF9FA F9FA [Trivial] +5CF9FC F9FC [Trivial] +5CF9FD F9FD [Trivial] +5CF9FE F9FE [Trivial] +5CF9FF F9FF [Trivial][ILSEQ] +5CFA00 FA00 [Trivial][ILSEQ] +5CFA08 FA08 [Trivial][ILSEQ] +5CFA09 FA09 [Trivial][ILSEQ] +5CFA0A FA0A [Trivial][ILSEQ] +5CFA0D FA0D [Trivial][ILSEQ] +5CFA1A FA1A [Trivial][ILSEQ] +5CFA22 FA22 [Trivial][ILSEQ] +5CFA25 FA25 [Trivial][ILSEQ] +5CFA27 NULL [SyntErr] +5CFA30 FA30 [Trivial][ILSEQ] +5CFA3F FA3F [Trivial][ILSEQ] +5CFA40 FA40 [Trivial] +5CFA5A FA5A [Trivial] +5CFA5C NULL [SyntErr] +5CFA5F FA5F [Trivial] +5CFA61 FA61 [Trivial] +5CFA62 FA62 [Trivial] +5CFA6E FA6E [Trivial] +5CFA72 FA72 [Trivial] +5CFA74 FA74 [Trivial] +5CFA7E FA7E [Trivial] +5CFA7F FA7F [Trivial][ILSEQ] +5CFA80 FA80 [Trivial] +5CFA81 FA81 [Trivial] +5CFA9F FA9F [Trivial] +5CFAA0 FAA0 [Trivial] +5CFAA1 FAA1 [Trivial] +5CFAE0 FAE0 [Trivial] +5CFAEF FAEF [Trivial] +5CFAF9 FAF9 [Trivial] +5CFAFA FAFA [Trivial] +5CFAFC FAFC [Trivial] +5CFAFD FAFD [Trivial] +5CFAFE FAFE [Trivial] +5CFAFF FAFF [Trivial][ILSEQ] +5CFC00 FC00 [Trivial][ILSEQ] +5CFC08 FC08 [Trivial][ILSEQ] +5CFC09 FC09 [Trivial][ILSEQ] +5CFC0A FC0A [Trivial][ILSEQ] +5CFC0D FC0D [Trivial][ILSEQ] +5CFC1A FC1A [Trivial][ILSEQ] +5CFC22 FC22 [Trivial][ILSEQ] +5CFC25 FC25 [Trivial][ILSEQ] +5CFC27 NULL [SyntErr] +5CFC30 FC30 [Trivial][ILSEQ] +5CFC3F FC3F [Trivial][ILSEQ] +5CFC40 FC40 [Trivial] +5CFC5A FC5A [Trivial] +5CFC5C NULL [SyntErr] +5CFC5F FC5F [Trivial] +5CFC61 FC61 [Trivial] +5CFC62 FC62 [Trivial] +5CFC6E FC6E [Trivial] +5CFC72 FC72 [Trivial] +5CFC74 FC74 [Trivial] +5CFC7E 
FC7E [Trivial] +5CFC7F FC7F [Trivial][ILSEQ] +5CFC80 FC80 [Trivial] +5CFC81 FC81 [Trivial] +5CFC9F FC9F [Trivial] +5CFCA0 FCA0 [Trivial] +5CFCA1 FCA1 [Trivial] +5CFCE0 FCE0 [Trivial] +5CFCEF FCEF [Trivial] +5CFCF9 FCF9 [Trivial] +5CFCFA FCFA [Trivial] +5CFCFC FCFC [Trivial] +5CFCFD FCFD [Trivial] +5CFCFE FCFE [Trivial] +5CFCFF FCFF [Trivial][ILSEQ] +5CFD00 FD00 [Trivial][ILSEQ] +5CFD08 FD08 [Trivial][ILSEQ] +5CFD09 FD09 [Trivial][ILSEQ] +5CFD0A FD0A [Trivial][ILSEQ] +5CFD0D FD0D [Trivial][ILSEQ] +5CFD1A FD1A [Trivial][ILSEQ] +5CFD22 FD22 [Trivial][ILSEQ] +5CFD25 FD25 [Trivial][ILSEQ] +5CFD27 NULL [SyntErr] +5CFD30 FD30 [Trivial][ILSEQ] +5CFD3F FD3F [Trivial][ILSEQ] +5CFD40 FD40 [Trivial] +5CFD5A FD5A [Trivial] +5CFD5C NULL [SyntErr] +5CFD5F FD5F [Trivial] +5CFD61 FD61 [Trivial] +5CFD62 FD62 [Trivial] +5CFD6E FD6E [Trivial] +5CFD72 FD72 [Trivial] +5CFD74 FD74 [Trivial] +5CFD7E FD7E [Trivial] +5CFD7F FD7F [Trivial][ILSEQ] +5CFD80 FD80 [Trivial] +5CFD81 FD81 [Trivial] +5CFD9F FD9F [Trivial] +5CFDA0 FDA0 [Trivial] +5CFDA1 FDA1 [Trivial] +5CFDE0 FDE0 [Trivial] +5CFDEF FDEF [Trivial] +5CFDF9 FDF9 [Trivial] +5CFDFA FDFA [Trivial] +5CFDFC FDFC [Trivial] +5CFDFD FDFD [Trivial] +5CFDFE FDFE [Trivial] +5CFDFF FDFF [Trivial][ILSEQ] +5CFE00 FE00 [Trivial][ILSEQ] +5CFE08 FE08 [Trivial][ILSEQ] +5CFE09 FE09 [Trivial][ILSEQ] +5CFE0A FE0A [Trivial][ILSEQ] +5CFE0D FE0D [Trivial][ILSEQ] +5CFE1A FE1A [Trivial][ILSEQ] +5CFE22 FE22 [Trivial][ILSEQ] +5CFE25 FE25 [Trivial][ILSEQ] +5CFE27 NULL [SyntErr] +5CFE30 FE30 [Trivial][ILSEQ] +5CFE3F FE3F [Trivial][ILSEQ] +5CFE40 FE40 [Trivial] +5CFE5A FE5A [Trivial] +5CFE5C NULL [SyntErr] +5CFE5F FE5F [Trivial] +5CFE61 FE61 [Trivial] +5CFE62 FE62 [Trivial] +5CFE6E FE6E [Trivial] +5CFE72 FE72 [Trivial] +5CFE74 FE74 [Trivial] +5CFE7E FE7E [Trivial] +5CFE7F FE7F [Trivial][ILSEQ] +5CFE80 FE80 [Trivial] +5CFE81 FE81 [Trivial] +5CFE9F FE9F [Trivial] +5CFEA0 FEA0 [Trivial] +5CFEA1 FEA1 [Trivial] +5CFEE0 FEE0 [Trivial] +5CFEEF FEEF [Trivial] +5CFEF9 FEF9 [Trivial] +5CFEFA FEFA [Trivial] +5CFEFC FEFC [Trivial] +5CFEFD FEFD [Trivial] +5CFEFE FEFE [Trivial] +5CFEFF FEFF [Trivial][ILSEQ] +5CFF00 FF00 [Trivial][ILSEQ] +5CFF08 FF08 [Trivial][ILSEQ] +5CFF09 FF09 [Trivial][ILSEQ] +5CFF0A FF0A [Trivial][ILSEQ] +5CFF0D FF0D [Trivial][ILSEQ] +5CFF1A FF1A [Trivial][ILSEQ] +5CFF22 FF22 [Trivial][ILSEQ] +5CFF25 FF25 [Trivial][ILSEQ] +5CFF27 NULL [SyntErr] +5CFF30 FF30 [Trivial][ILSEQ] +5CFF3F FF3F [Trivial][ILSEQ] +5CFF40 FF40 [Trivial][ILSEQ] +5CFF5A FF5A [Trivial][ILSEQ] +5CFF5C NULL [SyntErr] +5CFF5F FF5F [Trivial][ILSEQ] +5CFF61 FF61 [Trivial][ILSEQ] +5CFF62 FF62 [Trivial][ILSEQ] +5CFF6E FF6E [Trivial][ILSEQ] +5CFF72 FF72 [Trivial][ILSEQ] +5CFF74 FF74 [Trivial][ILSEQ] +5CFF7E FF7E [Trivial][ILSEQ] +5CFF7F FF7F [Trivial][ILSEQ] +5CFF80 FF80 [Trivial][ILSEQ] +5CFF81 FF81 [Trivial][ILSEQ] +5CFF9F FF9F [Trivial][ILSEQ] +5CFFA0 FFA0 [Trivial][ILSEQ] +5CFFA1 FFA1 [Trivial][ILSEQ] +5CFFE0 FFE0 [Trivial][ILSEQ] +5CFFEF FFEF [Trivial][ILSEQ] +5CFFF9 FFF9 [Trivial][ILSEQ] +5CFFFA FFFA [Trivial][ILSEQ] +5CFFFC FFFC [Trivial][ILSEQ] +5CFFFD FFFD [Trivial][ILSEQ] +5CFFFE FFFE [Trivial][ILSEQ] +5CFFFF FFFF [Trivial][ILSEQ] +5C005C00 0000 [Trivial] +5C005C08 0008 [Trivial] +5C005C09 0009 [Trivial] +5C005C0A 000A [Trivial] +5C005C0D 000D [Trivial] +5C005C1A 001A [Trivial] +5C005C22 0022 [Trivial] +5C005C25 005C25 [Regular] +5C005C27 0027 [Trivial] +5C005C30 0000 [Regular] +5C005C3F 003F [Trivial] +5C005C40 0040 [Trivial] +5C005C5A 001A [Regular] +5C005C5C 005C [Regular] +5C005C5F 005C5F [Regular] +5C005C61 
0061 [Trivial] +5C005C62 0008 [Regular] +5C005C6E 000A [Regular] +5C005C72 000D [Regular] +5C005C74 0009 [Regular] +5C005C7E 007E [Trivial] +5C005C7F 007F [Trivial] +5C005C80 0080 [Trivial][ILSEQ] +5C005C81 0081 [Trivial][ILSEQ] +5C005C9F 009F [Trivial][ILSEQ] +5C005CA0 00A0 [Trivial][ILSEQ] +5C005CA1 00A1 [Trivial][ILSEQ] +5C005CE0 00E0 [Trivial][ILSEQ] +5C005CEF 00EF [Trivial][ILSEQ] +5C005CF9 00F9 [Trivial][ILSEQ] +5C005CFA 00FA [Trivial][ILSEQ] +5C005CFC 00FC [Trivial][ILSEQ] +5C005CFD 00FD [Trivial][ILSEQ] +5C005CFE 00FE [Trivial][ILSEQ] +5C005CFF 00FF [Trivial][ILSEQ] +5C085C00 0800 [Trivial] +5C085C08 0808 [Trivial] +5C085C09 0809 [Trivial] +5C085C0A 080A [Trivial] +5C085C0D 080D [Trivial] +5C085C1A 081A [Trivial] +5C085C22 0822 [Trivial] +5C085C25 085C25 [Regular] +5C085C27 0827 [Trivial] +5C085C30 0800 [Regular] +5C085C3F 083F [Trivial] +5C085C40 0840 [Trivial] +5C085C5A 081A [Regular] +5C085C5C 085C [Regular] +5C085C5F 085C5F [Regular] +5C085C61 0861 [Trivial] +5C085C62 0808 [Regular] +5C085C6E 080A [Regular] +5C085C72 080D [Regular] +5C085C74 0809 [Regular] +5C085C7E 087E [Trivial] +5C085C7F 087F [Trivial] +5C085C80 0880 [Trivial][ILSEQ] +5C085C81 0881 [Trivial][ILSEQ] +5C085C9F 089F [Trivial][ILSEQ] +5C085CA0 08A0 [Trivial][ILSEQ] +5C085CA1 08A1 [Trivial][ILSEQ] +5C085CE0 08E0 [Trivial][ILSEQ] +5C085CEF 08EF [Trivial][ILSEQ] +5C085CF9 08F9 [Trivial][ILSEQ] +5C085CFA 08FA [Trivial][ILSEQ] +5C085CFC 08FC [Trivial][ILSEQ] +5C085CFD 08FD [Trivial][ILSEQ] +5C085CFE 08FE [Trivial][ILSEQ] +5C085CFF 08FF [Trivial][ILSEQ] +5C095C00 0900 [Trivial] +5C095C08 0908 [Trivial] +5C095C09 0909 [Trivial] +5C095C0A 090A [Trivial] +5C095C0D 090D [Trivial] +5C095C1A 091A [Trivial] +5C095C22 0922 [Trivial] +5C095C25 095C25 [Regular] +5C095C27 0927 [Trivial] +5C095C30 0900 [Regular] +5C095C3F 093F [Trivial] +5C095C40 0940 [Trivial] +5C095C5A 091A [Regular] +5C095C5C 095C [Regular] +5C095C5F 095C5F [Regular] +5C095C61 0961 [Trivial] +5C095C62 0908 [Regular] +5C095C6E 090A [Regular] +5C095C72 090D [Regular] +5C095C74 0909 [Regular] +5C095C7E 097E [Trivial] +5C095C7F 097F [Trivial] +5C095C80 0980 [Trivial][ILSEQ] +5C095C81 0981 [Trivial][ILSEQ] +5C095C9F 099F [Trivial][ILSEQ] +5C095CA0 09A0 [Trivial][ILSEQ] +5C095CA1 09A1 [Trivial][ILSEQ] +5C095CE0 09E0 [Trivial][ILSEQ] +5C095CEF 09EF [Trivial][ILSEQ] +5C095CF9 09F9 [Trivial][ILSEQ] +5C095CFA 09FA [Trivial][ILSEQ] +5C095CFC 09FC [Trivial][ILSEQ] +5C095CFD 09FD [Trivial][ILSEQ] +5C095CFE 09FE [Trivial][ILSEQ] +5C095CFF 09FF [Trivial][ILSEQ] +5C0A5C00 0A00 [Trivial] +5C0A5C08 0A08 [Trivial] +5C0A5C09 0A09 [Trivial] +5C0A5C0A 0A0A [Trivial] +5C0A5C0D 0A0D [Trivial] +5C0A5C1A 0A1A [Trivial] +5C0A5C22 0A22 [Trivial] +5C0A5C25 0A5C25 [Regular] +5C0A5C27 0A27 [Trivial] +5C0A5C30 0A00 [Regular] +5C0A5C3F 0A3F [Trivial] +5C0A5C40 0A40 [Trivial] +5C0A5C5A 0A1A [Regular] +5C0A5C5C 0A5C [Regular] +5C0A5C5F 0A5C5F [Regular] +5C0A5C61 0A61 [Trivial] +5C0A5C62 0A08 [Regular] +5C0A5C6E 0A0A [Regular] +5C0A5C72 0A0D [Regular] +5C0A5C74 0A09 [Regular] +5C0A5C7E 0A7E [Trivial] +5C0A5C7F 0A7F [Trivial] +5C0A5C80 0A80 [Trivial][ILSEQ] +5C0A5C81 0A81 [Trivial][ILSEQ] +5C0A5C9F 0A9F [Trivial][ILSEQ] +5C0A5CA0 0AA0 [Trivial][ILSEQ] +5C0A5CA1 0AA1 [Trivial][ILSEQ] +5C0A5CE0 0AE0 [Trivial][ILSEQ] +5C0A5CEF 0AEF [Trivial][ILSEQ] +5C0A5CF9 0AF9 [Trivial][ILSEQ] +5C0A5CFA 0AFA [Trivial][ILSEQ] +5C0A5CFC 0AFC [Trivial][ILSEQ] +5C0A5CFD 0AFD [Trivial][ILSEQ] +5C0A5CFE 0AFE [Trivial][ILSEQ] +5C0A5CFF 0AFF [Trivial][ILSEQ] +5C0D5C00 0D00 [Trivial] +5C0D5C08 0D08 [Trivial] +5C0D5C09 
0D09 [Trivial] +5C0D5C0A 0D0A [Trivial] +5C0D5C0D 0D0D [Trivial] +5C0D5C1A 0D1A [Trivial] +5C0D5C22 0D22 [Trivial] +5C0D5C25 0D5C25 [Regular] +5C0D5C27 0D27 [Trivial] +5C0D5C30 0D00 [Regular] +5C0D5C3F 0D3F [Trivial] +5C0D5C40 0D40 [Trivial] +5C0D5C5A 0D1A [Regular] +5C0D5C5C 0D5C [Regular] +5C0D5C5F 0D5C5F [Regular] +5C0D5C61 0D61 [Trivial] +5C0D5C62 0D08 [Regular] +5C0D5C6E 0D0A [Regular] +5C0D5C72 0D0D [Regular] +5C0D5C74 0D09 [Regular] +5C0D5C7E 0D7E [Trivial] +5C0D5C7F 0D7F [Trivial] +5C0D5C80 0D80 [Trivial][ILSEQ] +5C0D5C81 0D81 [Trivial][ILSEQ] +5C0D5C9F 0D9F [Trivial][ILSEQ] +5C0D5CA0 0DA0 [Trivial][ILSEQ] +5C0D5CA1 0DA1 [Trivial][ILSEQ] +5C0D5CE0 0DE0 [Trivial][ILSEQ] +5C0D5CEF 0DEF [Trivial][ILSEQ] +5C0D5CF9 0DF9 [Trivial][ILSEQ] +5C0D5CFA 0DFA [Trivial][ILSEQ] +5C0D5CFC 0DFC [Trivial][ILSEQ] +5C0D5CFD 0DFD [Trivial][ILSEQ] +5C0D5CFE 0DFE [Trivial][ILSEQ] +5C0D5CFF 0DFF [Trivial][ILSEQ] +5C1A5C00 1A00 [Trivial] +5C1A5C08 1A08 [Trivial] +5C1A5C09 1A09 [Trivial] +5C1A5C0A 1A0A [Trivial] +5C1A5C0D 1A0D [Trivial] +5C1A5C1A 1A1A [Trivial] +5C1A5C22 1A22 [Trivial] +5C1A5C25 1A5C25 [Regular] +5C1A5C27 1A27 [Trivial] +5C1A5C30 1A00 [Regular] +5C1A5C3F 1A3F [Trivial] +5C1A5C40 1A40 [Trivial] +5C1A5C5A 1A1A [Regular] +5C1A5C5C 1A5C [Regular] +5C1A5C5F 1A5C5F [Regular] +5C1A5C61 1A61 [Trivial] +5C1A5C62 1A08 [Regular] +5C1A5C6E 1A0A [Regular] +5C1A5C72 1A0D [Regular] +5C1A5C74 1A09 [Regular] +5C1A5C7E 1A7E [Trivial] +5C1A5C7F 1A7F [Trivial] +5C1A5C80 1A80 [Trivial][ILSEQ] +5C1A5C81 1A81 [Trivial][ILSEQ] +5C1A5C9F 1A9F [Trivial][ILSEQ] +5C1A5CA0 1AA0 [Trivial][ILSEQ] +5C1A5CA1 1AA1 [Trivial][ILSEQ] +5C1A5CE0 1AE0 [Trivial][ILSEQ] +5C1A5CEF 1AEF [Trivial][ILSEQ] +5C1A5CF9 1AF9 [Trivial][ILSEQ] +5C1A5CFA 1AFA [Trivial][ILSEQ] +5C1A5CFC 1AFC [Trivial][ILSEQ] +5C1A5CFD 1AFD [Trivial][ILSEQ] +5C1A5CFE 1AFE [Trivial][ILSEQ] +5C1A5CFF 1AFF [Trivial][ILSEQ] +5C225C00 2200 [Trivial] +5C225C08 2208 [Trivial] +5C225C09 2209 [Trivial] +5C225C0A 220A [Trivial] +5C225C0D 220D [Trivial] +5C225C1A 221A [Trivial] +5C225C22 2222 [Trivial] +5C225C25 225C25 [Regular] +5C225C27 2227 [Trivial] +5C225C30 2200 [Regular] +5C225C3F 223F [Trivial] +5C225C40 2240 [Trivial] +5C225C5A 221A [Regular] +5C225C5C 225C [Regular] +5C225C5F 225C5F [Regular] +5C225C61 2261 [Trivial] +5C225C62 2208 [Regular] +5C225C6E 220A [Regular] +5C225C72 220D [Regular] +5C225C74 2209 [Regular] +5C225C7E 227E [Trivial] +5C225C7F 227F [Trivial] +5C225C80 2280 [Trivial][ILSEQ] +5C225C81 2281 [Trivial][ILSEQ] +5C225C9F 229F [Trivial][ILSEQ] +5C225CA0 22A0 [Trivial][ILSEQ] +5C225CA1 22A1 [Trivial][ILSEQ] +5C225CE0 22E0 [Trivial][ILSEQ] +5C225CEF 22EF [Trivial][ILSEQ] +5C225CF9 22F9 [Trivial][ILSEQ] +5C225CFA 22FA [Trivial][ILSEQ] +5C225CFC 22FC [Trivial][ILSEQ] +5C225CFD 22FD [Trivial][ILSEQ] +5C225CFE 22FE [Trivial][ILSEQ] +5C225CFF 22FF [Trivial][ILSEQ] +5C255C00 5C2500 [Regular] +5C255C08 5C2508 [Regular] +5C255C09 5C2509 [Regular] +5C255C0A 5C250A [Regular] +5C255C0D 5C250D [Regular] +5C255C1A 5C251A [Regular] +5C255C22 5C2522 [Regular] +5C255C25 5C255C25 [Preserve][LIKE] +5C255C27 5C2527 [Regular] +5C255C30 5C2500 [Regular] +5C255C3F 5C253F [Regular] +5C255C40 5C2540 [Regular] +5C255C5A 5C251A [Regular] +5C255C5C 5C255C [Regular] +5C255C5F 5C255C5F [Preserve][LIKE] +5C255C61 5C2561 [Regular] +5C255C62 5C2508 [Regular] +5C255C6E 5C250A [Regular] +5C255C72 5C250D [Regular] +5C255C74 5C2509 [Regular] +5C255C7E 5C257E [Regular] +5C255C7F 5C257F [Regular] +5C255C80 5C2580 [Regular][ILSEQ] +5C255C81 5C2581 [Regular][ILSEQ] +5C255C9F 5C259F 
[Regular][ILSEQ] +5C255CA0 5C25A0 [Regular][ILSEQ] +5C255CA1 5C25A1 [Regular][ILSEQ] +5C255CE0 5C25E0 [Regular][ILSEQ] +5C255CEF 5C25EF [Regular][ILSEQ] +5C255CF9 5C25F9 [Regular][ILSEQ] +5C255CFA 5C25FA [Regular][ILSEQ] +5C255CFC 5C25FC [Regular][ILSEQ] +5C255CFD 5C25FD [Regular][ILSEQ] +5C255CFE 5C25FE [Regular][ILSEQ] +5C255CFF 5C25FF [Regular][ILSEQ] +5C275C00 2700 [Trivial] +5C275C08 2708 [Trivial] +5C275C09 2709 [Trivial] +5C275C0A 270A [Trivial] +5C275C0D 270D [Trivial] +5C275C1A 271A [Trivial] +5C275C22 2722 [Trivial] +5C275C25 275C25 [Regular] +5C275C27 2727 [Trivial] +5C275C30 2700 [Regular] +5C275C3F 273F [Trivial] +5C275C40 2740 [Trivial] +5C275C5A 271A [Regular] +5C275C5C 275C [Regular] +5C275C5F 275C5F [Regular] +5C275C61 2761 [Trivial] +5C275C62 2708 [Regular] +5C275C6E 270A [Regular] +5C275C72 270D [Regular] +5C275C74 2709 [Regular] +5C275C7E 277E [Trivial] +5C275C7F 277F [Trivial] +5C275C80 2780 [Trivial][ILSEQ] +5C275C81 2781 [Trivial][ILSEQ] +5C275C9F 279F [Trivial][ILSEQ] +5C275CA0 27A0 [Trivial][ILSEQ] +5C275CA1 27A1 [Trivial][ILSEQ] +5C275CE0 27E0 [Trivial][ILSEQ] +5C275CEF 27EF [Trivial][ILSEQ] +5C275CF9 27F9 [Trivial][ILSEQ] +5C275CFA 27FA [Trivial][ILSEQ] +5C275CFC 27FC [Trivial][ILSEQ] +5C275CFD 27FD [Trivial][ILSEQ] +5C275CFE 27FE [Trivial][ILSEQ] +5C275CFF 27FF [Trivial][ILSEQ] +5C305C00 0000 [Regular] +5C305C08 0008 [Regular] +5C305C09 0009 [Regular] +5C305C0A 000A [Regular] +5C305C0D 000D [Regular] +5C305C1A 001A [Regular] +5C305C22 0022 [Regular] +5C305C25 005C25 [Regular] +5C305C27 0027 [Regular] +5C305C30 0000 [Regular] +5C305C3F 003F [Regular] +5C305C40 0040 [Regular] +5C305C5A 001A [Regular] +5C305C5C 005C [Regular] +5C305C5F 005C5F [Regular] +5C305C61 0061 [Regular] +5C305C62 0008 [Regular] +5C305C6E 000A [Regular] +5C305C72 000D [Regular] +5C305C74 0009 [Regular] +5C305C7E 007E [Regular] +5C305C7F 007F [Regular] +5C305C80 0080 [Regular][ILSEQ] +5C305C81 0081 [Regular][ILSEQ] +5C305C9F 009F [Regular][ILSEQ] +5C305CA0 00A0 [Regular][ILSEQ] +5C305CA1 00A1 [Regular][ILSEQ] +5C305CE0 00E0 [Regular][ILSEQ] +5C305CEF 00EF [Regular][ILSEQ] +5C305CF9 00F9 [Regular][ILSEQ] +5C305CFA 00FA [Regular][ILSEQ] +5C305CFC 00FC [Regular][ILSEQ] +5C305CFD 00FD [Regular][ILSEQ] +5C305CFE 00FE [Regular][ILSEQ] +5C305CFF 00FF [Regular][ILSEQ] +5C3F5C00 3F00 [Trivial] +5C3F5C08 3F08 [Trivial] +5C3F5C09 3F09 [Trivial] +5C3F5C0A 3F0A [Trivial] +5C3F5C0D 3F0D [Trivial] +5C3F5C1A 3F1A [Trivial] +5C3F5C22 3F22 [Trivial] +5C3F5C25 3F5C25 [Regular] +5C3F5C27 3F27 [Trivial] +5C3F5C30 3F00 [Regular] +5C3F5C3F 3F3F [Trivial] +5C3F5C40 3F40 [Trivial] +5C3F5C5A 3F1A [Regular] +5C3F5C5C 3F5C [Regular] +5C3F5C5F 3F5C5F [Regular] +5C3F5C61 3F61 [Trivial] +5C3F5C62 3F08 [Regular] +5C3F5C6E 3F0A [Regular] +5C3F5C72 3F0D [Regular] +5C3F5C74 3F09 [Regular] +5C3F5C7E 3F7E [Trivial] +5C3F5C7F 3F7F [Trivial] +5C3F5C80 3F80 [Trivial][ILSEQ] +5C3F5C81 3F81 [Trivial][ILSEQ] +5C3F5C9F 3F9F [Trivial][ILSEQ] +5C3F5CA0 3FA0 [Trivial][ILSEQ] +5C3F5CA1 3FA1 [Trivial][ILSEQ] +5C3F5CE0 3FE0 [Trivial][ILSEQ] +5C3F5CEF 3FEF [Trivial][ILSEQ] +5C3F5CF9 3FF9 [Trivial][ILSEQ] +5C3F5CFA 3FFA [Trivial][ILSEQ] +5C3F5CFC 3FFC [Trivial][ILSEQ] +5C3F5CFD 3FFD [Trivial][ILSEQ] +5C3F5CFE 3FFE [Trivial][ILSEQ] +5C3F5CFF 3FFF [Trivial][ILSEQ] +5C405C00 4000 [Trivial] +5C405C08 4008 [Trivial] +5C405C09 4009 [Trivial] +5C405C0A 400A [Trivial] +5C405C0D 400D [Trivial] +5C405C1A 401A [Trivial] +5C405C22 4022 [Trivial] +5C405C25 405C25 [Regular] +5C405C27 4027 [Trivial] +5C405C30 4000 [Regular] +5C405C3F 403F [Trivial] +5C405C40 
4040 [Trivial] +5C405C5A 401A [Regular] +5C405C5C 405C [Regular] +5C405C5F 405C5F [Regular] +5C405C61 4061 [Trivial] +5C405C62 4008 [Regular] +5C405C6E 400A [Regular] +5C405C72 400D [Regular] +5C405C74 4009 [Regular] +5C405C7E 407E [Trivial] +5C405C7F 407F [Trivial] +5C405C80 4080 [Trivial][ILSEQ] +5C405C81 4081 [Trivial][ILSEQ] +5C405C9F 409F [Trivial][ILSEQ] +5C405CA0 40A0 [Trivial][ILSEQ] +5C405CA1 40A1 [Trivial][ILSEQ] +5C405CE0 40E0 [Trivial][ILSEQ] +5C405CEF 40EF [Trivial][ILSEQ] +5C405CF9 40F9 [Trivial][ILSEQ] +5C405CFA 40FA [Trivial][ILSEQ] +5C405CFC 40FC [Trivial][ILSEQ] +5C405CFD 40FD [Trivial][ILSEQ] +5C405CFE 40FE [Trivial][ILSEQ] +5C405CFF 40FF [Trivial][ILSEQ] +5C5A5C00 1A00 [Regular] +5C5A5C08 1A08 [Regular] +5C5A5C09 1A09 [Regular] +5C5A5C0A 1A0A [Regular] +5C5A5C0D 1A0D [Regular] +5C5A5C1A 1A1A [Regular] +5C5A5C22 1A22 [Regular] +5C5A5C25 1A5C25 [Regular] +5C5A5C27 1A27 [Regular] +5C5A5C30 1A00 [Regular] +5C5A5C3F 1A3F [Regular] +5C5A5C40 1A40 [Regular] +5C5A5C5A 1A1A [Regular] +5C5A5C5C 1A5C [Regular] +5C5A5C5F 1A5C5F [Regular] +5C5A5C61 1A61 [Regular] +5C5A5C62 1A08 [Regular] +5C5A5C6E 1A0A [Regular] +5C5A5C72 1A0D [Regular] +5C5A5C74 1A09 [Regular] +5C5A5C7E 1A7E [Regular] +5C5A5C7F 1A7F [Regular] +5C5A5C80 1A80 [Regular][ILSEQ] +5C5A5C81 1A81 [Regular][ILSEQ] +5C5A5C9F 1A9F [Regular][ILSEQ] +5C5A5CA0 1AA0 [Regular][ILSEQ] +5C5A5CA1 1AA1 [Regular][ILSEQ] +5C5A5CE0 1AE0 [Regular][ILSEQ] +5C5A5CEF 1AEF [Regular][ILSEQ] +5C5A5CF9 1AF9 [Regular][ILSEQ] +5C5A5CFA 1AFA [Regular][ILSEQ] +5C5A5CFC 1AFC [Regular][ILSEQ] +5C5A5CFD 1AFD [Regular][ILSEQ] +5C5A5CFE 1AFE [Regular][ILSEQ] +5C5A5CFF 1AFF [Regular][ILSEQ] +5C5C5C00 5C00 [Regular] +5C5C5C08 5C08 [Regular] +5C5C5C09 5C09 [Regular] +5C5C5C0A 5C0A [Regular] +5C5C5C0D 5C0D [Regular] +5C5C5C1A 5C1A [Regular] +5C5C5C22 5C22 [Regular] +5C5C5C25 5C5C25 [Regular] +5C5C5C27 5C27 [Regular] +5C5C5C30 5C00 [Regular] +5C5C5C3F 5C3F [Regular] +5C5C5C40 5C40 [Regular] +5C5C5C5A 5C1A [Regular] +5C5C5C5C 5C5C [Regular] +5C5C5C5F 5C5C5F [Regular] +5C5C5C61 5C61 [Regular] +5C5C5C62 5C08 [Regular] +5C5C5C6E 5C0A [Regular] +5C5C5C72 5C0D [Regular] +5C5C5C74 5C09 [Regular] +5C5C5C7E 5C7E [Regular] +5C5C5C7F 5C7F [Regular] +5C5C5C80 5C80 [Regular][ILSEQ] +5C5C5C81 5C81 [Regular][ILSEQ] +5C5C5C9F 5C9F [Regular][ILSEQ] +5C5C5CA0 5CA0 [Regular][ILSEQ] +5C5C5CA1 5CA1 [Regular][ILSEQ] +5C5C5CE0 5CE0 [Regular][ILSEQ] +5C5C5CEF 5CEF [Regular][ILSEQ] +5C5C5CF9 5CF9 [Regular][ILSEQ] +5C5C5CFA 5CFA [Regular][ILSEQ] +5C5C5CFC 5CFC [Regular][ILSEQ] +5C5C5CFD 5CFD [Regular][ILSEQ] +5C5C5CFE 5CFE [Regular][ILSEQ] +5C5C5CFF 5CFF [Regular][ILSEQ] +5C5F5C00 5C5F00 [Regular] +5C5F5C08 5C5F08 [Regular] +5C5F5C09 5C5F09 [Regular] +5C5F5C0A 5C5F0A [Regular] +5C5F5C0D 5C5F0D [Regular] +5C5F5C1A 5C5F1A [Regular] +5C5F5C22 5C5F22 [Regular] +5C5F5C25 5C5F5C25 [Preserve][LIKE] +5C5F5C27 5C5F27 [Regular] +5C5F5C30 5C5F00 [Regular] +5C5F5C3F 5C5F3F [Regular] +5C5F5C40 5C5F40 [Regular] +5C5F5C5A 5C5F1A [Regular] +5C5F5C5C 5C5F5C [Regular] +5C5F5C5F 5C5F5C5F [Preserve][LIKE] +5C5F5C61 5C5F61 [Regular] +5C5F5C62 5C5F08 [Regular] +5C5F5C6E 5C5F0A [Regular] +5C5F5C72 5C5F0D [Regular] +5C5F5C74 5C5F09 [Regular] +5C5F5C7E 5C5F7E [Regular] +5C5F5C7F 5C5F7F [Regular] +5C5F5C80 5C5F80 [Regular][ILSEQ] +5C5F5C81 5C5F81 [Regular][ILSEQ] +5C5F5C9F 5C5F9F [Regular][ILSEQ] +5C5F5CA0 5C5FA0 [Regular][ILSEQ] +5C5F5CA1 5C5FA1 [Regular][ILSEQ] +5C5F5CE0 5C5FE0 [Regular][ILSEQ] +5C5F5CEF 5C5FEF [Regular][ILSEQ] +5C5F5CF9 5C5FF9 [Regular][ILSEQ] +5C5F5CFA 5C5FFA [Regular][ILSEQ] +5C5F5CFC 
5C5FFC [Regular][ILSEQ] +5C5F5CFD 5C5FFD [Regular][ILSEQ] +5C5F5CFE 5C5FFE [Regular][ILSEQ] +5C5F5CFF 5C5FFF [Regular][ILSEQ] +5C615C00 6100 [Trivial] +5C615C08 6108 [Trivial] +5C615C09 6109 [Trivial] +5C615C0A 610A [Trivial] +5C615C0D 610D [Trivial] +5C615C1A 611A [Trivial] +5C615C22 6122 [Trivial] +5C615C25 615C25 [Regular] +5C615C27 6127 [Trivial] +5C615C30 6100 [Regular] +5C615C3F 613F [Trivial] +5C615C40 6140 [Trivial] +5C615C5A 611A [Regular] +5C615C5C 615C [Regular] +5C615C5F 615C5F [Regular] +5C615C61 6161 [Trivial] +5C615C62 6108 [Regular] +5C615C6E 610A [Regular] +5C615C72 610D [Regular] +5C615C74 6109 [Regular] +5C615C7E 617E [Trivial] +5C615C7F 617F [Trivial] +5C615C80 6180 [Trivial][ILSEQ] +5C615C81 6181 [Trivial][ILSEQ] +5C615C9F 619F [Trivial][ILSEQ] +5C615CA0 61A0 [Trivial][ILSEQ] +5C615CA1 61A1 [Trivial][ILSEQ] +5C615CE0 61E0 [Trivial][ILSEQ] +5C615CEF 61EF [Trivial][ILSEQ] +5C615CF9 61F9 [Trivial][ILSEQ] +5C615CFA 61FA [Trivial][ILSEQ] +5C615CFC 61FC [Trivial][ILSEQ] +5C615CFD 61FD [Trivial][ILSEQ] +5C615CFE 61FE [Trivial][ILSEQ] +5C615CFF 61FF [Trivial][ILSEQ] +5C625C00 0800 [Regular] +5C625C08 0808 [Regular] +5C625C09 0809 [Regular] +5C625C0A 080A [Regular] +5C625C0D 080D [Regular] +5C625C1A 081A [Regular] +5C625C22 0822 [Regular] +5C625C25 085C25 [Regular] +5C625C27 0827 [Regular] +5C625C30 0800 [Regular] +5C625C3F 083F [Regular] +5C625C40 0840 [Regular] +5C625C5A 081A [Regular] +5C625C5C 085C [Regular] +5C625C5F 085C5F [Regular] +5C625C61 0861 [Regular] +5C625C62 0808 [Regular] +5C625C6E 080A [Regular] +5C625C72 080D [Regular] +5C625C74 0809 [Regular] +5C625C7E 087E [Regular] +5C625C7F 087F [Regular] +5C625C80 0880 [Regular][ILSEQ] +5C625C81 0881 [Regular][ILSEQ] +5C625C9F 089F [Regular][ILSEQ] +5C625CA0 08A0 [Regular][ILSEQ] +5C625CA1 08A1 [Regular][ILSEQ] +5C625CE0 08E0 [Regular][ILSEQ] +5C625CEF 08EF [Regular][ILSEQ] +5C625CF9 08F9 [Regular][ILSEQ] +5C625CFA 08FA [Regular][ILSEQ] +5C625CFC 08FC [Regular][ILSEQ] +5C625CFD 08FD [Regular][ILSEQ] +5C625CFE 08FE [Regular][ILSEQ] +5C625CFF 08FF [Regular][ILSEQ] +5C6E5C00 0A00 [Regular] +5C6E5C08 0A08 [Regular] +5C6E5C09 0A09 [Regular] +5C6E5C0A 0A0A [Regular] +5C6E5C0D 0A0D [Regular] +5C6E5C1A 0A1A [Regular] +5C6E5C22 0A22 [Regular] +5C6E5C25 0A5C25 [Regular] +5C6E5C27 0A27 [Regular] +5C6E5C30 0A00 [Regular] +5C6E5C3F 0A3F [Regular] +5C6E5C40 0A40 [Regular] +5C6E5C5A 0A1A [Regular] +5C6E5C5C 0A5C [Regular] +5C6E5C5F 0A5C5F [Regular] +5C6E5C61 0A61 [Regular] +5C6E5C62 0A08 [Regular] +5C6E5C6E 0A0A [Regular] +5C6E5C72 0A0D [Regular] +5C6E5C74 0A09 [Regular] +5C6E5C7E 0A7E [Regular] +5C6E5C7F 0A7F [Regular] +5C6E5C80 0A80 [Regular][ILSEQ] +5C6E5C81 0A81 [Regular][ILSEQ] +5C6E5C9F 0A9F [Regular][ILSEQ] +5C6E5CA0 0AA0 [Regular][ILSEQ] +5C6E5CA1 0AA1 [Regular][ILSEQ] +5C6E5CE0 0AE0 [Regular][ILSEQ] +5C6E5CEF 0AEF [Regular][ILSEQ] +5C6E5CF9 0AF9 [Regular][ILSEQ] +5C6E5CFA 0AFA [Regular][ILSEQ] +5C6E5CFC 0AFC [Regular][ILSEQ] +5C6E5CFD 0AFD [Regular][ILSEQ] +5C6E5CFE 0AFE [Regular][ILSEQ] +5C6E5CFF 0AFF [Regular][ILSEQ] +5C725C00 0D00 [Regular] +5C725C08 0D08 [Regular] +5C725C09 0D09 [Regular] +5C725C0A 0D0A [Regular] +5C725C0D 0D0D [Regular] +5C725C1A 0D1A [Regular] +5C725C22 0D22 [Regular] +5C725C25 0D5C25 [Regular] +5C725C27 0D27 [Regular] +5C725C30 0D00 [Regular] +5C725C3F 0D3F [Regular] +5C725C40 0D40 [Regular] +5C725C5A 0D1A [Regular] +5C725C5C 0D5C [Regular] +5C725C5F 0D5C5F [Regular] +5C725C61 0D61 [Regular] +5C725C62 0D08 [Regular] +5C725C6E 0D0A [Regular] +5C725C72 0D0D [Regular] +5C725C74 0D09 [Regular] +5C725C7E 0D7E 
[Regular] +5C725C7F 0D7F [Regular] +5C725C80 0D80 [Regular][ILSEQ] +5C725C81 0D81 [Regular][ILSEQ] +5C725C9F 0D9F [Regular][ILSEQ] +5C725CA0 0DA0 [Regular][ILSEQ] +5C725CA1 0DA1 [Regular][ILSEQ] +5C725CE0 0DE0 [Regular][ILSEQ] +5C725CEF 0DEF [Regular][ILSEQ] +5C725CF9 0DF9 [Regular][ILSEQ] +5C725CFA 0DFA [Regular][ILSEQ] +5C725CFC 0DFC [Regular][ILSEQ] +5C725CFD 0DFD [Regular][ILSEQ] +5C725CFE 0DFE [Regular][ILSEQ] +5C725CFF 0DFF [Regular][ILSEQ] +5C745C00 0900 [Regular] +5C745C08 0908 [Regular] +5C745C09 0909 [Regular] +5C745C0A 090A [Regular] +5C745C0D 090D [Regular] +5C745C1A 091A [Regular] +5C745C22 0922 [Regular] +5C745C25 095C25 [Regular] +5C745C27 0927 [Regular] +5C745C30 0900 [Regular] +5C745C3F 093F [Regular] +5C745C40 0940 [Regular] +5C745C5A 091A [Regular] +5C745C5C 095C [Regular] +5C745C5F 095C5F [Regular] +5C745C61 0961 [Regular] +5C745C62 0908 [Regular] +5C745C6E 090A [Regular] +5C745C72 090D [Regular] +5C745C74 0909 [Regular] +5C745C7E 097E [Regular] +5C745C7F 097F [Regular] +5C745C80 0980 [Regular][ILSEQ] +5C745C81 0981 [Regular][ILSEQ] +5C745C9F 099F [Regular][ILSEQ] +5C745CA0 09A0 [Regular][ILSEQ] +5C745CA1 09A1 [Regular][ILSEQ] +5C745CE0 09E0 [Regular][ILSEQ] +5C745CEF 09EF [Regular][ILSEQ] +5C745CF9 09F9 [Regular][ILSEQ] +5C745CFA 09FA [Regular][ILSEQ] +5C745CFC 09FC [Regular][ILSEQ] +5C745CFD 09FD [Regular][ILSEQ] +5C745CFE 09FE [Regular][ILSEQ] +5C745CFF 09FF [Regular][ILSEQ] +5C7E5C00 7E00 [Trivial] +5C7E5C08 7E08 [Trivial] +5C7E5C09 7E09 [Trivial] +5C7E5C0A 7E0A [Trivial] +5C7E5C0D 7E0D [Trivial] +5C7E5C1A 7E1A [Trivial] +5C7E5C22 7E22 [Trivial] +5C7E5C25 7E5C25 [Regular] +5C7E5C27 7E27 [Trivial] +5C7E5C30 7E00 [Regular] +5C7E5C3F 7E3F [Trivial] +5C7E5C40 7E40 [Trivial] +5C7E5C5A 7E1A [Regular] +5C7E5C5C 7E5C [Regular] +5C7E5C5F 7E5C5F [Regular] +5C7E5C61 7E61 [Trivial] +5C7E5C62 7E08 [Regular] +5C7E5C6E 7E0A [Regular] +5C7E5C72 7E0D [Regular] +5C7E5C74 7E09 [Regular] +5C7E5C7E 7E7E [Trivial] +5C7E5C7F 7E7F [Trivial] +5C7E5C80 7E80 [Trivial][ILSEQ] +5C7E5C81 7E81 [Trivial][ILSEQ] +5C7E5C9F 7E9F [Trivial][ILSEQ] +5C7E5CA0 7EA0 [Trivial][ILSEQ] +5C7E5CA1 7EA1 [Trivial][ILSEQ] +5C7E5CE0 7EE0 [Trivial][ILSEQ] +5C7E5CEF 7EEF [Trivial][ILSEQ] +5C7E5CF9 7EF9 [Trivial][ILSEQ] +5C7E5CFA 7EFA [Trivial][ILSEQ] +5C7E5CFC 7EFC [Trivial][ILSEQ] +5C7E5CFD 7EFD [Trivial][ILSEQ] +5C7E5CFE 7EFE [Trivial][ILSEQ] +5C7E5CFF 7EFF [Trivial][ILSEQ] +5C7F5C00 7F00 [Trivial] +5C7F5C08 7F08 [Trivial] +5C7F5C09 7F09 [Trivial] +5C7F5C0A 7F0A [Trivial] +5C7F5C0D 7F0D [Trivial] +5C7F5C1A 7F1A [Trivial] +5C7F5C22 7F22 [Trivial] +5C7F5C25 7F5C25 [Regular] +5C7F5C27 7F27 [Trivial] +5C7F5C30 7F00 [Regular] +5C7F5C3F 7F3F [Trivial] +5C7F5C40 7F40 [Trivial] +5C7F5C5A 7F1A [Regular] +5C7F5C5C 7F5C [Regular] +5C7F5C5F 7F5C5F [Regular] +5C7F5C61 7F61 [Trivial] +5C7F5C62 7F08 [Regular] +5C7F5C6E 7F0A [Regular] +5C7F5C72 7F0D [Regular] +5C7F5C74 7F09 [Regular] +5C7F5C7E 7F7E [Trivial] +5C7F5C7F 7F7F [Trivial] +5C7F5C80 7F80 [Trivial][ILSEQ] +5C7F5C81 7F81 [Trivial][ILSEQ] +5C7F5C9F 7F9F [Trivial][ILSEQ] +5C7F5CA0 7FA0 [Trivial][ILSEQ] +5C7F5CA1 7FA1 [Trivial][ILSEQ] +5C7F5CE0 7FE0 [Trivial][ILSEQ] +5C7F5CEF 7FEF [Trivial][ILSEQ] +5C7F5CF9 7FF9 [Trivial][ILSEQ] +5C7F5CFA 7FFA [Trivial][ILSEQ] +5C7F5CFC 7FFC [Trivial][ILSEQ] +5C7F5CFD 7FFD [Trivial][ILSEQ] +5C7F5CFE 7FFE [Trivial][ILSEQ] +5C7F5CFF 7FFF [Trivial][ILSEQ] +5C805C00 8000 [Trivial][ILSEQ] +5C805C08 8008 [Trivial][ILSEQ] +5C805C09 8009 [Trivial][ILSEQ] +5C805C0A 800A [Trivial][ILSEQ] +5C805C0D 800D [Trivial][ILSEQ] +5C805C1A 801A [Trivial][ILSEQ] 
+5C805C22 8022 [Trivial][ILSEQ] +5C805C25 805C25 [Regular][ILSEQ] +5C805C27 8027 [Trivial][ILSEQ] +5C805C30 8000 [Regular][ILSEQ] +5C805C3F 803F [Trivial][ILSEQ] +5C805C40 8040 [Trivial][ILSEQ] +5C805C5A 801A [Regular][ILSEQ] +5C805C5C 805C [Regular][ILSEQ] +5C805C5F 805C5F [Regular][ILSEQ] +5C805C61 8061 [Trivial][ILSEQ] +5C805C62 8008 [Regular][ILSEQ] +5C805C6E 800A [Regular][ILSEQ] +5C805C72 800D [Regular][ILSEQ] +5C805C74 8009 [Regular][ILSEQ] +5C805C7E 807E [Trivial][ILSEQ] +5C805C7F 807F [Trivial][ILSEQ] +5C805C80 8080 [Trivial][ILSEQ] +5C805C81 8081 [Trivial][ILSEQ] +5C805C9F 809F [Trivial][ILSEQ] +5C805CA0 80A0 [Trivial][ILSEQ] +5C805CA1 80A1 [Trivial][ILSEQ] +5C805CE0 80E0 [Trivial][ILSEQ] +5C805CEF 80EF [Trivial][ILSEQ] +5C805CF9 80F9 [Trivial][ILSEQ] +5C805CFA 80FA [Trivial][ILSEQ] +5C805CFC 80FC [Trivial][ILSEQ] +5C805CFD 80FD [Trivial][ILSEQ] +5C805CFE 80FE [Trivial][ILSEQ] +5C805CFF 80FF [Trivial][ILSEQ] +5C815C00 8100 [Trivial][BROKE] +5C815C08 8108 [Trivial][BROKE] +5C815C09 8109 [Trivial][BROKE] +5C815C0A 810A [Trivial][BROKE] +5C815C0D 810D [Trivial][BROKE] +5C815C1A 811A [Trivial][BROKE] +5C815C22 8122 [Trivial][BROKE] +5C815C25 815C25 [Regular] +5C815C27 8127 [Trivial][BROKE] +5C815C30 8100 [Regular][BROKE] +5C815C3F 813F [Trivial][BROKE] +5C815C40 8140 [Trivial][USER] +5C815C5A 811A [Regular][BROKE] +5C815C5C 815C [Regular][USER] +5C815C5F 815C5F [Regular] +5C815C61 8161 [Trivial][USER] +5C815C62 8108 [Regular][BROKE][USER] +5C815C6E 810A [Regular][BROKE] +5C815C72 810D [Regular][BROKE] +5C815C74 8109 [Regular][BROKE] +5C815C7E 817E [Trivial][USER] +5C815C7F 817F [Trivial][BROKE] +5C815C80 8180 [Trivial][FIXED][USER] +5C815C81 8181 [Trivial][FIXED][USER] +5C815C9F 819F [Trivial][FIXED][USER] +5C815CA0 81A0 [Trivial][FIXED][USER] +5C815CA1 81A1 [Trivial][FIXED][USER] +5C815CE0 81E0 [Trivial][FIXED][USER] +5C815CEF 81EF [Trivial][FIXED][USER] +5C815CF9 81F9 [Trivial][FIXED][USER] +5C815CFA 81FA [Trivial][FIXED][USER] +5C815CFC 81FC [Trivial][FIXED][USER] +5C815CFD 81FD [Trivial][FIXED][USER] +5C815CFE 81FE [Trivial][FIXED][USER] +5C815CFF 81FF [Trivial][ILSEQ] +5C9F5C00 9F00 [Trivial][BROKE] +5C9F5C08 9F08 [Trivial][BROKE] +5C9F5C09 9F09 [Trivial][BROKE] +5C9F5C0A 9F0A [Trivial][BROKE] +5C9F5C0D 9F0D [Trivial][BROKE] +5C9F5C1A 9F1A [Trivial][BROKE] +5C9F5C22 9F22 [Trivial][BROKE] +5C9F5C25 9F5C25 [Regular] +5C9F5C27 9F27 [Trivial][BROKE] +5C9F5C30 9F00 [Regular][BROKE] +5C9F5C3F 9F3F [Trivial][BROKE] +5C9F5C40 9F40 [Trivial][USER] +5C9F5C5A 9F1A [Regular][BROKE] +5C9F5C5C 9F5C [Regular][USER] +5C9F5C5F 9F5C5F [Regular] +5C9F5C61 9F61 [Trivial][USER] +5C9F5C62 9F08 [Regular][BROKE][USER] +5C9F5C6E 9F0A [Regular][BROKE] +5C9F5C72 9F0D [Regular][BROKE] +5C9F5C74 9F09 [Regular][BROKE] +5C9F5C7E 9F7E [Trivial][USER] +5C9F5C7F 9F7F [Trivial][BROKE] +5C9F5C80 9F80 [Trivial][FIXED][USER] +5C9F5C81 9F81 [Trivial][FIXED][USER] +5C9F5C9F 9F9F [Trivial][FIXED][USER] +5C9F5CA0 9FA0 [Trivial][FIXED][USER] +5C9F5CA1 9FA1 [Trivial][FIXED][USER] +5C9F5CE0 9FE0 [Trivial][FIXED][USER] +5C9F5CEF 9FEF [Trivial][FIXED][USER] +5C9F5CF9 9FF9 [Trivial][FIXED][USER] +5C9F5CFA 9FFA [Trivial][FIXED][USER] +5C9F5CFC 9FFC [Trivial][FIXED][USER] +5C9F5CFD 9FFD [Trivial][FIXED][USER] +5C9F5CFE 9FFE [Trivial][FIXED][USER] +5C9F5CFF 9FFF [Trivial][ILSEQ] +5CA05C00 A000 [Trivial][BROKE] +5CA05C08 A008 [Trivial][BROKE] +5CA05C09 A009 [Trivial][BROKE] +5CA05C0A A00A [Trivial][BROKE] +5CA05C0D A00D [Trivial][BROKE] +5CA05C1A A01A [Trivial][BROKE] +5CA05C22 A022 [Trivial][BROKE] +5CA05C25 A05C25 [Regular] 
+5CA05C27 A027 [Trivial][BROKE] +5CA05C30 A000 [Regular][BROKE] +5CA05C3F A03F [Trivial][BROKE] +5CA05C40 A040 [Trivial][USER] +5CA05C5A A01A [Regular][BROKE] +5CA05C5C A05C [Regular][USER] +5CA05C5F A05C5F [Regular] +5CA05C61 A061 [Trivial][USER] +5CA05C62 A008 [Regular][BROKE][USER] +5CA05C6E A00A [Regular][BROKE] +5CA05C72 A00D [Regular][BROKE] +5CA05C74 A009 [Regular][BROKE] +5CA05C7E A07E [Trivial][USER] +5CA05C7F A07F [Trivial][BROKE] +5CA05C80 A080 [Trivial][FIXED][USER] +5CA05C81 A081 [Trivial][FIXED][USER] +5CA05C9F A09F [Trivial][FIXED][USER] +5CA05CA0 A0A0 [Trivial][FIXED][USER] +5CA05CA1 A0A1 [Trivial][FIXED][USER] +5CA05CE0 A0E0 [Trivial][FIXED][USER] +5CA05CEF A0EF [Trivial][FIXED][USER] +5CA05CF9 A0F9 [Trivial][FIXED][USER] +5CA05CFA A0FA [Trivial][FIXED][USER] +5CA05CFC A0FC [Trivial][FIXED][USER] +5CA05CFD A0FD [Trivial][FIXED][USER] +5CA05CFE A0FE [Trivial][FIXED][USER] +5CA05CFF A0FF [Trivial][ILSEQ] +5CA15C00 A100 [Trivial][BROKE] +5CA15C08 A108 [Trivial][BROKE] +5CA15C09 A109 [Trivial][BROKE] +5CA15C0A A10A [Trivial][BROKE] +5CA15C0D A10D [Trivial][BROKE] +5CA15C1A A11A [Trivial][BROKE] +5CA15C22 A122 [Trivial][BROKE] +5CA15C25 A15C25 [Regular] +5CA15C27 A127 [Trivial][BROKE] +5CA15C30 A100 [Regular][BROKE] +5CA15C3F A13F [Trivial][BROKE] +5CA15C40 A140 [Trivial][USER] +5CA15C5A A11A [Regular][BROKE] +5CA15C5C A15C [Regular][USER] +5CA15C5F A15C5F [Regular] +5CA15C61 A161 [Trivial][USER] +5CA15C62 A108 [Regular][BROKE][USER] +5CA15C6E A10A [Regular][BROKE] +5CA15C72 A10D [Regular][BROKE] +5CA15C74 A109 [Regular][BROKE] +5CA15C7E A17E [Trivial][USER] +5CA15C7F A17F [Trivial][BROKE] +5CA15C80 A180 [Trivial][FIXED][USER] +5CA15C81 A181 [Trivial][FIXED][USER] +5CA15C9F A19F [Trivial][FIXED][USER] +5CA15CA0 A1A0 [Trivial][FIXED][USER] +5CA15CA1 A1A1 [Trivial][FIXED][USER] +5CA15CE0 A1E0 [Trivial][FIXED][USER] +5CA15CEF A1EF [Trivial][FIXED][USER] +5CA15CF9 A1F9 [Trivial][FIXED][USER] +5CA15CFA A1FA [Trivial][FIXED][USER] +5CA15CFC A1FC [Trivial][FIXED][USER] +5CA15CFD A1FD [Trivial][FIXED][USER] +5CA15CFE A1FE [Trivial][FIXED][USER] +5CA15CFF A1FF [Trivial][ILSEQ] +5CE05C00 E000 [Trivial][BROKE] +5CE05C08 E008 [Trivial][BROKE] +5CE05C09 E009 [Trivial][BROKE] +5CE05C0A E00A [Trivial][BROKE] +5CE05C0D E00D [Trivial][BROKE] +5CE05C1A E01A [Trivial][BROKE] +5CE05C22 E022 [Trivial][BROKE] +5CE05C25 E05C25 [Regular] +5CE05C27 E027 [Trivial][BROKE] +5CE05C30 E000 [Regular][BROKE] +5CE05C3F E03F [Trivial][BROKE] +5CE05C40 E040 [Trivial][USER] +5CE05C5A E01A [Regular][BROKE] +5CE05C5C E05C [Regular][USER] +5CE05C5F E05C5F [Regular] +5CE05C61 E061 [Trivial][USER] +5CE05C62 E008 [Regular][BROKE][USER] +5CE05C6E E00A [Regular][BROKE] +5CE05C72 E00D [Regular][BROKE] +5CE05C74 E009 [Regular][BROKE] +5CE05C7E E07E [Trivial][USER] +5CE05C7F E07F [Trivial][BROKE] +5CE05C80 E080 [Trivial][FIXED][USER] +5CE05C81 E081 [Trivial][FIXED][USER] +5CE05C9F E09F [Trivial][FIXED][USER] +5CE05CA0 E0A0 [Trivial][FIXED][USER] +5CE05CA1 E0A1 [Trivial][FIXED][USER] +5CE05CE0 E0E0 [Trivial][FIXED][USER] +5CE05CEF E0EF [Trivial][FIXED][USER] +5CE05CF9 E0F9 [Trivial][FIXED][USER] +5CE05CFA E0FA [Trivial][FIXED][USER] +5CE05CFC E0FC [Trivial][FIXED][USER] +5CE05CFD E0FD [Trivial][FIXED][USER] +5CE05CFE E0FE [Trivial][FIXED][USER] +5CE05CFF E0FF [Trivial][ILSEQ] +5CEF5C00 EF00 [Trivial][BROKE] +5CEF5C08 EF08 [Trivial][BROKE] +5CEF5C09 EF09 [Trivial][BROKE] +5CEF5C0A EF0A [Trivial][BROKE] +5CEF5C0D EF0D [Trivial][BROKE] +5CEF5C1A EF1A [Trivial][BROKE] +5CEF5C22 EF22 [Trivial][BROKE] +5CEF5C25 EF5C25 [Regular] 
+5CEF5C27 EF27 [Trivial][BROKE] +5CEF5C30 EF00 [Regular][BROKE] +5CEF5C3F EF3F [Trivial][BROKE] +5CEF5C40 EF40 [Trivial][USER] +5CEF5C5A EF1A [Regular][BROKE] +5CEF5C5C EF5C [Regular][USER] +5CEF5C5F EF5C5F [Regular] +5CEF5C61 EF61 [Trivial][USER] +5CEF5C62 EF08 [Regular][BROKE][USER] +5CEF5C6E EF0A [Regular][BROKE] +5CEF5C72 EF0D [Regular][BROKE] +5CEF5C74 EF09 [Regular][BROKE] +5CEF5C7E EF7E [Trivial][USER] +5CEF5C7F EF7F [Trivial][BROKE] +5CEF5C80 EF80 [Trivial][FIXED][USER] +5CEF5C81 EF81 [Trivial][FIXED][USER] +5CEF5C9F EF9F [Trivial][FIXED][USER] +5CEF5CA0 EFA0 [Trivial][FIXED][USER] +5CEF5CA1 EFA1 [Trivial][FIXED][USER] +5CEF5CE0 EFE0 [Trivial][FIXED][USER] +5CEF5CEF EFEF [Trivial][FIXED][USER] +5CEF5CF9 EFF9 [Trivial][FIXED][USER] +5CEF5CFA EFFA [Trivial][FIXED][USER] +5CEF5CFC EFFC [Trivial][FIXED][USER] +5CEF5CFD EFFD [Trivial][FIXED][USER] +5CEF5CFE EFFE [Trivial][FIXED][USER] +5CEF5CFF EFFF [Trivial][ILSEQ] +5CF95C00 F900 [Trivial][BROKE] +5CF95C08 F908 [Trivial][BROKE] +5CF95C09 F909 [Trivial][BROKE] +5CF95C0A F90A [Trivial][BROKE] +5CF95C0D F90D [Trivial][BROKE] +5CF95C1A F91A [Trivial][BROKE] +5CF95C22 F922 [Trivial][BROKE] +5CF95C25 F95C25 [Regular] +5CF95C27 F927 [Trivial][BROKE] +5CF95C30 F900 [Regular][BROKE] +5CF95C3F F93F [Trivial][BROKE] +5CF95C40 F940 [Trivial][USER] +5CF95C5A F91A [Regular][BROKE] +5CF95C5C F95C [Regular][USER] +5CF95C5F F95C5F [Regular] +5CF95C61 F961 [Trivial][USER] +5CF95C62 F908 [Regular][BROKE][USER] +5CF95C6E F90A [Regular][BROKE] +5CF95C72 F90D [Regular][BROKE] +5CF95C74 F909 [Regular][BROKE] +5CF95C7E F97E [Trivial][USER] +5CF95C7F F97F [Trivial][BROKE] +5CF95C80 F980 [Trivial][FIXED][USER] +5CF95C81 F981 [Trivial][FIXED][USER] +5CF95C9F F99F [Trivial][FIXED][USER] +5CF95CA0 F9A0 [Trivial][FIXED][USER] +5CF95CA1 F9A1 [Trivial][FIXED][USER] +5CF95CE0 F9E0 [Trivial][FIXED][USER] +5CF95CEF F9EF [Trivial][FIXED][USER] +5CF95CF9 F9F9 [Trivial][FIXED][USER] +5CF95CFA F9FA [Trivial][FIXED][USER] +5CF95CFC F9FC [Trivial][FIXED][USER] +5CF95CFD F9FD [Trivial][FIXED][USER] +5CF95CFE F9FE [Trivial][FIXED][USER] +5CF95CFF F9FF [Trivial][ILSEQ] +5CFA5C00 FA00 [Trivial][BROKE] +5CFA5C08 FA08 [Trivial][BROKE] +5CFA5C09 FA09 [Trivial][BROKE] +5CFA5C0A FA0A [Trivial][BROKE] +5CFA5C0D FA0D [Trivial][BROKE] +5CFA5C1A FA1A [Trivial][BROKE] +5CFA5C22 FA22 [Trivial][BROKE] +5CFA5C25 FA5C25 [Regular] +5CFA5C27 FA27 [Trivial][BROKE] +5CFA5C30 FA00 [Regular][BROKE] +5CFA5C3F FA3F [Trivial][BROKE] +5CFA5C40 FA40 [Trivial][USER] +5CFA5C5A FA1A [Regular][BROKE] +5CFA5C5C FA5C [Regular][USER] +5CFA5C5F FA5C5F [Regular] +5CFA5C61 FA61 [Trivial][USER] +5CFA5C62 FA08 [Regular][BROKE][USER] +5CFA5C6E FA0A [Regular][BROKE] +5CFA5C72 FA0D [Regular][BROKE] +5CFA5C74 FA09 [Regular][BROKE] +5CFA5C7E FA7E [Trivial][USER] +5CFA5C7F FA7F [Trivial][BROKE] +5CFA5C80 FA80 [Trivial][FIXED][USER] +5CFA5C81 FA81 [Trivial][FIXED][USER] +5CFA5C9F FA9F [Trivial][FIXED][USER] +5CFA5CA0 FAA0 [Trivial][FIXED][USER] +5CFA5CA1 FAA1 [Trivial][FIXED][USER] +5CFA5CE0 FAE0 [Trivial][FIXED][USER] +5CFA5CEF FAEF [Trivial][FIXED][USER] +5CFA5CF9 FAF9 [Trivial][FIXED][USER] +5CFA5CFA FAFA [Trivial][FIXED][USER] +5CFA5CFC FAFC [Trivial][FIXED][USER] +5CFA5CFD FAFD [Trivial][FIXED][USER] +5CFA5CFE FAFE [Trivial][FIXED][USER] +5CFA5CFF FAFF [Trivial][ILSEQ] +5CFC5C00 FC00 [Trivial][BROKE] +5CFC5C08 FC08 [Trivial][BROKE] +5CFC5C09 FC09 [Trivial][BROKE] +5CFC5C0A FC0A [Trivial][BROKE] +5CFC5C0D FC0D [Trivial][BROKE] +5CFC5C1A FC1A [Trivial][BROKE] +5CFC5C22 FC22 [Trivial][BROKE] +5CFC5C25 FC5C25 [Regular] 
+5CFC5C27 FC27 [Trivial][BROKE] +5CFC5C30 FC00 [Regular][BROKE] +5CFC5C3F FC3F [Trivial][BROKE] +5CFC5C40 FC40 [Trivial][USER] +5CFC5C5A FC1A [Regular][BROKE] +5CFC5C5C FC5C [Regular][USER] +5CFC5C5F FC5C5F [Regular] +5CFC5C61 FC61 [Trivial][USER] +5CFC5C62 FC08 [Regular][BROKE][USER] +5CFC5C6E FC0A [Regular][BROKE] +5CFC5C72 FC0D [Regular][BROKE] +5CFC5C74 FC09 [Regular][BROKE] +5CFC5C7E FC7E [Trivial][USER] +5CFC5C7F FC7F [Trivial][BROKE] +5CFC5C80 FC80 [Trivial][FIXED][USER] +5CFC5C81 FC81 [Trivial][FIXED][USER] +5CFC5C9F FC9F [Trivial][FIXED][USER] +5CFC5CA0 FCA0 [Trivial][FIXED][USER] +5CFC5CA1 FCA1 [Trivial][FIXED][USER] +5CFC5CE0 FCE0 [Trivial][FIXED][USER] +5CFC5CEF FCEF [Trivial][FIXED][USER] +5CFC5CF9 FCF9 [Trivial][FIXED][USER] +5CFC5CFA FCFA [Trivial][FIXED][USER] +5CFC5CFC FCFC [Trivial][FIXED][USER] +5CFC5CFD FCFD [Trivial][FIXED][USER] +5CFC5CFE FCFE [Trivial][FIXED][USER] +5CFC5CFF FCFF [Trivial][ILSEQ] +5CFD5C00 FD00 [Trivial][BROKE] +5CFD5C08 FD08 [Trivial][BROKE] +5CFD5C09 FD09 [Trivial][BROKE] +5CFD5C0A FD0A [Trivial][BROKE] +5CFD5C0D FD0D [Trivial][BROKE] +5CFD5C1A FD1A [Trivial][BROKE] +5CFD5C22 FD22 [Trivial][BROKE] +5CFD5C25 FD5C25 [Regular] +5CFD5C27 FD27 [Trivial][BROKE] +5CFD5C30 FD00 [Regular][BROKE] +5CFD5C3F FD3F [Trivial][BROKE] +5CFD5C40 FD40 [Trivial][USER] +5CFD5C5A FD1A [Regular][BROKE] +5CFD5C5C FD5C [Regular][USER] +5CFD5C5F FD5C5F [Regular] +5CFD5C61 FD61 [Trivial][USER] +5CFD5C62 FD08 [Regular][BROKE][USER] +5CFD5C6E FD0A [Regular][BROKE] +5CFD5C72 FD0D [Regular][BROKE] +5CFD5C74 FD09 [Regular][BROKE] +5CFD5C7E FD7E [Trivial][USER] +5CFD5C7F FD7F [Trivial][BROKE] +5CFD5C80 FD80 [Trivial][FIXED][USER] +5CFD5C81 FD81 [Trivial][FIXED][USER] +5CFD5C9F FD9F [Trivial][FIXED][USER] +5CFD5CA0 FDA0 [Trivial][FIXED][USER] +5CFD5CA1 FDA1 [Trivial][FIXED][USER] +5CFD5CE0 FDE0 [Trivial][FIXED][USER] +5CFD5CEF FDEF [Trivial][FIXED][USER] +5CFD5CF9 FDF9 [Trivial][FIXED][USER] +5CFD5CFA FDFA [Trivial][FIXED][USER] +5CFD5CFC FDFC [Trivial][FIXED][USER] +5CFD5CFD FDFD [Trivial][FIXED][USER] +5CFD5CFE FDFE [Trivial][FIXED][USER] +5CFD5CFF FDFF [Trivial][ILSEQ] +5CFE5C00 FE00 [Trivial][BROKE] +5CFE5C08 FE08 [Trivial][BROKE] +5CFE5C09 FE09 [Trivial][BROKE] +5CFE5C0A FE0A [Trivial][BROKE] +5CFE5C0D FE0D [Trivial][BROKE] +5CFE5C1A FE1A [Trivial][BROKE] +5CFE5C22 FE22 [Trivial][BROKE] +5CFE5C25 FE5C25 [Regular] +5CFE5C27 FE27 [Trivial][BROKE] +5CFE5C30 FE00 [Regular][BROKE] +5CFE5C3F FE3F [Trivial][BROKE] +5CFE5C40 FE40 [Trivial][USER] +5CFE5C5A FE1A [Regular][BROKE] +5CFE5C5C FE5C [Regular][USER] +5CFE5C5F FE5C5F [Regular] +5CFE5C61 FE61 [Trivial][USER] +5CFE5C62 FE08 [Regular][BROKE][USER] +5CFE5C6E FE0A [Regular][BROKE] +5CFE5C72 FE0D [Regular][BROKE] +5CFE5C74 FE09 [Regular][BROKE] +5CFE5C7E FE7E [Trivial][USER] +5CFE5C7F FE7F [Trivial][BROKE] +5CFE5C80 FE80 [Trivial][FIXED][USER] +5CFE5C81 FE81 [Trivial][FIXED][USER] +5CFE5C9F FE9F [Trivial][FIXED][USER] +5CFE5CA0 FEA0 [Trivial][FIXED][USER] +5CFE5CA1 FEA1 [Trivial][FIXED][USER] +5CFE5CE0 FEE0 [Trivial][FIXED][USER] +5CFE5CEF FEEF [Trivial][FIXED][USER] +5CFE5CF9 FEF9 [Trivial][FIXED][USER] +5CFE5CFA FEFA [Trivial][FIXED][USER] +5CFE5CFC FEFC [Trivial][FIXED][USER] +5CFE5CFD FEFD [Trivial][FIXED][USER] +5CFE5CFE FEFE [Trivial][FIXED][USER] +5CFE5CFF FEFF [Trivial][ILSEQ] +5CFF5C00 FF00 [Trivial][ILSEQ] +5CFF5C08 FF08 [Trivial][ILSEQ] +5CFF5C09 FF09 [Trivial][ILSEQ] +5CFF5C0A FF0A [Trivial][ILSEQ] +5CFF5C0D FF0D [Trivial][ILSEQ] +5CFF5C1A FF1A [Trivial][ILSEQ] +5CFF5C22 FF22 [Trivial][ILSEQ] +5CFF5C25 FF5C25 
[Regular][ILSEQ] +5CFF5C27 FF27 [Trivial][ILSEQ] +5CFF5C30 FF00 [Regular][ILSEQ] +5CFF5C3F FF3F [Trivial][ILSEQ] +5CFF5C40 FF40 [Trivial][ILSEQ] +5CFF5C5A FF1A [Regular][ILSEQ] +5CFF5C5C FF5C [Regular][ILSEQ] +5CFF5C5F FF5C5F [Regular][ILSEQ] +5CFF5C61 FF61 [Trivial][ILSEQ] +5CFF5C62 FF08 [Regular][ILSEQ] +5CFF5C6E FF0A [Regular][ILSEQ] +5CFF5C72 FF0D [Regular][ILSEQ] +5CFF5C74 FF09 [Regular][ILSEQ] +5CFF5C7E FF7E [Trivial][ILSEQ] +5CFF5C7F FF7F [Trivial][ILSEQ] +5CFF5C80 FF80 [Trivial][ILSEQ] +5CFF5C81 FF81 [Trivial][ILSEQ] +5CFF5C9F FF9F [Trivial][ILSEQ] +5CFF5CA0 FFA0 [Trivial][ILSEQ] +5CFF5CA1 FFA1 [Trivial][ILSEQ] +5CFF5CE0 FFE0 [Trivial][ILSEQ] +5CFF5CEF FFEF [Trivial][ILSEQ] +5CFF5CF9 FFF9 [Trivial][ILSEQ] +5CFF5CFA FFFA [Trivial][ILSEQ] +5CFF5CFC FFFC [Trivial][ILSEQ] +5CFF5CFD FFFD [Trivial][ILSEQ] +5CFF5CFE FFFE [Trivial][ILSEQ] +5CFF5CFF FFFF [Trivial][ILSEQ] +DROP TABLE t1; +DROP PROCEDURE p1; +DROP PROCEDURE p2; +DROP FUNCTION unescape; +DROP FUNCTION unescape_type; +DROP FUNCTION wellformedness; +DROP FUNCTION mysql_real_escape_string_generated; +DROP FUNCTION iswellformed; +DROP TABLE allbytes; +# End of ctype_backslash.inc +SET NAMES gbk; +# Start of ctype_E05C.inc +SELECT HEX('à\'),HEX('à\t'); +HEX('à\') HEX('à\t') +E05C E05C74 +SELECT HEX('\\à\'),HEX('\\à\t'),HEX('\\à\t\t'); +HEX('\\à\') HEX('\\à\t') HEX('\\à\t\t') +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX('''à\'),HEX('à\'''); +HEX('''à\') HEX('à\''') +27E05C E05C27 +SELECT HEX('\\''à\'),HEX('à\''\\'); +HEX('\\''à\') HEX('à\''\\') +5C27E05C E05C275C +SELECT HEX(BINARY('à\')),HEX(BINARY('à\t')); +HEX(BINARY('à\')) HEX(BINARY('à\t')) +E05C E05C74 +SELECT HEX(BINARY('\\à\')),HEX(BINARY('\\à\t')),HEX(BINARY('\\à\t\t')); +HEX(BINARY('\\à\')) HEX(BINARY('\\à\t')) HEX(BINARY('\\à\t\t')) +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX(BINARY('''à\')),HEX(BINARY('à\''')); +HEX(BINARY('''à\')) HEX(BINARY('à\''')) +27E05C E05C27 +SELECT HEX(BINARY('\\''à\')),HEX(BINARY('à\''\\')); +HEX(BINARY('\\''à\')) HEX(BINARY('à\''\\')) +5C27E05C E05C275C +SELECT HEX(_BINARY'à\'),HEX(_BINARY'à\t'); +HEX(_BINARY'à\') HEX(_BINARY'à\t') +E05C E05C74 +SELECT HEX(_BINARY'\\à\'),HEX(_BINARY'\\à\t'),HEX(_BINARY'\\à\t\t'); +HEX(_BINARY'\\à\') HEX(_BINARY'\\à\t') HEX(_BINARY'\\à\t\t') +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX(_BINARY'''à\'),HEX(_BINARY'à\'''); +HEX(_BINARY'''à\') HEX(_BINARY'à\''') +27E05C E05C27 +SELECT HEX(_BINARY'\\''à\'),HEX(_BINARY'à\''\\'); +HEX(_BINARY'\\''à\') HEX(_BINARY'à\''\\') +5C27E05C E05C275C +CREATE TABLE t1 AS SELECT REPEAT(' ',10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET gbk NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('à\'),('à\t'); +INSERT INTO t1 VALUES ('\\à\'),('\\à\t'),('\\à\t\t'); +INSERT INTO t1 VALUES ('''à\'),('à\'''); +INSERT INTO t1 VALUES ('\\''à\'),('à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES ('à\'),('à\t'); +INSERT INTO t1 VALUES ('\\à\'),('\\à\t'),('\\à\t\t'); +INSERT INTO t1 VALUES ('''à\'),('à\'''); +INSERT INTO t1 VALUES ('\\''à\'),('à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 
CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET gbk NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (BINARY('à\')),(BINARY('à\t')); +INSERT INTO t1 VALUES (BINARY('\\à\')),(BINARY('\\à\t')),(BINARY('\\à\t\t')); +INSERT INTO t1 VALUES (BINARY('''à\')),(BINARY('à\''')); +INSERT INTO t1 VALUES (BINARY('\\''à\')),(BINARY('à\''\\')); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES (BINARY('à\')),(BINARY('à\t')); +INSERT INTO t1 VALUES (BINARY('\\à\')),(BINARY('\\à\t')),(BINARY('\\à\t\t')); +INSERT INTO t1 VALUES (BINARY('''à\')),(BINARY('à\''')); +INSERT INTO t1 VALUES (BINARY('\\''à\')),(BINARY('à\''\\')); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET gbk NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (_BINARY'à\'),(_BINARY'à\t'); +INSERT INTO t1 VALUES (_BINARY'\\à\'),(_BINARY'\\à\t'),(_BINARY'\\à\t\t'); +INSERT INTO t1 VALUES (_BINARY'''à\'),(_BINARY'à\'''); +INSERT INTO t1 VALUES (_BINARY'\\''à\'),(_BINARY'à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES (_BINARY'à\'),(_BINARY'à\t'); +INSERT INTO t1 VALUES (_BINARY'\\à\'),(_BINARY'\\à\t'),(_BINARY'\\à\t\t'); +INSERT INTO t1 VALUES (_BINARY'''à\'),(_BINARY'à\'''); +INSERT INTO t1 VALUES (_BINARY'\\''à\'),(_BINARY'à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +SET character_set_client=binary, character_set_results=binary; +SELECT @@character_set_client, @@character_set_connection, @@character_set_results; +@@character_set_client @@character_set_connection @@character_set_results +binary gbk binary +SELECT HEX('à\['), HEX('\à\['); +HEX('à\[') HEX('\à\[') +E05B E05B +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET gbk NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('à\['),('\à\['); +SELECT HEX(a) FROM t1; +HEX(a) +E05B +E05B +DROP TABLE t1; +SET character_set_client=@@character_set_connection, character_set_results=@@character_set_connection; +SET character_set_connection=binary; +SELECT @@character_set_client, @@character_set_connection, @@character_set_results; +@@character_set_client @@character_set_connection @@character_set_results +gbk binary gbk +SELECT HEX('à\['), HEX('\à\['); +HEX('à\[') HEX('\à\[') +E05C5B E05B +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(10) NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('à\['),('\à\['); +SELECT HEX(a) FROM t1; +HEX(a) +E05C5B +E05B +DROP TABLE t1; +# Start of ctype_E05C.inc +# +# End of 10.0 tests +# diff --git a/mysql-test/r/ctype_latin1.result b/mysql-test/r/ctype_latin1.result index 
8beb60c368b..fac9824401f 100644 --- a/mysql-test/r/ctype_latin1.result +++ b/mysql-test/r/ctype_latin1.result @@ -4592,3 +4592,3128 @@ set names latin1; # # End of 5.6 tests # +# +# Start of 10.0 tests +# +# Start of ctype_unescape.inc +SET @query=_binary'SELECT CHARSET(\'test\'),@@character_set_client,@@character_set_connection'; +PREPARE stmt FROM @query; +EXECUTE stmt; +CHARSET('test') @@character_set_client @@character_set_connection +latin1 latin1 latin1 +DEALLOCATE PREPARE stmt; +CREATE TABLE allbytes (a VARBINARY(10)); +# Using selected byte combinations +CREATE TABLE halfs (a INT); +INSERT INTO halfs VALUES (0x00),(0x01),(0x02),(0x03),(0x04),(0x05),(0x06),(0x07); +INSERT INTO halfs VALUES (0x08),(0x09),(0x0A),(0x0B),(0x0C),(0x0D),(0x0E),(0x0F); +CREATE TEMPORARY TABLE bytes (a BINARY(1), KEY(a)) ENGINE=MyISAM; +INSERT INTO bytes SELECT CHAR((t1.a << 4) | t2.a USING BINARY) FROM halfs t1, halfs t2; +DROP TABLE halfs; +CREATE TABLE selected_bytes (a VARBINARY(10)); +INSERT INTO selected_bytes (a) VALUES ('\0'),('\b'),('\t'),('\r'),('\n'),('\Z'); +INSERT INTO selected_bytes (a) VALUES ('0'),('b'),('t'),('r'),('n'),('Z'); +INSERT INTO selected_bytes (a) VALUES ('\\'),('_'),('%'),(0x22),(0x27); +INSERT INTO selected_bytes (a) VALUES ('a'); +INSERT INTO selected_bytes (a) VALUES +(0x3F), # 7bit +(0x40), # 7bit mbtail +(0x7E), # 7bit mbtail nonascii-8bit +(0x7F), # 7bit nonascii-8bit +(0x80), # mbtail bad-mb +(0x81), # mbhead mbtail +(0x9F), # mbhead mbtail bad-mb +(0xA0), # mbhead mbtail bad-mb +(0xA1), # mbhead mbtail nonascii-8bit +(0xE0), # mbhead mbtail +(0xEF), # mbhead mbtail +(0xF9), # mbhead mbtail +(0xFA), # mbhead mbtail bad-mb +(0xFC), # mbhead mbtail bad-mb +(0xFD), # mbhead mbtail bad-mb +(0xFE), # mbhead mbtail bad-mb +(0xFF); +INSERT INTO allbytes (a) SELECT a FROM bytes; +INSERT INTO allbytes (a) SELECT CONCAT(t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,0x5C,t2.a) FROM selected_bytes t1,selected_bytes t2; +DROP TABLE selected_bytes; +DELETE FROM allbytes WHERE +OCTET_LENGTH(a)>1 AND +LOCATE(0x5C,a)=0 AND +a NOT LIKE '%\'%' AND + a NOT LIKE '%"%'; +CREATE PROCEDURE p1(val VARBINARY(10)) +BEGIN +DECLARE EXIT HANDLER FOR SQLSTATE '42000' INSERT INTO t1 (a,b) VALUES(val,NULL); +SET @query=CONCAT(_binary"INSERT INTO t1 (a,b) VALUES (0x",HEX(val),",'",val,"')"); +PREPARE stmt FROM @query; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; +END// +CREATE PROCEDURE p2() +BEGIN +DECLARE val VARBINARY(10); +DECLARE done INT DEFAULT FALSE; +DECLARE stmt CURSOR FOR SELECT a FROM allbytes; +DECLARE CONTINUE HANDLER FOR NOT FOUND SET done=TRUE; +OPEN stmt; +read_loop1: LOOP +FETCH stmt INTO val; +IF done THEN +LEAVE read_loop1; +END IF; +CALL p1(val); +END LOOP; +CLOSE stmt; +END// +CREATE FUNCTION iswellformed(a VARBINARY(256)) RETURNS INT RETURN a=BINARY CONVERT(a USING latin1);// +CREATE FUNCTION unescape(a VARBINARY(256)) RETURNS VARBINARY(256) +BEGIN +# We need to do this in a way that avoids producing new escape sequences +# First, enclose all known escape sequences in '{{xx}}' + # - Backslash not followed by the LIKE pattern characters _ and % +# - Double escapes +# This uses PCRE Branch Reset Groups: (?|(alt1)|(alt2)|(alt3)). +# So '\\1' in the last argument always means the match, no matter +# which alternative it came from.
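+# For example, the two-byte sequence '\t' (5C74) matches the first +# alternative and the doubled quote '' (2727) matches the second; +# they become '{{\t}}' and '{{''}}' respectively via the same '\\1'.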
+SET a=REGEXP_REPLACE(a,'(?|(\\\\[^_%])|(\\x{27}\\x{27}))','{{\\1}}'); +# Now unescape all enclosed standard escape sequences +SET a=REPLACE(a,'{{\\0}}', '\0'); +SET a=REPLACE(a,'{{\\b}}', '\b'); +SET a=REPLACE(a,'{{\\t}}', '\t'); +SET a=REPLACE(a,'{{\\r}}', '\r'); +SET a=REPLACE(a,'{{\\n}}', '\n'); +SET a=REPLACE(a,'{{\\Z}}', '\Z'); +SET a=REPLACE(a,'{{\\\'}}', '\''); +# Unescape double quotes +SET a=REPLACE(a,'{{\'\'}}', '\''); + # Unescape the rest: all other \x sequences mean just 'x' + SET a=REGEXP_REPLACE(a, '{{\\\\(.|\\R)}}', '\\1'); + RETURN a; +END// +CREATE FUNCTION unescape_type(a VARBINARY(256),b VARBINARY(256)) RETURNS VARBINARY(256) +BEGIN +RETURN CASE +WHEN b IS NULL THEN '[SyntErr]' + WHEN a=b THEN CASE +WHEN OCTET_LENGTH(a)=1 THEN '[Preserve]' + WHEN a RLIKE '\\\\[_%]' THEN '[Preserve][LIKE]' + WHEN a RLIKE '^[[:ascii:]]+$' THEN '[Preserve][ASCII]' + ELSE '[Preserv][MB]' END +WHEN REPLACE(a,0x5C,'')=b THEN '[Trivial]' + WHEN UNESCAPE(a)=b THEN '[Regular]' + ELSE '[Special]' END; +END// +CREATE FUNCTION wellformedness(a VARBINARY(256), b VARBINARY(256)) +RETURNS VARBINARY(256) +BEGIN +RETURN CASE +WHEN b IS NULL THEN '' + WHEN NOT iswellformed(a) AND iswellformed(b) THEN '[FIXED]' + WHEN iswellformed(a) AND NOT iswellformed(b) THEN '[BROKE]' + WHEN NOT iswellformed(a) AND NOT iswellformed(b) THEN '[ILSEQ]' + ELSE '' + END; +END// +CREATE FUNCTION mysql_real_escape_string_generated(a VARBINARY(256)) +RETURNS VARBINARY(256) +BEGIN +DECLARE a1 BINARY(1) DEFAULT SUBSTR(a,1,1); +DECLARE a2 BINARY(1) DEFAULT SUBSTR(a,2,1); +DECLARE a3 BINARY(1) DEFAULT SUBSTR(a,3,1); +DECLARE a4 BINARY(1) DEFAULT SUBSTR(a,4,1); +DECLARE a2a4 BINARY(2) DEFAULT CONCAT(a2,a4); +RETURN CASE +WHEN (a1=0x5C) AND +(a3=0x5C) AND +(a2>0x7F) AND +(a4 NOT IN ('_','%','0','t','r','n','Z')) AND +iswellformed(a2a4) THEN '[USER]' + ELSE '' + END; +END// +CREATE TABLE t1 (a VARBINARY(10),b VARBINARY(10)); +CALL p2(); +SELECT HEX(a),HEX(b), +CONCAT(unescape_type(a,b), +wellformedness(a,b), +mysql_real_escape_string_generated(a), +IF(UNESCAPE(a)<>b,CONCAT('[BAD',HEX(UNESCAPE(a)),']'),'')) AS comment +FROM t1 ORDER BY LENGTH(a),a; +HEX(a) HEX(b) comment +00 00 [Preserve] +01 01 [Preserve] +02 02 [Preserve] +03 03 [Preserve] +04 04 [Preserve] +05 05 [Preserve] +06 06 [Preserve] +07 07 [Preserve] +08 08 [Preserve] +09 09 [Preserve] +0A 0A [Preserve] +0B 0B [Preserve] +0C 0C [Preserve] +0D 0D [Preserve] +0E 0E [Preserve] +0F 0F [Preserve] +10 10 [Preserve] +11 11 [Preserve] +12 12 [Preserve] +13 13 [Preserve] +14 14 [Preserve] +15 15 [Preserve] +16 16 [Preserve] +17 17 [Preserve] +18 18 [Preserve] +19 19 [Preserve] +1A 1A [Preserve] +1B 1B [Preserve] +1C 1C [Preserve] +1D 1D [Preserve] +1E 1E [Preserve] +1F 1F [Preserve] +20 20 [Preserve] +21 21 [Preserve] +22 22 [Preserve] +23 23 [Preserve] +24 24 [Preserve] +25 25 [Preserve] +26 26 [Preserve] +27 NULL [SyntErr] +28 28 [Preserve] +29 29 [Preserve] +2A 2A [Preserve] +2B 2B [Preserve] +2C 2C [Preserve] +2D 2D [Preserve] +2E 2E [Preserve] +2F 2F [Preserve] +30 30 [Preserve] +31 31 [Preserve] +32 32 [Preserve] +33 33 [Preserve] +34 34 [Preserve] +35 35 [Preserve] +36 36 [Preserve] +37 37 [Preserve] +38 38 [Preserve] +39 39 [Preserve] +3A 3A [Preserve] +3B 3B [Preserve] +3C 3C [Preserve] +3D 3D [Preserve] +3E 3E [Preserve] +3F 3F [Preserve] +40 40 [Preserve] +41 41 [Preserve] +42 42 [Preserve] +43 43 [Preserve] +44 44 [Preserve] +45 45 [Preserve] +46 46 [Preserve] +47 47 [Preserve] +48 48 [Preserve] +49 49 [Preserve] +4A 4A [Preserve] +4B 4B [Preserve] +4C 4C [Preserve] 
+4D 4D [Preserve] +4E 4E [Preserve] +4F 4F [Preserve] +50 50 [Preserve] +51 51 [Preserve] +52 52 [Preserve] +53 53 [Preserve] +54 54 [Preserve] +55 55 [Preserve] +56 56 [Preserve] +57 57 [Preserve] +58 58 [Preserve] +59 59 [Preserve] +5A 5A [Preserve] +5B 5B [Preserve] +5C NULL [SyntErr] +5D 5D [Preserve] +5E 5E [Preserve] +5F 5F [Preserve] +60 60 [Preserve] +61 61 [Preserve] +62 62 [Preserve] +63 63 [Preserve] +64 64 [Preserve] +65 65 [Preserve] +66 66 [Preserve] +67 67 [Preserve] +68 68 [Preserve] +69 69 [Preserve] +6A 6A [Preserve] +6B 6B [Preserve] +6C 6C [Preserve] +6D 6D [Preserve] +6E 6E [Preserve] +6F 6F [Preserve] +70 70 [Preserve] +71 71 [Preserve] +72 72 [Preserve] +73 73 [Preserve] +74 74 [Preserve] +75 75 [Preserve] +76 76 [Preserve] +77 77 [Preserve] +78 78 [Preserve] +79 79 [Preserve] +7A 7A [Preserve] +7B 7B [Preserve] +7C 7C [Preserve] +7D 7D [Preserve] +7E 7E [Preserve] +7F 7F [Preserve] +80 80 [Preserve] +81 81 [Preserve] +82 82 [Preserve] +83 83 [Preserve] +84 84 [Preserve] +85 85 [Preserve] +86 86 [Preserve] +87 87 [Preserve] +88 88 [Preserve] +89 89 [Preserve] +8A 8A [Preserve] +8B 8B [Preserve] +8C 8C [Preserve] +8D 8D [Preserve] +8E 8E [Preserve] +8F 8F [Preserve] +90 90 [Preserve] +91 91 [Preserve] +92 92 [Preserve] +93 93 [Preserve] +94 94 [Preserve] +95 95 [Preserve] +96 96 [Preserve] +97 97 [Preserve] +98 98 [Preserve] +99 99 [Preserve] +9A 9A [Preserve] +9B 9B [Preserve] +9C 9C [Preserve] +9D 9D [Preserve] +9E 9E [Preserve] +9F 9F [Preserve] +A0 A0 [Preserve] +A1 A1 [Preserve] +A2 A2 [Preserve] +A3 A3 [Preserve] +A4 A4 [Preserve] +A5 A5 [Preserve] +A6 A6 [Preserve] +A7 A7 [Preserve] +A8 A8 [Preserve] +A9 A9 [Preserve] +AA AA [Preserve] +AB AB [Preserve] +AC AC [Preserve] +AD AD [Preserve] +AE AE [Preserve] +AF AF [Preserve] +B0 B0 [Preserve] +B1 B1 [Preserve] +B2 B2 [Preserve] +B3 B3 [Preserve] +B4 B4 [Preserve] +B5 B5 [Preserve] +B6 B6 [Preserve] +B7 B7 [Preserve] +B8 B8 [Preserve] +B9 B9 [Preserve] +BA BA [Preserve] +BB BB [Preserve] +BC BC [Preserve] +BD BD [Preserve] +BE BE [Preserve] +BF BF [Preserve] +C0 C0 [Preserve] +C1 C1 [Preserve] +C2 C2 [Preserve] +C3 C3 [Preserve] +C4 C4 [Preserve] +C5 C5 [Preserve] +C6 C6 [Preserve] +C7 C7 [Preserve] +C8 C8 [Preserve] +C9 C9 [Preserve] +CA CA [Preserve] +CB CB [Preserve] +CC CC [Preserve] +CD CD [Preserve] +CE CE [Preserve] +CF CF [Preserve] +D0 D0 [Preserve] +D1 D1 [Preserve] +D2 D2 [Preserve] +D3 D3 [Preserve] +D4 D4 [Preserve] +D5 D5 [Preserve] +D6 D6 [Preserve] +D7 D7 [Preserve] +D8 D8 [Preserve] +D9 D9 [Preserve] +DA DA [Preserve] +DB DB [Preserve] +DC DC [Preserve] +DD DD [Preserve] +DE DE [Preserve] +DF DF [Preserve] +E0 E0 [Preserve] +E1 E1 [Preserve] +E2 E2 [Preserve] +E3 E3 [Preserve] +E4 E4 [Preserve] +E5 E5 [Preserve] +E6 E6 [Preserve] +E7 E7 [Preserve] +E8 E8 [Preserve] +E9 E9 [Preserve] +EA EA [Preserve] +EB EB [Preserve] +EC EC [Preserve] +ED ED [Preserve] +EE EE [Preserve] +EF EF [Preserve] +F0 F0 [Preserve] +F1 F1 [Preserve] +F2 F2 [Preserve] +F3 F3 [Preserve] +F4 F4 [Preserve] +F5 F5 [Preserve] +F6 F6 [Preserve] +F7 F7 [Preserve] +F8 F8 [Preserve] +F9 F9 [Preserve] +FA FA [Preserve] +FB FB [Preserve] +FC FC [Preserve] +FD FD [Preserve] +FE FE [Preserve] +FF FF [Preserve] +0022 0022 [Preserve][ASCII] +0027 NULL [SyntErr] +005C NULL [SyntErr] +0822 0822 [Preserve][ASCII] +0827 NULL [SyntErr] +085C NULL [SyntErr] +0922 0922 [Preserve][ASCII] +0927 NULL [SyntErr] +095C NULL [SyntErr] +0A22 0A22 [Preserve][ASCII] +0A27 NULL [SyntErr] +0A5C NULL [SyntErr] +0D22 0D22 [Preserve][ASCII] +0D27 NULL 
[SyntErr] +0D5C NULL [SyntErr] +1A22 1A22 [Preserve][ASCII] +1A27 NULL [SyntErr] +1A5C NULL [SyntErr] +2200 2200 [Preserve][ASCII] +2208 2208 [Preserve][ASCII] +2209 2209 [Preserve][ASCII] +220A 220A [Preserve][ASCII] +220D 220D [Preserve][ASCII] +221A 221A [Preserve][ASCII] +2222 2222 [Preserve][ASCII] +2225 2225 [Preserve][ASCII] +2227 NULL [SyntErr] +2230 2230 [Preserve][ASCII] +223F 223F [Preserve][ASCII] +2240 2240 [Preserve][ASCII] +225A 225A [Preserve][ASCII] +225C NULL [SyntErr] +225F 225F [Preserve][ASCII] +2261 2261 [Preserve][ASCII] +2262 2262 [Preserve][ASCII] +226E 226E [Preserve][ASCII] +2272 2272 [Preserve][ASCII] +2274 2274 [Preserve][ASCII] +227E 227E [Preserve][ASCII] +227F 227F [Preserve][ASCII] +2280 2280 [Preserv][MB] +2281 2281 [Preserv][MB] +229F 229F [Preserv][MB] +22A0 22A0 [Preserv][MB] +22A1 22A1 [Preserv][MB] +22E0 22E0 [Preserv][MB] +22EF 22EF [Preserv][MB] +22F9 22F9 [Preserv][MB] +22FA 22FA [Preserv][MB] +22FC 22FC [Preserv][MB] +22FD 22FD [Preserv][MB] +22FE 22FE [Preserv][MB] +22FF 22FF [Preserv][MB] +2522 2522 [Preserve][ASCII] +2527 NULL [SyntErr] +255C NULL [SyntErr] +2700 NULL [SyntErr] +2708 NULL [SyntErr] +2709 NULL [SyntErr] +270A NULL [SyntErr] +270D NULL [SyntErr] +271A NULL [SyntErr] +2722 NULL [SyntErr] +2725 NULL [SyntErr] +2727 27 [Regular] +2730 NULL [SyntErr] +273F NULL [SyntErr] +2740 NULL [SyntErr] +275A NULL [SyntErr] +275C NULL [SyntErr] +275F NULL [SyntErr] +2761 NULL [SyntErr] +2762 NULL [SyntErr] +276E NULL [SyntErr] +2772 NULL [SyntErr] +2774 NULL [SyntErr] +277E NULL [SyntErr] +277F NULL [SyntErr] +2780 NULL [SyntErr] +2781 NULL [SyntErr] +279F NULL [SyntErr] +27A0 NULL [SyntErr] +27A1 NULL [SyntErr] +27E0 NULL [SyntErr] +27EF NULL [SyntErr] +27F9 NULL [SyntErr] +27FA NULL [SyntErr] +27FC NULL [SyntErr] +27FD NULL [SyntErr] +27FE NULL [SyntErr] +27FF NULL [SyntErr] +3022 3022 [Preserve][ASCII] +3027 NULL [SyntErr] +305C NULL [SyntErr] +3F22 3F22 [Preserve][ASCII] +3F27 NULL [SyntErr] +3F5C NULL [SyntErr] +4022 4022 [Preserve][ASCII] +4027 NULL [SyntErr] +405C NULL [SyntErr] +5A22 5A22 [Preserve][ASCII] +5A27 NULL [SyntErr] +5A5C NULL [SyntErr] +5C00 00 [Trivial] +5C08 08 [Trivial] +5C09 09 [Trivial] +5C0A 0A [Trivial] +5C0D 0D [Trivial] +5C1A 1A [Trivial] +5C22 22 [Trivial] +5C25 5C25 [Preserve][LIKE] +5C27 27 [Trivial] +5C30 00 [Regular] +5C3F 3F [Trivial] +5C40 40 [Trivial] +5C5A 1A [Regular] +5C5C 5C [Regular] +5C5F 5C5F [Preserve][LIKE] +5C61 61 [Trivial] +5C62 08 [Regular] +5C6E 0A [Regular] +5C72 0D [Regular] +5C74 09 [Regular] +5C7E 7E [Trivial] +5C7F 7F [Trivial] +5C80 80 [Trivial] +5C81 81 [Trivial] +5C9F 9F [Trivial] +5CA0 A0 [Trivial] +5CA1 A1 [Trivial] +5CE0 E0 [Trivial] +5CEF EF [Trivial] +5CF9 F9 [Trivial] +5CFA FA [Trivial] +5CFC FC [Trivial] +5CFD FD [Trivial] +5CFE FE [Trivial] +5CFF FF [Trivial] +5F22 5F22 [Preserve][ASCII] +5F27 NULL [SyntErr] +5F5C NULL [SyntErr] +6122 6122 [Preserve][ASCII] +6127 NULL [SyntErr] +615C NULL [SyntErr] +6222 6222 [Preserve][ASCII] +6227 NULL [SyntErr] +625C NULL [SyntErr] +6E22 6E22 [Preserve][ASCII] +6E27 NULL [SyntErr] +6E5C NULL [SyntErr] +7222 7222 [Preserve][ASCII] +7227 NULL [SyntErr] +725C NULL [SyntErr] +7422 7422 [Preserve][ASCII] +7427 NULL [SyntErr] +745C NULL [SyntErr] +7E22 7E22 [Preserve][ASCII] +7E27 NULL [SyntErr] +7E5C NULL [SyntErr] +7F22 7F22 [Preserve][ASCII] +7F27 NULL [SyntErr] +7F5C NULL [SyntErr] +8022 8022 [Preserv][MB] +8027 NULL [SyntErr] +805C NULL [SyntErr] +8122 8122 [Preserv][MB] +8127 NULL [SyntErr] +815C NULL [SyntErr] +9F22 9F22 [Preserv][MB] +9F27 
NULL [SyntErr] +9F5C NULL [SyntErr] +A022 A022 [Preserv][MB] +A027 NULL [SyntErr] +A05C NULL [SyntErr] +A122 A122 [Preserv][MB] +A127 NULL [SyntErr] +A15C NULL [SyntErr] +E022 E022 [Preserv][MB] +E027 NULL [SyntErr] +E05C NULL [SyntErr] +EF22 EF22 [Preserv][MB] +EF27 NULL [SyntErr] +EF5C NULL [SyntErr] +F922 F922 [Preserv][MB] +F927 NULL [SyntErr] +F95C NULL [SyntErr] +FA22 FA22 [Preserv][MB] +FA27 NULL [SyntErr] +FA5C NULL [SyntErr] +FC22 FC22 [Preserv][MB] +FC27 NULL [SyntErr] +FC5C NULL [SyntErr] +FD22 FD22 [Preserv][MB] +FD27 NULL [SyntErr] +FD5C NULL [SyntErr] +FE22 FE22 [Preserv][MB] +FE27 NULL [SyntErr] +FE5C NULL [SyntErr] +FF22 FF22 [Preserv][MB] +FF27 NULL [SyntErr] +FF5C NULL [SyntErr] +5C0000 0000 [Trivial] +5C0008 0008 [Trivial] +5C0009 0009 [Trivial] +5C000A 000A [Trivial] +5C000D 000D [Trivial] +5C001A 001A [Trivial] +5C0022 0022 [Trivial] +5C0025 0025 [Trivial] +5C0027 NULL [SyntErr] +5C0030 0030 [Trivial] +5C003F 003F [Trivial] +5C0040 0040 [Trivial] +5C005A 005A [Trivial] +5C005C NULL [SyntErr] +5C005F 005F [Trivial] +5C0061 0061 [Trivial] +5C0062 0062 [Trivial] +5C006E 006E [Trivial] +5C0072 0072 [Trivial] +5C0074 0074 [Trivial] +5C007E 007E [Trivial] +5C007F 007F [Trivial] +5C0080 0080 [Trivial] +5C0081 0081 [Trivial] +5C009F 009F [Trivial] +5C00A0 00A0 [Trivial] +5C00A1 00A1 [Trivial] +5C00E0 00E0 [Trivial] +5C00EF 00EF [Trivial] +5C00F9 00F9 [Trivial] +5C00FA 00FA [Trivial] +5C00FC 00FC [Trivial] +5C00FD 00FD [Trivial] +5C00FE 00FE [Trivial] +5C00FF 00FF [Trivial] +5C0800 0800 [Trivial] +5C0808 0808 [Trivial] +5C0809 0809 [Trivial] +5C080A 080A [Trivial] +5C080D 080D [Trivial] +5C081A 081A [Trivial] +5C0822 0822 [Trivial] +5C0825 0825 [Trivial] +5C0827 NULL [SyntErr] +5C0830 0830 [Trivial] +5C083F 083F [Trivial] +5C0840 0840 [Trivial] +5C085A 085A [Trivial] +5C085C NULL [SyntErr] +5C085F 085F [Trivial] +5C0861 0861 [Trivial] +5C0862 0862 [Trivial] +5C086E 086E [Trivial] +5C0872 0872 [Trivial] +5C0874 0874 [Trivial] +5C087E 087E [Trivial] +5C087F 087F [Trivial] +5C0880 0880 [Trivial] +5C0881 0881 [Trivial] +5C089F 089F [Trivial] +5C08A0 08A0 [Trivial] +5C08A1 08A1 [Trivial] +5C08E0 08E0 [Trivial] +5C08EF 08EF [Trivial] +5C08F9 08F9 [Trivial] +5C08FA 08FA [Trivial] +5C08FC 08FC [Trivial] +5C08FD 08FD [Trivial] +5C08FE 08FE [Trivial] +5C08FF 08FF [Trivial] +5C0900 0900 [Trivial] +5C0908 0908 [Trivial] +5C0909 0909 [Trivial] +5C090A 090A [Trivial] +5C090D 090D [Trivial] +5C091A 091A [Trivial] +5C0922 0922 [Trivial] +5C0925 0925 [Trivial] +5C0927 NULL [SyntErr] +5C0930 0930 [Trivial] +5C093F 093F [Trivial] +5C0940 0940 [Trivial] +5C095A 095A [Trivial] +5C095C NULL [SyntErr] +5C095F 095F [Trivial] +5C0961 0961 [Trivial] +5C0962 0962 [Trivial] +5C096E 096E [Trivial] +5C0972 0972 [Trivial] +5C0974 0974 [Trivial] +5C097E 097E [Trivial] +5C097F 097F [Trivial] +5C0980 0980 [Trivial] +5C0981 0981 [Trivial] +5C099F 099F [Trivial] +5C09A0 09A0 [Trivial] +5C09A1 09A1 [Trivial] +5C09E0 09E0 [Trivial] +5C09EF 09EF [Trivial] +5C09F9 09F9 [Trivial] +5C09FA 09FA [Trivial] +5C09FC 09FC [Trivial] +5C09FD 09FD [Trivial] +5C09FE 09FE [Trivial] +5C09FF 09FF [Trivial] +5C0A00 0A00 [Trivial] +5C0A08 0A08 [Trivial] +5C0A09 0A09 [Trivial] +5C0A0A 0A0A [Trivial] +5C0A0D 0A0D [Trivial] +5C0A1A 0A1A [Trivial] +5C0A22 0A22 [Trivial] +5C0A25 0A25 [Trivial] +5C0A27 NULL [SyntErr] +5C0A30 0A30 [Trivial] +5C0A3F 0A3F [Trivial] +5C0A40 0A40 [Trivial] +5C0A5A 0A5A [Trivial] +5C0A5C NULL [SyntErr] +5C0A5F 0A5F [Trivial] +5C0A61 0A61 [Trivial] +5C0A62 0A62 [Trivial] +5C0A6E 0A6E [Trivial] +5C0A72 0A72 
[Trivial] +5C0A74 0A74 [Trivial] +5C0A7E 0A7E [Trivial] +5C0A7F 0A7F [Trivial] +5C0A80 0A80 [Trivial] +5C0A81 0A81 [Trivial] +5C0A9F 0A9F [Trivial] +5C0AA0 0AA0 [Trivial] +5C0AA1 0AA1 [Trivial] +5C0AE0 0AE0 [Trivial] +5C0AEF 0AEF [Trivial] +5C0AF9 0AF9 [Trivial] +5C0AFA 0AFA [Trivial] +5C0AFC 0AFC [Trivial] +5C0AFD 0AFD [Trivial] +5C0AFE 0AFE [Trivial] +5C0AFF 0AFF [Trivial] +5C0D00 0D00 [Trivial] +5C0D08 0D08 [Trivial] +5C0D09 0D09 [Trivial] +5C0D0A 0D0A [Trivial] +5C0D0D 0D0D [Trivial] +5C0D1A 0D1A [Trivial] +5C0D22 0D22 [Trivial] +5C0D25 0D25 [Trivial] +5C0D27 NULL [SyntErr] +5C0D30 0D30 [Trivial] +5C0D3F 0D3F [Trivial] +5C0D40 0D40 [Trivial] +5C0D5A 0D5A [Trivial] +5C0D5C NULL [SyntErr] +5C0D5F 0D5F [Trivial] +5C0D61 0D61 [Trivial] +5C0D62 0D62 [Trivial] +5C0D6E 0D6E [Trivial] +5C0D72 0D72 [Trivial] +5C0D74 0D74 [Trivial] +5C0D7E 0D7E [Trivial] +5C0D7F 0D7F [Trivial] +5C0D80 0D80 [Trivial] +5C0D81 0D81 [Trivial] +5C0D9F 0D9F [Trivial] +5C0DA0 0DA0 [Trivial] +5C0DA1 0DA1 [Trivial] +5C0DE0 0DE0 [Trivial] +5C0DEF 0DEF [Trivial] +5C0DF9 0DF9 [Trivial] +5C0DFA 0DFA [Trivial] +5C0DFC 0DFC [Trivial] +5C0DFD 0DFD [Trivial] +5C0DFE 0DFE [Trivial] +5C0DFF 0DFF [Trivial] +5C1A00 1A00 [Trivial] +5C1A08 1A08 [Trivial] +5C1A09 1A09 [Trivial] +5C1A0A 1A0A [Trivial] +5C1A0D 1A0D [Trivial] +5C1A1A 1A1A [Trivial] +5C1A22 1A22 [Trivial] +5C1A25 1A25 [Trivial] +5C1A27 NULL [SyntErr] +5C1A30 1A30 [Trivial] +5C1A3F 1A3F [Trivial] +5C1A40 1A40 [Trivial] +5C1A5A 1A5A [Trivial] +5C1A5C NULL [SyntErr] +5C1A5F 1A5F [Trivial] +5C1A61 1A61 [Trivial] +5C1A62 1A62 [Trivial] +5C1A6E 1A6E [Trivial] +5C1A72 1A72 [Trivial] +5C1A74 1A74 [Trivial] +5C1A7E 1A7E [Trivial] +5C1A7F 1A7F [Trivial] +5C1A80 1A80 [Trivial] +5C1A81 1A81 [Trivial] +5C1A9F 1A9F [Trivial] +5C1AA0 1AA0 [Trivial] +5C1AA1 1AA1 [Trivial] +5C1AE0 1AE0 [Trivial] +5C1AEF 1AEF [Trivial] +5C1AF9 1AF9 [Trivial] +5C1AFA 1AFA [Trivial] +5C1AFC 1AFC [Trivial] +5C1AFD 1AFD [Trivial] +5C1AFE 1AFE [Trivial] +5C1AFF 1AFF [Trivial] +5C2200 2200 [Trivial] +5C2208 2208 [Trivial] +5C2209 2209 [Trivial] +5C220A 220A [Trivial] +5C220D 220D [Trivial] +5C221A 221A [Trivial] +5C2222 2222 [Trivial] +5C2225 2225 [Trivial] +5C2227 NULL [SyntErr] +5C2230 2230 [Trivial] +5C223F 223F [Trivial] +5C2240 2240 [Trivial] +5C225A 225A [Trivial] +5C225C NULL [SyntErr] +5C225F 225F [Trivial] +5C2261 2261 [Trivial] +5C2262 2262 [Trivial] +5C226E 226E [Trivial] +5C2272 2272 [Trivial] +5C2274 2274 [Trivial] +5C227E 227E [Trivial] +5C227F 227F [Trivial] +5C2280 2280 [Trivial] +5C2281 2281 [Trivial] +5C229F 229F [Trivial] +5C22A0 22A0 [Trivial] +5C22A1 22A1 [Trivial] +5C22E0 22E0 [Trivial] +5C22EF 22EF [Trivial] +5C22F9 22F9 [Trivial] +5C22FA 22FA [Trivial] +5C22FC 22FC [Trivial] +5C22FD 22FD [Trivial] +5C22FE 22FE [Trivial] +5C22FF 22FF [Trivial] +5C2500 5C2500 [Preserve][LIKE] +5C2508 5C2508 [Preserve][LIKE] +5C2509 5C2509 [Preserve][LIKE] +5C250A 5C250A [Preserve][LIKE] +5C250D 5C250D [Preserve][LIKE] +5C251A 5C251A [Preserve][LIKE] +5C2522 5C2522 [Preserve][LIKE] +5C2525 5C2525 [Preserve][LIKE] +5C2527 NULL [SyntErr] +5C2530 5C2530 [Preserve][LIKE] +5C253F 5C253F [Preserve][LIKE] +5C2540 5C2540 [Preserve][LIKE] +5C255A 5C255A [Preserve][LIKE] +5C255C NULL [SyntErr] +5C255F 5C255F [Preserve][LIKE] +5C2561 5C2561 [Preserve][LIKE] +5C2562 5C2562 [Preserve][LIKE] +5C256E 5C256E [Preserve][LIKE] +5C2572 5C2572 [Preserve][LIKE] +5C2574 5C2574 [Preserve][LIKE] +5C257E 5C257E [Preserve][LIKE] +5C257F 5C257F [Preserve][LIKE] +5C2580 5C2580 [Preserve][LIKE] +5C2581 5C2581 [Preserve][LIKE] +5C259F 
5C259F [Preserve][LIKE] +5C25A0 5C25A0 [Preserve][LIKE] +5C25A1 5C25A1 [Preserve][LIKE] +5C25E0 5C25E0 [Preserve][LIKE] +5C25EF 5C25EF [Preserve][LIKE] +5C25F9 5C25F9 [Preserve][LIKE] +5C25FA 5C25FA [Preserve][LIKE] +5C25FC 5C25FC [Preserve][LIKE] +5C25FD 5C25FD [Preserve][LIKE] +5C25FE 5C25FE [Preserve][LIKE] +5C25FF 5C25FF [Preserve][LIKE] +5C2700 2700 [Trivial] +5C2708 2708 [Trivial] +5C2709 2709 [Trivial] +5C270A 270A [Trivial] +5C270D 270D [Trivial] +5C271A 271A [Trivial] +5C2722 2722 [Trivial] +5C2725 2725 [Trivial] +5C2727 NULL [SyntErr] +5C2730 2730 [Trivial] +5C273F 273F [Trivial] +5C2740 2740 [Trivial] +5C275A 275A [Trivial] +5C275C NULL [SyntErr] +5C275F 275F [Trivial] +5C2761 2761 [Trivial] +5C2762 2762 [Trivial] +5C276E 276E [Trivial] +5C2772 2772 [Trivial] +5C2774 2774 [Trivial] +5C277E 277E [Trivial] +5C277F 277F [Trivial] +5C2780 2780 [Trivial] +5C2781 2781 [Trivial] +5C279F 279F [Trivial] +5C27A0 27A0 [Trivial] +5C27A1 27A1 [Trivial] +5C27E0 27E0 [Trivial] +5C27EF 27EF [Trivial] +5C27F9 27F9 [Trivial] +5C27FA 27FA [Trivial] +5C27FC 27FC [Trivial] +5C27FD 27FD [Trivial] +5C27FE 27FE [Trivial] +5C27FF 27FF [Trivial] +5C3000 0000 [Regular] +5C3008 0008 [Regular] +5C3009 0009 [Regular] +5C300A 000A [Regular] +5C300D 000D [Regular] +5C301A 001A [Regular] +5C3022 0022 [Regular] +5C3025 0025 [Regular] +5C3027 NULL [SyntErr] +5C3030 0030 [Regular] +5C303F 003F [Regular] +5C3040 0040 [Regular] +5C305A 005A [Regular] +5C305C NULL [SyntErr] +5C305F 005F [Regular] +5C3061 0061 [Regular] +5C3062 0062 [Regular] +5C306E 006E [Regular] +5C3072 0072 [Regular] +5C3074 0074 [Regular] +5C307E 007E [Regular] +5C307F 007F [Regular] +5C3080 0080 [Regular] +5C3081 0081 [Regular] +5C309F 009F [Regular] +5C30A0 00A0 [Regular] +5C30A1 00A1 [Regular] +5C30E0 00E0 [Regular] +5C30EF 00EF [Regular] +5C30F9 00F9 [Regular] +5C30FA 00FA [Regular] +5C30FC 00FC [Regular] +5C30FD 00FD [Regular] +5C30FE 00FE [Regular] +5C30FF 00FF [Regular] +5C3F00 3F00 [Trivial] +5C3F08 3F08 [Trivial] +5C3F09 3F09 [Trivial] +5C3F0A 3F0A [Trivial] +5C3F0D 3F0D [Trivial] +5C3F1A 3F1A [Trivial] +5C3F22 3F22 [Trivial] +5C3F25 3F25 [Trivial] +5C3F27 NULL [SyntErr] +5C3F30 3F30 [Trivial] +5C3F3F 3F3F [Trivial] +5C3F40 3F40 [Trivial] +5C3F5A 3F5A [Trivial] +5C3F5C NULL [SyntErr] +5C3F5F 3F5F [Trivial] +5C3F61 3F61 [Trivial] +5C3F62 3F62 [Trivial] +5C3F6E 3F6E [Trivial] +5C3F72 3F72 [Trivial] +5C3F74 3F74 [Trivial] +5C3F7E 3F7E [Trivial] +5C3F7F 3F7F [Trivial] +5C3F80 3F80 [Trivial] +5C3F81 3F81 [Trivial] +5C3F9F 3F9F [Trivial] +5C3FA0 3FA0 [Trivial] +5C3FA1 3FA1 [Trivial] +5C3FE0 3FE0 [Trivial] +5C3FEF 3FEF [Trivial] +5C3FF9 3FF9 [Trivial] +5C3FFA 3FFA [Trivial] +5C3FFC 3FFC [Trivial] +5C3FFD 3FFD [Trivial] +5C3FFE 3FFE [Trivial] +5C3FFF 3FFF [Trivial] +5C4000 4000 [Trivial] +5C4008 4008 [Trivial] +5C4009 4009 [Trivial] +5C400A 400A [Trivial] +5C400D 400D [Trivial] +5C401A 401A [Trivial] +5C4022 4022 [Trivial] +5C4025 4025 [Trivial] +5C4027 NULL [SyntErr] +5C4030 4030 [Trivial] +5C403F 403F [Trivial] +5C4040 4040 [Trivial] +5C405A 405A [Trivial] +5C405C NULL [SyntErr] +5C405F 405F [Trivial] +5C4061 4061 [Trivial] +5C4062 4062 [Trivial] +5C406E 406E [Trivial] +5C4072 4072 [Trivial] +5C4074 4074 [Trivial] +5C407E 407E [Trivial] +5C407F 407F [Trivial] +5C4080 4080 [Trivial] +5C4081 4081 [Trivial] +5C409F 409F [Trivial] +5C40A0 40A0 [Trivial] +5C40A1 40A1 [Trivial] +5C40E0 40E0 [Trivial] +5C40EF 40EF [Trivial] +5C40F9 40F9 [Trivial] +5C40FA 40FA [Trivial] +5C40FC 40FC [Trivial] +5C40FD 40FD [Trivial] +5C40FE 40FE [Trivial] +5C40FF 40FF 
[Trivial] +5C5A00 1A00 [Regular] +5C5A08 1A08 [Regular] +5C5A09 1A09 [Regular] +5C5A0A 1A0A [Regular] +5C5A0D 1A0D [Regular] +5C5A1A 1A1A [Regular] +5C5A22 1A22 [Regular] +5C5A25 1A25 [Regular] +5C5A27 NULL [SyntErr] +5C5A30 1A30 [Regular] +5C5A3F 1A3F [Regular] +5C5A40 1A40 [Regular] +5C5A5A 1A5A [Regular] +5C5A5C NULL [SyntErr] +5C5A5F 1A5F [Regular] +5C5A61 1A61 [Regular] +5C5A62 1A62 [Regular] +5C5A6E 1A6E [Regular] +5C5A72 1A72 [Regular] +5C5A74 1A74 [Regular] +5C5A7E 1A7E [Regular] +5C5A7F 1A7F [Regular] +5C5A80 1A80 [Regular] +5C5A81 1A81 [Regular] +5C5A9F 1A9F [Regular] +5C5AA0 1AA0 [Regular] +5C5AA1 1AA1 [Regular] +5C5AE0 1AE0 [Regular] +5C5AEF 1AEF [Regular] +5C5AF9 1AF9 [Regular] +5C5AFA 1AFA [Regular] +5C5AFC 1AFC [Regular] +5C5AFD 1AFD [Regular] +5C5AFE 1AFE [Regular] +5C5AFF 1AFF [Regular] +5C5C00 5C00 [Regular] +5C5C08 5C08 [Regular] +5C5C09 5C09 [Regular] +5C5C0A 5C0A [Regular] +5C5C0D 5C0D [Regular] +5C5C1A 5C1A [Regular] +5C5C22 5C22 [Regular] +5C5C25 5C25 [Regular] +5C5C27 NULL [SyntErr] +5C5C30 5C30 [Regular] +5C5C3F 5C3F [Regular] +5C5C40 5C40 [Regular] +5C5C5A 5C5A [Regular] +5C5C5C NULL [SyntErr] +5C5C5F 5C5F [Regular] +5C5C61 5C61 [Regular] +5C5C62 5C62 [Regular] +5C5C6E 5C6E [Regular] +5C5C72 5C72 [Regular] +5C5C74 5C74 [Regular] +5C5C7E 5C7E [Regular] +5C5C7F 5C7F [Regular] +5C5C80 5C80 [Regular] +5C5C81 5C81 [Regular] +5C5C9F 5C9F [Regular] +5C5CA0 5CA0 [Regular] +5C5CA1 5CA1 [Regular] +5C5CE0 5CE0 [Regular] +5C5CEF 5CEF [Regular] +5C5CF9 5CF9 [Regular] +5C5CFA 5CFA [Regular] +5C5CFC 5CFC [Regular] +5C5CFD 5CFD [Regular] +5C5CFE 5CFE [Regular] +5C5CFF 5CFF [Regular] +5C5F00 5C5F00 [Preserve][LIKE] +5C5F08 5C5F08 [Preserve][LIKE] +5C5F09 5C5F09 [Preserve][LIKE] +5C5F0A 5C5F0A [Preserve][LIKE] +5C5F0D 5C5F0D [Preserve][LIKE] +5C5F1A 5C5F1A [Preserve][LIKE] +5C5F22 5C5F22 [Preserve][LIKE] +5C5F25 5C5F25 [Preserve][LIKE] +5C5F27 NULL [SyntErr] +5C5F30 5C5F30 [Preserve][LIKE] +5C5F3F 5C5F3F [Preserve][LIKE] +5C5F40 5C5F40 [Preserve][LIKE] +5C5F5A 5C5F5A [Preserve][LIKE] +5C5F5C NULL [SyntErr] +5C5F5F 5C5F5F [Preserve][LIKE] +5C5F61 5C5F61 [Preserve][LIKE] +5C5F62 5C5F62 [Preserve][LIKE] +5C5F6E 5C5F6E [Preserve][LIKE] +5C5F72 5C5F72 [Preserve][LIKE] +5C5F74 5C5F74 [Preserve][LIKE] +5C5F7E 5C5F7E [Preserve][LIKE] +5C5F7F 5C5F7F [Preserve][LIKE] +5C5F80 5C5F80 [Preserve][LIKE] +5C5F81 5C5F81 [Preserve][LIKE] +5C5F9F 5C5F9F [Preserve][LIKE] +5C5FA0 5C5FA0 [Preserve][LIKE] +5C5FA1 5C5FA1 [Preserve][LIKE] +5C5FE0 5C5FE0 [Preserve][LIKE] +5C5FEF 5C5FEF [Preserve][LIKE] +5C5FF9 5C5FF9 [Preserve][LIKE] +5C5FFA 5C5FFA [Preserve][LIKE] +5C5FFC 5C5FFC [Preserve][LIKE] +5C5FFD 5C5FFD [Preserve][LIKE] +5C5FFE 5C5FFE [Preserve][LIKE] +5C5FFF 5C5FFF [Preserve][LIKE] +5C6100 6100 [Trivial] +5C6108 6108 [Trivial] +5C6109 6109 [Trivial] +5C610A 610A [Trivial] +5C610D 610D [Trivial] +5C611A 611A [Trivial] +5C6122 6122 [Trivial] +5C6125 6125 [Trivial] +5C6127 NULL [SyntErr] +5C6130 6130 [Trivial] +5C613F 613F [Trivial] +5C6140 6140 [Trivial] +5C615A 615A [Trivial] +5C615C NULL [SyntErr] +5C615F 615F [Trivial] +5C6161 6161 [Trivial] +5C6162 6162 [Trivial] +5C616E 616E [Trivial] +5C6172 6172 [Trivial] +5C6174 6174 [Trivial] +5C617E 617E [Trivial] +5C617F 617F [Trivial] +5C6180 6180 [Trivial] +5C6181 6181 [Trivial] +5C619F 619F [Trivial] +5C61A0 61A0 [Trivial] +5C61A1 61A1 [Trivial] +5C61E0 61E0 [Trivial] +5C61EF 61EF [Trivial] +5C61F9 61F9 [Trivial] +5C61FA 61FA [Trivial] +5C61FC 61FC [Trivial] +5C61FD 61FD [Trivial] +5C61FE 61FE [Trivial] +5C61FF 61FF [Trivial] +5C6200 0800 [Regular] 
+5C6208 0808 [Regular] +5C6209 0809 [Regular] +5C620A 080A [Regular] +5C620D 080D [Regular] +5C621A 081A [Regular] +5C6222 0822 [Regular] +5C6225 0825 [Regular] +5C6227 NULL [SyntErr] +5C6230 0830 [Regular] +5C623F 083F [Regular] +5C6240 0840 [Regular] +5C625A 085A [Regular] +5C625C NULL [SyntErr] +5C625F 085F [Regular] +5C6261 0861 [Regular] +5C6262 0862 [Regular] +5C626E 086E [Regular] +5C6272 0872 [Regular] +5C6274 0874 [Regular] +5C627E 087E [Regular] +5C627F 087F [Regular] +5C6280 0880 [Regular] +5C6281 0881 [Regular] +5C629F 089F [Regular] +5C62A0 08A0 [Regular] +5C62A1 08A1 [Regular] +5C62E0 08E0 [Regular] +5C62EF 08EF [Regular] +5C62F9 08F9 [Regular] +5C62FA 08FA [Regular] +5C62FC 08FC [Regular] +5C62FD 08FD [Regular] +5C62FE 08FE [Regular] +5C62FF 08FF [Regular] +5C6E00 0A00 [Regular] +5C6E08 0A08 [Regular] +5C6E09 0A09 [Regular] +5C6E0A 0A0A [Regular] +5C6E0D 0A0D [Regular] +5C6E1A 0A1A [Regular] +5C6E22 0A22 [Regular] +5C6E25 0A25 [Regular] +5C6E27 NULL [SyntErr] +5C6E30 0A30 [Regular] +5C6E3F 0A3F [Regular] +5C6E40 0A40 [Regular] +5C6E5A 0A5A [Regular] +5C6E5C NULL [SyntErr] +5C6E5F 0A5F [Regular] +5C6E61 0A61 [Regular] +5C6E62 0A62 [Regular] +5C6E6E 0A6E [Regular] +5C6E72 0A72 [Regular] +5C6E74 0A74 [Regular] +5C6E7E 0A7E [Regular] +5C6E7F 0A7F [Regular] +5C6E80 0A80 [Regular] +5C6E81 0A81 [Regular] +5C6E9F 0A9F [Regular] +5C6EA0 0AA0 [Regular] +5C6EA1 0AA1 [Regular] +5C6EE0 0AE0 [Regular] +5C6EEF 0AEF [Regular] +5C6EF9 0AF9 [Regular] +5C6EFA 0AFA [Regular] +5C6EFC 0AFC [Regular] +5C6EFD 0AFD [Regular] +5C6EFE 0AFE [Regular] +5C6EFF 0AFF [Regular] +5C7200 0D00 [Regular] +5C7208 0D08 [Regular] +5C7209 0D09 [Regular] +5C720A 0D0A [Regular] +5C720D 0D0D [Regular] +5C721A 0D1A [Regular] +5C7222 0D22 [Regular] +5C7225 0D25 [Regular] +5C7227 NULL [SyntErr] +5C7230 0D30 [Regular] +5C723F 0D3F [Regular] +5C7240 0D40 [Regular] +5C725A 0D5A [Regular] +5C725C NULL [SyntErr] +5C725F 0D5F [Regular] +5C7261 0D61 [Regular] +5C7262 0D62 [Regular] +5C726E 0D6E [Regular] +5C7272 0D72 [Regular] +5C7274 0D74 [Regular] +5C727E 0D7E [Regular] +5C727F 0D7F [Regular] +5C7280 0D80 [Regular] +5C7281 0D81 [Regular] +5C729F 0D9F [Regular] +5C72A0 0DA0 [Regular] +5C72A1 0DA1 [Regular] +5C72E0 0DE0 [Regular] +5C72EF 0DEF [Regular] +5C72F9 0DF9 [Regular] +5C72FA 0DFA [Regular] +5C72FC 0DFC [Regular] +5C72FD 0DFD [Regular] +5C72FE 0DFE [Regular] +5C72FF 0DFF [Regular] +5C7400 0900 [Regular] +5C7408 0908 [Regular] +5C7409 0909 [Regular] +5C740A 090A [Regular] +5C740D 090D [Regular] +5C741A 091A [Regular] +5C7422 0922 [Regular] +5C7425 0925 [Regular] +5C7427 NULL [SyntErr] +5C7430 0930 [Regular] +5C743F 093F [Regular] +5C7440 0940 [Regular] +5C745A 095A [Regular] +5C745C NULL [SyntErr] +5C745F 095F [Regular] +5C7461 0961 [Regular] +5C7462 0962 [Regular] +5C746E 096E [Regular] +5C7472 0972 [Regular] +5C7474 0974 [Regular] +5C747E 097E [Regular] +5C747F 097F [Regular] +5C7480 0980 [Regular] +5C7481 0981 [Regular] +5C749F 099F [Regular] +5C74A0 09A0 [Regular] +5C74A1 09A1 [Regular] +5C74E0 09E0 [Regular] +5C74EF 09EF [Regular] +5C74F9 09F9 [Regular] +5C74FA 09FA [Regular] +5C74FC 09FC [Regular] +5C74FD 09FD [Regular] +5C74FE 09FE [Regular] +5C74FF 09FF [Regular] +5C7E00 7E00 [Trivial] +5C7E08 7E08 [Trivial] +5C7E09 7E09 [Trivial] +5C7E0A 7E0A [Trivial] +5C7E0D 7E0D [Trivial] +5C7E1A 7E1A [Trivial] +5C7E22 7E22 [Trivial] +5C7E25 7E25 [Trivial] +5C7E27 NULL [SyntErr] +5C7E30 7E30 [Trivial] +5C7E3F 7E3F [Trivial] +5C7E40 7E40 [Trivial] +5C7E5A 7E5A [Trivial] +5C7E5C NULL [SyntErr] +5C7E5F 7E5F [Trivial] +5C7E61 7E61 
[Trivial] +5C7E62 7E62 [Trivial] +5C7E6E 7E6E [Trivial] +5C7E72 7E72 [Trivial] +5C7E74 7E74 [Trivial] +5C7E7E 7E7E [Trivial] +5C7E7F 7E7F [Trivial] +5C7E80 7E80 [Trivial] +5C7E81 7E81 [Trivial] +5C7E9F 7E9F [Trivial] +5C7EA0 7EA0 [Trivial] +5C7EA1 7EA1 [Trivial] +5C7EE0 7EE0 [Trivial] +5C7EEF 7EEF [Trivial] +5C7EF9 7EF9 [Trivial] +5C7EFA 7EFA [Trivial] +5C7EFC 7EFC [Trivial] +5C7EFD 7EFD [Trivial] +5C7EFE 7EFE [Trivial] +5C7EFF 7EFF [Trivial] +5C7F00 7F00 [Trivial] +5C7F08 7F08 [Trivial] +5C7F09 7F09 [Trivial] +5C7F0A 7F0A [Trivial] +5C7F0D 7F0D [Trivial] +5C7F1A 7F1A [Trivial] +5C7F22 7F22 [Trivial] +5C7F25 7F25 [Trivial] +5C7F27 NULL [SyntErr] +5C7F30 7F30 [Trivial] +5C7F3F 7F3F [Trivial] +5C7F40 7F40 [Trivial] +5C7F5A 7F5A [Trivial] +5C7F5C NULL [SyntErr] +5C7F5F 7F5F [Trivial] +5C7F61 7F61 [Trivial] +5C7F62 7F62 [Trivial] +5C7F6E 7F6E [Trivial] +5C7F72 7F72 [Trivial] +5C7F74 7F74 [Trivial] +5C7F7E 7F7E [Trivial] +5C7F7F 7F7F [Trivial] +5C7F80 7F80 [Trivial] +5C7F81 7F81 [Trivial] +5C7F9F 7F9F [Trivial] +5C7FA0 7FA0 [Trivial] +5C7FA1 7FA1 [Trivial] +5C7FE0 7FE0 [Trivial] +5C7FEF 7FEF [Trivial] +5C7FF9 7FF9 [Trivial] +5C7FFA 7FFA [Trivial] +5C7FFC 7FFC [Trivial] +5C7FFD 7FFD [Trivial] +5C7FFE 7FFE [Trivial] +5C7FFF 7FFF [Trivial] +5C8000 8000 [Trivial] +5C8008 8008 [Trivial] +5C8009 8009 [Trivial] +5C800A 800A [Trivial] +5C800D 800D [Trivial] +5C801A 801A [Trivial] +5C8022 8022 [Trivial] +5C8025 8025 [Trivial] +5C8027 NULL [SyntErr] +5C8030 8030 [Trivial] +5C803F 803F [Trivial] +5C8040 8040 [Trivial] +5C805A 805A [Trivial] +5C805C NULL [SyntErr][USER] +5C805F 805F [Trivial] +5C8061 8061 [Trivial] +5C8062 8062 [Trivial] +5C806E 806E [Trivial] +5C8072 8072 [Trivial] +5C8074 8074 [Trivial] +5C807E 807E [Trivial] +5C807F 807F [Trivial] +5C8080 8080 [Trivial] +5C8081 8081 [Trivial] +5C809F 809F [Trivial] +5C80A0 80A0 [Trivial] +5C80A1 80A1 [Trivial] +5C80E0 80E0 [Trivial] +5C80EF 80EF [Trivial] +5C80F9 80F9 [Trivial] +5C80FA 80FA [Trivial] +5C80FC 80FC [Trivial] +5C80FD 80FD [Trivial] +5C80FE 80FE [Trivial] +5C80FF 80FF [Trivial] +5C8100 8100 [Trivial] +5C8108 8108 [Trivial] +5C8109 8109 [Trivial] +5C810A 810A [Trivial] +5C810D 810D [Trivial] +5C811A 811A [Trivial] +5C8122 8122 [Trivial] +5C8125 8125 [Trivial] +5C8127 NULL [SyntErr] +5C8130 8130 [Trivial] +5C813F 813F [Trivial] +5C8140 8140 [Trivial] +5C815A 815A [Trivial] +5C815C NULL [SyntErr][USER] +5C815F 815F [Trivial] +5C8161 8161 [Trivial] +5C8162 8162 [Trivial] +5C816E 816E [Trivial] +5C8172 8172 [Trivial] +5C8174 8174 [Trivial] +5C817E 817E [Trivial] +5C817F 817F [Trivial] +5C8180 8180 [Trivial] +5C8181 8181 [Trivial] +5C819F 819F [Trivial] +5C81A0 81A0 [Trivial] +5C81A1 81A1 [Trivial] +5C81E0 81E0 [Trivial] +5C81EF 81EF [Trivial] +5C81F9 81F9 [Trivial] +5C81FA 81FA [Trivial] +5C81FC 81FC [Trivial] +5C81FD 81FD [Trivial] +5C81FE 81FE [Trivial] +5C81FF 81FF [Trivial] +5C9F00 9F00 [Trivial] +5C9F08 9F08 [Trivial] +5C9F09 9F09 [Trivial] +5C9F0A 9F0A [Trivial] +5C9F0D 9F0D [Trivial] +5C9F1A 9F1A [Trivial] +5C9F22 9F22 [Trivial] +5C9F25 9F25 [Trivial] +5C9F27 NULL [SyntErr] +5C9F30 9F30 [Trivial] +5C9F3F 9F3F [Trivial] +5C9F40 9F40 [Trivial] +5C9F5A 9F5A [Trivial] +5C9F5C NULL [SyntErr][USER] +5C9F5F 9F5F [Trivial] +5C9F61 9F61 [Trivial] +5C9F62 9F62 [Trivial] +5C9F6E 9F6E [Trivial] +5C9F72 9F72 [Trivial] +5C9F74 9F74 [Trivial] +5C9F7E 9F7E [Trivial] +5C9F7F 9F7F [Trivial] +5C9F80 9F80 [Trivial] +5C9F81 9F81 [Trivial] +5C9F9F 9F9F [Trivial] +5C9FA0 9FA0 [Trivial] +5C9FA1 9FA1 [Trivial] +5C9FE0 9FE0 [Trivial] +5C9FEF 9FEF [Trivial] +5C9FF9 
9FF9 [Trivial] +5C9FFA 9FFA [Trivial] +5C9FFC 9FFC [Trivial] +5C9FFD 9FFD [Trivial] +5C9FFE 9FFE [Trivial] +5C9FFF 9FFF [Trivial] +5CA000 A000 [Trivial] +5CA008 A008 [Trivial] +5CA009 A009 [Trivial] +5CA00A A00A [Trivial] +5CA00D A00D [Trivial] +5CA01A A01A [Trivial] +5CA022 A022 [Trivial] +5CA025 A025 [Trivial] +5CA027 NULL [SyntErr] +5CA030 A030 [Trivial] +5CA03F A03F [Trivial] +5CA040 A040 [Trivial] +5CA05A A05A [Trivial] +5CA05C NULL [SyntErr][USER] +5CA05F A05F [Trivial] +5CA061 A061 [Trivial] +5CA062 A062 [Trivial] +5CA06E A06E [Trivial] +5CA072 A072 [Trivial] +5CA074 A074 [Trivial] +5CA07E A07E [Trivial] +5CA07F A07F [Trivial] +5CA080 A080 [Trivial] +5CA081 A081 [Trivial] +5CA09F A09F [Trivial] +5CA0A0 A0A0 [Trivial] +5CA0A1 A0A1 [Trivial] +5CA0E0 A0E0 [Trivial] +5CA0EF A0EF [Trivial] +5CA0F9 A0F9 [Trivial] +5CA0FA A0FA [Trivial] +5CA0FC A0FC [Trivial] +5CA0FD A0FD [Trivial] +5CA0FE A0FE [Trivial] +5CA0FF A0FF [Trivial] +5CA100 A100 [Trivial] +5CA108 A108 [Trivial] +5CA109 A109 [Trivial] +5CA10A A10A [Trivial] +5CA10D A10D [Trivial] +5CA11A A11A [Trivial] +5CA122 A122 [Trivial] +5CA125 A125 [Trivial] +5CA127 NULL [SyntErr] +5CA130 A130 [Trivial] +5CA13F A13F [Trivial] +5CA140 A140 [Trivial] +5CA15A A15A [Trivial] +5CA15C NULL [SyntErr][USER] +5CA15F A15F [Trivial] +5CA161 A161 [Trivial] +5CA162 A162 [Trivial] +5CA16E A16E [Trivial] +5CA172 A172 [Trivial] +5CA174 A174 [Trivial] +5CA17E A17E [Trivial] +5CA17F A17F [Trivial] +5CA180 A180 [Trivial] +5CA181 A181 [Trivial] +5CA19F A19F [Trivial] +5CA1A0 A1A0 [Trivial] +5CA1A1 A1A1 [Trivial] +5CA1E0 A1E0 [Trivial] +5CA1EF A1EF [Trivial] +5CA1F9 A1F9 [Trivial] +5CA1FA A1FA [Trivial] +5CA1FC A1FC [Trivial] +5CA1FD A1FD [Trivial] +5CA1FE A1FE [Trivial] +5CA1FF A1FF [Trivial] +5CE000 E000 [Trivial] +5CE008 E008 [Trivial] +5CE009 E009 [Trivial] +5CE00A E00A [Trivial] +5CE00D E00D [Trivial] +5CE01A E01A [Trivial] +5CE022 E022 [Trivial] +5CE025 E025 [Trivial] +5CE027 NULL [SyntErr] +5CE030 E030 [Trivial] +5CE03F E03F [Trivial] +5CE040 E040 [Trivial] +5CE05A E05A [Trivial] +5CE05C NULL [SyntErr][USER] +5CE05F E05F [Trivial] +5CE061 E061 [Trivial] +5CE062 E062 [Trivial] +5CE06E E06E [Trivial] +5CE072 E072 [Trivial] +5CE074 E074 [Trivial] +5CE07E E07E [Trivial] +5CE07F E07F [Trivial] +5CE080 E080 [Trivial] +5CE081 E081 [Trivial] +5CE09F E09F [Trivial] +5CE0A0 E0A0 [Trivial] +5CE0A1 E0A1 [Trivial] +5CE0E0 E0E0 [Trivial] +5CE0EF E0EF [Trivial] +5CE0F9 E0F9 [Trivial] +5CE0FA E0FA [Trivial] +5CE0FC E0FC [Trivial] +5CE0FD E0FD [Trivial] +5CE0FE E0FE [Trivial] +5CE0FF E0FF [Trivial] +5CEF00 EF00 [Trivial] +5CEF08 EF08 [Trivial] +5CEF09 EF09 [Trivial] +5CEF0A EF0A [Trivial] +5CEF0D EF0D [Trivial] +5CEF1A EF1A [Trivial] +5CEF22 EF22 [Trivial] +5CEF25 EF25 [Trivial] +5CEF27 NULL [SyntErr] +5CEF30 EF30 [Trivial] +5CEF3F EF3F [Trivial] +5CEF40 EF40 [Trivial] +5CEF5A EF5A [Trivial] +5CEF5C NULL [SyntErr][USER] +5CEF5F EF5F [Trivial] +5CEF61 EF61 [Trivial] +5CEF62 EF62 [Trivial] +5CEF6E EF6E [Trivial] +5CEF72 EF72 [Trivial] +5CEF74 EF74 [Trivial] +5CEF7E EF7E [Trivial] +5CEF7F EF7F [Trivial] +5CEF80 EF80 [Trivial] +5CEF81 EF81 [Trivial] +5CEF9F EF9F [Trivial] +5CEFA0 EFA0 [Trivial] +5CEFA1 EFA1 [Trivial] +5CEFE0 EFE0 [Trivial] +5CEFEF EFEF [Trivial] +5CEFF9 EFF9 [Trivial] +5CEFFA EFFA [Trivial] +5CEFFC EFFC [Trivial] +5CEFFD EFFD [Trivial] +5CEFFE EFFE [Trivial] +5CEFFF EFFF [Trivial] +5CF900 F900 [Trivial] +5CF908 F908 [Trivial] +5CF909 F909 [Trivial] +5CF90A F90A [Trivial] +5CF90D F90D [Trivial] +5CF91A F91A [Trivial] +5CF922 F922 [Trivial] +5CF925 F925 
[Trivial] +5CF927 NULL [SyntErr] +5CF930 F930 [Trivial] +5CF93F F93F [Trivial] +5CF940 F940 [Trivial] +5CF95A F95A [Trivial] +5CF95C NULL [SyntErr][USER] +5CF95F F95F [Trivial] +5CF961 F961 [Trivial] +5CF962 F962 [Trivial] +5CF96E F96E [Trivial] +5CF972 F972 [Trivial] +5CF974 F974 [Trivial] +5CF97E F97E [Trivial] +5CF97F F97F [Trivial] +5CF980 F980 [Trivial] +5CF981 F981 [Trivial] +5CF99F F99F [Trivial] +5CF9A0 F9A0 [Trivial] +5CF9A1 F9A1 [Trivial] +5CF9E0 F9E0 [Trivial] +5CF9EF F9EF [Trivial] +5CF9F9 F9F9 [Trivial] +5CF9FA F9FA [Trivial] +5CF9FC F9FC [Trivial] +5CF9FD F9FD [Trivial] +5CF9FE F9FE [Trivial] +5CF9FF F9FF [Trivial] +5CFA00 FA00 [Trivial] +5CFA08 FA08 [Trivial] +5CFA09 FA09 [Trivial] +5CFA0A FA0A [Trivial] +5CFA0D FA0D [Trivial] +5CFA1A FA1A [Trivial] +5CFA22 FA22 [Trivial] +5CFA25 FA25 [Trivial] +5CFA27 NULL [SyntErr] +5CFA30 FA30 [Trivial] +5CFA3F FA3F [Trivial] +5CFA40 FA40 [Trivial] +5CFA5A FA5A [Trivial] +5CFA5C NULL [SyntErr][USER] +5CFA5F FA5F [Trivial] +5CFA61 FA61 [Trivial] +5CFA62 FA62 [Trivial] +5CFA6E FA6E [Trivial] +5CFA72 FA72 [Trivial] +5CFA74 FA74 [Trivial] +5CFA7E FA7E [Trivial] +5CFA7F FA7F [Trivial] +5CFA80 FA80 [Trivial] +5CFA81 FA81 [Trivial] +5CFA9F FA9F [Trivial] +5CFAA0 FAA0 [Trivial] +5CFAA1 FAA1 [Trivial] +5CFAE0 FAE0 [Trivial] +5CFAEF FAEF [Trivial] +5CFAF9 FAF9 [Trivial] +5CFAFA FAFA [Trivial] +5CFAFC FAFC [Trivial] +5CFAFD FAFD [Trivial] +5CFAFE FAFE [Trivial] +5CFAFF FAFF [Trivial] +5CFC00 FC00 [Trivial] +5CFC08 FC08 [Trivial] +5CFC09 FC09 [Trivial] +5CFC0A FC0A [Trivial] +5CFC0D FC0D [Trivial] +5CFC1A FC1A [Trivial] +5CFC22 FC22 [Trivial] +5CFC25 FC25 [Trivial] +5CFC27 NULL [SyntErr] +5CFC30 FC30 [Trivial] +5CFC3F FC3F [Trivial] +5CFC40 FC40 [Trivial] +5CFC5A FC5A [Trivial] +5CFC5C NULL [SyntErr][USER] +5CFC5F FC5F [Trivial] +5CFC61 FC61 [Trivial] +5CFC62 FC62 [Trivial] +5CFC6E FC6E [Trivial] +5CFC72 FC72 [Trivial] +5CFC74 FC74 [Trivial] +5CFC7E FC7E [Trivial] +5CFC7F FC7F [Trivial] +5CFC80 FC80 [Trivial] +5CFC81 FC81 [Trivial] +5CFC9F FC9F [Trivial] +5CFCA0 FCA0 [Trivial] +5CFCA1 FCA1 [Trivial] +5CFCE0 FCE0 [Trivial] +5CFCEF FCEF [Trivial] +5CFCF9 FCF9 [Trivial] +5CFCFA FCFA [Trivial] +5CFCFC FCFC [Trivial] +5CFCFD FCFD [Trivial] +5CFCFE FCFE [Trivial] +5CFCFF FCFF [Trivial] +5CFD00 FD00 [Trivial] +5CFD08 FD08 [Trivial] +5CFD09 FD09 [Trivial] +5CFD0A FD0A [Trivial] +5CFD0D FD0D [Trivial] +5CFD1A FD1A [Trivial] +5CFD22 FD22 [Trivial] +5CFD25 FD25 [Trivial] +5CFD27 NULL [SyntErr] +5CFD30 FD30 [Trivial] +5CFD3F FD3F [Trivial] +5CFD40 FD40 [Trivial] +5CFD5A FD5A [Trivial] +5CFD5C NULL [SyntErr][USER] +5CFD5F FD5F [Trivial] +5CFD61 FD61 [Trivial] +5CFD62 FD62 [Trivial] +5CFD6E FD6E [Trivial] +5CFD72 FD72 [Trivial] +5CFD74 FD74 [Trivial] +5CFD7E FD7E [Trivial] +5CFD7F FD7F [Trivial] +5CFD80 FD80 [Trivial] +5CFD81 FD81 [Trivial] +5CFD9F FD9F [Trivial] +5CFDA0 FDA0 [Trivial] +5CFDA1 FDA1 [Trivial] +5CFDE0 FDE0 [Trivial] +5CFDEF FDEF [Trivial] +5CFDF9 FDF9 [Trivial] +5CFDFA FDFA [Trivial] +5CFDFC FDFC [Trivial] +5CFDFD FDFD [Trivial] +5CFDFE FDFE [Trivial] +5CFDFF FDFF [Trivial] +5CFE00 FE00 [Trivial] +5CFE08 FE08 [Trivial] +5CFE09 FE09 [Trivial] +5CFE0A FE0A [Trivial] +5CFE0D FE0D [Trivial] +5CFE1A FE1A [Trivial] +5CFE22 FE22 [Trivial] +5CFE25 FE25 [Trivial] +5CFE27 NULL [SyntErr] +5CFE30 FE30 [Trivial] +5CFE3F FE3F [Trivial] +5CFE40 FE40 [Trivial] +5CFE5A FE5A [Trivial] +5CFE5C NULL [SyntErr][USER] +5CFE5F FE5F [Trivial] +5CFE61 FE61 [Trivial] +5CFE62 FE62 [Trivial] +5CFE6E FE6E [Trivial] +5CFE72 FE72 [Trivial] +5CFE74 FE74 [Trivial] +5CFE7E FE7E 
[Trivial] +5CFE7F FE7F [Trivial] +5CFE80 FE80 [Trivial] +5CFE81 FE81 [Trivial] +5CFE9F FE9F [Trivial] +5CFEA0 FEA0 [Trivial] +5CFEA1 FEA1 [Trivial] +5CFEE0 FEE0 [Trivial] +5CFEEF FEEF [Trivial] +5CFEF9 FEF9 [Trivial] +5CFEFA FEFA [Trivial] +5CFEFC FEFC [Trivial] +5CFEFD FEFD [Trivial] +5CFEFE FEFE [Trivial] +5CFEFF FEFF [Trivial] +5CFF00 FF00 [Trivial] +5CFF08 FF08 [Trivial] +5CFF09 FF09 [Trivial] +5CFF0A FF0A [Trivial] +5CFF0D FF0D [Trivial] +5CFF1A FF1A [Trivial] +5CFF22 FF22 [Trivial] +5CFF25 FF25 [Trivial] +5CFF27 NULL [SyntErr] +5CFF30 FF30 [Trivial] +5CFF3F FF3F [Trivial] +5CFF40 FF40 [Trivial] +5CFF5A FF5A [Trivial] +5CFF5C NULL [SyntErr][USER] +5CFF5F FF5F [Trivial] +5CFF61 FF61 [Trivial] +5CFF62 FF62 [Trivial] +5CFF6E FF6E [Trivial] +5CFF72 FF72 [Trivial] +5CFF74 FF74 [Trivial] +5CFF7E FF7E [Trivial] +5CFF7F FF7F [Trivial] +5CFF80 FF80 [Trivial] +5CFF81 FF81 [Trivial] +5CFF9F FF9F [Trivial] +5CFFA0 FFA0 [Trivial] +5CFFA1 FFA1 [Trivial] +5CFFE0 FFE0 [Trivial] +5CFFEF FFEF [Trivial] +5CFFF9 FFF9 [Trivial] +5CFFFA FFFA [Trivial] +5CFFFC FFFC [Trivial] +5CFFFD FFFD [Trivial] +5CFFFE FFFE [Trivial] +5CFFFF FFFF [Trivial] +5C005C00 0000 [Trivial] +5C005C08 0008 [Trivial] +5C005C09 0009 [Trivial] +5C005C0A 000A [Trivial] +5C005C0D 000D [Trivial] +5C005C1A 001A [Trivial] +5C005C22 0022 [Trivial] +5C005C25 005C25 [Regular] +5C005C27 0027 [Trivial] +5C005C30 0000 [Regular] +5C005C3F 003F [Trivial] +5C005C40 0040 [Trivial] +5C005C5A 001A [Regular] +5C005C5C 005C [Regular] +5C005C5F 005C5F [Regular] +5C005C61 0061 [Trivial] +5C005C62 0008 [Regular] +5C005C6E 000A [Regular] +5C005C72 000D [Regular] +5C005C74 0009 [Regular] +5C005C7E 007E [Trivial] +5C005C7F 007F [Trivial] +5C005C80 0080 [Trivial] +5C005C81 0081 [Trivial] +5C005C9F 009F [Trivial] +5C005CA0 00A0 [Trivial] +5C005CA1 00A1 [Trivial] +5C005CE0 00E0 [Trivial] +5C005CEF 00EF [Trivial] +5C005CF9 00F9 [Trivial] +5C005CFA 00FA [Trivial] +5C005CFC 00FC [Trivial] +5C005CFD 00FD [Trivial] +5C005CFE 00FE [Trivial] +5C005CFF 00FF [Trivial] +5C085C00 0800 [Trivial] +5C085C08 0808 [Trivial] +5C085C09 0809 [Trivial] +5C085C0A 080A [Trivial] +5C085C0D 080D [Trivial] +5C085C1A 081A [Trivial] +5C085C22 0822 [Trivial] +5C085C25 085C25 [Regular] +5C085C27 0827 [Trivial] +5C085C30 0800 [Regular] +5C085C3F 083F [Trivial] +5C085C40 0840 [Trivial] +5C085C5A 081A [Regular] +5C085C5C 085C [Regular] +5C085C5F 085C5F [Regular] +5C085C61 0861 [Trivial] +5C085C62 0808 [Regular] +5C085C6E 080A [Regular] +5C085C72 080D [Regular] +5C085C74 0809 [Regular] +5C085C7E 087E [Trivial] +5C085C7F 087F [Trivial] +5C085C80 0880 [Trivial] +5C085C81 0881 [Trivial] +5C085C9F 089F [Trivial] +5C085CA0 08A0 [Trivial] +5C085CA1 08A1 [Trivial] +5C085CE0 08E0 [Trivial] +5C085CEF 08EF [Trivial] +5C085CF9 08F9 [Trivial] +5C085CFA 08FA [Trivial] +5C085CFC 08FC [Trivial] +5C085CFD 08FD [Trivial] +5C085CFE 08FE [Trivial] +5C085CFF 08FF [Trivial] +5C095C00 0900 [Trivial] +5C095C08 0908 [Trivial] +5C095C09 0909 [Trivial] +5C095C0A 090A [Trivial] +5C095C0D 090D [Trivial] +5C095C1A 091A [Trivial] +5C095C22 0922 [Trivial] +5C095C25 095C25 [Regular] +5C095C27 0927 [Trivial] +5C095C30 0900 [Regular] +5C095C3F 093F [Trivial] +5C095C40 0940 [Trivial] +5C095C5A 091A [Regular] +5C095C5C 095C [Regular] +5C095C5F 095C5F [Regular] +5C095C61 0961 [Trivial] +5C095C62 0908 [Regular] +5C095C6E 090A [Regular] +5C095C72 090D [Regular] +5C095C74 0909 [Regular] +5C095C7E 097E [Trivial] +5C095C7F 097F [Trivial] +5C095C80 0980 [Trivial] +5C095C81 0981 [Trivial] +5C095C9F 099F [Trivial] +5C095CA0 09A0 [Trivial] 
+5C095CA1 09A1 [Trivial] +5C095CE0 09E0 [Trivial] +5C095CEF 09EF [Trivial] +5C095CF9 09F9 [Trivial] +5C095CFA 09FA [Trivial] +5C095CFC 09FC [Trivial] +5C095CFD 09FD [Trivial] +5C095CFE 09FE [Trivial] +5C095CFF 09FF [Trivial] +5C0A5C00 0A00 [Trivial] +5C0A5C08 0A08 [Trivial] +5C0A5C09 0A09 [Trivial] +5C0A5C0A 0A0A [Trivial] +5C0A5C0D 0A0D [Trivial] +5C0A5C1A 0A1A [Trivial] +5C0A5C22 0A22 [Trivial] +5C0A5C25 0A5C25 [Regular] +5C0A5C27 0A27 [Trivial] +5C0A5C30 0A00 [Regular] +5C0A5C3F 0A3F [Trivial] +5C0A5C40 0A40 [Trivial] +5C0A5C5A 0A1A [Regular] +5C0A5C5C 0A5C [Regular] +5C0A5C5F 0A5C5F [Regular] +5C0A5C61 0A61 [Trivial] +5C0A5C62 0A08 [Regular] +5C0A5C6E 0A0A [Regular] +5C0A5C72 0A0D [Regular] +5C0A5C74 0A09 [Regular] +5C0A5C7E 0A7E [Trivial] +5C0A5C7F 0A7F [Trivial] +5C0A5C80 0A80 [Trivial] +5C0A5C81 0A81 [Trivial] +5C0A5C9F 0A9F [Trivial] +5C0A5CA0 0AA0 [Trivial] +5C0A5CA1 0AA1 [Trivial] +5C0A5CE0 0AE0 [Trivial] +5C0A5CEF 0AEF [Trivial] +5C0A5CF9 0AF9 [Trivial] +5C0A5CFA 0AFA [Trivial] +5C0A5CFC 0AFC [Trivial] +5C0A5CFD 0AFD [Trivial] +5C0A5CFE 0AFE [Trivial] +5C0A5CFF 0AFF [Trivial] +5C0D5C00 0D00 [Trivial] +5C0D5C08 0D08 [Trivial] +5C0D5C09 0D09 [Trivial] +5C0D5C0A 0D0A [Trivial] +5C0D5C0D 0D0D [Trivial] +5C0D5C1A 0D1A [Trivial] +5C0D5C22 0D22 [Trivial] +5C0D5C25 0D5C25 [Regular] +5C0D5C27 0D27 [Trivial] +5C0D5C30 0D00 [Regular] +5C0D5C3F 0D3F [Trivial] +5C0D5C40 0D40 [Trivial] +5C0D5C5A 0D1A [Regular] +5C0D5C5C 0D5C [Regular] +5C0D5C5F 0D5C5F [Regular] +5C0D5C61 0D61 [Trivial] +5C0D5C62 0D08 [Regular] +5C0D5C6E 0D0A [Regular] +5C0D5C72 0D0D [Regular] +5C0D5C74 0D09 [Regular] +5C0D5C7E 0D7E [Trivial] +5C0D5C7F 0D7F [Trivial] +5C0D5C80 0D80 [Trivial] +5C0D5C81 0D81 [Trivial] +5C0D5C9F 0D9F [Trivial] +5C0D5CA0 0DA0 [Trivial] +5C0D5CA1 0DA1 [Trivial] +5C0D5CE0 0DE0 [Trivial] +5C0D5CEF 0DEF [Trivial] +5C0D5CF9 0DF9 [Trivial] +5C0D5CFA 0DFA [Trivial] +5C0D5CFC 0DFC [Trivial] +5C0D5CFD 0DFD [Trivial] +5C0D5CFE 0DFE [Trivial] +5C0D5CFF 0DFF [Trivial] +5C1A5C00 1A00 [Trivial] +5C1A5C08 1A08 [Trivial] +5C1A5C09 1A09 [Trivial] +5C1A5C0A 1A0A [Trivial] +5C1A5C0D 1A0D [Trivial] +5C1A5C1A 1A1A [Trivial] +5C1A5C22 1A22 [Trivial] +5C1A5C25 1A5C25 [Regular] +5C1A5C27 1A27 [Trivial] +5C1A5C30 1A00 [Regular] +5C1A5C3F 1A3F [Trivial] +5C1A5C40 1A40 [Trivial] +5C1A5C5A 1A1A [Regular] +5C1A5C5C 1A5C [Regular] +5C1A5C5F 1A5C5F [Regular] +5C1A5C61 1A61 [Trivial] +5C1A5C62 1A08 [Regular] +5C1A5C6E 1A0A [Regular] +5C1A5C72 1A0D [Regular] +5C1A5C74 1A09 [Regular] +5C1A5C7E 1A7E [Trivial] +5C1A5C7F 1A7F [Trivial] +5C1A5C80 1A80 [Trivial] +5C1A5C81 1A81 [Trivial] +5C1A5C9F 1A9F [Trivial] +5C1A5CA0 1AA0 [Trivial] +5C1A5CA1 1AA1 [Trivial] +5C1A5CE0 1AE0 [Trivial] +5C1A5CEF 1AEF [Trivial] +5C1A5CF9 1AF9 [Trivial] +5C1A5CFA 1AFA [Trivial] +5C1A5CFC 1AFC [Trivial] +5C1A5CFD 1AFD [Trivial] +5C1A5CFE 1AFE [Trivial] +5C1A5CFF 1AFF [Trivial] +5C225C00 2200 [Trivial] +5C225C08 2208 [Trivial] +5C225C09 2209 [Trivial] +5C225C0A 220A [Trivial] +5C225C0D 220D [Trivial] +5C225C1A 221A [Trivial] +5C225C22 2222 [Trivial] +5C225C25 225C25 [Regular] +5C225C27 2227 [Trivial] +5C225C30 2200 [Regular] +5C225C3F 223F [Trivial] +5C225C40 2240 [Trivial] +5C225C5A 221A [Regular] +5C225C5C 225C [Regular] +5C225C5F 225C5F [Regular] +5C225C61 2261 [Trivial] +5C225C62 2208 [Regular] +5C225C6E 220A [Regular] +5C225C72 220D [Regular] +5C225C74 2209 [Regular] +5C225C7E 227E [Trivial] +5C225C7F 227F [Trivial] +5C225C80 2280 [Trivial] +5C225C81 2281 [Trivial] +5C225C9F 229F [Trivial] +5C225CA0 22A0 [Trivial] +5C225CA1 22A1 [Trivial] +5C225CE0 
22E0 [Trivial] +5C225CEF 22EF [Trivial] +5C225CF9 22F9 [Trivial] +5C225CFA 22FA [Trivial] +5C225CFC 22FC [Trivial] +5C225CFD 22FD [Trivial] +5C225CFE 22FE [Trivial] +5C225CFF 22FF [Trivial] +5C255C00 5C2500 [Regular] +5C255C08 5C2508 [Regular] +5C255C09 5C2509 [Regular] +5C255C0A 5C250A [Regular] +5C255C0D 5C250D [Regular] +5C255C1A 5C251A [Regular] +5C255C22 5C2522 [Regular] +5C255C25 5C255C25 [Preserve][LIKE] +5C255C27 5C2527 [Regular] +5C255C30 5C2500 [Regular] +5C255C3F 5C253F [Regular] +5C255C40 5C2540 [Regular] +5C255C5A 5C251A [Regular] +5C255C5C 5C255C [Regular] +5C255C5F 5C255C5F [Preserve][LIKE] +5C255C61 5C2561 [Regular] +5C255C62 5C2508 [Regular] +5C255C6E 5C250A [Regular] +5C255C72 5C250D [Regular] +5C255C74 5C2509 [Regular] +5C255C7E 5C257E [Regular] +5C255C7F 5C257F [Regular] +5C255C80 5C2580 [Regular] +5C255C81 5C2581 [Regular] +5C255C9F 5C259F [Regular] +5C255CA0 5C25A0 [Regular] +5C255CA1 5C25A1 [Regular] +5C255CE0 5C25E0 [Regular] +5C255CEF 5C25EF [Regular] +5C255CF9 5C25F9 [Regular] +5C255CFA 5C25FA [Regular] +5C255CFC 5C25FC [Regular] +5C255CFD 5C25FD [Regular] +5C255CFE 5C25FE [Regular] +5C255CFF 5C25FF [Regular] +5C275C00 2700 [Trivial] +5C275C08 2708 [Trivial] +5C275C09 2709 [Trivial] +5C275C0A 270A [Trivial] +5C275C0D 270D [Trivial] +5C275C1A 271A [Trivial] +5C275C22 2722 [Trivial] +5C275C25 275C25 [Regular] +5C275C27 2727 [Trivial] +5C275C30 2700 [Regular] +5C275C3F 273F [Trivial] +5C275C40 2740 [Trivial] +5C275C5A 271A [Regular] +5C275C5C 275C [Regular] +5C275C5F 275C5F [Regular] +5C275C61 2761 [Trivial] +5C275C62 2708 [Regular] +5C275C6E 270A [Regular] +5C275C72 270D [Regular] +5C275C74 2709 [Regular] +5C275C7E 277E [Trivial] +5C275C7F 277F [Trivial] +5C275C80 2780 [Trivial] +5C275C81 2781 [Trivial] +5C275C9F 279F [Trivial] +5C275CA0 27A0 [Trivial] +5C275CA1 27A1 [Trivial] +5C275CE0 27E0 [Trivial] +5C275CEF 27EF [Trivial] +5C275CF9 27F9 [Trivial] +5C275CFA 27FA [Trivial] +5C275CFC 27FC [Trivial] +5C275CFD 27FD [Trivial] +5C275CFE 27FE [Trivial] +5C275CFF 27FF [Trivial] +5C305C00 0000 [Regular] +5C305C08 0008 [Regular] +5C305C09 0009 [Regular] +5C305C0A 000A [Regular] +5C305C0D 000D [Regular] +5C305C1A 001A [Regular] +5C305C22 0022 [Regular] +5C305C25 005C25 [Regular] +5C305C27 0027 [Regular] +5C305C30 0000 [Regular] +5C305C3F 003F [Regular] +5C305C40 0040 [Regular] +5C305C5A 001A [Regular] +5C305C5C 005C [Regular] +5C305C5F 005C5F [Regular] +5C305C61 0061 [Regular] +5C305C62 0008 [Regular] +5C305C6E 000A [Regular] +5C305C72 000D [Regular] +5C305C74 0009 [Regular] +5C305C7E 007E [Regular] +5C305C7F 007F [Regular] +5C305C80 0080 [Regular] +5C305C81 0081 [Regular] +5C305C9F 009F [Regular] +5C305CA0 00A0 [Regular] +5C305CA1 00A1 [Regular] +5C305CE0 00E0 [Regular] +5C305CEF 00EF [Regular] +5C305CF9 00F9 [Regular] +5C305CFA 00FA [Regular] +5C305CFC 00FC [Regular] +5C305CFD 00FD [Regular] +5C305CFE 00FE [Regular] +5C305CFF 00FF [Regular] +5C3F5C00 3F00 [Trivial] +5C3F5C08 3F08 [Trivial] +5C3F5C09 3F09 [Trivial] +5C3F5C0A 3F0A [Trivial] +5C3F5C0D 3F0D [Trivial] +5C3F5C1A 3F1A [Trivial] +5C3F5C22 3F22 [Trivial] +5C3F5C25 3F5C25 [Regular] +5C3F5C27 3F27 [Trivial] +5C3F5C30 3F00 [Regular] +5C3F5C3F 3F3F [Trivial] +5C3F5C40 3F40 [Trivial] +5C3F5C5A 3F1A [Regular] +5C3F5C5C 3F5C [Regular] +5C3F5C5F 3F5C5F [Regular] +5C3F5C61 3F61 [Trivial] +5C3F5C62 3F08 [Regular] +5C3F5C6E 3F0A [Regular] +5C3F5C72 3F0D [Regular] +5C3F5C74 3F09 [Regular] +5C3F5C7E 3F7E [Trivial] +5C3F5C7F 3F7F [Trivial] +5C3F5C80 3F80 [Trivial] +5C3F5C81 3F81 [Trivial] +5C3F5C9F 3F9F [Trivial] +5C3F5CA0 3FA0 
[Trivial] +5C3F5CA1 3FA1 [Trivial] +5C3F5CE0 3FE0 [Trivial] +5C3F5CEF 3FEF [Trivial] +5C3F5CF9 3FF9 [Trivial] +5C3F5CFA 3FFA [Trivial] +5C3F5CFC 3FFC [Trivial] +5C3F5CFD 3FFD [Trivial] +5C3F5CFE 3FFE [Trivial] +5C3F5CFF 3FFF [Trivial] +5C405C00 4000 [Trivial] +5C405C08 4008 [Trivial] +5C405C09 4009 [Trivial] +5C405C0A 400A [Trivial] +5C405C0D 400D [Trivial] +5C405C1A 401A [Trivial] +5C405C22 4022 [Trivial] +5C405C25 405C25 [Regular] +5C405C27 4027 [Trivial] +5C405C30 4000 [Regular] +5C405C3F 403F [Trivial] +5C405C40 4040 [Trivial] +5C405C5A 401A [Regular] +5C405C5C 405C [Regular] +5C405C5F 405C5F [Regular] +5C405C61 4061 [Trivial] +5C405C62 4008 [Regular] +5C405C6E 400A [Regular] +5C405C72 400D [Regular] +5C405C74 4009 [Regular] +5C405C7E 407E [Trivial] +5C405C7F 407F [Trivial] +5C405C80 4080 [Trivial] +5C405C81 4081 [Trivial] +5C405C9F 409F [Trivial] +5C405CA0 40A0 [Trivial] +5C405CA1 40A1 [Trivial] +5C405CE0 40E0 [Trivial] +5C405CEF 40EF [Trivial] +5C405CF9 40F9 [Trivial] +5C405CFA 40FA [Trivial] +5C405CFC 40FC [Trivial] +5C405CFD 40FD [Trivial] +5C405CFE 40FE [Trivial] +5C405CFF 40FF [Trivial] +5C5A5C00 1A00 [Regular] +5C5A5C08 1A08 [Regular] +5C5A5C09 1A09 [Regular] +5C5A5C0A 1A0A [Regular] +5C5A5C0D 1A0D [Regular] +5C5A5C1A 1A1A [Regular] +5C5A5C22 1A22 [Regular] +5C5A5C25 1A5C25 [Regular] +5C5A5C27 1A27 [Regular] +5C5A5C30 1A00 [Regular] +5C5A5C3F 1A3F [Regular] +5C5A5C40 1A40 [Regular] +5C5A5C5A 1A1A [Regular] +5C5A5C5C 1A5C [Regular] +5C5A5C5F 1A5C5F [Regular] +5C5A5C61 1A61 [Regular] +5C5A5C62 1A08 [Regular] +5C5A5C6E 1A0A [Regular] +5C5A5C72 1A0D [Regular] +5C5A5C74 1A09 [Regular] +5C5A5C7E 1A7E [Regular] +5C5A5C7F 1A7F [Regular] +5C5A5C80 1A80 [Regular] +5C5A5C81 1A81 [Regular] +5C5A5C9F 1A9F [Regular] +5C5A5CA0 1AA0 [Regular] +5C5A5CA1 1AA1 [Regular] +5C5A5CE0 1AE0 [Regular] +5C5A5CEF 1AEF [Regular] +5C5A5CF9 1AF9 [Regular] +5C5A5CFA 1AFA [Regular] +5C5A5CFC 1AFC [Regular] +5C5A5CFD 1AFD [Regular] +5C5A5CFE 1AFE [Regular] +5C5A5CFF 1AFF [Regular] +5C5C5C00 5C00 [Regular] +5C5C5C08 5C08 [Regular] +5C5C5C09 5C09 [Regular] +5C5C5C0A 5C0A [Regular] +5C5C5C0D 5C0D [Regular] +5C5C5C1A 5C1A [Regular] +5C5C5C22 5C22 [Regular] +5C5C5C25 5C5C25 [Regular] +5C5C5C27 5C27 [Regular] +5C5C5C30 5C00 [Regular] +5C5C5C3F 5C3F [Regular] +5C5C5C40 5C40 [Regular] +5C5C5C5A 5C1A [Regular] +5C5C5C5C 5C5C [Regular] +5C5C5C5F 5C5C5F [Regular] +5C5C5C61 5C61 [Regular] +5C5C5C62 5C08 [Regular] +5C5C5C6E 5C0A [Regular] +5C5C5C72 5C0D [Regular] +5C5C5C74 5C09 [Regular] +5C5C5C7E 5C7E [Regular] +5C5C5C7F 5C7F [Regular] +5C5C5C80 5C80 [Regular] +5C5C5C81 5C81 [Regular] +5C5C5C9F 5C9F [Regular] +5C5C5CA0 5CA0 [Regular] +5C5C5CA1 5CA1 [Regular] +5C5C5CE0 5CE0 [Regular] +5C5C5CEF 5CEF [Regular] +5C5C5CF9 5CF9 [Regular] +5C5C5CFA 5CFA [Regular] +5C5C5CFC 5CFC [Regular] +5C5C5CFD 5CFD [Regular] +5C5C5CFE 5CFE [Regular] +5C5C5CFF 5CFF [Regular] +5C5F5C00 5C5F00 [Regular] +5C5F5C08 5C5F08 [Regular] +5C5F5C09 5C5F09 [Regular] +5C5F5C0A 5C5F0A [Regular] +5C5F5C0D 5C5F0D [Regular] +5C5F5C1A 5C5F1A [Regular] +5C5F5C22 5C5F22 [Regular] +5C5F5C25 5C5F5C25 [Preserve][LIKE] +5C5F5C27 5C5F27 [Regular] +5C5F5C30 5C5F00 [Regular] +5C5F5C3F 5C5F3F [Regular] +5C5F5C40 5C5F40 [Regular] +5C5F5C5A 5C5F1A [Regular] +5C5F5C5C 5C5F5C [Regular] +5C5F5C5F 5C5F5C5F [Preserve][LIKE] +5C5F5C61 5C5F61 [Regular] +5C5F5C62 5C5F08 [Regular] +5C5F5C6E 5C5F0A [Regular] +5C5F5C72 5C5F0D [Regular] +5C5F5C74 5C5F09 [Regular] +5C5F5C7E 5C5F7E [Regular] +5C5F5C7F 5C5F7F [Regular] +5C5F5C80 5C5F80 [Regular] +5C5F5C81 5C5F81 [Regular] +5C5F5C9F 5C5F9F 
[Regular] +5C5F5CA0 5C5FA0 [Regular] +5C5F5CA1 5C5FA1 [Regular] +5C5F5CE0 5C5FE0 [Regular] +5C5F5CEF 5C5FEF [Regular] +5C5F5CF9 5C5FF9 [Regular] +5C5F5CFA 5C5FFA [Regular] +5C5F5CFC 5C5FFC [Regular] +5C5F5CFD 5C5FFD [Regular] +5C5F5CFE 5C5FFE [Regular] +5C5F5CFF 5C5FFF [Regular] +5C615C00 6100 [Trivial] +5C615C08 6108 [Trivial] +5C615C09 6109 [Trivial] +5C615C0A 610A [Trivial] +5C615C0D 610D [Trivial] +5C615C1A 611A [Trivial] +5C615C22 6122 [Trivial] +5C615C25 615C25 [Regular] +5C615C27 6127 [Trivial] +5C615C30 6100 [Regular] +5C615C3F 613F [Trivial] +5C615C40 6140 [Trivial] +5C615C5A 611A [Regular] +5C615C5C 615C [Regular] +5C615C5F 615C5F [Regular] +5C615C61 6161 [Trivial] +5C615C62 6108 [Regular] +5C615C6E 610A [Regular] +5C615C72 610D [Regular] +5C615C74 6109 [Regular] +5C615C7E 617E [Trivial] +5C615C7F 617F [Trivial] +5C615C80 6180 [Trivial] +5C615C81 6181 [Trivial] +5C615C9F 619F [Trivial] +5C615CA0 61A0 [Trivial] +5C615CA1 61A1 [Trivial] +5C615CE0 61E0 [Trivial] +5C615CEF 61EF [Trivial] +5C615CF9 61F9 [Trivial] +5C615CFA 61FA [Trivial] +5C615CFC 61FC [Trivial] +5C615CFD 61FD [Trivial] +5C615CFE 61FE [Trivial] +5C615CFF 61FF [Trivial] +5C625C00 0800 [Regular] +5C625C08 0808 [Regular] +5C625C09 0809 [Regular] +5C625C0A 080A [Regular] +5C625C0D 080D [Regular] +5C625C1A 081A [Regular] +5C625C22 0822 [Regular] +5C625C25 085C25 [Regular] +5C625C27 0827 [Regular] +5C625C30 0800 [Regular] +5C625C3F 083F [Regular] +5C625C40 0840 [Regular] +5C625C5A 081A [Regular] +5C625C5C 085C [Regular] +5C625C5F 085C5F [Regular] +5C625C61 0861 [Regular] +5C625C62 0808 [Regular] +5C625C6E 080A [Regular] +5C625C72 080D [Regular] +5C625C74 0809 [Regular] +5C625C7E 087E [Regular] +5C625C7F 087F [Regular] +5C625C80 0880 [Regular] +5C625C81 0881 [Regular] +5C625C9F 089F [Regular] +5C625CA0 08A0 [Regular] +5C625CA1 08A1 [Regular] +5C625CE0 08E0 [Regular] +5C625CEF 08EF [Regular] +5C625CF9 08F9 [Regular] +5C625CFA 08FA [Regular] +5C625CFC 08FC [Regular] +5C625CFD 08FD [Regular] +5C625CFE 08FE [Regular] +5C625CFF 08FF [Regular] +5C6E5C00 0A00 [Regular] +5C6E5C08 0A08 [Regular] +5C6E5C09 0A09 [Regular] +5C6E5C0A 0A0A [Regular] +5C6E5C0D 0A0D [Regular] +5C6E5C1A 0A1A [Regular] +5C6E5C22 0A22 [Regular] +5C6E5C25 0A5C25 [Regular] +5C6E5C27 0A27 [Regular] +5C6E5C30 0A00 [Regular] +5C6E5C3F 0A3F [Regular] +5C6E5C40 0A40 [Regular] +5C6E5C5A 0A1A [Regular] +5C6E5C5C 0A5C [Regular] +5C6E5C5F 0A5C5F [Regular] +5C6E5C61 0A61 [Regular] +5C6E5C62 0A08 [Regular] +5C6E5C6E 0A0A [Regular] +5C6E5C72 0A0D [Regular] +5C6E5C74 0A09 [Regular] +5C6E5C7E 0A7E [Regular] +5C6E5C7F 0A7F [Regular] +5C6E5C80 0A80 [Regular] +5C6E5C81 0A81 [Regular] +5C6E5C9F 0A9F [Regular] +5C6E5CA0 0AA0 [Regular] +5C6E5CA1 0AA1 [Regular] +5C6E5CE0 0AE0 [Regular] +5C6E5CEF 0AEF [Regular] +5C6E5CF9 0AF9 [Regular] +5C6E5CFA 0AFA [Regular] +5C6E5CFC 0AFC [Regular] +5C6E5CFD 0AFD [Regular] +5C6E5CFE 0AFE [Regular] +5C6E5CFF 0AFF [Regular] +5C725C00 0D00 [Regular] +5C725C08 0D08 [Regular] +5C725C09 0D09 [Regular] +5C725C0A 0D0A [Regular] +5C725C0D 0D0D [Regular] +5C725C1A 0D1A [Regular] +5C725C22 0D22 [Regular] +5C725C25 0D5C25 [Regular] +5C725C27 0D27 [Regular] +5C725C30 0D00 [Regular] +5C725C3F 0D3F [Regular] +5C725C40 0D40 [Regular] +5C725C5A 0D1A [Regular] +5C725C5C 0D5C [Regular] +5C725C5F 0D5C5F [Regular] +5C725C61 0D61 [Regular] +5C725C62 0D08 [Regular] +5C725C6E 0D0A [Regular] +5C725C72 0D0D [Regular] +5C725C74 0D09 [Regular] +5C725C7E 0D7E [Regular] +5C725C7F 0D7F [Regular] +5C725C80 0D80 [Regular] +5C725C81 0D81 [Regular] +5C725C9F 0D9F [Regular] 
+5C725CA0 0DA0 [Regular] +5C725CA1 0DA1 [Regular] +5C725CE0 0DE0 [Regular] +5C725CEF 0DEF [Regular] +5C725CF9 0DF9 [Regular] +5C725CFA 0DFA [Regular] +5C725CFC 0DFC [Regular] +5C725CFD 0DFD [Regular] +5C725CFE 0DFE [Regular] +5C725CFF 0DFF [Regular] +5C745C00 0900 [Regular] +5C745C08 0908 [Regular] +5C745C09 0909 [Regular] +5C745C0A 090A [Regular] +5C745C0D 090D [Regular] +5C745C1A 091A [Regular] +5C745C22 0922 [Regular] +5C745C25 095C25 [Regular] +5C745C27 0927 [Regular] +5C745C30 0900 [Regular] +5C745C3F 093F [Regular] +5C745C40 0940 [Regular] +5C745C5A 091A [Regular] +5C745C5C 095C [Regular] +5C745C5F 095C5F [Regular] +5C745C61 0961 [Regular] +5C745C62 0908 [Regular] +5C745C6E 090A [Regular] +5C745C72 090D [Regular] +5C745C74 0909 [Regular] +5C745C7E 097E [Regular] +5C745C7F 097F [Regular] +5C745C80 0980 [Regular] +5C745C81 0981 [Regular] +5C745C9F 099F [Regular] +5C745CA0 09A0 [Regular] +5C745CA1 09A1 [Regular] +5C745CE0 09E0 [Regular] +5C745CEF 09EF [Regular] +5C745CF9 09F9 [Regular] +5C745CFA 09FA [Regular] +5C745CFC 09FC [Regular] +5C745CFD 09FD [Regular] +5C745CFE 09FE [Regular] +5C745CFF 09FF [Regular] +5C7E5C00 7E00 [Trivial] +5C7E5C08 7E08 [Trivial] +5C7E5C09 7E09 [Trivial] +5C7E5C0A 7E0A [Trivial] +5C7E5C0D 7E0D [Trivial] +5C7E5C1A 7E1A [Trivial] +5C7E5C22 7E22 [Trivial] +5C7E5C25 7E5C25 [Regular] +5C7E5C27 7E27 [Trivial] +5C7E5C30 7E00 [Regular] +5C7E5C3F 7E3F [Trivial] +5C7E5C40 7E40 [Trivial] +5C7E5C5A 7E1A [Regular] +5C7E5C5C 7E5C [Regular] +5C7E5C5F 7E5C5F [Regular] +5C7E5C61 7E61 [Trivial] +5C7E5C62 7E08 [Regular] +5C7E5C6E 7E0A [Regular] +5C7E5C72 7E0D [Regular] +5C7E5C74 7E09 [Regular] +5C7E5C7E 7E7E [Trivial] +5C7E5C7F 7E7F [Trivial] +5C7E5C80 7E80 [Trivial] +5C7E5C81 7E81 [Trivial] +5C7E5C9F 7E9F [Trivial] +5C7E5CA0 7EA0 [Trivial] +5C7E5CA1 7EA1 [Trivial] +5C7E5CE0 7EE0 [Trivial] +5C7E5CEF 7EEF [Trivial] +5C7E5CF9 7EF9 [Trivial] +5C7E5CFA 7EFA [Trivial] +5C7E5CFC 7EFC [Trivial] +5C7E5CFD 7EFD [Trivial] +5C7E5CFE 7EFE [Trivial] +5C7E5CFF 7EFF [Trivial] +5C7F5C00 7F00 [Trivial] +5C7F5C08 7F08 [Trivial] +5C7F5C09 7F09 [Trivial] +5C7F5C0A 7F0A [Trivial] +5C7F5C0D 7F0D [Trivial] +5C7F5C1A 7F1A [Trivial] +5C7F5C22 7F22 [Trivial] +5C7F5C25 7F5C25 [Regular] +5C7F5C27 7F27 [Trivial] +5C7F5C30 7F00 [Regular] +5C7F5C3F 7F3F [Trivial] +5C7F5C40 7F40 [Trivial] +5C7F5C5A 7F1A [Regular] +5C7F5C5C 7F5C [Regular] +5C7F5C5F 7F5C5F [Regular] +5C7F5C61 7F61 [Trivial] +5C7F5C62 7F08 [Regular] +5C7F5C6E 7F0A [Regular] +5C7F5C72 7F0D [Regular] +5C7F5C74 7F09 [Regular] +5C7F5C7E 7F7E [Trivial] +5C7F5C7F 7F7F [Trivial] +5C7F5C80 7F80 [Trivial] +5C7F5C81 7F81 [Trivial] +5C7F5C9F 7F9F [Trivial] +5C7F5CA0 7FA0 [Trivial] +5C7F5CA1 7FA1 [Trivial] +5C7F5CE0 7FE0 [Trivial] +5C7F5CEF 7FEF [Trivial] +5C7F5CF9 7FF9 [Trivial] +5C7F5CFA 7FFA [Trivial] +5C7F5CFC 7FFC [Trivial] +5C7F5CFD 7FFD [Trivial] +5C7F5CFE 7FFE [Trivial] +5C7F5CFF 7FFF [Trivial] +5C805C00 8000 [Trivial][USER] +5C805C08 8008 [Trivial][USER] +5C805C09 8009 [Trivial][USER] +5C805C0A 800A [Trivial][USER] +5C805C0D 800D [Trivial][USER] +5C805C1A 801A [Trivial][USER] +5C805C22 8022 [Trivial][USER] +5C805C25 805C25 [Regular] +5C805C27 8027 [Trivial][USER] +5C805C30 8000 [Regular] +5C805C3F 803F [Trivial][USER] +5C805C40 8040 [Trivial][USER] +5C805C5A 801A [Regular] +5C805C5C 805C [Regular][USER] +5C805C5F 805C5F [Regular] +5C805C61 8061 [Trivial][USER] +5C805C62 8008 [Regular][USER] +5C805C6E 800A [Regular] +5C805C72 800D [Regular] +5C805C74 8009 [Regular] +5C805C7E 807E [Trivial][USER] +5C805C7F 807F [Trivial][USER] +5C805C80 8080 
[Trivial][USER] +5C805C81 8081 [Trivial][USER] +5C805C9F 809F [Trivial][USER] +5C805CA0 80A0 [Trivial][USER] +5C805CA1 80A1 [Trivial][USER] +5C805CE0 80E0 [Trivial][USER] +5C805CEF 80EF [Trivial][USER] +5C805CF9 80F9 [Trivial][USER] +5C805CFA 80FA [Trivial][USER] +5C805CFC 80FC [Trivial][USER] +5C805CFD 80FD [Trivial][USER] +5C805CFE 80FE [Trivial][USER] +5C805CFF 80FF [Trivial][USER] +5C815C00 8100 [Trivial][USER] +5C815C08 8108 [Trivial][USER] +5C815C09 8109 [Trivial][USER] +5C815C0A 810A [Trivial][USER] +5C815C0D 810D [Trivial][USER] +5C815C1A 811A [Trivial][USER] +5C815C22 8122 [Trivial][USER] +5C815C25 815C25 [Regular] +5C815C27 8127 [Trivial][USER] +5C815C30 8100 [Regular] +5C815C3F 813F [Trivial][USER] +5C815C40 8140 [Trivial][USER] +5C815C5A 811A [Regular] +5C815C5C 815C [Regular][USER] +5C815C5F 815C5F [Regular] +5C815C61 8161 [Trivial][USER] +5C815C62 8108 [Regular][USER] +5C815C6E 810A [Regular] +5C815C72 810D [Regular] +5C815C74 8109 [Regular] +5C815C7E 817E [Trivial][USER] +5C815C7F 817F [Trivial][USER] +5C815C80 8180 [Trivial][USER] +5C815C81 8181 [Trivial][USER] +5C815C9F 819F [Trivial][USER] +5C815CA0 81A0 [Trivial][USER] +5C815CA1 81A1 [Trivial][USER] +5C815CE0 81E0 [Trivial][USER] +5C815CEF 81EF [Trivial][USER] +5C815CF9 81F9 [Trivial][USER] +5C815CFA 81FA [Trivial][USER] +5C815CFC 81FC [Trivial][USER] +5C815CFD 81FD [Trivial][USER] +5C815CFE 81FE [Trivial][USER] +5C815CFF 81FF [Trivial][USER] +5C9F5C00 9F00 [Trivial][USER] +5C9F5C08 9F08 [Trivial][USER] +5C9F5C09 9F09 [Trivial][USER] +5C9F5C0A 9F0A [Trivial][USER] +5C9F5C0D 9F0D [Trivial][USER] +5C9F5C1A 9F1A [Trivial][USER] +5C9F5C22 9F22 [Trivial][USER] +5C9F5C25 9F5C25 [Regular] +5C9F5C27 9F27 [Trivial][USER] +5C9F5C30 9F00 [Regular] +5C9F5C3F 9F3F [Trivial][USER] +5C9F5C40 9F40 [Trivial][USER] +5C9F5C5A 9F1A [Regular] +5C9F5C5C 9F5C [Regular][USER] +5C9F5C5F 9F5C5F [Regular] +5C9F5C61 9F61 [Trivial][USER] +5C9F5C62 9F08 [Regular][USER] +5C9F5C6E 9F0A [Regular] +5C9F5C72 9F0D [Regular] +5C9F5C74 9F09 [Regular] +5C9F5C7E 9F7E [Trivial][USER] +5C9F5C7F 9F7F [Trivial][USER] +5C9F5C80 9F80 [Trivial][USER] +5C9F5C81 9F81 [Trivial][USER] +5C9F5C9F 9F9F [Trivial][USER] +5C9F5CA0 9FA0 [Trivial][USER] +5C9F5CA1 9FA1 [Trivial][USER] +5C9F5CE0 9FE0 [Trivial][USER] +5C9F5CEF 9FEF [Trivial][USER] +5C9F5CF9 9FF9 [Trivial][USER] +5C9F5CFA 9FFA [Trivial][USER] +5C9F5CFC 9FFC [Trivial][USER] +5C9F5CFD 9FFD [Trivial][USER] +5C9F5CFE 9FFE [Trivial][USER] +5C9F5CFF 9FFF [Trivial][USER] +5CA05C00 A000 [Trivial][USER] +5CA05C08 A008 [Trivial][USER] +5CA05C09 A009 [Trivial][USER] +5CA05C0A A00A [Trivial][USER] +5CA05C0D A00D [Trivial][USER] +5CA05C1A A01A [Trivial][USER] +5CA05C22 A022 [Trivial][USER] +5CA05C25 A05C25 [Regular] +5CA05C27 A027 [Trivial][USER] +5CA05C30 A000 [Regular] +5CA05C3F A03F [Trivial][USER] +5CA05C40 A040 [Trivial][USER] +5CA05C5A A01A [Regular] +5CA05C5C A05C [Regular][USER] +5CA05C5F A05C5F [Regular] +5CA05C61 A061 [Trivial][USER] +5CA05C62 A008 [Regular][USER] +5CA05C6E A00A [Regular] +5CA05C72 A00D [Regular] +5CA05C74 A009 [Regular] +5CA05C7E A07E [Trivial][USER] +5CA05C7F A07F [Trivial][USER] +5CA05C80 A080 [Trivial][USER] +5CA05C81 A081 [Trivial][USER] +5CA05C9F A09F [Trivial][USER] +5CA05CA0 A0A0 [Trivial][USER] +5CA05CA1 A0A1 [Trivial][USER] +5CA05CE0 A0E0 [Trivial][USER] +5CA05CEF A0EF [Trivial][USER] +5CA05CF9 A0F9 [Trivial][USER] +5CA05CFA A0FA [Trivial][USER] +5CA05CFC A0FC [Trivial][USER] +5CA05CFD A0FD [Trivial][USER] +5CA05CFE A0FE [Trivial][USER] +5CA05CFF A0FF [Trivial][USER] +5CA15C00 A100 
[Trivial][USER] +5CA15C08 A108 [Trivial][USER] +5CA15C09 A109 [Trivial][USER] +5CA15C0A A10A [Trivial][USER] +5CA15C0D A10D [Trivial][USER] +5CA15C1A A11A [Trivial][USER] +5CA15C22 A122 [Trivial][USER] +5CA15C25 A15C25 [Regular] +5CA15C27 A127 [Trivial][USER] +5CA15C30 A100 [Regular] +5CA15C3F A13F [Trivial][USER] +5CA15C40 A140 [Trivial][USER] +5CA15C5A A11A [Regular] +5CA15C5C A15C [Regular][USER] +5CA15C5F A15C5F [Regular] +5CA15C61 A161 [Trivial][USER] +5CA15C62 A108 [Regular][USER] +5CA15C6E A10A [Regular] +5CA15C72 A10D [Regular] +5CA15C74 A109 [Regular] +5CA15C7E A17E [Trivial][USER] +5CA15C7F A17F [Trivial][USER] +5CA15C80 A180 [Trivial][USER] +5CA15C81 A181 [Trivial][USER] +5CA15C9F A19F [Trivial][USER] +5CA15CA0 A1A0 [Trivial][USER] +5CA15CA1 A1A1 [Trivial][USER] +5CA15CE0 A1E0 [Trivial][USER] +5CA15CEF A1EF [Trivial][USER] +5CA15CF9 A1F9 [Trivial][USER] +5CA15CFA A1FA [Trivial][USER] +5CA15CFC A1FC [Trivial][USER] +5CA15CFD A1FD [Trivial][USER] +5CA15CFE A1FE [Trivial][USER] +5CA15CFF A1FF [Trivial][USER] +5CE05C00 E000 [Trivial][USER] +5CE05C08 E008 [Trivial][USER] +5CE05C09 E009 [Trivial][USER] +5CE05C0A E00A [Trivial][USER] +5CE05C0D E00D [Trivial][USER] +5CE05C1A E01A [Trivial][USER] +5CE05C22 E022 [Trivial][USER] +5CE05C25 E05C25 [Regular] +5CE05C27 E027 [Trivial][USER] +5CE05C30 E000 [Regular] +5CE05C3F E03F [Trivial][USER] +5CE05C40 E040 [Trivial][USER] +5CE05C5A E01A [Regular] +5CE05C5C E05C [Regular][USER] +5CE05C5F E05C5F [Regular] +5CE05C61 E061 [Trivial][USER] +5CE05C62 E008 [Regular][USER] +5CE05C6E E00A [Regular] +5CE05C72 E00D [Regular] +5CE05C74 E009 [Regular] +5CE05C7E E07E [Trivial][USER] +5CE05C7F E07F [Trivial][USER] +5CE05C80 E080 [Trivial][USER] +5CE05C81 E081 [Trivial][USER] +5CE05C9F E09F [Trivial][USER] +5CE05CA0 E0A0 [Trivial][USER] +5CE05CA1 E0A1 [Trivial][USER] +5CE05CE0 E0E0 [Trivial][USER] +5CE05CEF E0EF [Trivial][USER] +5CE05CF9 E0F9 [Trivial][USER] +5CE05CFA E0FA [Trivial][USER] +5CE05CFC E0FC [Trivial][USER] +5CE05CFD E0FD [Trivial][USER] +5CE05CFE E0FE [Trivial][USER] +5CE05CFF E0FF [Trivial][USER] +5CEF5C00 EF00 [Trivial][USER] +5CEF5C08 EF08 [Trivial][USER] +5CEF5C09 EF09 [Trivial][USER] +5CEF5C0A EF0A [Trivial][USER] +5CEF5C0D EF0D [Trivial][USER] +5CEF5C1A EF1A [Trivial][USER] +5CEF5C22 EF22 [Trivial][USER] +5CEF5C25 EF5C25 [Regular] +5CEF5C27 EF27 [Trivial][USER] +5CEF5C30 EF00 [Regular] +5CEF5C3F EF3F [Trivial][USER] +5CEF5C40 EF40 [Trivial][USER] +5CEF5C5A EF1A [Regular] +5CEF5C5C EF5C [Regular][USER] +5CEF5C5F EF5C5F [Regular] +5CEF5C61 EF61 [Trivial][USER] +5CEF5C62 EF08 [Regular][USER] +5CEF5C6E EF0A [Regular] +5CEF5C72 EF0D [Regular] +5CEF5C74 EF09 [Regular] +5CEF5C7E EF7E [Trivial][USER] +5CEF5C7F EF7F [Trivial][USER] +5CEF5C80 EF80 [Trivial][USER] +5CEF5C81 EF81 [Trivial][USER] +5CEF5C9F EF9F [Trivial][USER] +5CEF5CA0 EFA0 [Trivial][USER] +5CEF5CA1 EFA1 [Trivial][USER] +5CEF5CE0 EFE0 [Trivial][USER] +5CEF5CEF EFEF [Trivial][USER] +5CEF5CF9 EFF9 [Trivial][USER] +5CEF5CFA EFFA [Trivial][USER] +5CEF5CFC EFFC [Trivial][USER] +5CEF5CFD EFFD [Trivial][USER] +5CEF5CFE EFFE [Trivial][USER] +5CEF5CFF EFFF [Trivial][USER] +5CF95C00 F900 [Trivial][USER] +5CF95C08 F908 [Trivial][USER] +5CF95C09 F909 [Trivial][USER] +5CF95C0A F90A [Trivial][USER] +5CF95C0D F90D [Trivial][USER] +5CF95C1A F91A [Trivial][USER] +5CF95C22 F922 [Trivial][USER] +5CF95C25 F95C25 [Regular] +5CF95C27 F927 [Trivial][USER] +5CF95C30 F900 [Regular] +5CF95C3F F93F [Trivial][USER] +5CF95C40 F940 [Trivial][USER] +5CF95C5A F91A [Regular] +5CF95C5C F95C [Regular][USER] +5CF95C5F 
F95C5F [Regular] +5CF95C61 F961 [Trivial][USER] +5CF95C62 F908 [Regular][USER] +5CF95C6E F90A [Regular] +5CF95C72 F90D [Regular] +5CF95C74 F909 [Regular] +5CF95C7E F97E [Trivial][USER] +5CF95C7F F97F [Trivial][USER] +5CF95C80 F980 [Trivial][USER] +5CF95C81 F981 [Trivial][USER] +5CF95C9F F99F [Trivial][USER] +5CF95CA0 F9A0 [Trivial][USER] +5CF95CA1 F9A1 [Trivial][USER] +5CF95CE0 F9E0 [Trivial][USER] +5CF95CEF F9EF [Trivial][USER] +5CF95CF9 F9F9 [Trivial][USER] +5CF95CFA F9FA [Trivial][USER] +5CF95CFC F9FC [Trivial][USER] +5CF95CFD F9FD [Trivial][USER] +5CF95CFE F9FE [Trivial][USER] +5CF95CFF F9FF [Trivial][USER] +5CFA5C00 FA00 [Trivial][USER] +5CFA5C08 FA08 [Trivial][USER] +5CFA5C09 FA09 [Trivial][USER] +5CFA5C0A FA0A [Trivial][USER] +5CFA5C0D FA0D [Trivial][USER] +5CFA5C1A FA1A [Trivial][USER] +5CFA5C22 FA22 [Trivial][USER] +5CFA5C25 FA5C25 [Regular] +5CFA5C27 FA27 [Trivial][USER] +5CFA5C30 FA00 [Regular] +5CFA5C3F FA3F [Trivial][USER] +5CFA5C40 FA40 [Trivial][USER] +5CFA5C5A FA1A [Regular] +5CFA5C5C FA5C [Regular][USER] +5CFA5C5F FA5C5F [Regular] +5CFA5C61 FA61 [Trivial][USER] +5CFA5C62 FA08 [Regular][USER] +5CFA5C6E FA0A [Regular] +5CFA5C72 FA0D [Regular] +5CFA5C74 FA09 [Regular] +5CFA5C7E FA7E [Trivial][USER] +5CFA5C7F FA7F [Trivial][USER] +5CFA5C80 FA80 [Trivial][USER] +5CFA5C81 FA81 [Trivial][USER] +5CFA5C9F FA9F [Trivial][USER] +5CFA5CA0 FAA0 [Trivial][USER] +5CFA5CA1 FAA1 [Trivial][USER] +5CFA5CE0 FAE0 [Trivial][USER] +5CFA5CEF FAEF [Trivial][USER] +5CFA5CF9 FAF9 [Trivial][USER] +5CFA5CFA FAFA [Trivial][USER] +5CFA5CFC FAFC [Trivial][USER] +5CFA5CFD FAFD [Trivial][USER] +5CFA5CFE FAFE [Trivial][USER] +5CFA5CFF FAFF [Trivial][USER] +5CFC5C00 FC00 [Trivial][USER] +5CFC5C08 FC08 [Trivial][USER] +5CFC5C09 FC09 [Trivial][USER] +5CFC5C0A FC0A [Trivial][USER] +5CFC5C0D FC0D [Trivial][USER] +5CFC5C1A FC1A [Trivial][USER] +5CFC5C22 FC22 [Trivial][USER] +5CFC5C25 FC5C25 [Regular] +5CFC5C27 FC27 [Trivial][USER] +5CFC5C30 FC00 [Regular] +5CFC5C3F FC3F [Trivial][USER] +5CFC5C40 FC40 [Trivial][USER] +5CFC5C5A FC1A [Regular] +5CFC5C5C FC5C [Regular][USER] +5CFC5C5F FC5C5F [Regular] +5CFC5C61 FC61 [Trivial][USER] +5CFC5C62 FC08 [Regular][USER] +5CFC5C6E FC0A [Regular] +5CFC5C72 FC0D [Regular] +5CFC5C74 FC09 [Regular] +5CFC5C7E FC7E [Trivial][USER] +5CFC5C7F FC7F [Trivial][USER] +5CFC5C80 FC80 [Trivial][USER] +5CFC5C81 FC81 [Trivial][USER] +5CFC5C9F FC9F [Trivial][USER] +5CFC5CA0 FCA0 [Trivial][USER] +5CFC5CA1 FCA1 [Trivial][USER] +5CFC5CE0 FCE0 [Trivial][USER] +5CFC5CEF FCEF [Trivial][USER] +5CFC5CF9 FCF9 [Trivial][USER] +5CFC5CFA FCFA [Trivial][USER] +5CFC5CFC FCFC [Trivial][USER] +5CFC5CFD FCFD [Trivial][USER] +5CFC5CFE FCFE [Trivial][USER] +5CFC5CFF FCFF [Trivial][USER] +5CFD5C00 FD00 [Trivial][USER] +5CFD5C08 FD08 [Trivial][USER] +5CFD5C09 FD09 [Trivial][USER] +5CFD5C0A FD0A [Trivial][USER] +5CFD5C0D FD0D [Trivial][USER] +5CFD5C1A FD1A [Trivial][USER] +5CFD5C22 FD22 [Trivial][USER] +5CFD5C25 FD5C25 [Regular] +5CFD5C27 FD27 [Trivial][USER] +5CFD5C30 FD00 [Regular] +5CFD5C3F FD3F [Trivial][USER] +5CFD5C40 FD40 [Trivial][USER] +5CFD5C5A FD1A [Regular] +5CFD5C5C FD5C [Regular][USER] +5CFD5C5F FD5C5F [Regular] +5CFD5C61 FD61 [Trivial][USER] +5CFD5C62 FD08 [Regular][USER] +5CFD5C6E FD0A [Regular] +5CFD5C72 FD0D [Regular] +5CFD5C74 FD09 [Regular] +5CFD5C7E FD7E [Trivial][USER] +5CFD5C7F FD7F [Trivial][USER] +5CFD5C80 FD80 [Trivial][USER] +5CFD5C81 FD81 [Trivial][USER] +5CFD5C9F FD9F [Trivial][USER] +5CFD5CA0 FDA0 [Trivial][USER] +5CFD5CA1 FDA1 [Trivial][USER] +5CFD5CE0 FDE0 [Trivial][USER] +5CFD5CEF 
FDEF [Trivial][USER] +5CFD5CF9 FDF9 [Trivial][USER] +5CFD5CFA FDFA [Trivial][USER] +5CFD5CFC FDFC [Trivial][USER] +5CFD5CFD FDFD [Trivial][USER] +5CFD5CFE FDFE [Trivial][USER] +5CFD5CFF FDFF [Trivial][USER] +5CFE5C00 FE00 [Trivial][USER] +5CFE5C08 FE08 [Trivial][USER] +5CFE5C09 FE09 [Trivial][USER] +5CFE5C0A FE0A [Trivial][USER] +5CFE5C0D FE0D [Trivial][USER] +5CFE5C1A FE1A [Trivial][USER] +5CFE5C22 FE22 [Trivial][USER] +5CFE5C25 FE5C25 [Regular] +5CFE5C27 FE27 [Trivial][USER] +5CFE5C30 FE00 [Regular] +5CFE5C3F FE3F [Trivial][USER] +5CFE5C40 FE40 [Trivial][USER] +5CFE5C5A FE1A [Regular] +5CFE5C5C FE5C [Regular][USER] +5CFE5C5F FE5C5F [Regular] +5CFE5C61 FE61 [Trivial][USER] +5CFE5C62 FE08 [Regular][USER] +5CFE5C6E FE0A [Regular] +5CFE5C72 FE0D [Regular] +5CFE5C74 FE09 [Regular] +5CFE5C7E FE7E [Trivial][USER] +5CFE5C7F FE7F [Trivial][USER] +5CFE5C80 FE80 [Trivial][USER] +5CFE5C81 FE81 [Trivial][USER] +5CFE5C9F FE9F [Trivial][USER] +5CFE5CA0 FEA0 [Trivial][USER] +5CFE5CA1 FEA1 [Trivial][USER] +5CFE5CE0 FEE0 [Trivial][USER] +5CFE5CEF FEEF [Trivial][USER] +5CFE5CF9 FEF9 [Trivial][USER] +5CFE5CFA FEFA [Trivial][USER] +5CFE5CFC FEFC [Trivial][USER] +5CFE5CFD FEFD [Trivial][USER] +5CFE5CFE FEFE [Trivial][USER] +5CFE5CFF FEFF [Trivial][USER] +5CFF5C00 FF00 [Trivial][USER] +5CFF5C08 FF08 [Trivial][USER] +5CFF5C09 FF09 [Trivial][USER] +5CFF5C0A FF0A [Trivial][USER] +5CFF5C0D FF0D [Trivial][USER] +5CFF5C1A FF1A [Trivial][USER] +5CFF5C22 FF22 [Trivial][USER] +5CFF5C25 FF5C25 [Regular] +5CFF5C27 FF27 [Trivial][USER] +5CFF5C30 FF00 [Regular] +5CFF5C3F FF3F [Trivial][USER] +5CFF5C40 FF40 [Trivial][USER] +5CFF5C5A FF1A [Regular] +5CFF5C5C FF5C [Regular][USER] +5CFF5C5F FF5C5F [Regular] +5CFF5C61 FF61 [Trivial][USER] +5CFF5C62 FF08 [Regular][USER] +5CFF5C6E FF0A [Regular] +5CFF5C72 FF0D [Regular] +5CFF5C74 FF09 [Regular] +5CFF5C7E FF7E [Trivial][USER] +5CFF5C7F FF7F [Trivial][USER] +5CFF5C80 FF80 [Trivial][USER] +5CFF5C81 FF81 [Trivial][USER] +5CFF5C9F FF9F [Trivial][USER] +5CFF5CA0 FFA0 [Trivial][USER] +5CFF5CA1 FFA1 [Trivial][USER] +5CFF5CE0 FFE0 [Trivial][USER] +5CFF5CEF FFEF [Trivial][USER] +5CFF5CF9 FFF9 [Trivial][USER] +5CFF5CFA FFFA [Trivial][USER] +5CFF5CFC FFFC [Trivial][USER] +5CFF5CFD FFFD [Trivial][USER] +5CFF5CFE FFFE [Trivial][USER] +5CFF5CFF FFFF [Trivial][USER] +DROP TABLE t1; +DROP PROCEDURE p1; +DROP PROCEDURE p2; +DROP FUNCTION unescape; +DROP FUNCTION unescape_type; +DROP FUNCTION wellformedness; +DROP FUNCTION mysql_real_escape_string_generated; +DROP FUNCTION iswellformed; +DROP TABLE allbytes; +# End of ctype_backslash.inc +# +# MDEV-6752 Trailing incomplete characters are not replaced to question marks on conversion +# +SET NAMES utf8, character_set_connection=latin1; +SELECT 'Â'; +? +? 
+SELECT HEX('Â'); +HEX('Â') +3F +SELECT HEX(CAST('Â' AS CHAR CHARACTER SET utf8)); +HEX(CAST('Â' AS CHAR CHARACTER SET utf8)) +3F +SELECT HEX(CAST('Â' AS CHAR CHARACTER SET latin1)); +HEX(CAST('Â' AS CHAR CHARACTER SET latin1)) +3F +SELECT HEX(CONVERT('Â' USING utf8)); +HEX(CONVERT('Â' USING utf8)) +3F +SELECT HEX(CONVERT('Â' USING latin1)); +HEX(CONVERT('Â' USING latin1)) +3F +SELECT 'Âx'; +?x +?x +SELECT HEX('Âx'); +HEX('Âx') +3F78 +SELECT HEX(CAST('Âx' AS CHAR CHARACTER SET utf8)); +HEX(CAST('Âx' AS CHAR CHARACTER SET utf8)) +3F78 +SELECT HEX(CAST('Âx' AS CHAR CHARACTER SET latin1)); +HEX(CAST('Âx' AS CHAR CHARACTER SET latin1)) +3F78 +SELECT HEX(CONVERT('Âx' USING utf8)); +HEX(CONVERT('Âx' USING utf8)) +3F78 +SELECT HEX(CONVERT('Âx' USING latin1)); +HEX(CONVERT('Âx' USING latin1)) +3F78 +SET NAMES utf8; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET latin1); +INSERT INTO t1 VALUES ('Â'),('Â#'); +Warnings: +Warning 1366 Incorrect string value: '\xC2' for column 'a' at row 1 +Warning 1366 Incorrect string value: '\xC2#' for column 'a' at row 2 +SHOW WARNINGS; +Level Code Message +Warning 1366 Incorrect string value: '\xC2' for column 'a' at row 1 +Warning 1366 Incorrect string value: '\xC2#' for column 'a' at row 2 +SELECT HEX(a),a FROM t1; +HEX(a) a +3F ? +3F23 ?# +DROP TABLE t1; +# +# End of 10.0 tests +# diff --git a/mysql-test/r/ctype_partitions.result b/mysql-test/r/ctype_partitions.result new file mode 100644 index 00000000000..a39ecc11529 --- /dev/null +++ b/mysql-test/r/ctype_partitions.result @@ -0,0 +1,51 @@ +# +# MDEV-6255 DUPLICATE KEY Errors on SELECT .. GROUP BY that uses temporary and filesort +# +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET cp1251 COLLATE cp1251_ukrainian_ci); +INSERT INTO t1 VALUES (0x20),(0x60),(0x6060),(0x606060); +SELECT HEX(a) FROM t1 WHERE a=0x60; +HEX(a) +20 +60 +6060 +606060 +ALTER TABLE t1 PARTITION BY KEY(a) PARTITIONS 3; +SELECT HEX(a) FROM t1 WHERE a=0x60; +HEX(a) +20 +60 +6060 +606060 +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET koi8u COLLATE koi8u_general_ci); +INSERT INTO t1 VALUES (0x20),(0x60),(0x6060),(0x606060); +SELECT HEX(a) FROM t1 WHERE a=0x60; +HEX(a) +20 +60 +6060 +606060 +ALTER TABLE t1 PARTITION BY KEY(a) PARTITIONS 3; +SELECT HEX(a) FROM t1 WHERE a=0x60; +HEX(a) +20 +60 +6060 +606060 +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET cp1250 COLLATE cp1250_general_ci); +INSERT INTO t1 VALUES (0x20),(0xA0),(0xA0A0),(0xA0A0A0); +SELECT HEX(a) FROM t1 WHERE a=0xA0; +HEX(a) +20 +A0 +A0A0 +A0A0A0 +ALTER TABLE t1 PARTITION BY KEY(a) PARTITIONS 3; +SELECT HEX(a) FROM t1 WHERE a=0xA0; +HEX(a) +20 +A0 +A0A0 +A0A0A0 +DROP TABLE t1; diff --git a/mysql-test/r/ctype_sjis.result b/mysql-test/r/ctype_sjis.result index ffeb8524c6e..48456c16705 100644 --- a/mysql-test/r/ctype_sjis.result +++ b/mysql-test/r/ctype_sjis.result @@ -15244,3 +15244,3268 @@ hex(weight_string(cast(0x814081408140 as char),25, 4, 0xC0)) # # End of 5.6 tests # +# +# Start of 10.0 tests +# +# Start of ctype_unescape.inc +SET @query=_binary'SELECT CHARSET(\'test\'),@@character_set_client,@@character_set_connection'; +PREPARE stmt FROM @query; +EXECUTE stmt; +CHARSET('test') @@character_set_client @@character_set_connection +sjis sjis sjis +DEALLOCATE PREPARE stmt; +CREATE TABLE allbytes (a VARBINARY(10)); +# Using selected bytes combinations +CREATE TABLE halfs (a INT); +INSERT INTO halfs VALUES (0x00),(0x01),(0x02),(0x03),(0x04),(0x05),(0x06),(0x07); +INSERT INTO halfs VALUES (0x08),(0x09),(0x0A),(0x0B),(0x0C),(0x0D),(0x0E),(0x0F); +CREATE 
TEMPORARY TABLE bytes (a BINARY(1), KEY(a)) ENGINE=MyISAM;
+INSERT INTO bytes SELECT CHAR((t1.a << 4) | t2.a USING BINARY) FROM halfs t1, halfs t2;
+DROP TABLE halfs;
+CREATE TABLE selected_bytes (a VARBINARY(10));
+INSERT INTO selected_bytes (a) VALUES ('\0'),('\b'),('\t'),('\r'),('\n'),('\Z');
+INSERT INTO selected_bytes (a) VALUES ('0'),('b'),('t'),('r'),('n'),('Z');
+INSERT INTO selected_bytes (a) VALUES ('\\'),('_'),('%'),(0x22),(0x27);
+INSERT INTO selected_bytes (a) VALUES ('a');
+INSERT INTO selected_bytes (a) VALUES
+(0x3F), # 7bit
+(0x40), # 7bit mbtail
+(0x7E), # 7bit mbtail nonascii-8bit
+(0x7F), # 7bit nonascii-8bit
+(0x80), # mbtail bad-mb
+(0x81), # mbhead mbtail
+(0x9F), # mbhead mbtail bad-mb
+(0xA0), # mbhead mbtail bad-mb
+(0xA1), # mbhead mbtail nonascii-8bit
+(0xE0), # mbhead mbtail
+(0xEF), # mbhead mbtail
+(0xF9), # mbhead mbtail
+(0xFA), # mbhead mbtail bad-mb
+(0xFC), # mbhead mbtail bad-mb
+(0xFD), # mbhead mbtail bad-mb
+(0xFE), # mbhead mbtail bad-mb
+(0xFF);
+INSERT INTO allbytes (a) SELECT a FROM bytes;
+INSERT INTO allbytes (a) SELECT CONCAT(t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2;
+INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2;
+INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,0x5C,t2.a) FROM selected_bytes t1,selected_bytes t2;
+DROP TABLE selected_bytes;
+DELETE FROM allbytes WHERE
+OCTET_LENGTH(a)>1 AND
+LOCATE(0x5C,a)=0 AND
+a NOT LIKE '%\'%' AND
+ a NOT LIKE '%"%';
+CREATE PROCEDURE p1(val VARBINARY(10))
+BEGIN
+DECLARE EXIT HANDLER FOR SQLSTATE '42000' INSERT INTO t1 (a,b) VALUES(val,NULL);
+SET @query=CONCAT(_binary"INSERT INTO t1 (a,b) VALUES (0x",HEX(val),",'",val,"')");
+PREPARE stmt FROM @query;
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+END//
+CREATE PROCEDURE p2()
+BEGIN
+DECLARE val VARBINARY(10);
+DECLARE done INT DEFAULT FALSE;
+DECLARE stmt CURSOR FOR SELECT a FROM allbytes;
+DECLARE CONTINUE HANDLER FOR NOT FOUND SET done=TRUE;
+OPEN stmt;
+read_loop1: LOOP
+FETCH stmt INTO val;
+IF done THEN
+LEAVE read_loop1;
+END IF;
+CALL p1(val);
+END LOOP;
+CLOSE stmt;
+END//
+CREATE FUNCTION iswellformed(a VARBINARY(256)) RETURNS INT RETURN a=BINARY CONVERT(a USING sjis);//
+CREATE FUNCTION unescape(a VARBINARY(256)) RETURNS VARBINARY(256)
+BEGIN
+# We need to do it in a way to avoid producing new escape sequences
+# First, enclose all known escape sequences in '{{xx}}'
+ # - Backslash not followed by the LIKE pattern characters _ and %
+# - Double escapes
+# This uses PCRE Branch Reset Groups: (?|(alt1)|(alt2)|(alt3)).
+# So '\\1' in the last argument always means the match, no matter
+# which alternative it came from.
+SET a=REGEXP_REPLACE(a,'(?|(\\\\[^_%])|(\\x{27}\\x{27}))','{{\\1}}');
+# Now unescape all enclosed standard escape sequences
+SET a=REPLACE(a,'{{\\0}}', '\0');
+SET a=REPLACE(a,'{{\\b}}', '\b');
+SET a=REPLACE(a,'{{\\t}}', '\t');
+SET a=REPLACE(a,'{{\\r}}', '\r');
+SET a=REPLACE(a,'{{\\n}}', '\n');
+SET a=REPLACE(a,'{{\\Z}}', '\Z');
+SET a=REPLACE(a,'{{\\\'}}', '\'');
+# Unescape double quotes
+SET a=REPLACE(a,'{{\'\'}}', '\'');
+ # Unescape the rest: all other \x sequences mean just 'x'
+ SET a=REGEXP_REPLACE(a, '{{\\\\(.|\\R)}}', '\\1');
+ RETURN a;
+END//
+CREATE FUNCTION unescape_type(a VARBINARY(256),b VARBINARY(256)) RETURNS VARBINARY(256)
+BEGIN
+RETURN CASE
+WHEN b IS NULL THEN '[SyntErr]'
+ WHEN a=b THEN CASE
+WHEN OCTET_LENGTH(a)=1 THEN '[Preserve]'
+ WHEN a RLIKE '\\\\[_%]' THEN '[Preserve][LIKE]'
+ WHEN a RLIKE '^[[:ascii:]]+$' THEN '[Preserve][ASCII]'
+ ELSE '[Preserv][MB]' END
+WHEN REPLACE(a,0x5C,'')=b THEN '[Trivial]'
+ WHEN UNESCAPE(a)=b THEN '[Regular]'
+ ELSE '[Special]' END;
+END//
+CREATE FUNCTION wellformedness(a VARBINARY(256), b VARBINARY(256))
+RETURNS VARBINARY(256)
+BEGIN
+RETURN CASE
+WHEN b IS NULL THEN ''
+ WHEN NOT iswellformed(a) AND iswellformed(b) THEN '[FIXED]'
+ WHEN iswellformed(a) AND NOT iswellformed(b) THEN '[BROKE]'
+ WHEN NOT iswellformed(a) AND NOT iswellformed(b) THEN '[ILSEQ]'
+ ELSE ''
+ END;
+END//
+CREATE FUNCTION mysql_real_escape_string_generated(a VARBINARY(256))
+RETURNS VARBINARY(256)
+BEGIN
+DECLARE a1 BINARY(1) DEFAULT SUBSTR(a,1,1);
+DECLARE a2 BINARY(1) DEFAULT SUBSTR(a,2,1);
+DECLARE a3 BINARY(1) DEFAULT SUBSTR(a,3,1);
+DECLARE a4 BINARY(1) DEFAULT SUBSTR(a,4,1);
+DECLARE a2a4 BINARY(2) DEFAULT CONCAT(a2,a4);
+RETURN CASE
+WHEN (a1=0x5C) AND
+(a3=0x5C) AND
+(a2>0x7F) AND
+(a4 NOT IN ('_','%','0','t','r','n','Z')) AND
+iswellformed(a2a4) THEN '[USER]'
+ ELSE ''
+ END;
+END//
+CREATE TABLE t1 (a VARBINARY(10),b VARBINARY(10));
+CALL p2();
+SELECT HEX(a),HEX(b),
+CONCAT(unescape_type(a,b),
+wellformedness(a,b),
+mysql_real_escape_string_generated(a),
+IF(UNESCAPE(a)<>b,CONCAT('[BAD',HEX(UNESCAPE(a)),']'),'')) AS comment
+FROM t1 ORDER BY LENGTH(a),a;
+HEX(a) HEX(b) comment +00 00 [Preserve] +01 01 [Preserve] +02 02 [Preserve] +03 03 [Preserve] +04 04 [Preserve] +05 05 [Preserve] +06 06 [Preserve] +07 07 [Preserve] +08 08 [Preserve] +09 09 [Preserve] +0A 0A [Preserve] +0B 0B [Preserve] +0C 0C [Preserve] +0D 0D [Preserve] +0E 0E [Preserve] +0F 0F [Preserve] +10 10 [Preserve] +11 11 [Preserve] +12 12 [Preserve] +13 13 [Preserve] +14 14 [Preserve] +15 15 [Preserve] +16 16 [Preserve] +17 17 [Preserve] +18 18 [Preserve] +19 19 [Preserve] +1A 1A [Preserve] +1B 1B [Preserve] +1C 1C [Preserve] +1D 1D [Preserve] +1E 1E [Preserve] +1F 1F [Preserve] +20 20 [Preserve] +21 21 [Preserve] +22 22 [Preserve] +23 23 [Preserve] +24 24 [Preserve] +25 25 [Preserve] +26 26 [Preserve] +27 NULL [SyntErr] +28 28 [Preserve] +29 29 [Preserve] +2A 2A [Preserve] +2B 2B [Preserve] +2C 2C [Preserve] +2D 2D [Preserve] +2E 2E [Preserve] +2F 2F [Preserve] +30 30 [Preserve] +31 31 [Preserve] +32 32 [Preserve] +33 33 [Preserve] +34 34 [Preserve] +35 35 [Preserve] +36 36 [Preserve] +37 37 [Preserve] +38 38 [Preserve] +39 39 [Preserve] +3A 3A [Preserve] +3B 3B [Preserve] +3C 3C [Preserve] +3D 3D [Preserve] +3E 3E [Preserve] +3F 3F [Preserve] +40 40 [Preserve] +41 41 [Preserve] +42 42 [Preserve] +43 43 [Preserve] +44 44 [Preserve] +45 45 [Preserve] +46 46 [Preserve] +47 47 [Preserve] +48 48 [Preserve] +49 49 [Preserve] +4A 4A [Preserve] +4B 4B [Preserve] +4C 4C [Preserve]
+4D 4D [Preserve] +4E 4E [Preserve] +4F 4F [Preserve] +50 50 [Preserve] +51 51 [Preserve] +52 52 [Preserve] +53 53 [Preserve] +54 54 [Preserve] +55 55 [Preserve] +56 56 [Preserve] +57 57 [Preserve] +58 58 [Preserve] +59 59 [Preserve] +5A 5A [Preserve] +5B 5B [Preserve] +5C NULL [SyntErr] +5D 5D [Preserve] +5E 5E [Preserve] +5F 5F [Preserve] +60 60 [Preserve] +61 61 [Preserve] +62 62 [Preserve] +63 63 [Preserve] +64 64 [Preserve] +65 65 [Preserve] +66 66 [Preserve] +67 67 [Preserve] +68 68 [Preserve] +69 69 [Preserve] +6A 6A [Preserve] +6B 6B [Preserve] +6C 6C [Preserve] +6D 6D [Preserve] +6E 6E [Preserve] +6F 6F [Preserve] +70 70 [Preserve] +71 71 [Preserve] +72 72 [Preserve] +73 73 [Preserve] +74 74 [Preserve] +75 75 [Preserve] +76 76 [Preserve] +77 77 [Preserve] +78 78 [Preserve] +79 79 [Preserve] +7A 7A [Preserve] +7B 7B [Preserve] +7C 7C [Preserve] +7D 7D [Preserve] +7E 7E [Preserve] +7F 7F [Preserve] +80 80 [Preserve][ILSEQ] +81 81 [Preserve][ILSEQ] +82 82 [Preserve][ILSEQ] +83 83 [Preserve][ILSEQ] +84 84 [Preserve][ILSEQ] +85 85 [Preserve][ILSEQ] +86 86 [Preserve][ILSEQ] +87 87 [Preserve][ILSEQ] +88 88 [Preserve][ILSEQ] +89 89 [Preserve][ILSEQ] +8A 8A [Preserve][ILSEQ] +8B 8B [Preserve][ILSEQ] +8C 8C [Preserve][ILSEQ] +8D 8D [Preserve][ILSEQ] +8E 8E [Preserve][ILSEQ] +8F 8F [Preserve][ILSEQ] +90 90 [Preserve][ILSEQ] +91 91 [Preserve][ILSEQ] +92 92 [Preserve][ILSEQ] +93 93 [Preserve][ILSEQ] +94 94 [Preserve][ILSEQ] +95 95 [Preserve][ILSEQ] +96 96 [Preserve][ILSEQ] +97 97 [Preserve][ILSEQ] +98 98 [Preserve][ILSEQ] +99 99 [Preserve][ILSEQ] +9A 9A [Preserve][ILSEQ] +9B 9B [Preserve][ILSEQ] +9C 9C [Preserve][ILSEQ] +9D 9D [Preserve][ILSEQ] +9E 9E [Preserve][ILSEQ] +9F 9F [Preserve][ILSEQ] +A0 A0 [Preserve][ILSEQ] +A1 A1 [Preserve] +A2 A2 [Preserve] +A3 A3 [Preserve] +A4 A4 [Preserve] +A5 A5 [Preserve] +A6 A6 [Preserve] +A7 A7 [Preserve] +A8 A8 [Preserve] +A9 A9 [Preserve] +AA AA [Preserve] +AB AB [Preserve] +AC AC [Preserve] +AD AD [Preserve] +AE AE [Preserve] +AF AF [Preserve] +B0 B0 [Preserve] +B1 B1 [Preserve] +B2 B2 [Preserve] +B3 B3 [Preserve] +B4 B4 [Preserve] +B5 B5 [Preserve] +B6 B6 [Preserve] +B7 B7 [Preserve] +B8 B8 [Preserve] +B9 B9 [Preserve] +BA BA [Preserve] +BB BB [Preserve] +BC BC [Preserve] +BD BD [Preserve] +BE BE [Preserve] +BF BF [Preserve] +C0 C0 [Preserve] +C1 C1 [Preserve] +C2 C2 [Preserve] +C3 C3 [Preserve] +C4 C4 [Preserve] +C5 C5 [Preserve] +C6 C6 [Preserve] +C7 C7 [Preserve] +C8 C8 [Preserve] +C9 C9 [Preserve] +CA CA [Preserve] +CB CB [Preserve] +CC CC [Preserve] +CD CD [Preserve] +CE CE [Preserve] +CF CF [Preserve] +D0 D0 [Preserve] +D1 D1 [Preserve] +D2 D2 [Preserve] +D3 D3 [Preserve] +D4 D4 [Preserve] +D5 D5 [Preserve] +D6 D6 [Preserve] +D7 D7 [Preserve] +D8 D8 [Preserve] +D9 D9 [Preserve] +DA DA [Preserve] +DB DB [Preserve] +DC DC [Preserve] +DD DD [Preserve] +DE DE [Preserve] +DF DF [Preserve] +E0 E0 [Preserve][ILSEQ] +E1 E1 [Preserve][ILSEQ] +E2 E2 [Preserve][ILSEQ] +E3 E3 [Preserve][ILSEQ] +E4 E4 [Preserve][ILSEQ] +E5 E5 [Preserve][ILSEQ] +E6 E6 [Preserve][ILSEQ] +E7 E7 [Preserve][ILSEQ] +E8 E8 [Preserve][ILSEQ] +E9 E9 [Preserve][ILSEQ] +EA EA [Preserve][ILSEQ] +EB EB [Preserve][ILSEQ] +EC EC [Preserve][ILSEQ] +ED ED [Preserve][ILSEQ] +EE EE [Preserve][ILSEQ] +EF EF [Preserve][ILSEQ] +F0 F0 [Preserve][ILSEQ] +F1 F1 [Preserve][ILSEQ] +F2 F2 [Preserve][ILSEQ] +F3 F3 [Preserve][ILSEQ] +F4 F4 [Preserve][ILSEQ] +F5 F5 [Preserve][ILSEQ] +F6 F6 [Preserve][ILSEQ] +F7 F7 [Preserve][ILSEQ] +F8 F8 [Preserve][ILSEQ] +F9 F9 [Preserve][ILSEQ] +FA FA [Preserve][ILSEQ] 
+FB FB [Preserve][ILSEQ] +FC FC [Preserve][ILSEQ] +FD FD [Preserve][ILSEQ] +FE FE [Preserve][ILSEQ] +FF FF [Preserve][ILSEQ] +0022 0022 [Preserve][ASCII] +0027 NULL [SyntErr] +005C NULL [SyntErr] +0822 0822 [Preserve][ASCII] +0827 NULL [SyntErr] +085C NULL [SyntErr] +0922 0922 [Preserve][ASCII] +0927 NULL [SyntErr] +095C NULL [SyntErr] +0A22 0A22 [Preserve][ASCII] +0A27 NULL [SyntErr] +0A5C NULL [SyntErr] +0D22 0D22 [Preserve][ASCII] +0D27 NULL [SyntErr] +0D5C NULL [SyntErr] +1A22 1A22 [Preserve][ASCII] +1A27 NULL [SyntErr] +1A5C NULL [SyntErr] +2200 2200 [Preserve][ASCII] +2208 2208 [Preserve][ASCII] +2209 2209 [Preserve][ASCII] +220A 220A [Preserve][ASCII] +220D 220D [Preserve][ASCII] +221A 221A [Preserve][ASCII] +2222 2222 [Preserve][ASCII] +2225 2225 [Preserve][ASCII] +2227 NULL [SyntErr] +2230 2230 [Preserve][ASCII] +223F 223F [Preserve][ASCII] +2240 2240 [Preserve][ASCII] +225A 225A [Preserve][ASCII] +225C NULL [SyntErr] +225F 225F [Preserve][ASCII] +2261 2261 [Preserve][ASCII] +2262 2262 [Preserve][ASCII] +226E 226E [Preserve][ASCII] +2272 2272 [Preserve][ASCII] +2274 2274 [Preserve][ASCII] +227E 227E [Preserve][ASCII] +227F 227F [Preserve][ASCII] +2280 2280 [Preserv][MB][ILSEQ] +2281 2281 [Preserv][MB][ILSEQ] +229F 229F [Preserv][MB][ILSEQ] +22A0 22A0 [Preserv][MB][ILSEQ] +22A1 22A1 [Preserv][MB] +22E0 22E0 [Preserv][MB][ILSEQ] +22EF 22EF [Preserv][MB][ILSEQ] +22F9 22F9 [Preserv][MB][ILSEQ] +22FA 22FA [Preserv][MB][ILSEQ] +22FC 22FC [Preserv][MB][ILSEQ] +22FD 22FD [Preserv][MB][ILSEQ] +22FE 22FE [Preserv][MB][ILSEQ] +22FF 22FF [Preserv][MB][ILSEQ] +2522 2522 [Preserve][ASCII] +2527 NULL [SyntErr] +255C NULL [SyntErr] +2700 NULL [SyntErr] +2708 NULL [SyntErr] +2709 NULL [SyntErr] +270A NULL [SyntErr] +270D NULL [SyntErr] +271A NULL [SyntErr] +2722 NULL [SyntErr] +2725 NULL [SyntErr] +2727 27 [Regular] +2730 NULL [SyntErr] +273F NULL [SyntErr] +2740 NULL [SyntErr] +275A NULL [SyntErr] +275C NULL [SyntErr] +275F NULL [SyntErr] +2761 NULL [SyntErr] +2762 NULL [SyntErr] +276E NULL [SyntErr] +2772 NULL [SyntErr] +2774 NULL [SyntErr] +277E NULL [SyntErr] +277F NULL [SyntErr] +2780 NULL [SyntErr] +2781 NULL [SyntErr] +279F NULL [SyntErr] +27A0 NULL [SyntErr] +27A1 NULL [SyntErr] +27E0 NULL [SyntErr] +27EF NULL [SyntErr] +27F9 NULL [SyntErr] +27FA NULL [SyntErr] +27FC NULL [SyntErr] +27FD NULL [SyntErr] +27FE NULL [SyntErr] +27FF NULL [SyntErr] +3022 3022 [Preserve][ASCII] +3027 NULL [SyntErr] +305C NULL [SyntErr] +3F22 3F22 [Preserve][ASCII] +3F27 NULL [SyntErr] +3F5C NULL [SyntErr] +4022 4022 [Preserve][ASCII] +4027 NULL [SyntErr] +405C NULL [SyntErr] +5A22 5A22 [Preserve][ASCII] +5A27 NULL [SyntErr] +5A5C NULL [SyntErr] +5C00 00 [Trivial] +5C08 08 [Trivial] +5C09 09 [Trivial] +5C0A 0A [Trivial] +5C0D 0D [Trivial] +5C1A 1A [Trivial] +5C22 22 [Trivial] +5C25 5C25 [Preserve][LIKE] +5C27 27 [Trivial] +5C30 00 [Regular] +5C3F 3F [Trivial] +5C40 40 [Trivial] +5C5A 1A [Regular] +5C5C 5C [Regular] +5C5F 5C5F [Preserve][LIKE] +5C61 61 [Trivial] +5C62 08 [Regular] +5C6E 0A [Regular] +5C72 0D [Regular] +5C74 09 [Regular] +5C7E 7E [Trivial] +5C7F 7F [Trivial] +5C80 80 [Trivial][ILSEQ] +5C81 81 [Trivial][ILSEQ] +5C9F 9F [Trivial][ILSEQ] +5CA0 A0 [Trivial][ILSEQ] +5CA1 A1 [Trivial] +5CE0 E0 [Trivial][ILSEQ] +5CEF EF [Trivial][ILSEQ] +5CF9 F9 [Trivial][ILSEQ] +5CFA FA [Trivial][ILSEQ] +5CFC FC [Trivial][ILSEQ] +5CFD FD [Trivial][ILSEQ] +5CFE FE [Trivial][ILSEQ] +5CFF FF [Trivial][ILSEQ] +5F22 5F22 [Preserve][ASCII] +5F27 NULL [SyntErr] +5F5C NULL [SyntErr] +6122 6122 [Preserve][ASCII] +6127 NULL 
[SyntErr] +615C NULL [SyntErr] +6222 6222 [Preserve][ASCII] +6227 NULL [SyntErr] +625C NULL [SyntErr] +6E22 6E22 [Preserve][ASCII] +6E27 NULL [SyntErr] +6E5C NULL [SyntErr] +7222 7222 [Preserve][ASCII] +7227 NULL [SyntErr] +725C NULL [SyntErr] +7422 7422 [Preserve][ASCII] +7427 NULL [SyntErr] +745C NULL [SyntErr] +7E22 7E22 [Preserve][ASCII] +7E27 NULL [SyntErr] +7E5C NULL [SyntErr] +7F22 7F22 [Preserve][ASCII] +7F27 NULL [SyntErr] +7F5C NULL [SyntErr] +8022 8022 [Preserv][MB][ILSEQ] +8027 NULL [SyntErr] +805C NULL [SyntErr] +8122 8122 [Preserv][MB][ILSEQ] +8127 NULL [SyntErr] +815C 815C [Preserv][MB] +9F22 9F22 [Preserv][MB][ILSEQ] +9F27 NULL [SyntErr] +9F5C 9F5C [Preserv][MB] +A022 A022 [Preserv][MB][ILSEQ] +A027 NULL [SyntErr] +A05C NULL [SyntErr] +A122 A122 [Preserv][MB] +A127 NULL [SyntErr] +A15C NULL [SyntErr] +E022 E022 [Preserv][MB][ILSEQ] +E027 NULL [SyntErr] +E05C E05C [Preserv][MB] +EF22 EF22 [Preserv][MB][ILSEQ] +EF27 NULL [SyntErr] +EF5C EF5C [Preserv][MB] +F922 F922 [Preserv][MB][ILSEQ] +F927 NULL [SyntErr] +F95C F95C [Preserv][MB] +FA22 FA22 [Preserv][MB][ILSEQ] +FA27 NULL [SyntErr] +FA5C FA5C [Preserv][MB] +FC22 FC22 [Preserv][MB][ILSEQ] +FC27 NULL [SyntErr] +FC5C FC5C [Preserv][MB] +FD22 FD22 [Preserv][MB][ILSEQ] +FD27 NULL [SyntErr] +FD5C NULL [SyntErr] +FE22 FE22 [Preserv][MB][ILSEQ] +FE27 NULL [SyntErr] +FE5C NULL [SyntErr] +FF22 FF22 [Preserv][MB][ILSEQ] +FF27 NULL [SyntErr] +FF5C NULL [SyntErr] +5C0000 0000 [Trivial] +5C0008 0008 [Trivial] +5C0009 0009 [Trivial] +5C000A 000A [Trivial] +5C000D 000D [Trivial] +5C001A 001A [Trivial] +5C0022 0022 [Trivial] +5C0025 0025 [Trivial] +5C0027 NULL [SyntErr] +5C0030 0030 [Trivial] +5C003F 003F [Trivial] +5C0040 0040 [Trivial] +5C005A 005A [Trivial] +5C005C NULL [SyntErr] +5C005F 005F [Trivial] +5C0061 0061 [Trivial] +5C0062 0062 [Trivial] +5C006E 006E [Trivial] +5C0072 0072 [Trivial] +5C0074 0074 [Trivial] +5C007E 007E [Trivial] +5C007F 007F [Trivial] +5C0080 0080 [Trivial][ILSEQ] +5C0081 0081 [Trivial][ILSEQ] +5C009F 009F [Trivial][ILSEQ] +5C00A0 00A0 [Trivial][ILSEQ] +5C00A1 00A1 [Trivial] +5C00E0 00E0 [Trivial][ILSEQ] +5C00EF 00EF [Trivial][ILSEQ] +5C00F9 00F9 [Trivial][ILSEQ] +5C00FA 00FA [Trivial][ILSEQ] +5C00FC 00FC [Trivial][ILSEQ] +5C00FD 00FD [Trivial][ILSEQ] +5C00FE 00FE [Trivial][ILSEQ] +5C00FF 00FF [Trivial][ILSEQ] +5C0800 0800 [Trivial] +5C0808 0808 [Trivial] +5C0809 0809 [Trivial] +5C080A 080A [Trivial] +5C080D 080D [Trivial] +5C081A 081A [Trivial] +5C0822 0822 [Trivial] +5C0825 0825 [Trivial] +5C0827 NULL [SyntErr] +5C0830 0830 [Trivial] +5C083F 083F [Trivial] +5C0840 0840 [Trivial] +5C085A 085A [Trivial] +5C085C NULL [SyntErr] +5C085F 085F [Trivial] +5C0861 0861 [Trivial] +5C0862 0862 [Trivial] +5C086E 086E [Trivial] +5C0872 0872 [Trivial] +5C0874 0874 [Trivial] +5C087E 087E [Trivial] +5C087F 087F [Trivial] +5C0880 0880 [Trivial][ILSEQ] +5C0881 0881 [Trivial][ILSEQ] +5C089F 089F [Trivial][ILSEQ] +5C08A0 08A0 [Trivial][ILSEQ] +5C08A1 08A1 [Trivial] +5C08E0 08E0 [Trivial][ILSEQ] +5C08EF 08EF [Trivial][ILSEQ] +5C08F9 08F9 [Trivial][ILSEQ] +5C08FA 08FA [Trivial][ILSEQ] +5C08FC 08FC [Trivial][ILSEQ] +5C08FD 08FD [Trivial][ILSEQ] +5C08FE 08FE [Trivial][ILSEQ] +5C08FF 08FF [Trivial][ILSEQ] +5C0900 0900 [Trivial] +5C0908 0908 [Trivial] +5C0909 0909 [Trivial] +5C090A 090A [Trivial] +5C090D 090D [Trivial] +5C091A 091A [Trivial] +5C0922 0922 [Trivial] +5C0925 0925 [Trivial] +5C0927 NULL [SyntErr] +5C0930 0930 [Trivial] +5C093F 093F [Trivial] +5C0940 0940 [Trivial] +5C095A 095A [Trivial] +5C095C NULL [SyntErr] +5C095F 095F 
[Trivial] +5C0961 0961 [Trivial] +5C0962 0962 [Trivial] +5C096E 096E [Trivial] +5C0972 0972 [Trivial] +5C0974 0974 [Trivial] +5C097E 097E [Trivial] +5C097F 097F [Trivial] +5C0980 0980 [Trivial][ILSEQ] +5C0981 0981 [Trivial][ILSEQ] +5C099F 099F [Trivial][ILSEQ] +5C09A0 09A0 [Trivial][ILSEQ] +5C09A1 09A1 [Trivial] +5C09E0 09E0 [Trivial][ILSEQ] +5C09EF 09EF [Trivial][ILSEQ] +5C09F9 09F9 [Trivial][ILSEQ] +5C09FA 09FA [Trivial][ILSEQ] +5C09FC 09FC [Trivial][ILSEQ] +5C09FD 09FD [Trivial][ILSEQ] +5C09FE 09FE [Trivial][ILSEQ] +5C09FF 09FF [Trivial][ILSEQ] +5C0A00 0A00 [Trivial] +5C0A08 0A08 [Trivial] +5C0A09 0A09 [Trivial] +5C0A0A 0A0A [Trivial] +5C0A0D 0A0D [Trivial] +5C0A1A 0A1A [Trivial] +5C0A22 0A22 [Trivial] +5C0A25 0A25 [Trivial] +5C0A27 NULL [SyntErr] +5C0A30 0A30 [Trivial] +5C0A3F 0A3F [Trivial] +5C0A40 0A40 [Trivial] +5C0A5A 0A5A [Trivial] +5C0A5C NULL [SyntErr] +5C0A5F 0A5F [Trivial] +5C0A61 0A61 [Trivial] +5C0A62 0A62 [Trivial] +5C0A6E 0A6E [Trivial] +5C0A72 0A72 [Trivial] +5C0A74 0A74 [Trivial] +5C0A7E 0A7E [Trivial] +5C0A7F 0A7F [Trivial] +5C0A80 0A80 [Trivial][ILSEQ] +5C0A81 0A81 [Trivial][ILSEQ] +5C0A9F 0A9F [Trivial][ILSEQ] +5C0AA0 0AA0 [Trivial][ILSEQ] +5C0AA1 0AA1 [Trivial] +5C0AE0 0AE0 [Trivial][ILSEQ] +5C0AEF 0AEF [Trivial][ILSEQ] +5C0AF9 0AF9 [Trivial][ILSEQ] +5C0AFA 0AFA [Trivial][ILSEQ] +5C0AFC 0AFC [Trivial][ILSEQ] +5C0AFD 0AFD [Trivial][ILSEQ] +5C0AFE 0AFE [Trivial][ILSEQ] +5C0AFF 0AFF [Trivial][ILSEQ] +5C0D00 0D00 [Trivial] +5C0D08 0D08 [Trivial] +5C0D09 0D09 [Trivial] +5C0D0A 0D0A [Trivial] +5C0D0D 0D0D [Trivial] +5C0D1A 0D1A [Trivial] +5C0D22 0D22 [Trivial] +5C0D25 0D25 [Trivial] +5C0D27 NULL [SyntErr] +5C0D30 0D30 [Trivial] +5C0D3F 0D3F [Trivial] +5C0D40 0D40 [Trivial] +5C0D5A 0D5A [Trivial] +5C0D5C NULL [SyntErr] +5C0D5F 0D5F [Trivial] +5C0D61 0D61 [Trivial] +5C0D62 0D62 [Trivial] +5C0D6E 0D6E [Trivial] +5C0D72 0D72 [Trivial] +5C0D74 0D74 [Trivial] +5C0D7E 0D7E [Trivial] +5C0D7F 0D7F [Trivial] +5C0D80 0D80 [Trivial][ILSEQ] +5C0D81 0D81 [Trivial][ILSEQ] +5C0D9F 0D9F [Trivial][ILSEQ] +5C0DA0 0DA0 [Trivial][ILSEQ] +5C0DA1 0DA1 [Trivial] +5C0DE0 0DE0 [Trivial][ILSEQ] +5C0DEF 0DEF [Trivial][ILSEQ] +5C0DF9 0DF9 [Trivial][ILSEQ] +5C0DFA 0DFA [Trivial][ILSEQ] +5C0DFC 0DFC [Trivial][ILSEQ] +5C0DFD 0DFD [Trivial][ILSEQ] +5C0DFE 0DFE [Trivial][ILSEQ] +5C0DFF 0DFF [Trivial][ILSEQ] +5C1A00 1A00 [Trivial] +5C1A08 1A08 [Trivial] +5C1A09 1A09 [Trivial] +5C1A0A 1A0A [Trivial] +5C1A0D 1A0D [Trivial] +5C1A1A 1A1A [Trivial] +5C1A22 1A22 [Trivial] +5C1A25 1A25 [Trivial] +5C1A27 NULL [SyntErr] +5C1A30 1A30 [Trivial] +5C1A3F 1A3F [Trivial] +5C1A40 1A40 [Trivial] +5C1A5A 1A5A [Trivial] +5C1A5C NULL [SyntErr] +5C1A5F 1A5F [Trivial] +5C1A61 1A61 [Trivial] +5C1A62 1A62 [Trivial] +5C1A6E 1A6E [Trivial] +5C1A72 1A72 [Trivial] +5C1A74 1A74 [Trivial] +5C1A7E 1A7E [Trivial] +5C1A7F 1A7F [Trivial] +5C1A80 1A80 [Trivial][ILSEQ] +5C1A81 1A81 [Trivial][ILSEQ] +5C1A9F 1A9F [Trivial][ILSEQ] +5C1AA0 1AA0 [Trivial][ILSEQ] +5C1AA1 1AA1 [Trivial] +5C1AE0 1AE0 [Trivial][ILSEQ] +5C1AEF 1AEF [Trivial][ILSEQ] +5C1AF9 1AF9 [Trivial][ILSEQ] +5C1AFA 1AFA [Trivial][ILSEQ] +5C1AFC 1AFC [Trivial][ILSEQ] +5C1AFD 1AFD [Trivial][ILSEQ] +5C1AFE 1AFE [Trivial][ILSEQ] +5C1AFF 1AFF [Trivial][ILSEQ] +5C2200 2200 [Trivial] +5C2208 2208 [Trivial] +5C2209 2209 [Trivial] +5C220A 220A [Trivial] +5C220D 220D [Trivial] +5C221A 221A [Trivial] +5C2222 2222 [Trivial] +5C2225 2225 [Trivial] +5C2227 NULL [SyntErr] +5C2230 2230 [Trivial] +5C223F 223F [Trivial] +5C2240 2240 [Trivial] +5C225A 225A [Trivial] +5C225C NULL [SyntErr] +5C225F 
225F [Trivial] +5C2261 2261 [Trivial] +5C2262 2262 [Trivial] +5C226E 226E [Trivial] +5C2272 2272 [Trivial] +5C2274 2274 [Trivial] +5C227E 227E [Trivial] +5C227F 227F [Trivial] +5C2280 2280 [Trivial][ILSEQ] +5C2281 2281 [Trivial][ILSEQ] +5C229F 229F [Trivial][ILSEQ] +5C22A0 22A0 [Trivial][ILSEQ] +5C22A1 22A1 [Trivial] +5C22E0 22E0 [Trivial][ILSEQ] +5C22EF 22EF [Trivial][ILSEQ] +5C22F9 22F9 [Trivial][ILSEQ] +5C22FA 22FA [Trivial][ILSEQ] +5C22FC 22FC [Trivial][ILSEQ] +5C22FD 22FD [Trivial][ILSEQ] +5C22FE 22FE [Trivial][ILSEQ] +5C22FF 22FF [Trivial][ILSEQ] +5C2500 5C2500 [Preserve][LIKE] +5C2508 5C2508 [Preserve][LIKE] +5C2509 5C2509 [Preserve][LIKE] +5C250A 5C250A [Preserve][LIKE] +5C250D 5C250D [Preserve][LIKE] +5C251A 5C251A [Preserve][LIKE] +5C2522 5C2522 [Preserve][LIKE] +5C2525 5C2525 [Preserve][LIKE] +5C2527 NULL [SyntErr] +5C2530 5C2530 [Preserve][LIKE] +5C253F 5C253F [Preserve][LIKE] +5C2540 5C2540 [Preserve][LIKE] +5C255A 5C255A [Preserve][LIKE] +5C255C NULL [SyntErr] +5C255F 5C255F [Preserve][LIKE] +5C2561 5C2561 [Preserve][LIKE] +5C2562 5C2562 [Preserve][LIKE] +5C256E 5C256E [Preserve][LIKE] +5C2572 5C2572 [Preserve][LIKE] +5C2574 5C2574 [Preserve][LIKE] +5C257E 5C257E [Preserve][LIKE] +5C257F 5C257F [Preserve][LIKE] +5C2580 5C2580 [Preserve][LIKE][ILSEQ] +5C2581 5C2581 [Preserve][LIKE][ILSEQ] +5C259F 5C259F [Preserve][LIKE][ILSEQ] +5C25A0 5C25A0 [Preserve][LIKE][ILSEQ] +5C25A1 5C25A1 [Preserve][LIKE] +5C25E0 5C25E0 [Preserve][LIKE][ILSEQ] +5C25EF 5C25EF [Preserve][LIKE][ILSEQ] +5C25F9 5C25F9 [Preserve][LIKE][ILSEQ] +5C25FA 5C25FA [Preserve][LIKE][ILSEQ] +5C25FC 5C25FC [Preserve][LIKE][ILSEQ] +5C25FD 5C25FD [Preserve][LIKE][ILSEQ] +5C25FE 5C25FE [Preserve][LIKE][ILSEQ] +5C25FF 5C25FF [Preserve][LIKE][ILSEQ] +5C2700 2700 [Trivial] +5C2708 2708 [Trivial] +5C2709 2709 [Trivial] +5C270A 270A [Trivial] +5C270D 270D [Trivial] +5C271A 271A [Trivial] +5C2722 2722 [Trivial] +5C2725 2725 [Trivial] +5C2727 NULL [SyntErr] +5C2730 2730 [Trivial] +5C273F 273F [Trivial] +5C2740 2740 [Trivial] +5C275A 275A [Trivial] +5C275C NULL [SyntErr] +5C275F 275F [Trivial] +5C2761 2761 [Trivial] +5C2762 2762 [Trivial] +5C276E 276E [Trivial] +5C2772 2772 [Trivial] +5C2774 2774 [Trivial] +5C277E 277E [Trivial] +5C277F 277F [Trivial] +5C2780 2780 [Trivial][ILSEQ] +5C2781 2781 [Trivial][ILSEQ] +5C279F 279F [Trivial][ILSEQ] +5C27A0 27A0 [Trivial][ILSEQ] +5C27A1 27A1 [Trivial] +5C27E0 27E0 [Trivial][ILSEQ] +5C27EF 27EF [Trivial][ILSEQ] +5C27F9 27F9 [Trivial][ILSEQ] +5C27FA 27FA [Trivial][ILSEQ] +5C27FC 27FC [Trivial][ILSEQ] +5C27FD 27FD [Trivial][ILSEQ] +5C27FE 27FE [Trivial][ILSEQ] +5C27FF 27FF [Trivial][ILSEQ] +5C3000 0000 [Regular] +5C3008 0008 [Regular] +5C3009 0009 [Regular] +5C300A 000A [Regular] +5C300D 000D [Regular] +5C301A 001A [Regular] +5C3022 0022 [Regular] +5C3025 0025 [Regular] +5C3027 NULL [SyntErr] +5C3030 0030 [Regular] +5C303F 003F [Regular] +5C3040 0040 [Regular] +5C305A 005A [Regular] +5C305C NULL [SyntErr] +5C305F 005F [Regular] +5C3061 0061 [Regular] +5C3062 0062 [Regular] +5C306E 006E [Regular] +5C3072 0072 [Regular] +5C3074 0074 [Regular] +5C307E 007E [Regular] +5C307F 007F [Regular] +5C3080 0080 [Regular][ILSEQ] +5C3081 0081 [Regular][ILSEQ] +5C309F 009F [Regular][ILSEQ] +5C30A0 00A0 [Regular][ILSEQ] +5C30A1 00A1 [Regular] +5C30E0 00E0 [Regular][ILSEQ] +5C30EF 00EF [Regular][ILSEQ] +5C30F9 00F9 [Regular][ILSEQ] +5C30FA 00FA [Regular][ILSEQ] +5C30FC 00FC [Regular][ILSEQ] +5C30FD 00FD [Regular][ILSEQ] +5C30FE 00FE [Regular][ILSEQ] +5C30FF 00FF [Regular][ILSEQ] +5C3F00 3F00 [Trivial] +5C3F08 
3F08 [Trivial] +5C3F09 3F09 [Trivial] +5C3F0A 3F0A [Trivial] +5C3F0D 3F0D [Trivial] +5C3F1A 3F1A [Trivial] +5C3F22 3F22 [Trivial] +5C3F25 3F25 [Trivial] +5C3F27 NULL [SyntErr] +5C3F30 3F30 [Trivial] +5C3F3F 3F3F [Trivial] +5C3F40 3F40 [Trivial] +5C3F5A 3F5A [Trivial] +5C3F5C NULL [SyntErr] +5C3F5F 3F5F [Trivial] +5C3F61 3F61 [Trivial] +5C3F62 3F62 [Trivial] +5C3F6E 3F6E [Trivial] +5C3F72 3F72 [Trivial] +5C3F74 3F74 [Trivial] +5C3F7E 3F7E [Trivial] +5C3F7F 3F7F [Trivial] +5C3F80 3F80 [Trivial][ILSEQ] +5C3F81 3F81 [Trivial][ILSEQ] +5C3F9F 3F9F [Trivial][ILSEQ] +5C3FA0 3FA0 [Trivial][ILSEQ] +5C3FA1 3FA1 [Trivial] +5C3FE0 3FE0 [Trivial][ILSEQ] +5C3FEF 3FEF [Trivial][ILSEQ] +5C3FF9 3FF9 [Trivial][ILSEQ] +5C3FFA 3FFA [Trivial][ILSEQ] +5C3FFC 3FFC [Trivial][ILSEQ] +5C3FFD 3FFD [Trivial][ILSEQ] +5C3FFE 3FFE [Trivial][ILSEQ] +5C3FFF 3FFF [Trivial][ILSEQ] +5C4000 4000 [Trivial] +5C4008 4008 [Trivial] +5C4009 4009 [Trivial] +5C400A 400A [Trivial] +5C400D 400D [Trivial] +5C401A 401A [Trivial] +5C4022 4022 [Trivial] +5C4025 4025 [Trivial] +5C4027 NULL [SyntErr] +5C4030 4030 [Trivial] +5C403F 403F [Trivial] +5C4040 4040 [Trivial] +5C405A 405A [Trivial] +5C405C NULL [SyntErr] +5C405F 405F [Trivial] +5C4061 4061 [Trivial] +5C4062 4062 [Trivial] +5C406E 406E [Trivial] +5C4072 4072 [Trivial] +5C4074 4074 [Trivial] +5C407E 407E [Trivial] +5C407F 407F [Trivial] +5C4080 4080 [Trivial][ILSEQ] +5C4081 4081 [Trivial][ILSEQ] +5C409F 409F [Trivial][ILSEQ] +5C40A0 40A0 [Trivial][ILSEQ] +5C40A1 40A1 [Trivial] +5C40E0 40E0 [Trivial][ILSEQ] +5C40EF 40EF [Trivial][ILSEQ] +5C40F9 40F9 [Trivial][ILSEQ] +5C40FA 40FA [Trivial][ILSEQ] +5C40FC 40FC [Trivial][ILSEQ] +5C40FD 40FD [Trivial][ILSEQ] +5C40FE 40FE [Trivial][ILSEQ] +5C40FF 40FF [Trivial][ILSEQ] +5C5A00 1A00 [Regular] +5C5A08 1A08 [Regular] +5C5A09 1A09 [Regular] +5C5A0A 1A0A [Regular] +5C5A0D 1A0D [Regular] +5C5A1A 1A1A [Regular] +5C5A22 1A22 [Regular] +5C5A25 1A25 [Regular] +5C5A27 NULL [SyntErr] +5C5A30 1A30 [Regular] +5C5A3F 1A3F [Regular] +5C5A40 1A40 [Regular] +5C5A5A 1A5A [Regular] +5C5A5C NULL [SyntErr] +5C5A5F 1A5F [Regular] +5C5A61 1A61 [Regular] +5C5A62 1A62 [Regular] +5C5A6E 1A6E [Regular] +5C5A72 1A72 [Regular] +5C5A74 1A74 [Regular] +5C5A7E 1A7E [Regular] +5C5A7F 1A7F [Regular] +5C5A80 1A80 [Regular][ILSEQ] +5C5A81 1A81 [Regular][ILSEQ] +5C5A9F 1A9F [Regular][ILSEQ] +5C5AA0 1AA0 [Regular][ILSEQ] +5C5AA1 1AA1 [Regular] +5C5AE0 1AE0 [Regular][ILSEQ] +5C5AEF 1AEF [Regular][ILSEQ] +5C5AF9 1AF9 [Regular][ILSEQ] +5C5AFA 1AFA [Regular][ILSEQ] +5C5AFC 1AFC [Regular][ILSEQ] +5C5AFD 1AFD [Regular][ILSEQ] +5C5AFE 1AFE [Regular][ILSEQ] +5C5AFF 1AFF [Regular][ILSEQ] +5C5C00 5C00 [Regular] +5C5C08 5C08 [Regular] +5C5C09 5C09 [Regular] +5C5C0A 5C0A [Regular] +5C5C0D 5C0D [Regular] +5C5C1A 5C1A [Regular] +5C5C22 5C22 [Regular] +5C5C25 5C25 [Regular] +5C5C27 NULL [SyntErr] +5C5C30 5C30 [Regular] +5C5C3F 5C3F [Regular] +5C5C40 5C40 [Regular] +5C5C5A 5C5A [Regular] +5C5C5C NULL [SyntErr] +5C5C5F 5C5F [Regular] +5C5C61 5C61 [Regular] +5C5C62 5C62 [Regular] +5C5C6E 5C6E [Regular] +5C5C72 5C72 [Regular] +5C5C74 5C74 [Regular] +5C5C7E 5C7E [Regular] +5C5C7F 5C7F [Regular] +5C5C80 5C80 [Regular][ILSEQ] +5C5C81 5C81 [Regular][ILSEQ] +5C5C9F 5C9F [Regular][ILSEQ] +5C5CA0 5CA0 [Regular][ILSEQ] +5C5CA1 5CA1 [Regular] +5C5CE0 5CE0 [Regular][ILSEQ] +5C5CEF 5CEF [Regular][ILSEQ] +5C5CF9 5CF9 [Regular][ILSEQ] +5C5CFA 5CFA [Regular][ILSEQ] +5C5CFC 5CFC [Regular][ILSEQ] +5C5CFD 5CFD [Regular][ILSEQ] +5C5CFE 5CFE [Regular][ILSEQ] +5C5CFF 5CFF [Regular][ILSEQ] +5C5F00 5C5F00 
[Preserve][LIKE] +5C5F08 5C5F08 [Preserve][LIKE] +5C5F09 5C5F09 [Preserve][LIKE] +5C5F0A 5C5F0A [Preserve][LIKE] +5C5F0D 5C5F0D [Preserve][LIKE] +5C5F1A 5C5F1A [Preserve][LIKE] +5C5F22 5C5F22 [Preserve][LIKE] +5C5F25 5C5F25 [Preserve][LIKE] +5C5F27 NULL [SyntErr] +5C5F30 5C5F30 [Preserve][LIKE] +5C5F3F 5C5F3F [Preserve][LIKE] +5C5F40 5C5F40 [Preserve][LIKE] +5C5F5A 5C5F5A [Preserve][LIKE] +5C5F5C NULL [SyntErr] +5C5F5F 5C5F5F [Preserve][LIKE] +5C5F61 5C5F61 [Preserve][LIKE] +5C5F62 5C5F62 [Preserve][LIKE] +5C5F6E 5C5F6E [Preserve][LIKE] +5C5F72 5C5F72 [Preserve][LIKE] +5C5F74 5C5F74 [Preserve][LIKE] +5C5F7E 5C5F7E [Preserve][LIKE] +5C5F7F 5C5F7F [Preserve][LIKE] +5C5F80 5C5F80 [Preserve][LIKE][ILSEQ] +5C5F81 5C5F81 [Preserve][LIKE][ILSEQ] +5C5F9F 5C5F9F [Preserve][LIKE][ILSEQ] +5C5FA0 5C5FA0 [Preserve][LIKE][ILSEQ] +5C5FA1 5C5FA1 [Preserve][LIKE] +5C5FE0 5C5FE0 [Preserve][LIKE][ILSEQ] +5C5FEF 5C5FEF [Preserve][LIKE][ILSEQ] +5C5FF9 5C5FF9 [Preserve][LIKE][ILSEQ] +5C5FFA 5C5FFA [Preserve][LIKE][ILSEQ] +5C5FFC 5C5FFC [Preserve][LIKE][ILSEQ] +5C5FFD 5C5FFD [Preserve][LIKE][ILSEQ] +5C5FFE 5C5FFE [Preserve][LIKE][ILSEQ] +5C5FFF 5C5FFF [Preserve][LIKE][ILSEQ] +5C6100 6100 [Trivial] +5C6108 6108 [Trivial] +5C6109 6109 [Trivial] +5C610A 610A [Trivial] +5C610D 610D [Trivial] +5C611A 611A [Trivial] +5C6122 6122 [Trivial] +5C6125 6125 [Trivial] +5C6127 NULL [SyntErr] +5C6130 6130 [Trivial] +5C613F 613F [Trivial] +5C6140 6140 [Trivial] +5C615A 615A [Trivial] +5C615C NULL [SyntErr] +5C615F 615F [Trivial] +5C6161 6161 [Trivial] +5C6162 6162 [Trivial] +5C616E 616E [Trivial] +5C6172 6172 [Trivial] +5C6174 6174 [Trivial] +5C617E 617E [Trivial] +5C617F 617F [Trivial] +5C6180 6180 [Trivial][ILSEQ] +5C6181 6181 [Trivial][ILSEQ] +5C619F 619F [Trivial][ILSEQ] +5C61A0 61A0 [Trivial][ILSEQ] +5C61A1 61A1 [Trivial] +5C61E0 61E0 [Trivial][ILSEQ] +5C61EF 61EF [Trivial][ILSEQ] +5C61F9 61F9 [Trivial][ILSEQ] +5C61FA 61FA [Trivial][ILSEQ] +5C61FC 61FC [Trivial][ILSEQ] +5C61FD 61FD [Trivial][ILSEQ] +5C61FE 61FE [Trivial][ILSEQ] +5C61FF 61FF [Trivial][ILSEQ] +5C6200 0800 [Regular] +5C6208 0808 [Regular] +5C6209 0809 [Regular] +5C620A 080A [Regular] +5C620D 080D [Regular] +5C621A 081A [Regular] +5C6222 0822 [Regular] +5C6225 0825 [Regular] +5C6227 NULL [SyntErr] +5C6230 0830 [Regular] +5C623F 083F [Regular] +5C6240 0840 [Regular] +5C625A 085A [Regular] +5C625C NULL [SyntErr] +5C625F 085F [Regular] +5C6261 0861 [Regular] +5C6262 0862 [Regular] +5C626E 086E [Regular] +5C6272 0872 [Regular] +5C6274 0874 [Regular] +5C627E 087E [Regular] +5C627F 087F [Regular] +5C6280 0880 [Regular][ILSEQ] +5C6281 0881 [Regular][ILSEQ] +5C629F 089F [Regular][ILSEQ] +5C62A0 08A0 [Regular][ILSEQ] +5C62A1 08A1 [Regular] +5C62E0 08E0 [Regular][ILSEQ] +5C62EF 08EF [Regular][ILSEQ] +5C62F9 08F9 [Regular][ILSEQ] +5C62FA 08FA [Regular][ILSEQ] +5C62FC 08FC [Regular][ILSEQ] +5C62FD 08FD [Regular][ILSEQ] +5C62FE 08FE [Regular][ILSEQ] +5C62FF 08FF [Regular][ILSEQ] +5C6E00 0A00 [Regular] +5C6E08 0A08 [Regular] +5C6E09 0A09 [Regular] +5C6E0A 0A0A [Regular] +5C6E0D 0A0D [Regular] +5C6E1A 0A1A [Regular] +5C6E22 0A22 [Regular] +5C6E25 0A25 [Regular] +5C6E27 NULL [SyntErr] +5C6E30 0A30 [Regular] +5C6E3F 0A3F [Regular] +5C6E40 0A40 [Regular] +5C6E5A 0A5A [Regular] +5C6E5C NULL [SyntErr] +5C6E5F 0A5F [Regular] +5C6E61 0A61 [Regular] +5C6E62 0A62 [Regular] +5C6E6E 0A6E [Regular] +5C6E72 0A72 [Regular] +5C6E74 0A74 [Regular] +5C6E7E 0A7E [Regular] +5C6E7F 0A7F [Regular] +5C6E80 0A80 [Regular][ILSEQ] +5C6E81 0A81 [Regular][ILSEQ] +5C6E9F 0A9F [Regular][ILSEQ] +5C6EA0 
0AA0 [Regular][ILSEQ] +5C6EA1 0AA1 [Regular] +5C6EE0 0AE0 [Regular][ILSEQ] +5C6EEF 0AEF [Regular][ILSEQ] +5C6EF9 0AF9 [Regular][ILSEQ] +5C6EFA 0AFA [Regular][ILSEQ] +5C6EFC 0AFC [Regular][ILSEQ] +5C6EFD 0AFD [Regular][ILSEQ] +5C6EFE 0AFE [Regular][ILSEQ] +5C6EFF 0AFF [Regular][ILSEQ] +5C7200 0D00 [Regular] +5C7208 0D08 [Regular] +5C7209 0D09 [Regular] +5C720A 0D0A [Regular] +5C720D 0D0D [Regular] +5C721A 0D1A [Regular] +5C7222 0D22 [Regular] +5C7225 0D25 [Regular] +5C7227 NULL [SyntErr] +5C7230 0D30 [Regular] +5C723F 0D3F [Regular] +5C7240 0D40 [Regular] +5C725A 0D5A [Regular] +5C725C NULL [SyntErr] +5C725F 0D5F [Regular] +5C7261 0D61 [Regular] +5C7262 0D62 [Regular] +5C726E 0D6E [Regular] +5C7272 0D72 [Regular] +5C7274 0D74 [Regular] +5C727E 0D7E [Regular] +5C727F 0D7F [Regular] +5C7280 0D80 [Regular][ILSEQ] +5C7281 0D81 [Regular][ILSEQ] +5C729F 0D9F [Regular][ILSEQ] +5C72A0 0DA0 [Regular][ILSEQ] +5C72A1 0DA1 [Regular] +5C72E0 0DE0 [Regular][ILSEQ] +5C72EF 0DEF [Regular][ILSEQ] +5C72F9 0DF9 [Regular][ILSEQ] +5C72FA 0DFA [Regular][ILSEQ] +5C72FC 0DFC [Regular][ILSEQ] +5C72FD 0DFD [Regular][ILSEQ] +5C72FE 0DFE [Regular][ILSEQ] +5C72FF 0DFF [Regular][ILSEQ] +5C7400 0900 [Regular] +5C7408 0908 [Regular] +5C7409 0909 [Regular] +5C740A 090A [Regular] +5C740D 090D [Regular] +5C741A 091A [Regular] +5C7422 0922 [Regular] +5C7425 0925 [Regular] +5C7427 NULL [SyntErr] +5C7430 0930 [Regular] +5C743F 093F [Regular] +5C7440 0940 [Regular] +5C745A 095A [Regular] +5C745C NULL [SyntErr] +5C745F 095F [Regular] +5C7461 0961 [Regular] +5C7462 0962 [Regular] +5C746E 096E [Regular] +5C7472 0972 [Regular] +5C7474 0974 [Regular] +5C747E 097E [Regular] +5C747F 097F [Regular] +5C7480 0980 [Regular][ILSEQ] +5C7481 0981 [Regular][ILSEQ] +5C749F 099F [Regular][ILSEQ] +5C74A0 09A0 [Regular][ILSEQ] +5C74A1 09A1 [Regular] +5C74E0 09E0 [Regular][ILSEQ] +5C74EF 09EF [Regular][ILSEQ] +5C74F9 09F9 [Regular][ILSEQ] +5C74FA 09FA [Regular][ILSEQ] +5C74FC 09FC [Regular][ILSEQ] +5C74FD 09FD [Regular][ILSEQ] +5C74FE 09FE [Regular][ILSEQ] +5C74FF 09FF [Regular][ILSEQ] +5C7E00 7E00 [Trivial] +5C7E08 7E08 [Trivial] +5C7E09 7E09 [Trivial] +5C7E0A 7E0A [Trivial] +5C7E0D 7E0D [Trivial] +5C7E1A 7E1A [Trivial] +5C7E22 7E22 [Trivial] +5C7E25 7E25 [Trivial] +5C7E27 NULL [SyntErr] +5C7E30 7E30 [Trivial] +5C7E3F 7E3F [Trivial] +5C7E40 7E40 [Trivial] +5C7E5A 7E5A [Trivial] +5C7E5C NULL [SyntErr] +5C7E5F 7E5F [Trivial] +5C7E61 7E61 [Trivial] +5C7E62 7E62 [Trivial] +5C7E6E 7E6E [Trivial] +5C7E72 7E72 [Trivial] +5C7E74 7E74 [Trivial] +5C7E7E 7E7E [Trivial] +5C7E7F 7E7F [Trivial] +5C7E80 7E80 [Trivial][ILSEQ] +5C7E81 7E81 [Trivial][ILSEQ] +5C7E9F 7E9F [Trivial][ILSEQ] +5C7EA0 7EA0 [Trivial][ILSEQ] +5C7EA1 7EA1 [Trivial] +5C7EE0 7EE0 [Trivial][ILSEQ] +5C7EEF 7EEF [Trivial][ILSEQ] +5C7EF9 7EF9 [Trivial][ILSEQ] +5C7EFA 7EFA [Trivial][ILSEQ] +5C7EFC 7EFC [Trivial][ILSEQ] +5C7EFD 7EFD [Trivial][ILSEQ] +5C7EFE 7EFE [Trivial][ILSEQ] +5C7EFF 7EFF [Trivial][ILSEQ] +5C7F00 7F00 [Trivial] +5C7F08 7F08 [Trivial] +5C7F09 7F09 [Trivial] +5C7F0A 7F0A [Trivial] +5C7F0D 7F0D [Trivial] +5C7F1A 7F1A [Trivial] +5C7F22 7F22 [Trivial] +5C7F25 7F25 [Trivial] +5C7F27 NULL [SyntErr] +5C7F30 7F30 [Trivial] +5C7F3F 7F3F [Trivial] +5C7F40 7F40 [Trivial] +5C7F5A 7F5A [Trivial] +5C7F5C NULL [SyntErr] +5C7F5F 7F5F [Trivial] +5C7F61 7F61 [Trivial] +5C7F62 7F62 [Trivial] +5C7F6E 7F6E [Trivial] +5C7F72 7F72 [Trivial] +5C7F74 7F74 [Trivial] +5C7F7E 7F7E [Trivial] +5C7F7F 7F7F [Trivial] +5C7F80 7F80 [Trivial][ILSEQ] +5C7F81 7F81 [Trivial][ILSEQ] +5C7F9F 7F9F [Trivial][ILSEQ] 
+5C7FA0 7FA0 [Trivial][ILSEQ] +5C7FA1 7FA1 [Trivial] +5C7FE0 7FE0 [Trivial][ILSEQ] +5C7FEF 7FEF [Trivial][ILSEQ] +5C7FF9 7FF9 [Trivial][ILSEQ] +5C7FFA 7FFA [Trivial][ILSEQ] +5C7FFC 7FFC [Trivial][ILSEQ] +5C7FFD 7FFD [Trivial][ILSEQ] +5C7FFE 7FFE [Trivial][ILSEQ] +5C7FFF 7FFF [Trivial][ILSEQ] +5C8000 8000 [Trivial][ILSEQ] +5C8008 8008 [Trivial][ILSEQ] +5C8009 8009 [Trivial][ILSEQ] +5C800A 800A [Trivial][ILSEQ] +5C800D 800D [Trivial][ILSEQ] +5C801A 801A [Trivial][ILSEQ] +5C8022 8022 [Trivial][ILSEQ] +5C8025 8025 [Trivial][ILSEQ] +5C8027 NULL [SyntErr] +5C8030 8030 [Trivial][ILSEQ] +5C803F 803F [Trivial][ILSEQ] +5C8040 8040 [Trivial][ILSEQ] +5C805A 805A [Trivial][ILSEQ] +5C805C NULL [SyntErr] +5C805F 805F [Trivial][ILSEQ] +5C8061 8061 [Trivial][ILSEQ] +5C8062 8062 [Trivial][ILSEQ] +5C806E 806E [Trivial][ILSEQ] +5C8072 8072 [Trivial][ILSEQ] +5C8074 8074 [Trivial][ILSEQ] +5C807E 807E [Trivial][ILSEQ] +5C807F 807F [Trivial][ILSEQ] +5C8080 8080 [Trivial][ILSEQ] +5C8081 8081 [Trivial][ILSEQ] +5C809F 809F [Trivial][ILSEQ] +5C80A0 80A0 [Trivial][ILSEQ] +5C80A1 80A1 [Trivial][ILSEQ] +5C80E0 80E0 [Trivial][ILSEQ] +5C80EF 80EF [Trivial][ILSEQ] +5C80F9 80F9 [Trivial][ILSEQ] +5C80FA 80FA [Trivial][ILSEQ] +5C80FC 80FC [Trivial][ILSEQ] +5C80FD 80FD [Trivial][ILSEQ] +5C80FE 80FE [Trivial][ILSEQ] +5C80FF 80FF [Trivial][ILSEQ] +5C8100 8100 [Trivial][ILSEQ] +5C8108 8108 [Trivial][ILSEQ] +5C8109 8109 [Trivial][ILSEQ] +5C810A 810A [Trivial][ILSEQ] +5C810D 810D [Trivial][ILSEQ] +5C811A 811A [Trivial][ILSEQ] +5C8122 8122 [Trivial][ILSEQ] +5C8125 8125 [Trivial][ILSEQ] +5C8127 NULL [SyntErr] +5C8130 8130 [Trivial][ILSEQ] +5C813F 813F [Trivial][ILSEQ] +5C8140 8140 [Trivial] +5C815A 815A [Trivial] +5C815C NULL [SyntErr] +5C815F 815F [Trivial] +5C8161 8161 [Trivial] +5C8162 8162 [Trivial] +5C816E 816E [Trivial] +5C8172 8172 [Trivial] +5C8174 8174 [Trivial] +5C817E 817E [Trivial] +5C817F 817F [Trivial][ILSEQ] +5C8180 8180 [Trivial] +5C8181 8181 [Trivial] +5C819F 819F [Trivial] +5C81A0 81A0 [Trivial] +5C81A1 81A1 [Trivial] +5C81E0 81E0 [Trivial] +5C81EF 81EF [Trivial] +5C81F9 81F9 [Trivial] +5C81FA 81FA [Trivial] +5C81FC 81FC [Trivial] +5C81FD 81FD [Trivial][ILSEQ] +5C81FE 81FE [Trivial][ILSEQ] +5C81FF 81FF [Trivial][ILSEQ] +5C9F00 9F00 [Trivial][ILSEQ] +5C9F08 9F08 [Trivial][ILSEQ] +5C9F09 9F09 [Trivial][ILSEQ] +5C9F0A 9F0A [Trivial][ILSEQ] +5C9F0D 9F0D [Trivial][ILSEQ] +5C9F1A 9F1A [Trivial][ILSEQ] +5C9F22 9F22 [Trivial][ILSEQ] +5C9F25 9F25 [Trivial][ILSEQ] +5C9F27 NULL [SyntErr] +5C9F30 9F30 [Trivial][ILSEQ] +5C9F3F 9F3F [Trivial][ILSEQ] +5C9F40 9F40 [Trivial] +5C9F5A 9F5A [Trivial] +5C9F5C NULL [SyntErr] +5C9F5F 9F5F [Trivial] +5C9F61 9F61 [Trivial] +5C9F62 9F62 [Trivial] +5C9F6E 9F6E [Trivial] +5C9F72 9F72 [Trivial] +5C9F74 9F74 [Trivial] +5C9F7E 9F7E [Trivial] +5C9F7F 9F7F [Trivial][ILSEQ] +5C9F80 9F80 [Trivial] +5C9F81 9F81 [Trivial] +5C9F9F 9F9F [Trivial] +5C9FA0 9FA0 [Trivial] +5C9FA1 9FA1 [Trivial] +5C9FE0 9FE0 [Trivial] +5C9FEF 9FEF [Trivial] +5C9FF9 9FF9 [Trivial] +5C9FFA 9FFA [Trivial] +5C9FFC 9FFC [Trivial] +5C9FFD 9FFD [Trivial][ILSEQ] +5C9FFE 9FFE [Trivial][ILSEQ] +5C9FFF 9FFF [Trivial][ILSEQ] +5CA000 A000 [Trivial][ILSEQ] +5CA008 A008 [Trivial][ILSEQ] +5CA009 A009 [Trivial][ILSEQ] +5CA00A A00A [Trivial][ILSEQ] +5CA00D A00D [Trivial][ILSEQ] +5CA01A A01A [Trivial][ILSEQ] +5CA022 A022 [Trivial][ILSEQ] +5CA025 A025 [Trivial][ILSEQ] +5CA027 NULL [SyntErr] +5CA030 A030 [Trivial][ILSEQ] +5CA03F A03F [Trivial][ILSEQ] +5CA040 A040 [Trivial][ILSEQ] +5CA05A A05A [Trivial][ILSEQ] +5CA05C NULL [SyntErr] +5CA05F A05F 
[Trivial][ILSEQ] +5CA061 A061 [Trivial][ILSEQ] +5CA062 A062 [Trivial][ILSEQ] +5CA06E A06E [Trivial][ILSEQ] +5CA072 A072 [Trivial][ILSEQ] +5CA074 A074 [Trivial][ILSEQ] +5CA07E A07E [Trivial][ILSEQ] +5CA07F A07F [Trivial][ILSEQ] +5CA080 A080 [Trivial][ILSEQ] +5CA081 A081 [Trivial][ILSEQ] +5CA09F A09F [Trivial][ILSEQ] +5CA0A0 A0A0 [Trivial][ILSEQ] +5CA0A1 A0A1 [Trivial][ILSEQ] +5CA0E0 A0E0 [Trivial][ILSEQ] +5CA0EF A0EF [Trivial][ILSEQ] +5CA0F9 A0F9 [Trivial][ILSEQ] +5CA0FA A0FA [Trivial][ILSEQ] +5CA0FC A0FC [Trivial][ILSEQ] +5CA0FD A0FD [Trivial][ILSEQ] +5CA0FE A0FE [Trivial][ILSEQ] +5CA0FF A0FF [Trivial][ILSEQ] +5CA100 A100 [Trivial] +5CA108 A108 [Trivial] +5CA109 A109 [Trivial] +5CA10A A10A [Trivial] +5CA10D A10D [Trivial] +5CA11A A11A [Trivial] +5CA122 A122 [Trivial] +5CA125 A125 [Trivial] +5CA127 NULL [SyntErr] +5CA130 A130 [Trivial] +5CA13F A13F [Trivial] +5CA140 A140 [Trivial] +5CA15A A15A [Trivial] +5CA15C NULL [SyntErr][USER] +5CA15F A15F [Trivial] +5CA161 A161 [Trivial] +5CA162 A162 [Trivial] +5CA16E A16E [Trivial] +5CA172 A172 [Trivial] +5CA174 A174 [Trivial] +5CA17E A17E [Trivial] +5CA17F A17F [Trivial] +5CA180 A180 [Trivial][ILSEQ] +5CA181 A181 [Trivial][ILSEQ] +5CA19F A19F [Trivial][ILSEQ] +5CA1A0 A1A0 [Trivial][ILSEQ] +5CA1A1 A1A1 [Trivial] +5CA1E0 A1E0 [Trivial][ILSEQ] +5CA1EF A1EF [Trivial][ILSEQ] +5CA1F9 A1F9 [Trivial][ILSEQ] +5CA1FA A1FA [Trivial][ILSEQ] +5CA1FC A1FC [Trivial][ILSEQ] +5CA1FD A1FD [Trivial][ILSEQ] +5CA1FE A1FE [Trivial][ILSEQ] +5CA1FF A1FF [Trivial][ILSEQ] +5CE000 E000 [Trivial][ILSEQ] +5CE008 E008 [Trivial][ILSEQ] +5CE009 E009 [Trivial][ILSEQ] +5CE00A E00A [Trivial][ILSEQ] +5CE00D E00D [Trivial][ILSEQ] +5CE01A E01A [Trivial][ILSEQ] +5CE022 E022 [Trivial][ILSEQ] +5CE025 E025 [Trivial][ILSEQ] +5CE027 NULL [SyntErr] +5CE030 E030 [Trivial][ILSEQ] +5CE03F E03F [Trivial][ILSEQ] +5CE040 E040 [Trivial] +5CE05A E05A [Trivial] +5CE05C NULL [SyntErr] +5CE05F E05F [Trivial] +5CE061 E061 [Trivial] +5CE062 E062 [Trivial] +5CE06E E06E [Trivial] +5CE072 E072 [Trivial] +5CE074 E074 [Trivial] +5CE07E E07E [Trivial] +5CE07F E07F [Trivial][ILSEQ] +5CE080 E080 [Trivial] +5CE081 E081 [Trivial] +5CE09F E09F [Trivial] +5CE0A0 E0A0 [Trivial] +5CE0A1 E0A1 [Trivial] +5CE0E0 E0E0 [Trivial] +5CE0EF E0EF [Trivial] +5CE0F9 E0F9 [Trivial] +5CE0FA E0FA [Trivial] +5CE0FC E0FC [Trivial] +5CE0FD E0FD [Trivial][ILSEQ] +5CE0FE E0FE [Trivial][ILSEQ] +5CE0FF E0FF [Trivial][ILSEQ] +5CEF00 EF00 [Trivial][ILSEQ] +5CEF08 EF08 [Trivial][ILSEQ] +5CEF09 EF09 [Trivial][ILSEQ] +5CEF0A EF0A [Trivial][ILSEQ] +5CEF0D EF0D [Trivial][ILSEQ] +5CEF1A EF1A [Trivial][ILSEQ] +5CEF22 EF22 [Trivial][ILSEQ] +5CEF25 EF25 [Trivial][ILSEQ] +5CEF27 NULL [SyntErr] +5CEF30 EF30 [Trivial][ILSEQ] +5CEF3F EF3F [Trivial][ILSEQ] +5CEF40 EF40 [Trivial] +5CEF5A EF5A [Trivial] +5CEF5C NULL [SyntErr] +5CEF5F EF5F [Trivial] +5CEF61 EF61 [Trivial] +5CEF62 EF62 [Trivial] +5CEF6E EF6E [Trivial] +5CEF72 EF72 [Trivial] +5CEF74 EF74 [Trivial] +5CEF7E EF7E [Trivial] +5CEF7F EF7F [Trivial][ILSEQ] +5CEF80 EF80 [Trivial] +5CEF81 EF81 [Trivial] +5CEF9F EF9F [Trivial] +5CEFA0 EFA0 [Trivial] +5CEFA1 EFA1 [Trivial] +5CEFE0 EFE0 [Trivial] +5CEFEF EFEF [Trivial] +5CEFF9 EFF9 [Trivial] +5CEFFA EFFA [Trivial] +5CEFFC EFFC [Trivial] +5CEFFD EFFD [Trivial][ILSEQ] +5CEFFE EFFE [Trivial][ILSEQ] +5CEFFF EFFF [Trivial][ILSEQ] +5CF900 F900 [Trivial][ILSEQ] +5CF908 F908 [Trivial][ILSEQ] +5CF909 F909 [Trivial][ILSEQ] +5CF90A F90A [Trivial][ILSEQ] +5CF90D F90D [Trivial][ILSEQ] +5CF91A F91A [Trivial][ILSEQ] +5CF922 F922 [Trivial][ILSEQ] +5CF925 F925 
[Trivial][ILSEQ] +5CF927 NULL [SyntErr] +5CF930 F930 [Trivial][ILSEQ] +5CF93F F93F [Trivial][ILSEQ] +5CF940 F940 [Trivial] +5CF95A F95A [Trivial] +5CF95C NULL [SyntErr] +5CF95F F95F [Trivial] +5CF961 F961 [Trivial] +5CF962 F962 [Trivial] +5CF96E F96E [Trivial] +5CF972 F972 [Trivial] +5CF974 F974 [Trivial] +5CF97E F97E [Trivial] +5CF97F F97F [Trivial][ILSEQ] +5CF980 F980 [Trivial] +5CF981 F981 [Trivial] +5CF99F F99F [Trivial] +5CF9A0 F9A0 [Trivial] +5CF9A1 F9A1 [Trivial] +5CF9E0 F9E0 [Trivial] +5CF9EF F9EF [Trivial] +5CF9F9 F9F9 [Trivial] +5CF9FA F9FA [Trivial] +5CF9FC F9FC [Trivial] +5CF9FD F9FD [Trivial][ILSEQ] +5CF9FE F9FE [Trivial][ILSEQ] +5CF9FF F9FF [Trivial][ILSEQ] +5CFA00 FA00 [Trivial][ILSEQ] +5CFA08 FA08 [Trivial][ILSEQ] +5CFA09 FA09 [Trivial][ILSEQ] +5CFA0A FA0A [Trivial][ILSEQ] +5CFA0D FA0D [Trivial][ILSEQ] +5CFA1A FA1A [Trivial][ILSEQ] +5CFA22 FA22 [Trivial][ILSEQ] +5CFA25 FA25 [Trivial][ILSEQ] +5CFA27 NULL [SyntErr] +5CFA30 FA30 [Trivial][ILSEQ] +5CFA3F FA3F [Trivial][ILSEQ] +5CFA40 FA40 [Trivial] +5CFA5A FA5A [Trivial] +5CFA5C NULL [SyntErr] +5CFA5F FA5F [Trivial] +5CFA61 FA61 [Trivial] +5CFA62 FA62 [Trivial] +5CFA6E FA6E [Trivial] +5CFA72 FA72 [Trivial] +5CFA74 FA74 [Trivial] +5CFA7E FA7E [Trivial] +5CFA7F FA7F [Trivial][ILSEQ] +5CFA80 FA80 [Trivial] +5CFA81 FA81 [Trivial] +5CFA9F FA9F [Trivial] +5CFAA0 FAA0 [Trivial] +5CFAA1 FAA1 [Trivial] +5CFAE0 FAE0 [Trivial] +5CFAEF FAEF [Trivial] +5CFAF9 FAF9 [Trivial] +5CFAFA FAFA [Trivial] +5CFAFC FAFC [Trivial] +5CFAFD FAFD [Trivial][ILSEQ] +5CFAFE FAFE [Trivial][ILSEQ] +5CFAFF FAFF [Trivial][ILSEQ] +5CFC00 FC00 [Trivial][ILSEQ] +5CFC08 FC08 [Trivial][ILSEQ] +5CFC09 FC09 [Trivial][ILSEQ] +5CFC0A FC0A [Trivial][ILSEQ] +5CFC0D FC0D [Trivial][ILSEQ] +5CFC1A FC1A [Trivial][ILSEQ] +5CFC22 FC22 [Trivial][ILSEQ] +5CFC25 FC25 [Trivial][ILSEQ] +5CFC27 NULL [SyntErr] +5CFC30 FC30 [Trivial][ILSEQ] +5CFC3F FC3F [Trivial][ILSEQ] +5CFC40 FC40 [Trivial] +5CFC5A FC5A [Trivial] +5CFC5C NULL [SyntErr] +5CFC5F FC5F [Trivial] +5CFC61 FC61 [Trivial] +5CFC62 FC62 [Trivial] +5CFC6E FC6E [Trivial] +5CFC72 FC72 [Trivial] +5CFC74 FC74 [Trivial] +5CFC7E FC7E [Trivial] +5CFC7F FC7F [Trivial][ILSEQ] +5CFC80 FC80 [Trivial] +5CFC81 FC81 [Trivial] +5CFC9F FC9F [Trivial] +5CFCA0 FCA0 [Trivial] +5CFCA1 FCA1 [Trivial] +5CFCE0 FCE0 [Trivial] +5CFCEF FCEF [Trivial] +5CFCF9 FCF9 [Trivial] +5CFCFA FCFA [Trivial] +5CFCFC FCFC [Trivial] +5CFCFD FCFD [Trivial][ILSEQ] +5CFCFE FCFE [Trivial][ILSEQ] +5CFCFF FCFF [Trivial][ILSEQ] +5CFD00 FD00 [Trivial][ILSEQ] +5CFD08 FD08 [Trivial][ILSEQ] +5CFD09 FD09 [Trivial][ILSEQ] +5CFD0A FD0A [Trivial][ILSEQ] +5CFD0D FD0D [Trivial][ILSEQ] +5CFD1A FD1A [Trivial][ILSEQ] +5CFD22 FD22 [Trivial][ILSEQ] +5CFD25 FD25 [Trivial][ILSEQ] +5CFD27 NULL [SyntErr] +5CFD30 FD30 [Trivial][ILSEQ] +5CFD3F FD3F [Trivial][ILSEQ] +5CFD40 FD40 [Trivial][ILSEQ] +5CFD5A FD5A [Trivial][ILSEQ] +5CFD5C NULL [SyntErr] +5CFD5F FD5F [Trivial][ILSEQ] +5CFD61 FD61 [Trivial][ILSEQ] +5CFD62 FD62 [Trivial][ILSEQ] +5CFD6E FD6E [Trivial][ILSEQ] +5CFD72 FD72 [Trivial][ILSEQ] +5CFD74 FD74 [Trivial][ILSEQ] +5CFD7E FD7E [Trivial][ILSEQ] +5CFD7F FD7F [Trivial][ILSEQ] +5CFD80 FD80 [Trivial][ILSEQ] +5CFD81 FD81 [Trivial][ILSEQ] +5CFD9F FD9F [Trivial][ILSEQ] +5CFDA0 FDA0 [Trivial][ILSEQ] +5CFDA1 FDA1 [Trivial][ILSEQ] +5CFDE0 FDE0 [Trivial][ILSEQ] +5CFDEF FDEF [Trivial][ILSEQ] +5CFDF9 FDF9 [Trivial][ILSEQ] +5CFDFA FDFA [Trivial][ILSEQ] +5CFDFC FDFC [Trivial][ILSEQ] +5CFDFD FDFD [Trivial][ILSEQ] +5CFDFE FDFE [Trivial][ILSEQ] +5CFDFF FDFF [Trivial][ILSEQ] +5CFE00 FE00 [Trivial][ILSEQ] 
+5CFE08 FE08 [Trivial][ILSEQ] +5CFE09 FE09 [Trivial][ILSEQ] +5CFE0A FE0A [Trivial][ILSEQ] +5CFE0D FE0D [Trivial][ILSEQ] +5CFE1A FE1A [Trivial][ILSEQ] +5CFE22 FE22 [Trivial][ILSEQ] +5CFE25 FE25 [Trivial][ILSEQ] +5CFE27 NULL [SyntErr] +5CFE30 FE30 [Trivial][ILSEQ] +5CFE3F FE3F [Trivial][ILSEQ] +5CFE40 FE40 [Trivial][ILSEQ] +5CFE5A FE5A [Trivial][ILSEQ] +5CFE5C NULL [SyntErr] +5CFE5F FE5F [Trivial][ILSEQ] +5CFE61 FE61 [Trivial][ILSEQ] +5CFE62 FE62 [Trivial][ILSEQ] +5CFE6E FE6E [Trivial][ILSEQ] +5CFE72 FE72 [Trivial][ILSEQ] +5CFE74 FE74 [Trivial][ILSEQ] +5CFE7E FE7E [Trivial][ILSEQ] +5CFE7F FE7F [Trivial][ILSEQ] +5CFE80 FE80 [Trivial][ILSEQ] +5CFE81 FE81 [Trivial][ILSEQ] +5CFE9F FE9F [Trivial][ILSEQ] +5CFEA0 FEA0 [Trivial][ILSEQ] +5CFEA1 FEA1 [Trivial][ILSEQ] +5CFEE0 FEE0 [Trivial][ILSEQ] +5CFEEF FEEF [Trivial][ILSEQ] +5CFEF9 FEF9 [Trivial][ILSEQ] +5CFEFA FEFA [Trivial][ILSEQ] +5CFEFC FEFC [Trivial][ILSEQ] +5CFEFD FEFD [Trivial][ILSEQ] +5CFEFE FEFE [Trivial][ILSEQ] +5CFEFF FEFF [Trivial][ILSEQ] +5CFF00 FF00 [Trivial][ILSEQ] +5CFF08 FF08 [Trivial][ILSEQ] +5CFF09 FF09 [Trivial][ILSEQ] +5CFF0A FF0A [Trivial][ILSEQ] +5CFF0D FF0D [Trivial][ILSEQ] +5CFF1A FF1A [Trivial][ILSEQ] +5CFF22 FF22 [Trivial][ILSEQ] +5CFF25 FF25 [Trivial][ILSEQ] +5CFF27 NULL [SyntErr] +5CFF30 FF30 [Trivial][ILSEQ] +5CFF3F FF3F [Trivial][ILSEQ] +5CFF40 FF40 [Trivial][ILSEQ] +5CFF5A FF5A [Trivial][ILSEQ] +5CFF5C NULL [SyntErr] +5CFF5F FF5F [Trivial][ILSEQ] +5CFF61 FF61 [Trivial][ILSEQ] +5CFF62 FF62 [Trivial][ILSEQ] +5CFF6E FF6E [Trivial][ILSEQ] +5CFF72 FF72 [Trivial][ILSEQ] +5CFF74 FF74 [Trivial][ILSEQ] +5CFF7E FF7E [Trivial][ILSEQ] +5CFF7F FF7F [Trivial][ILSEQ] +5CFF80 FF80 [Trivial][ILSEQ] +5CFF81 FF81 [Trivial][ILSEQ] +5CFF9F FF9F [Trivial][ILSEQ] +5CFFA0 FFA0 [Trivial][ILSEQ] +5CFFA1 FFA1 [Trivial][ILSEQ] +5CFFE0 FFE0 [Trivial][ILSEQ] +5CFFEF FFEF [Trivial][ILSEQ] +5CFFF9 FFF9 [Trivial][ILSEQ] +5CFFFA FFFA [Trivial][ILSEQ] +5CFFFC FFFC [Trivial][ILSEQ] +5CFFFD FFFD [Trivial][ILSEQ] +5CFFFE FFFE [Trivial][ILSEQ] +5CFFFF FFFF [Trivial][ILSEQ] +5C005C00 0000 [Trivial] +5C005C08 0008 [Trivial] +5C005C09 0009 [Trivial] +5C005C0A 000A [Trivial] +5C005C0D 000D [Trivial] +5C005C1A 001A [Trivial] +5C005C22 0022 [Trivial] +5C005C25 005C25 [Regular] +5C005C27 0027 [Trivial] +5C005C30 0000 [Regular] +5C005C3F 003F [Trivial] +5C005C40 0040 [Trivial] +5C005C5A 001A [Regular] +5C005C5C 005C [Regular] +5C005C5F 005C5F [Regular] +5C005C61 0061 [Trivial] +5C005C62 0008 [Regular] +5C005C6E 000A [Regular] +5C005C72 000D [Regular] +5C005C74 0009 [Regular] +5C005C7E 007E [Trivial] +5C005C7F 007F [Trivial] +5C005C80 0080 [Trivial][ILSEQ] +5C005C81 0081 [Trivial][ILSEQ] +5C005C9F 009F [Trivial][ILSEQ] +5C005CA0 00A0 [Trivial][ILSEQ] +5C005CA1 00A1 [Trivial] +5C005CE0 00E0 [Trivial][ILSEQ] +5C005CEF 00EF [Trivial][ILSEQ] +5C005CF9 00F9 [Trivial][ILSEQ] +5C005CFA 00FA [Trivial][ILSEQ] +5C005CFC 00FC [Trivial][ILSEQ] +5C005CFD 00FD [Trivial][ILSEQ] +5C005CFE 00FE [Trivial][ILSEQ] +5C005CFF 00FF [Trivial][ILSEQ] +5C085C00 0800 [Trivial] +5C085C08 0808 [Trivial] +5C085C09 0809 [Trivial] +5C085C0A 080A [Trivial] +5C085C0D 080D [Trivial] +5C085C1A 081A [Trivial] +5C085C22 0822 [Trivial] +5C085C25 085C25 [Regular] +5C085C27 0827 [Trivial] +5C085C30 0800 [Regular] +5C085C3F 083F [Trivial] +5C085C40 0840 [Trivial] +5C085C5A 081A [Regular] +5C085C5C 085C [Regular] +5C085C5F 085C5F [Regular] +5C085C61 0861 [Trivial] +5C085C62 0808 [Regular] +5C085C6E 080A [Regular] +5C085C72 080D [Regular] +5C085C74 0809 [Regular] +5C085C7E 087E [Trivial] +5C085C7F 087F 
[Trivial] +5C085C80 0880 [Trivial][ILSEQ] +5C085C81 0881 [Trivial][ILSEQ] +5C085C9F 089F [Trivial][ILSEQ] +5C085CA0 08A0 [Trivial][ILSEQ] +5C085CA1 08A1 [Trivial] +5C085CE0 08E0 [Trivial][ILSEQ] +5C085CEF 08EF [Trivial][ILSEQ] +5C085CF9 08F9 [Trivial][ILSEQ] +5C085CFA 08FA [Trivial][ILSEQ] +5C085CFC 08FC [Trivial][ILSEQ] +5C085CFD 08FD [Trivial][ILSEQ] +5C085CFE 08FE [Trivial][ILSEQ] +5C085CFF 08FF [Trivial][ILSEQ] +5C095C00 0900 [Trivial] +5C095C08 0908 [Trivial] +5C095C09 0909 [Trivial] +5C095C0A 090A [Trivial] +5C095C0D 090D [Trivial] +5C095C1A 091A [Trivial] +5C095C22 0922 [Trivial] +5C095C25 095C25 [Regular] +5C095C27 0927 [Trivial] +5C095C30 0900 [Regular] +5C095C3F 093F [Trivial] +5C095C40 0940 [Trivial] +5C095C5A 091A [Regular] +5C095C5C 095C [Regular] +5C095C5F 095C5F [Regular] +5C095C61 0961 [Trivial] +5C095C62 0908 [Regular] +5C095C6E 090A [Regular] +5C095C72 090D [Regular] +5C095C74 0909 [Regular] +5C095C7E 097E [Trivial] +5C095C7F 097F [Trivial] +5C095C80 0980 [Trivial][ILSEQ] +5C095C81 0981 [Trivial][ILSEQ] +5C095C9F 099F [Trivial][ILSEQ] +5C095CA0 09A0 [Trivial][ILSEQ] +5C095CA1 09A1 [Trivial] +5C095CE0 09E0 [Trivial][ILSEQ] +5C095CEF 09EF [Trivial][ILSEQ] +5C095CF9 09F9 [Trivial][ILSEQ] +5C095CFA 09FA [Trivial][ILSEQ] +5C095CFC 09FC [Trivial][ILSEQ] +5C095CFD 09FD [Trivial][ILSEQ] +5C095CFE 09FE [Trivial][ILSEQ] +5C095CFF 09FF [Trivial][ILSEQ] +5C0A5C00 0A00 [Trivial] +5C0A5C08 0A08 [Trivial] +5C0A5C09 0A09 [Trivial] +5C0A5C0A 0A0A [Trivial] +5C0A5C0D 0A0D [Trivial] +5C0A5C1A 0A1A [Trivial] +5C0A5C22 0A22 [Trivial] +5C0A5C25 0A5C25 [Regular] +5C0A5C27 0A27 [Trivial] +5C0A5C30 0A00 [Regular] +5C0A5C3F 0A3F [Trivial] +5C0A5C40 0A40 [Trivial] +5C0A5C5A 0A1A [Regular] +5C0A5C5C 0A5C [Regular] +5C0A5C5F 0A5C5F [Regular] +5C0A5C61 0A61 [Trivial] +5C0A5C62 0A08 [Regular] +5C0A5C6E 0A0A [Regular] +5C0A5C72 0A0D [Regular] +5C0A5C74 0A09 [Regular] +5C0A5C7E 0A7E [Trivial] +5C0A5C7F 0A7F [Trivial] +5C0A5C80 0A80 [Trivial][ILSEQ] +5C0A5C81 0A81 [Trivial][ILSEQ] +5C0A5C9F 0A9F [Trivial][ILSEQ] +5C0A5CA0 0AA0 [Trivial][ILSEQ] +5C0A5CA1 0AA1 [Trivial] +5C0A5CE0 0AE0 [Trivial][ILSEQ] +5C0A5CEF 0AEF [Trivial][ILSEQ] +5C0A5CF9 0AF9 [Trivial][ILSEQ] +5C0A5CFA 0AFA [Trivial][ILSEQ] +5C0A5CFC 0AFC [Trivial][ILSEQ] +5C0A5CFD 0AFD [Trivial][ILSEQ] +5C0A5CFE 0AFE [Trivial][ILSEQ] +5C0A5CFF 0AFF [Trivial][ILSEQ] +5C0D5C00 0D00 [Trivial] +5C0D5C08 0D08 [Trivial] +5C0D5C09 0D09 [Trivial] +5C0D5C0A 0D0A [Trivial] +5C0D5C0D 0D0D [Trivial] +5C0D5C1A 0D1A [Trivial] +5C0D5C22 0D22 [Trivial] +5C0D5C25 0D5C25 [Regular] +5C0D5C27 0D27 [Trivial] +5C0D5C30 0D00 [Regular] +5C0D5C3F 0D3F [Trivial] +5C0D5C40 0D40 [Trivial] +5C0D5C5A 0D1A [Regular] +5C0D5C5C 0D5C [Regular] +5C0D5C5F 0D5C5F [Regular] +5C0D5C61 0D61 [Trivial] +5C0D5C62 0D08 [Regular] +5C0D5C6E 0D0A [Regular] +5C0D5C72 0D0D [Regular] +5C0D5C74 0D09 [Regular] +5C0D5C7E 0D7E [Trivial] +5C0D5C7F 0D7F [Trivial] +5C0D5C80 0D80 [Trivial][ILSEQ] +5C0D5C81 0D81 [Trivial][ILSEQ] +5C0D5C9F 0D9F [Trivial][ILSEQ] +5C0D5CA0 0DA0 [Trivial][ILSEQ] +5C0D5CA1 0DA1 [Trivial] +5C0D5CE0 0DE0 [Trivial][ILSEQ] +5C0D5CEF 0DEF [Trivial][ILSEQ] +5C0D5CF9 0DF9 [Trivial][ILSEQ] +5C0D5CFA 0DFA [Trivial][ILSEQ] +5C0D5CFC 0DFC [Trivial][ILSEQ] +5C0D5CFD 0DFD [Trivial][ILSEQ] +5C0D5CFE 0DFE [Trivial][ILSEQ] +5C0D5CFF 0DFF [Trivial][ILSEQ] +5C1A5C00 1A00 [Trivial] +5C1A5C08 1A08 [Trivial] +5C1A5C09 1A09 [Trivial] +5C1A5C0A 1A0A [Trivial] +5C1A5C0D 1A0D [Trivial] +5C1A5C1A 1A1A [Trivial] +5C1A5C22 1A22 [Trivial] +5C1A5C25 1A5C25 [Regular] +5C1A5C27 1A27 [Trivial] +5C1A5C30 1A00 
[Regular] +5C1A5C3F 1A3F [Trivial] +5C1A5C40 1A40 [Trivial] +5C1A5C5A 1A1A [Regular] +5C1A5C5C 1A5C [Regular] +5C1A5C5F 1A5C5F [Regular] +5C1A5C61 1A61 [Trivial] +5C1A5C62 1A08 [Regular] +5C1A5C6E 1A0A [Regular] +5C1A5C72 1A0D [Regular] +5C1A5C74 1A09 [Regular] +5C1A5C7E 1A7E [Trivial] +5C1A5C7F 1A7F [Trivial] +5C1A5C80 1A80 [Trivial][ILSEQ] +5C1A5C81 1A81 [Trivial][ILSEQ] +5C1A5C9F 1A9F [Trivial][ILSEQ] +5C1A5CA0 1AA0 [Trivial][ILSEQ] +5C1A5CA1 1AA1 [Trivial] +5C1A5CE0 1AE0 [Trivial][ILSEQ] +5C1A5CEF 1AEF [Trivial][ILSEQ] +5C1A5CF9 1AF9 [Trivial][ILSEQ] +5C1A5CFA 1AFA [Trivial][ILSEQ] +5C1A5CFC 1AFC [Trivial][ILSEQ] +5C1A5CFD 1AFD [Trivial][ILSEQ] +5C1A5CFE 1AFE [Trivial][ILSEQ] +5C1A5CFF 1AFF [Trivial][ILSEQ] +5C225C00 2200 [Trivial] +5C225C08 2208 [Trivial] +5C225C09 2209 [Trivial] +5C225C0A 220A [Trivial] +5C225C0D 220D [Trivial] +5C225C1A 221A [Trivial] +5C225C22 2222 [Trivial] +5C225C25 225C25 [Regular] +5C225C27 2227 [Trivial] +5C225C30 2200 [Regular] +5C225C3F 223F [Trivial] +5C225C40 2240 [Trivial] +5C225C5A 221A [Regular] +5C225C5C 225C [Regular] +5C225C5F 225C5F [Regular] +5C225C61 2261 [Trivial] +5C225C62 2208 [Regular] +5C225C6E 220A [Regular] +5C225C72 220D [Regular] +5C225C74 2209 [Regular] +5C225C7E 227E [Trivial] +5C225C7F 227F [Trivial] +5C225C80 2280 [Trivial][ILSEQ] +5C225C81 2281 [Trivial][ILSEQ] +5C225C9F 229F [Trivial][ILSEQ] +5C225CA0 22A0 [Trivial][ILSEQ] +5C225CA1 22A1 [Trivial] +5C225CE0 22E0 [Trivial][ILSEQ] +5C225CEF 22EF [Trivial][ILSEQ] +5C225CF9 22F9 [Trivial][ILSEQ] +5C225CFA 22FA [Trivial][ILSEQ] +5C225CFC 22FC [Trivial][ILSEQ] +5C225CFD 22FD [Trivial][ILSEQ] +5C225CFE 22FE [Trivial][ILSEQ] +5C225CFF 22FF [Trivial][ILSEQ] +5C255C00 5C2500 [Regular] +5C255C08 5C2508 [Regular] +5C255C09 5C2509 [Regular] +5C255C0A 5C250A [Regular] +5C255C0D 5C250D [Regular] +5C255C1A 5C251A [Regular] +5C255C22 5C2522 [Regular] +5C255C25 5C255C25 [Preserve][LIKE] +5C255C27 5C2527 [Regular] +5C255C30 5C2500 [Regular] +5C255C3F 5C253F [Regular] +5C255C40 5C2540 [Regular] +5C255C5A 5C251A [Regular] +5C255C5C 5C255C [Regular] +5C255C5F 5C255C5F [Preserve][LIKE] +5C255C61 5C2561 [Regular] +5C255C62 5C2508 [Regular] +5C255C6E 5C250A [Regular] +5C255C72 5C250D [Regular] +5C255C74 5C2509 [Regular] +5C255C7E 5C257E [Regular] +5C255C7F 5C257F [Regular] +5C255C80 5C2580 [Regular][ILSEQ] +5C255C81 5C2581 [Regular][ILSEQ] +5C255C9F 5C259F [Regular][ILSEQ] +5C255CA0 5C25A0 [Regular][ILSEQ] +5C255CA1 5C25A1 [Regular] +5C255CE0 5C25E0 [Regular][ILSEQ] +5C255CEF 5C25EF [Regular][ILSEQ] +5C255CF9 5C25F9 [Regular][ILSEQ] +5C255CFA 5C25FA [Regular][ILSEQ] +5C255CFC 5C25FC [Regular][ILSEQ] +5C255CFD 5C25FD [Regular][ILSEQ] +5C255CFE 5C25FE [Regular][ILSEQ] +5C255CFF 5C25FF [Regular][ILSEQ] +5C275C00 2700 [Trivial] +5C275C08 2708 [Trivial] +5C275C09 2709 [Trivial] +5C275C0A 270A [Trivial] +5C275C0D 270D [Trivial] +5C275C1A 271A [Trivial] +5C275C22 2722 [Trivial] +5C275C25 275C25 [Regular] +5C275C27 2727 [Trivial] +5C275C30 2700 [Regular] +5C275C3F 273F [Trivial] +5C275C40 2740 [Trivial] +5C275C5A 271A [Regular] +5C275C5C 275C [Regular] +5C275C5F 275C5F [Regular] +5C275C61 2761 [Trivial] +5C275C62 2708 [Regular] +5C275C6E 270A [Regular] +5C275C72 270D [Regular] +5C275C74 2709 [Regular] +5C275C7E 277E [Trivial] +5C275C7F 277F [Trivial] +5C275C80 2780 [Trivial][ILSEQ] +5C275C81 2781 [Trivial][ILSEQ] +5C275C9F 279F [Trivial][ILSEQ] +5C275CA0 27A0 [Trivial][ILSEQ] +5C275CA1 27A1 [Trivial] +5C275CE0 27E0 [Trivial][ILSEQ] +5C275CEF 27EF [Trivial][ILSEQ] +5C275CF9 27F9 [Trivial][ILSEQ] +5C275CFA 27FA 
[Trivial][ILSEQ] +5C275CFC 27FC [Trivial][ILSEQ] +5C275CFD 27FD [Trivial][ILSEQ] +5C275CFE 27FE [Trivial][ILSEQ] +5C275CFF 27FF [Trivial][ILSEQ] +5C305C00 0000 [Regular] +5C305C08 0008 [Regular] +5C305C09 0009 [Regular] +5C305C0A 000A [Regular] +5C305C0D 000D [Regular] +5C305C1A 001A [Regular] +5C305C22 0022 [Regular] +5C305C25 005C25 [Regular] +5C305C27 0027 [Regular] +5C305C30 0000 [Regular] +5C305C3F 003F [Regular] +5C305C40 0040 [Regular] +5C305C5A 001A [Regular] +5C305C5C 005C [Regular] +5C305C5F 005C5F [Regular] +5C305C61 0061 [Regular] +5C305C62 0008 [Regular] +5C305C6E 000A [Regular] +5C305C72 000D [Regular] +5C305C74 0009 [Regular] +5C305C7E 007E [Regular] +5C305C7F 007F [Regular] +5C305C80 0080 [Regular][ILSEQ] +5C305C81 0081 [Regular][ILSEQ] +5C305C9F 009F [Regular][ILSEQ] +5C305CA0 00A0 [Regular][ILSEQ] +5C305CA1 00A1 [Regular] +5C305CE0 00E0 [Regular][ILSEQ] +5C305CEF 00EF [Regular][ILSEQ] +5C305CF9 00F9 [Regular][ILSEQ] +5C305CFA 00FA [Regular][ILSEQ] +5C305CFC 00FC [Regular][ILSEQ] +5C305CFD 00FD [Regular][ILSEQ] +5C305CFE 00FE [Regular][ILSEQ] +5C305CFF 00FF [Regular][ILSEQ] +5C3F5C00 3F00 [Trivial] +5C3F5C08 3F08 [Trivial] +5C3F5C09 3F09 [Trivial] +5C3F5C0A 3F0A [Trivial] +5C3F5C0D 3F0D [Trivial] +5C3F5C1A 3F1A [Trivial] +5C3F5C22 3F22 [Trivial] +5C3F5C25 3F5C25 [Regular] +5C3F5C27 3F27 [Trivial] +5C3F5C30 3F00 [Regular] +5C3F5C3F 3F3F [Trivial] +5C3F5C40 3F40 [Trivial] +5C3F5C5A 3F1A [Regular] +5C3F5C5C 3F5C [Regular] +5C3F5C5F 3F5C5F [Regular] +5C3F5C61 3F61 [Trivial] +5C3F5C62 3F08 [Regular] +5C3F5C6E 3F0A [Regular] +5C3F5C72 3F0D [Regular] +5C3F5C74 3F09 [Regular] +5C3F5C7E 3F7E [Trivial] +5C3F5C7F 3F7F [Trivial] +5C3F5C80 3F80 [Trivial][ILSEQ] +5C3F5C81 3F81 [Trivial][ILSEQ] +5C3F5C9F 3F9F [Trivial][ILSEQ] +5C3F5CA0 3FA0 [Trivial][ILSEQ] +5C3F5CA1 3FA1 [Trivial] +5C3F5CE0 3FE0 [Trivial][ILSEQ] +5C3F5CEF 3FEF [Trivial][ILSEQ] +5C3F5CF9 3FF9 [Trivial][ILSEQ] +5C3F5CFA 3FFA [Trivial][ILSEQ] +5C3F5CFC 3FFC [Trivial][ILSEQ] +5C3F5CFD 3FFD [Trivial][ILSEQ] +5C3F5CFE 3FFE [Trivial][ILSEQ] +5C3F5CFF 3FFF [Trivial][ILSEQ] +5C405C00 4000 [Trivial] +5C405C08 4008 [Trivial] +5C405C09 4009 [Trivial] +5C405C0A 400A [Trivial] +5C405C0D 400D [Trivial] +5C405C1A 401A [Trivial] +5C405C22 4022 [Trivial] +5C405C25 405C25 [Regular] +5C405C27 4027 [Trivial] +5C405C30 4000 [Regular] +5C405C3F 403F [Trivial] +5C405C40 4040 [Trivial] +5C405C5A 401A [Regular] +5C405C5C 405C [Regular] +5C405C5F 405C5F [Regular] +5C405C61 4061 [Trivial] +5C405C62 4008 [Regular] +5C405C6E 400A [Regular] +5C405C72 400D [Regular] +5C405C74 4009 [Regular] +5C405C7E 407E [Trivial] +5C405C7F 407F [Trivial] +5C405C80 4080 [Trivial][ILSEQ] +5C405C81 4081 [Trivial][ILSEQ] +5C405C9F 409F [Trivial][ILSEQ] +5C405CA0 40A0 [Trivial][ILSEQ] +5C405CA1 40A1 [Trivial] +5C405CE0 40E0 [Trivial][ILSEQ] +5C405CEF 40EF [Trivial][ILSEQ] +5C405CF9 40F9 [Trivial][ILSEQ] +5C405CFA 40FA [Trivial][ILSEQ] +5C405CFC 40FC [Trivial][ILSEQ] +5C405CFD 40FD [Trivial][ILSEQ] +5C405CFE 40FE [Trivial][ILSEQ] +5C405CFF 40FF [Trivial][ILSEQ] +5C5A5C00 1A00 [Regular] +5C5A5C08 1A08 [Regular] +5C5A5C09 1A09 [Regular] +5C5A5C0A 1A0A [Regular] +5C5A5C0D 1A0D [Regular] +5C5A5C1A 1A1A [Regular] +5C5A5C22 1A22 [Regular] +5C5A5C25 1A5C25 [Regular] +5C5A5C27 1A27 [Regular] +5C5A5C30 1A00 [Regular] +5C5A5C3F 1A3F [Regular] +5C5A5C40 1A40 [Regular] +5C5A5C5A 1A1A [Regular] +5C5A5C5C 1A5C [Regular] +5C5A5C5F 1A5C5F [Regular] +5C5A5C61 1A61 [Regular] +5C5A5C62 1A08 [Regular] +5C5A5C6E 1A0A [Regular] +5C5A5C72 1A0D [Regular] +5C5A5C74 1A09 [Regular] +5C5A5C7E 1A7E 
[Regular] +5C5A5C7F 1A7F [Regular] +5C5A5C80 1A80 [Regular][ILSEQ] +5C5A5C81 1A81 [Regular][ILSEQ] +5C5A5C9F 1A9F [Regular][ILSEQ] +5C5A5CA0 1AA0 [Regular][ILSEQ] +5C5A5CA1 1AA1 [Regular] +5C5A5CE0 1AE0 [Regular][ILSEQ] +5C5A5CEF 1AEF [Regular][ILSEQ] +5C5A5CF9 1AF9 [Regular][ILSEQ] +5C5A5CFA 1AFA [Regular][ILSEQ] +5C5A5CFC 1AFC [Regular][ILSEQ] +5C5A5CFD 1AFD [Regular][ILSEQ] +5C5A5CFE 1AFE [Regular][ILSEQ] +5C5A5CFF 1AFF [Regular][ILSEQ] +5C5C5C00 5C00 [Regular] +5C5C5C08 5C08 [Regular] +5C5C5C09 5C09 [Regular] +5C5C5C0A 5C0A [Regular] +5C5C5C0D 5C0D [Regular] +5C5C5C1A 5C1A [Regular] +5C5C5C22 5C22 [Regular] +5C5C5C25 5C5C25 [Regular] +5C5C5C27 5C27 [Regular] +5C5C5C30 5C00 [Regular] +5C5C5C3F 5C3F [Regular] +5C5C5C40 5C40 [Regular] +5C5C5C5A 5C1A [Regular] +5C5C5C5C 5C5C [Regular] +5C5C5C5F 5C5C5F [Regular] +5C5C5C61 5C61 [Regular] +5C5C5C62 5C08 [Regular] +5C5C5C6E 5C0A [Regular] +5C5C5C72 5C0D [Regular] +5C5C5C74 5C09 [Regular] +5C5C5C7E 5C7E [Regular] +5C5C5C7F 5C7F [Regular] +5C5C5C80 5C80 [Regular][ILSEQ] +5C5C5C81 5C81 [Regular][ILSEQ] +5C5C5C9F 5C9F [Regular][ILSEQ] +5C5C5CA0 5CA0 [Regular][ILSEQ] +5C5C5CA1 5CA1 [Regular] +5C5C5CE0 5CE0 [Regular][ILSEQ] +5C5C5CEF 5CEF [Regular][ILSEQ] +5C5C5CF9 5CF9 [Regular][ILSEQ] +5C5C5CFA 5CFA [Regular][ILSEQ] +5C5C5CFC 5CFC [Regular][ILSEQ] +5C5C5CFD 5CFD [Regular][ILSEQ] +5C5C5CFE 5CFE [Regular][ILSEQ] +5C5C5CFF 5CFF [Regular][ILSEQ] +5C5F5C00 5C5F00 [Regular] +5C5F5C08 5C5F08 [Regular] +5C5F5C09 5C5F09 [Regular] +5C5F5C0A 5C5F0A [Regular] +5C5F5C0D 5C5F0D [Regular] +5C5F5C1A 5C5F1A [Regular] +5C5F5C22 5C5F22 [Regular] +5C5F5C25 5C5F5C25 [Preserve][LIKE] +5C5F5C27 5C5F27 [Regular] +5C5F5C30 5C5F00 [Regular] +5C5F5C3F 5C5F3F [Regular] +5C5F5C40 5C5F40 [Regular] +5C5F5C5A 5C5F1A [Regular] +5C5F5C5C 5C5F5C [Regular] +5C5F5C5F 5C5F5C5F [Preserve][LIKE] +5C5F5C61 5C5F61 [Regular] +5C5F5C62 5C5F08 [Regular] +5C5F5C6E 5C5F0A [Regular] +5C5F5C72 5C5F0D [Regular] +5C5F5C74 5C5F09 [Regular] +5C5F5C7E 5C5F7E [Regular] +5C5F5C7F 5C5F7F [Regular] +5C5F5C80 5C5F80 [Regular][ILSEQ] +5C5F5C81 5C5F81 [Regular][ILSEQ] +5C5F5C9F 5C5F9F [Regular][ILSEQ] +5C5F5CA0 5C5FA0 [Regular][ILSEQ] +5C5F5CA1 5C5FA1 [Regular] +5C5F5CE0 5C5FE0 [Regular][ILSEQ] +5C5F5CEF 5C5FEF [Regular][ILSEQ] +5C5F5CF9 5C5FF9 [Regular][ILSEQ] +5C5F5CFA 5C5FFA [Regular][ILSEQ] +5C5F5CFC 5C5FFC [Regular][ILSEQ] +5C5F5CFD 5C5FFD [Regular][ILSEQ] +5C5F5CFE 5C5FFE [Regular][ILSEQ] +5C5F5CFF 5C5FFF [Regular][ILSEQ] +5C615C00 6100 [Trivial] +5C615C08 6108 [Trivial] +5C615C09 6109 [Trivial] +5C615C0A 610A [Trivial] +5C615C0D 610D [Trivial] +5C615C1A 611A [Trivial] +5C615C22 6122 [Trivial] +5C615C25 615C25 [Regular] +5C615C27 6127 [Trivial] +5C615C30 6100 [Regular] +5C615C3F 613F [Trivial] +5C615C40 6140 [Trivial] +5C615C5A 611A [Regular] +5C615C5C 615C [Regular] +5C615C5F 615C5F [Regular] +5C615C61 6161 [Trivial] +5C615C62 6108 [Regular] +5C615C6E 610A [Regular] +5C615C72 610D [Regular] +5C615C74 6109 [Regular] +5C615C7E 617E [Trivial] +5C615C7F 617F [Trivial] +5C615C80 6180 [Trivial][ILSEQ] +5C615C81 6181 [Trivial][ILSEQ] +5C615C9F 619F [Trivial][ILSEQ] +5C615CA0 61A0 [Trivial][ILSEQ] +5C615CA1 61A1 [Trivial] +5C615CE0 61E0 [Trivial][ILSEQ] +5C615CEF 61EF [Trivial][ILSEQ] +5C615CF9 61F9 [Trivial][ILSEQ] +5C615CFA 61FA [Trivial][ILSEQ] +5C615CFC 61FC [Trivial][ILSEQ] +5C615CFD 61FD [Trivial][ILSEQ] +5C615CFE 61FE [Trivial][ILSEQ] +5C615CFF 61FF [Trivial][ILSEQ] +5C625C00 0800 [Regular] +5C625C08 0808 [Regular] +5C625C09 0809 [Regular] +5C625C0A 080A [Regular] +5C625C0D 080D [Regular] +5C625C1A 
081A [Regular] +5C625C22 0822 [Regular] +5C625C25 085C25 [Regular] +5C625C27 0827 [Regular] +5C625C30 0800 [Regular] +5C625C3F 083F [Regular] +5C625C40 0840 [Regular] +5C625C5A 081A [Regular] +5C625C5C 085C [Regular] +5C625C5F 085C5F [Regular] +5C625C61 0861 [Regular] +5C625C62 0808 [Regular] +5C625C6E 080A [Regular] +5C625C72 080D [Regular] +5C625C74 0809 [Regular] +5C625C7E 087E [Regular] +5C625C7F 087F [Regular] +5C625C80 0880 [Regular][ILSEQ] +5C625C81 0881 [Regular][ILSEQ] +5C625C9F 089F [Regular][ILSEQ] +5C625CA0 08A0 [Regular][ILSEQ] +5C625CA1 08A1 [Regular] +5C625CE0 08E0 [Regular][ILSEQ] +5C625CEF 08EF [Regular][ILSEQ] +5C625CF9 08F9 [Regular][ILSEQ] +5C625CFA 08FA [Regular][ILSEQ] +5C625CFC 08FC [Regular][ILSEQ] +5C625CFD 08FD [Regular][ILSEQ] +5C625CFE 08FE [Regular][ILSEQ] +5C625CFF 08FF [Regular][ILSEQ] +5C6E5C00 0A00 [Regular] +5C6E5C08 0A08 [Regular] +5C6E5C09 0A09 [Regular] +5C6E5C0A 0A0A [Regular] +5C6E5C0D 0A0D [Regular] +5C6E5C1A 0A1A [Regular] +5C6E5C22 0A22 [Regular] +5C6E5C25 0A5C25 [Regular] +5C6E5C27 0A27 [Regular] +5C6E5C30 0A00 [Regular] +5C6E5C3F 0A3F [Regular] +5C6E5C40 0A40 [Regular] +5C6E5C5A 0A1A [Regular] +5C6E5C5C 0A5C [Regular] +5C6E5C5F 0A5C5F [Regular] +5C6E5C61 0A61 [Regular] +5C6E5C62 0A08 [Regular] +5C6E5C6E 0A0A [Regular] +5C6E5C72 0A0D [Regular] +5C6E5C74 0A09 [Regular] +5C6E5C7E 0A7E [Regular] +5C6E5C7F 0A7F [Regular] +5C6E5C80 0A80 [Regular][ILSEQ] +5C6E5C81 0A81 [Regular][ILSEQ] +5C6E5C9F 0A9F [Regular][ILSEQ] +5C6E5CA0 0AA0 [Regular][ILSEQ] +5C6E5CA1 0AA1 [Regular] +5C6E5CE0 0AE0 [Regular][ILSEQ] +5C6E5CEF 0AEF [Regular][ILSEQ] +5C6E5CF9 0AF9 [Regular][ILSEQ] +5C6E5CFA 0AFA [Regular][ILSEQ] +5C6E5CFC 0AFC [Regular][ILSEQ] +5C6E5CFD 0AFD [Regular][ILSEQ] +5C6E5CFE 0AFE [Regular][ILSEQ] +5C6E5CFF 0AFF [Regular][ILSEQ] +5C725C00 0D00 [Regular] +5C725C08 0D08 [Regular] +5C725C09 0D09 [Regular] +5C725C0A 0D0A [Regular] +5C725C0D 0D0D [Regular] +5C725C1A 0D1A [Regular] +5C725C22 0D22 [Regular] +5C725C25 0D5C25 [Regular] +5C725C27 0D27 [Regular] +5C725C30 0D00 [Regular] +5C725C3F 0D3F [Regular] +5C725C40 0D40 [Regular] +5C725C5A 0D1A [Regular] +5C725C5C 0D5C [Regular] +5C725C5F 0D5C5F [Regular] +5C725C61 0D61 [Regular] +5C725C62 0D08 [Regular] +5C725C6E 0D0A [Regular] +5C725C72 0D0D [Regular] +5C725C74 0D09 [Regular] +5C725C7E 0D7E [Regular] +5C725C7F 0D7F [Regular] +5C725C80 0D80 [Regular][ILSEQ] +5C725C81 0D81 [Regular][ILSEQ] +5C725C9F 0D9F [Regular][ILSEQ] +5C725CA0 0DA0 [Regular][ILSEQ] +5C725CA1 0DA1 [Regular] +5C725CE0 0DE0 [Regular][ILSEQ] +5C725CEF 0DEF [Regular][ILSEQ] +5C725CF9 0DF9 [Regular][ILSEQ] +5C725CFA 0DFA [Regular][ILSEQ] +5C725CFC 0DFC [Regular][ILSEQ] +5C725CFD 0DFD [Regular][ILSEQ] +5C725CFE 0DFE [Regular][ILSEQ] +5C725CFF 0DFF [Regular][ILSEQ] +5C745C00 0900 [Regular] +5C745C08 0908 [Regular] +5C745C09 0909 [Regular] +5C745C0A 090A [Regular] +5C745C0D 090D [Regular] +5C745C1A 091A [Regular] +5C745C22 0922 [Regular] +5C745C25 095C25 [Regular] +5C745C27 0927 [Regular] +5C745C30 0900 [Regular] +5C745C3F 093F [Regular] +5C745C40 0940 [Regular] +5C745C5A 091A [Regular] +5C745C5C 095C [Regular] +5C745C5F 095C5F [Regular] +5C745C61 0961 [Regular] +5C745C62 0908 [Regular] +5C745C6E 090A [Regular] +5C745C72 090D [Regular] +5C745C74 0909 [Regular] +5C745C7E 097E [Regular] +5C745C7F 097F [Regular] +5C745C80 0980 [Regular][ILSEQ] +5C745C81 0981 [Regular][ILSEQ] +5C745C9F 099F [Regular][ILSEQ] +5C745CA0 09A0 [Regular][ILSEQ] +5C745CA1 09A1 [Regular] +5C745CE0 09E0 [Regular][ILSEQ] +5C745CEF 09EF [Regular][ILSEQ] +5C745CF9 09F9 
[Regular][ILSEQ] +5C745CFA 09FA [Regular][ILSEQ] +5C745CFC 09FC [Regular][ILSEQ] +5C745CFD 09FD [Regular][ILSEQ] +5C745CFE 09FE [Regular][ILSEQ] +5C745CFF 09FF [Regular][ILSEQ] +5C7E5C00 7E00 [Trivial] +5C7E5C08 7E08 [Trivial] +5C7E5C09 7E09 [Trivial] +5C7E5C0A 7E0A [Trivial] +5C7E5C0D 7E0D [Trivial] +5C7E5C1A 7E1A [Trivial] +5C7E5C22 7E22 [Trivial] +5C7E5C25 7E5C25 [Regular] +5C7E5C27 7E27 [Trivial] +5C7E5C30 7E00 [Regular] +5C7E5C3F 7E3F [Trivial] +5C7E5C40 7E40 [Trivial] +5C7E5C5A 7E1A [Regular] +5C7E5C5C 7E5C [Regular] +5C7E5C5F 7E5C5F [Regular] +5C7E5C61 7E61 [Trivial] +5C7E5C62 7E08 [Regular] +5C7E5C6E 7E0A [Regular] +5C7E5C72 7E0D [Regular] +5C7E5C74 7E09 [Regular] +5C7E5C7E 7E7E [Trivial] +5C7E5C7F 7E7F [Trivial] +5C7E5C80 7E80 [Trivial][ILSEQ] +5C7E5C81 7E81 [Trivial][ILSEQ] +5C7E5C9F 7E9F [Trivial][ILSEQ] +5C7E5CA0 7EA0 [Trivial][ILSEQ] +5C7E5CA1 7EA1 [Trivial] +5C7E5CE0 7EE0 [Trivial][ILSEQ] +5C7E5CEF 7EEF [Trivial][ILSEQ] +5C7E5CF9 7EF9 [Trivial][ILSEQ] +5C7E5CFA 7EFA [Trivial][ILSEQ] +5C7E5CFC 7EFC [Trivial][ILSEQ] +5C7E5CFD 7EFD [Trivial][ILSEQ] +5C7E5CFE 7EFE [Trivial][ILSEQ] +5C7E5CFF 7EFF [Trivial][ILSEQ] +5C7F5C00 7F00 [Trivial] +5C7F5C08 7F08 [Trivial] +5C7F5C09 7F09 [Trivial] +5C7F5C0A 7F0A [Trivial] +5C7F5C0D 7F0D [Trivial] +5C7F5C1A 7F1A [Trivial] +5C7F5C22 7F22 [Trivial] +5C7F5C25 7F5C25 [Regular] +5C7F5C27 7F27 [Trivial] +5C7F5C30 7F00 [Regular] +5C7F5C3F 7F3F [Trivial] +5C7F5C40 7F40 [Trivial] +5C7F5C5A 7F1A [Regular] +5C7F5C5C 7F5C [Regular] +5C7F5C5F 7F5C5F [Regular] +5C7F5C61 7F61 [Trivial] +5C7F5C62 7F08 [Regular] +5C7F5C6E 7F0A [Regular] +5C7F5C72 7F0D [Regular] +5C7F5C74 7F09 [Regular] +5C7F5C7E 7F7E [Trivial] +5C7F5C7F 7F7F [Trivial] +5C7F5C80 7F80 [Trivial][ILSEQ] +5C7F5C81 7F81 [Trivial][ILSEQ] +5C7F5C9F 7F9F [Trivial][ILSEQ] +5C7F5CA0 7FA0 [Trivial][ILSEQ] +5C7F5CA1 7FA1 [Trivial] +5C7F5CE0 7FE0 [Trivial][ILSEQ] +5C7F5CEF 7FEF [Trivial][ILSEQ] +5C7F5CF9 7FF9 [Trivial][ILSEQ] +5C7F5CFA 7FFA [Trivial][ILSEQ] +5C7F5CFC 7FFC [Trivial][ILSEQ] +5C7F5CFD 7FFD [Trivial][ILSEQ] +5C7F5CFE 7FFE [Trivial][ILSEQ] +5C7F5CFF 7FFF [Trivial][ILSEQ] +5C805C00 8000 [Trivial][ILSEQ] +5C805C08 8008 [Trivial][ILSEQ] +5C805C09 8009 [Trivial][ILSEQ] +5C805C0A 800A [Trivial][ILSEQ] +5C805C0D 800D [Trivial][ILSEQ] +5C805C1A 801A [Trivial][ILSEQ] +5C805C22 8022 [Trivial][ILSEQ] +5C805C25 805C25 [Regular][ILSEQ] +5C805C27 8027 [Trivial][ILSEQ] +5C805C30 8000 [Regular][ILSEQ] +5C805C3F 803F [Trivial][ILSEQ] +5C805C40 8040 [Trivial][ILSEQ] +5C805C5A 801A [Regular][ILSEQ] +5C805C5C 805C [Regular][ILSEQ] +5C805C5F 805C5F [Regular][ILSEQ] +5C805C61 8061 [Trivial][ILSEQ] +5C805C62 8008 [Regular][ILSEQ] +5C805C6E 800A [Regular][ILSEQ] +5C805C72 800D [Regular][ILSEQ] +5C805C74 8009 [Regular][ILSEQ] +5C805C7E 807E [Trivial][ILSEQ] +5C805C7F 807F [Trivial][ILSEQ] +5C805C80 8080 [Trivial][ILSEQ] +5C805C81 8081 [Trivial][ILSEQ] +5C805C9F 809F [Trivial][ILSEQ] +5C805CA0 80A0 [Trivial][ILSEQ] +5C805CA1 80A1 [Trivial][ILSEQ] +5C805CE0 80E0 [Trivial][ILSEQ] +5C805CEF 80EF [Trivial][ILSEQ] +5C805CF9 80F9 [Trivial][ILSEQ] +5C805CFA 80FA [Trivial][ILSEQ] +5C805CFC 80FC [Trivial][ILSEQ] +5C805CFD 80FD [Trivial][ILSEQ] +5C805CFE 80FE [Trivial][ILSEQ] +5C805CFF 80FF [Trivial][ILSEQ] +5C815C00 8100 [Trivial][BROKE] +5C815C08 8108 [Trivial][BROKE] +5C815C09 8109 [Trivial][BROKE] +5C815C0A 810A [Trivial][BROKE] +5C815C0D 810D [Trivial][BROKE] +5C815C1A 811A [Trivial][BROKE] +5C815C22 8122 [Trivial][BROKE] +5C815C25 815C25 [Regular] +5C815C27 8127 [Trivial][BROKE] +5C815C30 8100 [Regular][BROKE] +5C815C3F 
813F [Trivial][BROKE] +5C815C40 8140 [Trivial][USER] +5C815C5A 811A [Regular][BROKE] +5C815C5C 815C [Regular][USER] +5C815C5F 815C5F [Regular] +5C815C61 8161 [Trivial][USER] +5C815C62 8108 [Regular][BROKE][USER] +5C815C6E 810A [Regular][BROKE] +5C815C72 810D [Regular][BROKE] +5C815C74 8109 [Regular][BROKE] +5C815C7E 817E [Trivial][USER] +5C815C7F 817F [Trivial][BROKE] +5C815C80 8180 [Trivial][FIXED][USER] +5C815C81 8181 [Trivial][FIXED][USER] +5C815C9F 819F [Trivial][FIXED][USER] +5C815CA0 81A0 [Trivial][FIXED][USER] +5C815CA1 81A1 [Trivial][USER] +5C815CE0 81E0 [Trivial][FIXED][USER] +5C815CEF 81EF [Trivial][FIXED][USER] +5C815CF9 81F9 [Trivial][FIXED][USER] +5C815CFA 81FA [Trivial][FIXED][USER] +5C815CFC 81FC [Trivial][FIXED][USER] +5C815CFD 81FD [Trivial][ILSEQ] +5C815CFE 81FE [Trivial][ILSEQ] +5C815CFF 81FF [Trivial][ILSEQ] +5C9F5C00 9F00 [Trivial][BROKE] +5C9F5C08 9F08 [Trivial][BROKE] +5C9F5C09 9F09 [Trivial][BROKE] +5C9F5C0A 9F0A [Trivial][BROKE] +5C9F5C0D 9F0D [Trivial][BROKE] +5C9F5C1A 9F1A [Trivial][BROKE] +5C9F5C22 9F22 [Trivial][BROKE] +5C9F5C25 9F5C25 [Regular] +5C9F5C27 9F27 [Trivial][BROKE] +5C9F5C30 9F00 [Regular][BROKE] +5C9F5C3F 9F3F [Trivial][BROKE] +5C9F5C40 9F40 [Trivial][USER] +5C9F5C5A 9F1A [Regular][BROKE] +5C9F5C5C 9F5C [Regular][USER] +5C9F5C5F 9F5C5F [Regular] +5C9F5C61 9F61 [Trivial][USER] +5C9F5C62 9F08 [Regular][BROKE][USER] +5C9F5C6E 9F0A [Regular][BROKE] +5C9F5C72 9F0D [Regular][BROKE] +5C9F5C74 9F09 [Regular][BROKE] +5C9F5C7E 9F7E [Trivial][USER] +5C9F5C7F 9F7F [Trivial][BROKE] +5C9F5C80 9F80 [Trivial][FIXED][USER] +5C9F5C81 9F81 [Trivial][FIXED][USER] +5C9F5C9F 9F9F [Trivial][FIXED][USER] +5C9F5CA0 9FA0 [Trivial][FIXED][USER] +5C9F5CA1 9FA1 [Trivial][USER] +5C9F5CE0 9FE0 [Trivial][FIXED][USER] +5C9F5CEF 9FEF [Trivial][FIXED][USER] +5C9F5CF9 9FF9 [Trivial][FIXED][USER] +5C9F5CFA 9FFA [Trivial][FIXED][USER] +5C9F5CFC 9FFC [Trivial][FIXED][USER] +5C9F5CFD 9FFD [Trivial][ILSEQ] +5C9F5CFE 9FFE [Trivial][ILSEQ] +5C9F5CFF 9FFF [Trivial][ILSEQ] +5CA05C00 A000 [Trivial][ILSEQ] +5CA05C08 A008 [Trivial][ILSEQ] +5CA05C09 A009 [Trivial][ILSEQ] +5CA05C0A A00A [Trivial][ILSEQ] +5CA05C0D A00D [Trivial][ILSEQ] +5CA05C1A A01A [Trivial][ILSEQ] +5CA05C22 A022 [Trivial][ILSEQ] +5CA05C25 A05C25 [Regular][ILSEQ] +5CA05C27 A027 [Trivial][ILSEQ] +5CA05C30 A000 [Regular][ILSEQ] +5CA05C3F A03F [Trivial][ILSEQ] +5CA05C40 A040 [Trivial][ILSEQ] +5CA05C5A A01A [Regular][ILSEQ] +5CA05C5C A05C [Regular][ILSEQ] +5CA05C5F A05C5F [Regular][ILSEQ] +5CA05C61 A061 [Trivial][ILSEQ] +5CA05C62 A008 [Regular][ILSEQ] +5CA05C6E A00A [Regular][ILSEQ] +5CA05C72 A00D [Regular][ILSEQ] +5CA05C74 A009 [Regular][ILSEQ] +5CA05C7E A07E [Trivial][ILSEQ] +5CA05C7F A07F [Trivial][ILSEQ] +5CA05C80 A080 [Trivial][ILSEQ] +5CA05C81 A081 [Trivial][ILSEQ] +5CA05C9F A09F [Trivial][ILSEQ] +5CA05CA0 A0A0 [Trivial][ILSEQ] +5CA05CA1 A0A1 [Trivial][ILSEQ] +5CA05CE0 A0E0 [Trivial][ILSEQ] +5CA05CEF A0EF [Trivial][ILSEQ] +5CA05CF9 A0F9 [Trivial][ILSEQ] +5CA05CFA A0FA [Trivial][ILSEQ] +5CA05CFC A0FC [Trivial][ILSEQ] +5CA05CFD A0FD [Trivial][ILSEQ] +5CA05CFE A0FE [Trivial][ILSEQ] +5CA05CFF A0FF [Trivial][ILSEQ] +5CA15C00 A100 [Trivial][USER] +5CA15C08 A108 [Trivial][USER] +5CA15C09 A109 [Trivial][USER] +5CA15C0A A10A [Trivial][USER] +5CA15C0D A10D [Trivial][USER] +5CA15C1A A11A [Trivial][USER] +5CA15C22 A122 [Trivial][USER] +5CA15C25 A15C25 [Regular] +5CA15C27 A127 [Trivial][USER] +5CA15C30 A100 [Regular] +5CA15C3F A13F [Trivial][USER] +5CA15C40 A140 [Trivial][USER] +5CA15C5A A11A [Regular] +5CA15C5C A15C [Regular][USER] 
+5CA15C5F A15C5F [Regular] +5CA15C61 A161 [Trivial][USER] +5CA15C62 A108 [Regular][USER] +5CA15C6E A10A [Regular] +5CA15C72 A10D [Regular] +5CA15C74 A109 [Regular] +5CA15C7E A17E [Trivial][USER] +5CA15C7F A17F [Trivial][USER] +5CA15C80 A180 [Trivial][ILSEQ] +5CA15C81 A181 [Trivial][ILSEQ] +5CA15C9F A19F [Trivial][ILSEQ] +5CA15CA0 A1A0 [Trivial][ILSEQ] +5CA15CA1 A1A1 [Trivial][USER] +5CA15CE0 A1E0 [Trivial][ILSEQ] +5CA15CEF A1EF [Trivial][ILSEQ] +5CA15CF9 A1F9 [Trivial][ILSEQ] +5CA15CFA A1FA [Trivial][ILSEQ] +5CA15CFC A1FC [Trivial][ILSEQ] +5CA15CFD A1FD [Trivial][ILSEQ] +5CA15CFE A1FE [Trivial][ILSEQ] +5CA15CFF A1FF [Trivial][ILSEQ] +5CE05C00 E000 [Trivial][BROKE] +5CE05C08 E008 [Trivial][BROKE] +5CE05C09 E009 [Trivial][BROKE] +5CE05C0A E00A [Trivial][BROKE] +5CE05C0D E00D [Trivial][BROKE] +5CE05C1A E01A [Trivial][BROKE] +5CE05C22 E022 [Trivial][BROKE] +5CE05C25 E05C25 [Regular] +5CE05C27 E027 [Trivial][BROKE] +5CE05C30 E000 [Regular][BROKE] +5CE05C3F E03F [Trivial][BROKE] +5CE05C40 E040 [Trivial][USER] +5CE05C5A E01A [Regular][BROKE] +5CE05C5C E05C [Regular][USER] +5CE05C5F E05C5F [Regular] +5CE05C61 E061 [Trivial][USER] +5CE05C62 E008 [Regular][BROKE][USER] +5CE05C6E E00A [Regular][BROKE] +5CE05C72 E00D [Regular][BROKE] +5CE05C74 E009 [Regular][BROKE] +5CE05C7E E07E [Trivial][USER] +5CE05C7F E07F [Trivial][BROKE] +5CE05C80 E080 [Trivial][FIXED][USER] +5CE05C81 E081 [Trivial][FIXED][USER] +5CE05C9F E09F [Trivial][FIXED][USER] +5CE05CA0 E0A0 [Trivial][FIXED][USER] +5CE05CA1 E0A1 [Trivial][USER] +5CE05CE0 E0E0 [Trivial][FIXED][USER] +5CE05CEF E0EF [Trivial][FIXED][USER] +5CE05CF9 E0F9 [Trivial][FIXED][USER] +5CE05CFA E0FA [Trivial][FIXED][USER] +5CE05CFC E0FC [Trivial][FIXED][USER] +5CE05CFD E0FD [Trivial][ILSEQ] +5CE05CFE E0FE [Trivial][ILSEQ] +5CE05CFF E0FF [Trivial][ILSEQ] +5CEF5C00 EF00 [Trivial][BROKE] +5CEF5C08 EF08 [Trivial][BROKE] +5CEF5C09 EF09 [Trivial][BROKE] +5CEF5C0A EF0A [Trivial][BROKE] +5CEF5C0D EF0D [Trivial][BROKE] +5CEF5C1A EF1A [Trivial][BROKE] +5CEF5C22 EF22 [Trivial][BROKE] +5CEF5C25 EF5C25 [Regular] +5CEF5C27 EF27 [Trivial][BROKE] +5CEF5C30 EF00 [Regular][BROKE] +5CEF5C3F EF3F [Trivial][BROKE] +5CEF5C40 EF40 [Trivial][USER] +5CEF5C5A EF1A [Regular][BROKE] +5CEF5C5C EF5C [Regular][USER] +5CEF5C5F EF5C5F [Regular] +5CEF5C61 EF61 [Trivial][USER] +5CEF5C62 EF08 [Regular][BROKE][USER] +5CEF5C6E EF0A [Regular][BROKE] +5CEF5C72 EF0D [Regular][BROKE] +5CEF5C74 EF09 [Regular][BROKE] +5CEF5C7E EF7E [Trivial][USER] +5CEF5C7F EF7F [Trivial][BROKE] +5CEF5C80 EF80 [Trivial][FIXED][USER] +5CEF5C81 EF81 [Trivial][FIXED][USER] +5CEF5C9F EF9F [Trivial][FIXED][USER] +5CEF5CA0 EFA0 [Trivial][FIXED][USER] +5CEF5CA1 EFA1 [Trivial][USER] +5CEF5CE0 EFE0 [Trivial][FIXED][USER] +5CEF5CEF EFEF [Trivial][FIXED][USER] +5CEF5CF9 EFF9 [Trivial][FIXED][USER] +5CEF5CFA EFFA [Trivial][FIXED][USER] +5CEF5CFC EFFC [Trivial][FIXED][USER] +5CEF5CFD EFFD [Trivial][ILSEQ] +5CEF5CFE EFFE [Trivial][ILSEQ] +5CEF5CFF EFFF [Trivial][ILSEQ] +5CF95C00 F900 [Trivial][BROKE] +5CF95C08 F908 [Trivial][BROKE] +5CF95C09 F909 [Trivial][BROKE] +5CF95C0A F90A [Trivial][BROKE] +5CF95C0D F90D [Trivial][BROKE] +5CF95C1A F91A [Trivial][BROKE] +5CF95C22 F922 [Trivial][BROKE] +5CF95C25 F95C25 [Regular] +5CF95C27 F927 [Trivial][BROKE] +5CF95C30 F900 [Regular][BROKE] +5CF95C3F F93F [Trivial][BROKE] +5CF95C40 F940 [Trivial][USER] +5CF95C5A F91A [Regular][BROKE] +5CF95C5C F95C [Regular][USER] +5CF95C5F F95C5F [Regular] +5CF95C61 F961 [Trivial][USER] +5CF95C62 F908 [Regular][BROKE][USER] +5CF95C6E F90A [Regular][BROKE] +5CF95C72 F90D 
[Regular][BROKE] +5CF95C74 F909 [Regular][BROKE] +5CF95C7E F97E [Trivial][USER] +5CF95C7F F97F [Trivial][BROKE] +5CF95C80 F980 [Trivial][FIXED][USER] +5CF95C81 F981 [Trivial][FIXED][USER] +5CF95C9F F99F [Trivial][FIXED][USER] +5CF95CA0 F9A0 [Trivial][FIXED][USER] +5CF95CA1 F9A1 [Trivial][USER] +5CF95CE0 F9E0 [Trivial][FIXED][USER] +5CF95CEF F9EF [Trivial][FIXED][USER] +5CF95CF9 F9F9 [Trivial][FIXED][USER] +5CF95CFA F9FA [Trivial][FIXED][USER] +5CF95CFC F9FC [Trivial][FIXED][USER] +5CF95CFD F9FD [Trivial][ILSEQ] +5CF95CFE F9FE [Trivial][ILSEQ] +5CF95CFF F9FF [Trivial][ILSEQ] +5CFA5C00 FA00 [Trivial][BROKE] +5CFA5C08 FA08 [Trivial][BROKE] +5CFA5C09 FA09 [Trivial][BROKE] +5CFA5C0A FA0A [Trivial][BROKE] +5CFA5C0D FA0D [Trivial][BROKE] +5CFA5C1A FA1A [Trivial][BROKE] +5CFA5C22 FA22 [Trivial][BROKE] +5CFA5C25 FA5C25 [Regular] +5CFA5C27 FA27 [Trivial][BROKE] +5CFA5C30 FA00 [Regular][BROKE] +5CFA5C3F FA3F [Trivial][BROKE] +5CFA5C40 FA40 [Trivial][USER] +5CFA5C5A FA1A [Regular][BROKE] +5CFA5C5C FA5C [Regular][USER] +5CFA5C5F FA5C5F [Regular] +5CFA5C61 FA61 [Trivial][USER] +5CFA5C62 FA08 [Regular][BROKE][USER] +5CFA5C6E FA0A [Regular][BROKE] +5CFA5C72 FA0D [Regular][BROKE] +5CFA5C74 FA09 [Regular][BROKE] +5CFA5C7E FA7E [Trivial][USER] +5CFA5C7F FA7F [Trivial][BROKE] +5CFA5C80 FA80 [Trivial][FIXED][USER] +5CFA5C81 FA81 [Trivial][FIXED][USER] +5CFA5C9F FA9F [Trivial][FIXED][USER] +5CFA5CA0 FAA0 [Trivial][FIXED][USER] +5CFA5CA1 FAA1 [Trivial][USER] +5CFA5CE0 FAE0 [Trivial][FIXED][USER] +5CFA5CEF FAEF [Trivial][FIXED][USER] +5CFA5CF9 FAF9 [Trivial][FIXED][USER] +5CFA5CFA FAFA [Trivial][FIXED][USER] +5CFA5CFC FAFC [Trivial][FIXED][USER] +5CFA5CFD FAFD [Trivial][ILSEQ] +5CFA5CFE FAFE [Trivial][ILSEQ] +5CFA5CFF FAFF [Trivial][ILSEQ] +5CFC5C00 FC00 [Trivial][BROKE] +5CFC5C08 FC08 [Trivial][BROKE] +5CFC5C09 FC09 [Trivial][BROKE] +5CFC5C0A FC0A [Trivial][BROKE] +5CFC5C0D FC0D [Trivial][BROKE] +5CFC5C1A FC1A [Trivial][BROKE] +5CFC5C22 FC22 [Trivial][BROKE] +5CFC5C25 FC5C25 [Regular] +5CFC5C27 FC27 [Trivial][BROKE] +5CFC5C30 FC00 [Regular][BROKE] +5CFC5C3F FC3F [Trivial][BROKE] +5CFC5C40 FC40 [Trivial][USER] +5CFC5C5A FC1A [Regular][BROKE] +5CFC5C5C FC5C [Regular][USER] +5CFC5C5F FC5C5F [Regular] +5CFC5C61 FC61 [Trivial][USER] +5CFC5C62 FC08 [Regular][BROKE][USER] +5CFC5C6E FC0A [Regular][BROKE] +5CFC5C72 FC0D [Regular][BROKE] +5CFC5C74 FC09 [Regular][BROKE] +5CFC5C7E FC7E [Trivial][USER] +5CFC5C7F FC7F [Trivial][BROKE] +5CFC5C80 FC80 [Trivial][FIXED][USER] +5CFC5C81 FC81 [Trivial][FIXED][USER] +5CFC5C9F FC9F [Trivial][FIXED][USER] +5CFC5CA0 FCA0 [Trivial][FIXED][USER] +5CFC5CA1 FCA1 [Trivial][USER] +5CFC5CE0 FCE0 [Trivial][FIXED][USER] +5CFC5CEF FCEF [Trivial][FIXED][USER] +5CFC5CF9 FCF9 [Trivial][FIXED][USER] +5CFC5CFA FCFA [Trivial][FIXED][USER] +5CFC5CFC FCFC [Trivial][FIXED][USER] +5CFC5CFD FCFD [Trivial][ILSEQ] +5CFC5CFE FCFE [Trivial][ILSEQ] +5CFC5CFF FCFF [Trivial][ILSEQ] +5CFD5C00 FD00 [Trivial][ILSEQ] +5CFD5C08 FD08 [Trivial][ILSEQ] +5CFD5C09 FD09 [Trivial][ILSEQ] +5CFD5C0A FD0A [Trivial][ILSEQ] +5CFD5C0D FD0D [Trivial][ILSEQ] +5CFD5C1A FD1A [Trivial][ILSEQ] +5CFD5C22 FD22 [Trivial][ILSEQ] +5CFD5C25 FD5C25 [Regular][ILSEQ] +5CFD5C27 FD27 [Trivial][ILSEQ] +5CFD5C30 FD00 [Regular][ILSEQ] +5CFD5C3F FD3F [Trivial][ILSEQ] +5CFD5C40 FD40 [Trivial][ILSEQ] +5CFD5C5A FD1A [Regular][ILSEQ] +5CFD5C5C FD5C [Regular][ILSEQ] +5CFD5C5F FD5C5F [Regular][ILSEQ] +5CFD5C61 FD61 [Trivial][ILSEQ] +5CFD5C62 FD08 [Regular][ILSEQ] +5CFD5C6E FD0A [Regular][ILSEQ] +5CFD5C72 FD0D [Regular][ILSEQ] +5CFD5C74 FD09 [Regular][ILSEQ] 
+5CFD5C7E FD7E [Trivial][ILSEQ] +5CFD5C7F FD7F [Trivial][ILSEQ] +5CFD5C80 FD80 [Trivial][ILSEQ] +5CFD5C81 FD81 [Trivial][ILSEQ] +5CFD5C9F FD9F [Trivial][ILSEQ] +5CFD5CA0 FDA0 [Trivial][ILSEQ] +5CFD5CA1 FDA1 [Trivial][ILSEQ] +5CFD5CE0 FDE0 [Trivial][ILSEQ] +5CFD5CEF FDEF [Trivial][ILSEQ] +5CFD5CF9 FDF9 [Trivial][ILSEQ] +5CFD5CFA FDFA [Trivial][ILSEQ] +5CFD5CFC FDFC [Trivial][ILSEQ] +5CFD5CFD FDFD [Trivial][ILSEQ] +5CFD5CFE FDFE [Trivial][ILSEQ] +5CFD5CFF FDFF [Trivial][ILSEQ] +5CFE5C00 FE00 [Trivial][ILSEQ] +5CFE5C08 FE08 [Trivial][ILSEQ] +5CFE5C09 FE09 [Trivial][ILSEQ] +5CFE5C0A FE0A [Trivial][ILSEQ] +5CFE5C0D FE0D [Trivial][ILSEQ] +5CFE5C1A FE1A [Trivial][ILSEQ] +5CFE5C22 FE22 [Trivial][ILSEQ] +5CFE5C25 FE5C25 [Regular][ILSEQ] +5CFE5C27 FE27 [Trivial][ILSEQ] +5CFE5C30 FE00 [Regular][ILSEQ] +5CFE5C3F FE3F [Trivial][ILSEQ] +5CFE5C40 FE40 [Trivial][ILSEQ] +5CFE5C5A FE1A [Regular][ILSEQ] +5CFE5C5C FE5C [Regular][ILSEQ] +5CFE5C5F FE5C5F [Regular][ILSEQ] +5CFE5C61 FE61 [Trivial][ILSEQ] +5CFE5C62 FE08 [Regular][ILSEQ] +5CFE5C6E FE0A [Regular][ILSEQ] +5CFE5C72 FE0D [Regular][ILSEQ] +5CFE5C74 FE09 [Regular][ILSEQ] +5CFE5C7E FE7E [Trivial][ILSEQ] +5CFE5C7F FE7F [Trivial][ILSEQ] +5CFE5C80 FE80 [Trivial][ILSEQ] +5CFE5C81 FE81 [Trivial][ILSEQ] +5CFE5C9F FE9F [Trivial][ILSEQ] +5CFE5CA0 FEA0 [Trivial][ILSEQ] +5CFE5CA1 FEA1 [Trivial][ILSEQ] +5CFE5CE0 FEE0 [Trivial][ILSEQ] +5CFE5CEF FEEF [Trivial][ILSEQ] +5CFE5CF9 FEF9 [Trivial][ILSEQ] +5CFE5CFA FEFA [Trivial][ILSEQ] +5CFE5CFC FEFC [Trivial][ILSEQ] +5CFE5CFD FEFD [Trivial][ILSEQ] +5CFE5CFE FEFE [Trivial][ILSEQ] +5CFE5CFF FEFF [Trivial][ILSEQ] +5CFF5C00 FF00 [Trivial][ILSEQ] +5CFF5C08 FF08 [Trivial][ILSEQ] +5CFF5C09 FF09 [Trivial][ILSEQ] +5CFF5C0A FF0A [Trivial][ILSEQ] +5CFF5C0D FF0D [Trivial][ILSEQ] +5CFF5C1A FF1A [Trivial][ILSEQ] +5CFF5C22 FF22 [Trivial][ILSEQ] +5CFF5C25 FF5C25 [Regular][ILSEQ] +5CFF5C27 FF27 [Trivial][ILSEQ] +5CFF5C30 FF00 [Regular][ILSEQ] +5CFF5C3F FF3F [Trivial][ILSEQ] +5CFF5C40 FF40 [Trivial][ILSEQ] +5CFF5C5A FF1A [Regular][ILSEQ] +5CFF5C5C FF5C [Regular][ILSEQ] +5CFF5C5F FF5C5F [Regular][ILSEQ] +5CFF5C61 FF61 [Trivial][ILSEQ] +5CFF5C62 FF08 [Regular][ILSEQ] +5CFF5C6E FF0A [Regular][ILSEQ] +5CFF5C72 FF0D [Regular][ILSEQ] +5CFF5C74 FF09 [Regular][ILSEQ] +5CFF5C7E FF7E [Trivial][ILSEQ] +5CFF5C7F FF7F [Trivial][ILSEQ] +5CFF5C80 FF80 [Trivial][ILSEQ] +5CFF5C81 FF81 [Trivial][ILSEQ] +5CFF5C9F FF9F [Trivial][ILSEQ] +5CFF5CA0 FFA0 [Trivial][ILSEQ] +5CFF5CA1 FFA1 [Trivial][ILSEQ] +5CFF5CE0 FFE0 [Trivial][ILSEQ] +5CFF5CEF FFEF [Trivial][ILSEQ] +5CFF5CF9 FFF9 [Trivial][ILSEQ] +5CFF5CFA FFFA [Trivial][ILSEQ] +5CFF5CFC FFFC [Trivial][ILSEQ] +5CFF5CFD FFFD [Trivial][ILSEQ] +5CFF5CFE FFFE [Trivial][ILSEQ] +5CFF5CFF FFFF [Trivial][ILSEQ] +DROP TABLE t1; +DROP PROCEDURE p1; +DROP PROCEDURE p2; +DROP FUNCTION unescape; +DROP FUNCTION unescape_type; +DROP FUNCTION wellformedness; +DROP FUNCTION mysql_real_escape_string_generated; +DROP FUNCTION iswellformed; +DROP TABLE allbytes; +# End of ctype_backslash.inc +SET NAMES sjis; +# Start of ctype_E05C.inc +SELECT HEX('à\'),HEX('à\t'); +HEX('à\') HEX('à\t') +E05C E05C74 +SELECT HEX('\\à\'),HEX('\\à\t'),HEX('\\à\t\t'); +HEX('__à\') HEX('__à\t') HEX('__à\t_t') +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX('''à\'),HEX('à\'''); +HEX('''à\') HEX('à\''') +27E05C E05C27 +SELECT HEX('\\''à\'),HEX('à\''\\'); +HEX('__''à\') HEX('à\''__') +5C27E05C E05C275C +SELECT HEX(BINARY('à\')),HEX(BINARY('à\t')); +HEX(BINARY('à\')) HEX(BINARY('à\t')) +E05C E05C74 +SELECT 
HEX(BINARY('\\à\')),HEX(BINARY('\\à\t')),HEX(BINARY('\\à\t\t')); +HEX(BINARY('__à\')) HEX(BINARY('__à\t')) HEX(BINARY('__à\t_t')) +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX(BINARY('''à\')),HEX(BINARY('à\''')); +HEX(BINARY('''à\')) HEX(BINARY('à\''')) +27E05C E05C27 +SELECT HEX(BINARY('\\''à\')),HEX(BINARY('à\''\\')); +HEX(BINARY('__''à\')) HEX(BINARY('à\''__')) +5C27E05C E05C275C +SELECT HEX(_BINARY'à\'),HEX(_BINARY'à\t'); +HEX(_BINARY'à\') HEX(_BINARY'à\t') +E05C E05C74 +SELECT HEX(_BINARY'\\à\'),HEX(_BINARY'\\à\t'),HEX(_BINARY'\\à\t\t'); +HEX(_BINARY'__à\') HEX(_BINARY'__à\t') HEX(_BINARY'__à\t_t') +5CE05C 5CE05C74 5CE05C7409 +SELECT HEX(_BINARY'''à\'),HEX(_BINARY'à\'''); +HEX(_BINARY'''à\') HEX(_BINARY'à\''') +27E05C E05C27 +SELECT HEX(_BINARY'\\''à\'),HEX(_BINARY'à\''\\'); +HEX(_BINARY'__''à\') HEX(_BINARY'à\''__') +5C27E05C E05C275C +CREATE TABLE t1 AS SELECT REPEAT(' ',10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET sjis NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('à\'),('à\t'); +INSERT INTO t1 VALUES ('\\à\'),('\\à\t'),('\\à\t\t'); +INSERT INTO t1 VALUES ('''à\'),('à\'''); +INSERT INTO t1 VALUES ('\\''à\'),('à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES ('à\'),('à\t'); +INSERT INTO t1 VALUES ('\\à\'),('\\à\t'),('\\à\t\t'); +INSERT INTO t1 VALUES ('''à\'),('à\'''); +INSERT INTO t1 VALUES ('\\''à\'),('à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET sjis NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (BINARY('à\')),(BINARY('à\t')); +INSERT INTO t1 VALUES (BINARY('\\à\')),(BINARY('\\à\t')),(BINARY('\\à\t\t')); +INSERT INTO t1 VALUES (BINARY('''à\')),(BINARY('à\''')); +INSERT INTO t1 VALUES (BINARY('\\''à\')),(BINARY('à\''\\')); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 (a BLOB); +INSERT INTO t1 VALUES (BINARY('à\')),(BINARY('à\t')); +INSERT INTO t1 VALUES (BINARY('\\à\')),(BINARY('\\à\t')),(BINARY('\\à\t\t')); +INSERT INTO t1 VALUES (BINARY('''à\')),(BINARY('à\''')); +INSERT INTO t1 VALUES (BINARY('\\''à\')),(BINARY('à\''\\')); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET sjis NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (_BINARY'à\'),(_BINARY'à\t'); +INSERT INTO t1 VALUES (_BINARY'\\à\'),(_BINARY'\\à\t'),(_BINARY'\\à\t\t'); +INSERT INTO t1 VALUES (_BINARY'''à\'),(_BINARY'à\'''); +INSERT INTO t1 VALUES (_BINARY'\\''à\'),(_BINARY'à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +CREATE TABLE 
t1 (a BLOB); +INSERT INTO t1 VALUES (_BINARY'à\'),(_BINARY'à\t'); +INSERT INTO t1 VALUES (_BINARY'\\à\'),(_BINARY'\\à\t'),(_BINARY'\\à\t\t'); +INSERT INTO t1 VALUES (_BINARY'''à\'),(_BINARY'à\'''); +INSERT INTO t1 VALUES (_BINARY'\\''à\'),(_BINARY'à\''\\'); +SELECT a, HEX(a) FROM t1; +a HEX(a) +à\ E05C +à\t E05C74 +\à\ 5CE05C +\à\t 5CE05C74 +\à\t 5CE05C7409 +'à\ 27E05C +à\' E05C27 +\'à\ 5C27E05C +à\'\ E05C275C +DROP TABLE t1; +SET character_set_client=binary, character_set_results=binary; +SELECT @@character_set_client, @@character_set_connection, @@character_set_results; +@@character_set_client @@character_set_connection @@character_set_results +binary sjis binary +SELECT HEX('à\['), HEX('\à\['); +HEX('à\[') HEX('\à\[') +E05B E05B +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varchar(10) CHARACTER SET sjis NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('à\['),('\à\['); +SELECT HEX(a) FROM t1; +HEX(a) +E05B +E05B +DROP TABLE t1; +SET character_set_client=@@character_set_connection, character_set_results=@@character_set_connection; +SET character_set_connection=binary; +SELECT @@character_set_client, @@character_set_connection, @@character_set_results; +@@character_set_client @@character_set_connection @@character_set_results +sjis binary sjis +SELECT HEX('à\['), HEX('\à\['); +HEX('à\[') HEX('_à\[') +E05C5B E05B +CREATE TABLE t1 AS SELECT REPEAT(' ', 10) AS a LIMIT 0; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(10) NOT NULL DEFAULT '' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES ('à\['),('\à\['); +SELECT HEX(a) FROM t1; +HEX(a) +E05C5B +E05B +DROP TABLE t1; +# End of ctype_E05C.inc +# +# End of 10.0 tests +# diff --git a/mysql-test/r/ctype_swe7.result b/mysql-test/r/ctype_swe7.result new file mode 100644 index 00000000000..ccab77c21d1 --- /dev/null +++ b/mysql-test/r/ctype_swe7.result @@ -0,0 +1,3071 @@ +# +# Start of 10.0 tests +# +SET NAMES swe7; +# Start of ctype_unescape.inc +SET @query=_binary'SELECT CHARSET(\'test\'),@@character_set_client,@@character_set_connection'; +PREPARE stmt FROM @query; +EXECUTE stmt; +CHARSET('test') @@character_set_client @@character_set_connection +swe7 swe7 swe7 +DEALLOCATE PREPARE stmt; +CREATE TABLE allbytes (a VARBINARY(10)); +# Using selected bytes combinations +CREATE TABLE halfs (a INT); +INSERT INTO halfs VALUES (0x00),(0x01),(0x02),(0x03),(0x04),(0x05),(0x06),(0x07); +INSERT INTO halfs VALUES (0x08),(0x09),(0x0A),(0x0B),(0x0C),(0x0D),(0x0E),(0x0F); +CREATE TEMPORARY TABLE bytes (a BINARY(1), KEY(a)) ENGINE=MyISAM; +INSERT INTO bytes SELECT CHAR((t1.a << 4) | t2.a USING BINARY) FROM halfs t1, halfs t2; +DROP TABLE halfs; +CREATE TABLE selected_bytes (a VARBINARY(10)); +INSERT INTO selected_bytes (a) VALUES ('\0'),('\b'),('\t'),('\r'),('\n'),('\Z'); +INSERT INTO selected_bytes (a) VALUES ('0'),('b'),('t'),('r'),('n'),('Z'); +INSERT INTO selected_bytes (a) VALUES ('\\'),('_'),('%'),(0x22),(0x27); +INSERT INTO selected_bytes (a) VALUES ('a'); +INSERT INTO selected_bytes (a) VALUES +(0x3F), # 7bit +(0x40), # 7bit mbtail +(0x7E), # 7bit mbtail nonascii-8bit +(0x7F), # 7bit nonascii-8bit +(0x80), # mbtail bad-mb +(0x81), # mbhead mbtail +(0x9F), # mbhead mbtail bad-mb +(0xA0), # mbhead mbtail bad-mb +(0xA1), # mbhead mbtail nonascii-8bit +(0xE0), # mbhead mbtail +(0xEF), # mbhead mbtail +(0xF9), # mbhead mbtail +(0xFA), # mbhead mbtail bad-mb +(0xFC), # mbhead mbtail bad-mb
+(0xFD), # mbhead mbtail bad-mb +(0xFE), # mbhead mbtail bad-mb +(0xFF); +INSERT INTO allbytes (a) SELECT a FROM bytes; +INSERT INTO allbytes (a) SELECT CONCAT(t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,0x5C,t2.a) FROM selected_bytes t1,selected_bytes t2; +DROP TABLE selected_bytes; +DELETE FROM allbytes WHERE +OCTET_LENGTH(a)>1 AND +LOCATE(0x5C,a)=0 AND +a NOT LIKE '%\'%' AND + a NOT LIKE '%"%'; +CREATE PROCEDURE p1(val VARBINARY(10)) +BEGIN +DECLARE EXIT HANDLER FOR SQLSTATE '42000' INSERT INTO t1 (a,b) VALUES(val,NULL); +SET @query=CONCAT(_binary"INSERT INTO t1 (a,b) VALUES (0x",HEX(val),",'",val,"')"); +PREPARE stmt FROM @query; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; +END// +CREATE PROCEDURE p2() +BEGIN +DECLARE val VARBINARY(10); +DECLARE done INT DEFAULT FALSE; +DECLARE stmt CURSOR FOR SELECT a FROM allbytes; +DECLARE CONTINUE HANDLER FOR NOT FOUND SET done=TRUE; +OPEN stmt; +read_loop1: LOOP +FETCH stmt INTO val; +IF done THEN +LEAVE read_loop1; +END IF; +CALL p1(val); +END LOOP; +CLOSE stmt; +END// +CREATE FUNCTION iswellformed(a VARBINARY(256)) RETURNS INT RETURN a=BINARY CONVERT(a USING swe7);// +CREATE FUNCTION unescape(a VARBINARY(256)) RETURNS VARBINARY(256) +BEGIN +# We need to do this in a way that avoids producing new escape sequences +# First, enclose all known escape sequences in '{{xx}}' + # - Backslash not followed by the LIKE pattern characters _ and % +# - Double escapes +# This uses PCRE Branch Reset Groups: (?|(alt1)|(alt2)|(alt3)). +# So '\\1' in the last argument always means the match, no matter +# which alternative it came from. +SET a=REGEXP_REPLACE(a,'(?|(\\\\[^_%])|(\\x{27}\\x{27}))','{{\\1}}'); +# Now unescape all enclosed standard escape sequences +SET a=REPLACE(a,'{{\\0}}', '\0'); +SET a=REPLACE(a,'{{\\b}}', '\b'); +SET a=REPLACE(a,'{{\\t}}', '\t'); +SET a=REPLACE(a,'{{\\r}}', '\r'); +SET a=REPLACE(a,'{{\\n}}', '\n'); +SET a=REPLACE(a,'{{\\Z}}', '\Z'); +SET a=REPLACE(a,'{{\\\'}}', '\''); +# Unescape double quotes +SET a=REPLACE(a,'{{\'\'}}', '\''); + # Unescape the rest: all other \x sequences mean just 'x' + SET a=REGEXP_REPLACE(a, '{{\\\\(.|\\R)}}', '\\1'); + RETURN a; +END// +CREATE FUNCTION unescape_type(a VARBINARY(256),b VARBINARY(256)) RETURNS VARBINARY(256) +BEGIN +RETURN CASE +WHEN b IS NULL THEN '[SyntErr]' + WHEN a=b THEN CASE +WHEN OCTET_LENGTH(a)=1 THEN '[Preserve]' + WHEN a RLIKE '\\\\[_%]' THEN '[Preserve][LIKE]' + WHEN a RLIKE '^[[:ascii:]]+$' THEN '[Preserve][ASCII]' + ELSE '[Preserv][MB]' END +WHEN REPLACE(a,0x5C,'')=b THEN '[Trivial]' + WHEN UNESCAPE(a)=b THEN '[Regular]' + ELSE '[Special]' END; +END// +CREATE FUNCTION wellformedness(a VARBINARY(256), b VARBINARY(256)) +RETURNS VARBINARY(256) +BEGIN +RETURN CASE +WHEN b IS NULL THEN '' + WHEN NOT iswellformed(a) AND iswellformed(b) THEN '[FIXED]' + WHEN iswellformed(a) AND NOT iswellformed(b) THEN '[BROKE]' + WHEN NOT iswellformed(a) AND NOT iswellformed(b) THEN '[ILSEQ]' + ELSE '' + END; +END// +CREATE FUNCTION mysql_real_escape_string_generated(a VARBINARY(256)) +RETURNS VARBINARY(256) +BEGIN +DECLARE a1 BINARY(1) DEFAULT SUBSTR(a,1,1); +DECLARE a2 BINARY(1) DEFAULT SUBSTR(a,2,1); +DECLARE a3 BINARY(1) DEFAULT SUBSTR(a,3,1); +DECLARE a4 BINARY(1) DEFAULT SUBSTR(a,4,1); +DECLARE a2a4 BINARY(2) DEFAULT CONCAT(a2,a4); +RETURN CASE +WHEN (a1=0x5C) AND +(a3=0x5C) AND +(a2>0x7F) AND +(a4 NOT IN ('_','%','0','t','r','n','Z')) AND
+iswellformed(a2a4) THEN '[USER]' + ELSE '' + END; +END// +CREATE TABLE t1 (a VARBINARY(10),b VARBINARY(10)); +CALL p2(); +SELECT HEX(a),HEX(b), +CONCAT(unescape_type(a,b), +wellformedness(a,b), +mysql_real_escape_string_generated(a), +IF(UNESCAPE(a)<>b,CONCAT('[BAD',HEX(UNESCAPE(a)),']'),'')) AS comment +FROM t1 ORDER BY LENGTH(a),a; +HEX(a) HEX(b) comment +00 00 [Preserve] +01 01 [Preserve] +02 02 [Preserve] +03 03 [Preserve] +04 04 [Preserve] +05 05 [Preserve] +06 06 [Preserve] +07 07 [Preserve] +08 08 [Preserve] +09 09 [Preserve] +0A 0A [Preserve] +0B 0B [Preserve] +0C 0C [Preserve] +0D 0D [Preserve] +0E 0E [Preserve] +0F 0F [Preserve] +10 10 [Preserve] +11 11 [Preserve] +12 12 [Preserve] +13 13 [Preserve] +14 14 [Preserve] +15 15 [Preserve] +16 16 [Preserve] +17 17 [Preserve] +18 18 [Preserve] +19 19 [Preserve] +1A 1A [Preserve] +1B 1B [Preserve] +1C 1C [Preserve] +1D 1D [Preserve] +1E 1E [Preserve] +1F 1F [Preserve] +20 20 [Preserve] +21 21 [Preserve] +22 22 [Preserve] +23 23 [Preserve] +24 24 [Preserve] +25 25 [Preserve] +26 26 [Preserve] +27 NULL [SyntErr] +28 28 [Preserve] +29 29 [Preserve] +2A 2A [Preserve] +2B 2B [Preserve] +2C 2C [Preserve] +2D 2D [Preserve] +2E 2E [Preserve] +2F 2F [Preserve] +30 30 [Preserve] +31 31 [Preserve] +32 32 [Preserve] +33 33 [Preserve] +34 34 [Preserve] +35 35 [Preserve] +36 36 [Preserve] +37 37 [Preserve] +38 38 [Preserve] +39 39 [Preserve] +3A 3A [Preserve] +3B 3B [Preserve] +3C 3C [Preserve] +3D 3D [Preserve] +3E 3E [Preserve] +3F 3F [Preserve] +40 40 [Preserve] +41 41 [Preserve] +42 42 [Preserve] +43 43 [Preserve] +44 44 [Preserve] +45 45 [Preserve] +46 46 [Preserve] +47 47 [Preserve] +48 48 [Preserve] +49 49 [Preserve] +4A 4A [Preserve] +4B 4B [Preserve] +4C 4C [Preserve] +4D 4D [Preserve] +4E 4E [Preserve] +4F 4F [Preserve] +50 50 [Preserve] +51 51 [Preserve] +52 52 [Preserve] +53 53 [Preserve] +54 54 [Preserve] +55 55 [Preserve] +56 56 [Preserve] +57 57 [Preserve] +58 58 [Preserve] +59 59 [Preserve] +5A 5A [Preserve] +5B 5B [Preserve] +5C NULL [SyntErr] +5D 5D [Preserve] +5E 5E [Preserve] +5F 5F [Preserve] +60 60 [Preserve] +61 61 [Preserve] +62 62 [Preserve] +63 63 [Preserve] +64 64 [Preserve] +65 65 [Preserve] +66 66 [Preserve] +67 67 [Preserve] +68 68 [Preserve] +69 69 [Preserve] +6A 6A [Preserve] +6B 6B [Preserve] +6C 6C [Preserve] +6D 6D [Preserve] +6E 6E [Preserve] +6F 6F [Preserve] +70 70 [Preserve] +71 71 [Preserve] +72 72 [Preserve] +73 73 [Preserve] +74 74 [Preserve] +75 75 [Preserve] +76 76 [Preserve] +77 77 [Preserve] +78 78 [Preserve] +79 79 [Preserve] +7A 7A [Preserve] +7B 7B [Preserve] +7C 7C [Preserve] +7D 7D [Preserve] +7E 7E [Preserve] +7F 7F [Preserve] +80 80 [Preserve] +81 81 [Preserve] +82 82 [Preserve] +83 83 [Preserve] +84 84 [Preserve] +85 85 [Preserve] +86 86 [Preserve] +87 87 [Preserve] +88 88 [Preserve] +89 89 [Preserve] +8A 8A [Preserve] +8B 8B [Preserve] +8C 8C [Preserve] +8D 8D [Preserve] +8E 8E [Preserve] +8F 8F [Preserve] +90 90 [Preserve] +91 91 [Preserve] +92 92 [Preserve] +93 93 [Preserve] +94 94 [Preserve] +95 95 [Preserve] +96 96 [Preserve] +97 97 [Preserve] +98 98 [Preserve] +99 99 [Preserve] +9A 9A [Preserve] +9B 9B [Preserve] +9C 9C [Preserve] +9D 9D [Preserve] +9E 9E [Preserve] +9F 9F [Preserve] +A0 A0 [Preserve] +A1 A1 [Preserve] +A2 A2 [Preserve] +A3 A3 [Preserve] +A4 A4 [Preserve] +A5 A5 [Preserve] +A6 A6 [Preserve] +A7 A7 [Preserve] +A8 A8 [Preserve] +A9 A9 [Preserve] +AA AA [Preserve] +AB AB [Preserve] +AC AC [Preserve] +AD AD [Preserve] +AE AE [Preserve] +AF AF [Preserve] +B0 B0 [Preserve] +B1 B1 
[Preserve] +B2 B2 [Preserve] +B3 B3 [Preserve] +B4 B4 [Preserve] +B5 B5 [Preserve] +B6 B6 [Preserve] +B7 B7 [Preserve] +B8 B8 [Preserve] +B9 B9 [Preserve] +BA BA [Preserve] +BB BB [Preserve] +BC BC [Preserve] +BD BD [Preserve] +BE BE [Preserve] +BF BF [Preserve] +C0 C0 [Preserve] +C1 C1 [Preserve] +C2 C2 [Preserve] +C3 C3 [Preserve] +C4 C4 [Preserve] +C5 C5 [Preserve] +C6 C6 [Preserve] +C7 C7 [Preserve] +C8 C8 [Preserve] +C9 C9 [Preserve] +CA CA [Preserve] +CB CB [Preserve] +CC CC [Preserve] +CD CD [Preserve] +CE CE [Preserve] +CF CF [Preserve] +D0 D0 [Preserve] +D1 D1 [Preserve] +D2 D2 [Preserve] +D3 D3 [Preserve] +D4 D4 [Preserve] +D5 D5 [Preserve] +D6 D6 [Preserve] +D7 D7 [Preserve] +D8 D8 [Preserve] +D9 D9 [Preserve] +DA DA [Preserve] +DB DB [Preserve] +DC DC [Preserve] +DD DD [Preserve] +DE DE [Preserve] +DF DF [Preserve] +E0 E0 [Preserve] +E1 E1 [Preserve] +E2 E2 [Preserve] +E3 E3 [Preserve] +E4 E4 [Preserve] +E5 E5 [Preserve] +E6 E6 [Preserve] +E7 E7 [Preserve] +E8 E8 [Preserve] +E9 E9 [Preserve] +EA EA [Preserve] +EB EB [Preserve] +EC EC [Preserve] +ED ED [Preserve] +EE EE [Preserve] +EF EF [Preserve] +F0 F0 [Preserve] +F1 F1 [Preserve] +F2 F2 [Preserve] +F3 F3 [Preserve] +F4 F4 [Preserve] +F5 F5 [Preserve] +F6 F6 [Preserve] +F7 F7 [Preserve] +F8 F8 [Preserve] +F9 F9 [Preserve] +FA FA [Preserve] +FB FB [Preserve] +FC FC [Preserve] +FD FD [Preserve] +FE FE [Preserve] +FF FF [Preserve] +0022 0022 [Preserve][ASCII] +0027 NULL [SyntErr] +005C NULL [SyntErr] +0822 0822 [Preserve][ASCII] +0827 NULL [SyntErr] +085C NULL [SyntErr] +0922 0922 [Preserve][ASCII] +0927 NULL [SyntErr] +095C NULL [SyntErr] +0A22 0A22 [Preserve][ASCII] +0A27 NULL [SyntErr] +0A5C NULL [SyntErr] +0D22 0D22 [Preserve][ASCII] +0D27 NULL [SyntErr] +0D5C NULL [SyntErr] +1A22 1A22 [Preserve][ASCII] +1A27 NULL [SyntErr] +1A5C NULL [SyntErr] +2200 2200 [Preserve][ASCII] +2208 2208 [Preserve][ASCII] +2209 2209 [Preserve][ASCII] +220A 220A [Preserve][ASCII] +220D 220D [Preserve][ASCII] +221A 221A [Preserve][ASCII] +2222 2222 [Preserve][ASCII] +2225 2225 [Preserve][ASCII] +2227 NULL [SyntErr] +2230 2230 [Preserve][ASCII] +223F 223F [Preserve][ASCII] +2240 2240 [Preserve][ASCII] +225A 225A [Preserve][ASCII] +225C NULL [SyntErr] +225F 225F [Preserve][ASCII] +2261 2261 [Preserve][ASCII] +2262 2262 [Preserve][ASCII] +226E 226E [Preserve][ASCII] +2272 2272 [Preserve][ASCII] +2274 2274 [Preserve][ASCII] +227E 227E [Preserve][ASCII] +227F 227F [Preserve][ASCII] +2280 2280 [Preserv][MB] +2281 2281 [Preserv][MB] +229F 229F [Preserv][MB] +22A0 22A0 [Preserv][MB] +22A1 22A1 [Preserv][MB] +22E0 22E0 [Preserv][MB] +22EF 22EF [Preserv][MB] +22F9 22F9 [Preserv][MB] +22FA 22FA [Preserv][MB] +22FC 22FC [Preserv][MB] +22FD 22FD [Preserv][MB] +22FE 22FE [Preserv][MB] +22FF 22FF [Preserv][MB] +2522 2522 [Preserve][ASCII] +2527 NULL [SyntErr] +255C NULL [SyntErr] +2700 NULL [SyntErr] +2708 NULL [SyntErr] +2709 NULL [SyntErr] +270A NULL [SyntErr] +270D NULL [SyntErr] +271A NULL [SyntErr] +2722 NULL [SyntErr] +2725 NULL [SyntErr] +2727 27 [Regular] +2730 NULL [SyntErr] +273F NULL [SyntErr] +2740 NULL [SyntErr] +275A NULL [SyntErr] +275C NULL [SyntErr] +275F NULL [SyntErr] +2761 NULL [SyntErr] +2762 NULL [SyntErr] +276E NULL [SyntErr] +2772 NULL [SyntErr] +2774 NULL [SyntErr] +277E NULL [SyntErr] +277F NULL [SyntErr] +2780 NULL [SyntErr] +2781 NULL [SyntErr] +279F NULL [SyntErr] +27A0 NULL [SyntErr] +27A1 NULL [SyntErr] +27E0 NULL [SyntErr] +27EF NULL [SyntErr] +27F9 NULL [SyntErr] +27FA NULL [SyntErr] +27FC NULL [SyntErr] +27FD NULL [SyntErr] 
+27FE NULL [SyntErr] +27FF NULL [SyntErr] +3022 3022 [Preserve][ASCII] +3027 NULL [SyntErr] +305C NULL [SyntErr] +3F22 3F22 [Preserve][ASCII] +3F27 NULL [SyntErr] +3F5C NULL [SyntErr] +4022 4022 [Preserve][ASCII] +4027 NULL [SyntErr] +405C NULL [SyntErr] +5A22 5A22 [Preserve][ASCII] +5A27 NULL [SyntErr] +5A5C NULL [SyntErr] +5C00 00 [Trivial] +5C08 08 [Trivial] +5C09 09 [Trivial] +5C0A 0A [Trivial] +5C0D 0D [Trivial] +5C1A 1A [Trivial] +5C22 22 [Trivial] +5C25 5C25 [Preserve][LIKE] +5C27 27 [Trivial] +5C30 00 [Regular] +5C3F 3F [Trivial] +5C40 40 [Trivial] +5C5A 1A [Regular] +5C5C 5C [Regular] +5C5F 5C5F [Preserve][LIKE] +5C61 61 [Trivial] +5C62 08 [Regular] +5C6E 0A [Regular] +5C72 0D [Regular] +5C74 09 [Regular] +5C7E 7E [Trivial] +5C7F 7F [Trivial] +5C80 80 [Trivial] +5C81 81 [Trivial] +5C9F 9F [Trivial] +5CA0 A0 [Trivial] +5CA1 A1 [Trivial] +5CE0 E0 [Trivial] +5CEF EF [Trivial] +5CF9 F9 [Trivial] +5CFA FA [Trivial] +5CFC FC [Trivial] +5CFD FD [Trivial] +5CFE FE [Trivial] +5CFF FF [Trivial] +5F22 5F22 [Preserve][ASCII] +5F27 NULL [SyntErr] +5F5C NULL [SyntErr] +6122 6122 [Preserve][ASCII] +6127 NULL [SyntErr] +615C NULL [SyntErr] +6222 6222 [Preserve][ASCII] +6227 NULL [SyntErr] +625C NULL [SyntErr] +6E22 6E22 [Preserve][ASCII] +6E27 NULL [SyntErr] +6E5C NULL [SyntErr] +7222 7222 [Preserve][ASCII] +7227 NULL [SyntErr] +725C NULL [SyntErr] +7422 7422 [Preserve][ASCII] +7427 NULL [SyntErr] +745C NULL [SyntErr] +7E22 7E22 [Preserve][ASCII] +7E27 NULL [SyntErr] +7E5C NULL [SyntErr] +7F22 7F22 [Preserve][ASCII] +7F27 NULL [SyntErr] +7F5C NULL [SyntErr] +8022 8022 [Preserv][MB] +8027 NULL [SyntErr] +805C NULL [SyntErr] +8122 8122 [Preserv][MB] +8127 NULL [SyntErr] +815C NULL [SyntErr] +9F22 9F22 [Preserv][MB] +9F27 NULL [SyntErr] +9F5C NULL [SyntErr] +A022 A022 [Preserv][MB] +A027 NULL [SyntErr] +A05C NULL [SyntErr] +A122 A122 [Preserv][MB] +A127 NULL [SyntErr] +A15C NULL [SyntErr] +E022 E022 [Preserv][MB] +E027 NULL [SyntErr] +E05C NULL [SyntErr] +EF22 EF22 [Preserv][MB] +EF27 NULL [SyntErr] +EF5C NULL [SyntErr] +F922 F922 [Preserv][MB] +F927 NULL [SyntErr] +F95C NULL [SyntErr] +FA22 FA22 [Preserv][MB] +FA27 NULL [SyntErr] +FA5C NULL [SyntErr] +FC22 FC22 [Preserv][MB] +FC27 NULL [SyntErr] +FC5C NULL [SyntErr] +FD22 FD22 [Preserv][MB] +FD27 NULL [SyntErr] +FD5C NULL [SyntErr] +FE22 FE22 [Preserv][MB] +FE27 NULL [SyntErr] +FE5C NULL [SyntErr] +FF22 FF22 [Preserv][MB] +FF27 NULL [SyntErr] +FF5C NULL [SyntErr] +5C0000 0000 [Trivial] +5C0008 0008 [Trivial] +5C0009 0009 [Trivial] +5C000A 000A [Trivial] +5C000D 000D [Trivial] +5C001A 001A [Trivial] +5C0022 0022 [Trivial] +5C0025 0025 [Trivial] +5C0027 NULL [SyntErr] +5C0030 0030 [Trivial] +5C003F 003F [Trivial] +5C0040 0040 [Trivial] +5C005A 005A [Trivial] +5C005C NULL [SyntErr] +5C005F 005F [Trivial] +5C0061 0061 [Trivial] +5C0062 0062 [Trivial] +5C006E 006E [Trivial] +5C0072 0072 [Trivial] +5C0074 0074 [Trivial] +5C007E 007E [Trivial] +5C007F 007F [Trivial] +5C0080 0080 [Trivial] +5C0081 0081 [Trivial] +5C009F 009F [Trivial] +5C00A0 00A0 [Trivial] +5C00A1 00A1 [Trivial] +5C00E0 00E0 [Trivial] +5C00EF 00EF [Trivial] +5C00F9 00F9 [Trivial] +5C00FA 00FA [Trivial] +5C00FC 00FC [Trivial] +5C00FD 00FD [Trivial] +5C00FE 00FE [Trivial] +5C00FF 00FF [Trivial] +5C0800 0800 [Trivial] +5C0808 0808 [Trivial] +5C0809 0809 [Trivial] +5C080A 080A [Trivial] +5C080D 080D [Trivial] +5C081A 081A [Trivial] +5C0822 0822 [Trivial] +5C0825 0825 [Trivial] +5C0827 NULL [SyntErr] +5C0830 0830 [Trivial] +5C083F 083F [Trivial] +5C0840 0840 [Trivial] +5C085A 085A [Trivial] 
+5C085C NULL [SyntErr] +5C085F 085F [Trivial] +5C0861 0861 [Trivial] +5C0862 0862 [Trivial] +5C086E 086E [Trivial] +5C0872 0872 [Trivial] +5C0874 0874 [Trivial] +5C087E 087E [Trivial] +5C087F 087F [Trivial] +5C0880 0880 [Trivial] +5C0881 0881 [Trivial] +5C089F 089F [Trivial] +5C08A0 08A0 [Trivial] +5C08A1 08A1 [Trivial] +5C08E0 08E0 [Trivial] +5C08EF 08EF [Trivial] +5C08F9 08F9 [Trivial] +5C08FA 08FA [Trivial] +5C08FC 08FC [Trivial] +5C08FD 08FD [Trivial] +5C08FE 08FE [Trivial] +5C08FF 08FF [Trivial] +5C0900 0900 [Trivial] +5C0908 0908 [Trivial] +5C0909 0909 [Trivial] +5C090A 090A [Trivial] +5C090D 090D [Trivial] +5C091A 091A [Trivial] +5C0922 0922 [Trivial] +5C0925 0925 [Trivial] +5C0927 NULL [SyntErr] +5C0930 0930 [Trivial] +5C093F 093F [Trivial] +5C0940 0940 [Trivial] +5C095A 095A [Trivial] +5C095C NULL [SyntErr] +5C095F 095F [Trivial] +5C0961 0961 [Trivial] +5C0962 0962 [Trivial] +5C096E 096E [Trivial] +5C0972 0972 [Trivial] +5C0974 0974 [Trivial] +5C097E 097E [Trivial] +5C097F 097F [Trivial] +5C0980 0980 [Trivial] +5C0981 0981 [Trivial] +5C099F 099F [Trivial] +5C09A0 09A0 [Trivial] +5C09A1 09A1 [Trivial] +5C09E0 09E0 [Trivial] +5C09EF 09EF [Trivial] +5C09F9 09F9 [Trivial] +5C09FA 09FA [Trivial] +5C09FC 09FC [Trivial] +5C09FD 09FD [Trivial] +5C09FE 09FE [Trivial] +5C09FF 09FF [Trivial] +5C0A00 0A00 [Trivial] +5C0A08 0A08 [Trivial] +5C0A09 0A09 [Trivial] +5C0A0A 0A0A [Trivial] +5C0A0D 0A0D [Trivial] +5C0A1A 0A1A [Trivial] +5C0A22 0A22 [Trivial] +5C0A25 0A25 [Trivial] +5C0A27 NULL [SyntErr] +5C0A30 0A30 [Trivial] +5C0A3F 0A3F [Trivial] +5C0A40 0A40 [Trivial] +5C0A5A 0A5A [Trivial] +5C0A5C NULL [SyntErr] +5C0A5F 0A5F [Trivial] +5C0A61 0A61 [Trivial] +5C0A62 0A62 [Trivial] +5C0A6E 0A6E [Trivial] +5C0A72 0A72 [Trivial] +5C0A74 0A74 [Trivial] +5C0A7E 0A7E [Trivial] +5C0A7F 0A7F [Trivial] +5C0A80 0A80 [Trivial] +5C0A81 0A81 [Trivial] +5C0A9F 0A9F [Trivial] +5C0AA0 0AA0 [Trivial] +5C0AA1 0AA1 [Trivial] +5C0AE0 0AE0 [Trivial] +5C0AEF 0AEF [Trivial] +5C0AF9 0AF9 [Trivial] +5C0AFA 0AFA [Trivial] +5C0AFC 0AFC [Trivial] +5C0AFD 0AFD [Trivial] +5C0AFE 0AFE [Trivial] +5C0AFF 0AFF [Trivial] +5C0D00 0D00 [Trivial] +5C0D08 0D08 [Trivial] +5C0D09 0D09 [Trivial] +5C0D0A 0D0A [Trivial] +5C0D0D 0D0D [Trivial] +5C0D1A 0D1A [Trivial] +5C0D22 0D22 [Trivial] +5C0D25 0D25 [Trivial] +5C0D27 NULL [SyntErr] +5C0D30 0D30 [Trivial] +5C0D3F 0D3F [Trivial] +5C0D40 0D40 [Trivial] +5C0D5A 0D5A [Trivial] +5C0D5C NULL [SyntErr] +5C0D5F 0D5F [Trivial] +5C0D61 0D61 [Trivial] +5C0D62 0D62 [Trivial] +5C0D6E 0D6E [Trivial] +5C0D72 0D72 [Trivial] +5C0D74 0D74 [Trivial] +5C0D7E 0D7E [Trivial] +5C0D7F 0D7F [Trivial] +5C0D80 0D80 [Trivial] +5C0D81 0D81 [Trivial] +5C0D9F 0D9F [Trivial] +5C0DA0 0DA0 [Trivial] +5C0DA1 0DA1 [Trivial] +5C0DE0 0DE0 [Trivial] +5C0DEF 0DEF [Trivial] +5C0DF9 0DF9 [Trivial] +5C0DFA 0DFA [Trivial] +5C0DFC 0DFC [Trivial] +5C0DFD 0DFD [Trivial] +5C0DFE 0DFE [Trivial] +5C0DFF 0DFF [Trivial] +5C1A00 1A00 [Trivial] +5C1A08 1A08 [Trivial] +5C1A09 1A09 [Trivial] +5C1A0A 1A0A [Trivial] +5C1A0D 1A0D [Trivial] +5C1A1A 1A1A [Trivial] +5C1A22 1A22 [Trivial] +5C1A25 1A25 [Trivial] +5C1A27 NULL [SyntErr] +5C1A30 1A30 [Trivial] +5C1A3F 1A3F [Trivial] +5C1A40 1A40 [Trivial] +5C1A5A 1A5A [Trivial] +5C1A5C NULL [SyntErr] +5C1A5F 1A5F [Trivial] +5C1A61 1A61 [Trivial] +5C1A62 1A62 [Trivial] +5C1A6E 1A6E [Trivial] +5C1A72 1A72 [Trivial] +5C1A74 1A74 [Trivial] +5C1A7E 1A7E [Trivial] +5C1A7F 1A7F [Trivial] +5C1A80 1A80 [Trivial] +5C1A81 1A81 [Trivial] +5C1A9F 1A9F [Trivial] +5C1AA0 1AA0 [Trivial] +5C1AA1 1AA1 [Trivial] +5C1AE0 1AE0 
[Trivial] +5C1AEF 1AEF [Trivial] +5C1AF9 1AF9 [Trivial] +5C1AFA 1AFA [Trivial] +5C1AFC 1AFC [Trivial] +5C1AFD 1AFD [Trivial] +5C1AFE 1AFE [Trivial] +5C1AFF 1AFF [Trivial] +5C2200 2200 [Trivial] +5C2208 2208 [Trivial] +5C2209 2209 [Trivial] +5C220A 220A [Trivial] +5C220D 220D [Trivial] +5C221A 221A [Trivial] +5C2222 2222 [Trivial] +5C2225 2225 [Trivial] +5C2227 NULL [SyntErr] +5C2230 2230 [Trivial] +5C223F 223F [Trivial] +5C2240 2240 [Trivial] +5C225A 225A [Trivial] +5C225C NULL [SyntErr] +5C225F 225F [Trivial] +5C2261 2261 [Trivial] +5C2262 2262 [Trivial] +5C226E 226E [Trivial] +5C2272 2272 [Trivial] +5C2274 2274 [Trivial] +5C227E 227E [Trivial] +5C227F 227F [Trivial] +5C2280 2280 [Trivial] +5C2281 2281 [Trivial] +5C229F 229F [Trivial] +5C22A0 22A0 [Trivial] +5C22A1 22A1 [Trivial] +5C22E0 22E0 [Trivial] +5C22EF 22EF [Trivial] +5C22F9 22F9 [Trivial] +5C22FA 22FA [Trivial] +5C22FC 22FC [Trivial] +5C22FD 22FD [Trivial] +5C22FE 22FE [Trivial] +5C22FF 22FF [Trivial] +5C2500 5C2500 [Preserve][LIKE] +5C2508 5C2508 [Preserve][LIKE] +5C2509 5C2509 [Preserve][LIKE] +5C250A 5C250A [Preserve][LIKE] +5C250D 5C250D [Preserve][LIKE] +5C251A 5C251A [Preserve][LIKE] +5C2522 5C2522 [Preserve][LIKE] +5C2525 5C2525 [Preserve][LIKE] +5C2527 NULL [SyntErr] +5C2530 5C2530 [Preserve][LIKE] +5C253F 5C253F [Preserve][LIKE] +5C2540 5C2540 [Preserve][LIKE] +5C255A 5C255A [Preserve][LIKE] +5C255C NULL [SyntErr] +5C255F 5C255F [Preserve][LIKE] +5C2561 5C2561 [Preserve][LIKE] +5C2562 5C2562 [Preserve][LIKE] +5C256E 5C256E [Preserve][LIKE] +5C2572 5C2572 [Preserve][LIKE] +5C2574 5C2574 [Preserve][LIKE] +5C257E 5C257E [Preserve][LIKE] +5C257F 5C257F [Preserve][LIKE] +5C2580 5C2580 [Preserve][LIKE] +5C2581 5C2581 [Preserve][LIKE] +5C259F 5C259F [Preserve][LIKE] +5C25A0 5C25A0 [Preserve][LIKE] +5C25A1 5C25A1 [Preserve][LIKE] +5C25E0 5C25E0 [Preserve][LIKE] +5C25EF 5C25EF [Preserve][LIKE] +5C25F9 5C25F9 [Preserve][LIKE] +5C25FA 5C25FA [Preserve][LIKE] +5C25FC 5C25FC [Preserve][LIKE] +5C25FD 5C25FD [Preserve][LIKE] +5C25FE 5C25FE [Preserve][LIKE] +5C25FF 5C25FF [Preserve][LIKE] +5C2700 2700 [Trivial] +5C2708 2708 [Trivial] +5C2709 2709 [Trivial] +5C270A 270A [Trivial] +5C270D 270D [Trivial] +5C271A 271A [Trivial] +5C2722 2722 [Trivial] +5C2725 2725 [Trivial] +5C2727 NULL [SyntErr] +5C2730 2730 [Trivial] +5C273F 273F [Trivial] +5C2740 2740 [Trivial] +5C275A 275A [Trivial] +5C275C NULL [SyntErr] +5C275F 275F [Trivial] +5C2761 2761 [Trivial] +5C2762 2762 [Trivial] +5C276E 276E [Trivial] +5C2772 2772 [Trivial] +5C2774 2774 [Trivial] +5C277E 277E [Trivial] +5C277F 277F [Trivial] +5C2780 2780 [Trivial] +5C2781 2781 [Trivial] +5C279F 279F [Trivial] +5C27A0 27A0 [Trivial] +5C27A1 27A1 [Trivial] +5C27E0 27E0 [Trivial] +5C27EF 27EF [Trivial] +5C27F9 27F9 [Trivial] +5C27FA 27FA [Trivial] +5C27FC 27FC [Trivial] +5C27FD 27FD [Trivial] +5C27FE 27FE [Trivial] +5C27FF 27FF [Trivial] +5C3000 0000 [Regular] +5C3008 0008 [Regular] +5C3009 0009 [Regular] +5C300A 000A [Regular] +5C300D 000D [Regular] +5C301A 001A [Regular] +5C3022 0022 [Regular] +5C3025 0025 [Regular] +5C3027 NULL [SyntErr] +5C3030 0030 [Regular] +5C303F 003F [Regular] +5C3040 0040 [Regular] +5C305A 005A [Regular] +5C305C NULL [SyntErr] +5C305F 005F [Regular] +5C3061 0061 [Regular] +5C3062 0062 [Regular] +5C306E 006E [Regular] +5C3072 0072 [Regular] +5C3074 0074 [Regular] +5C307E 007E [Regular] +5C307F 007F [Regular] +5C3080 0080 [Regular] +5C3081 0081 [Regular] +5C309F 009F [Regular] +5C30A0 00A0 [Regular] +5C30A1 00A1 [Regular] +5C30E0 00E0 [Regular] +5C30EF 00EF [Regular] 
+5C30F9 00F9 [Regular] +5C30FA 00FA [Regular] +5C30FC 00FC [Regular] +5C30FD 00FD [Regular] +5C30FE 00FE [Regular] +5C30FF 00FF [Regular] +5C3F00 3F00 [Trivial] +5C3F08 3F08 [Trivial] +5C3F09 3F09 [Trivial] +5C3F0A 3F0A [Trivial] +5C3F0D 3F0D [Trivial] +5C3F1A 3F1A [Trivial] +5C3F22 3F22 [Trivial] +5C3F25 3F25 [Trivial] +5C3F27 NULL [SyntErr] +5C3F30 3F30 [Trivial] +5C3F3F 3F3F [Trivial] +5C3F40 3F40 [Trivial] +5C3F5A 3F5A [Trivial] +5C3F5C NULL [SyntErr] +5C3F5F 3F5F [Trivial] +5C3F61 3F61 [Trivial] +5C3F62 3F62 [Trivial] +5C3F6E 3F6E [Trivial] +5C3F72 3F72 [Trivial] +5C3F74 3F74 [Trivial] +5C3F7E 3F7E [Trivial] +5C3F7F 3F7F [Trivial] +5C3F80 3F80 [Trivial] +5C3F81 3F81 [Trivial] +5C3F9F 3F9F [Trivial] +5C3FA0 3FA0 [Trivial] +5C3FA1 3FA1 [Trivial] +5C3FE0 3FE0 [Trivial] +5C3FEF 3FEF [Trivial] +5C3FF9 3FF9 [Trivial] +5C3FFA 3FFA [Trivial] +5C3FFC 3FFC [Trivial] +5C3FFD 3FFD [Trivial] +5C3FFE 3FFE [Trivial] +5C3FFF 3FFF [Trivial] +5C4000 4000 [Trivial] +5C4008 4008 [Trivial] +5C4009 4009 [Trivial] +5C400A 400A [Trivial] +5C400D 400D [Trivial] +5C401A 401A [Trivial] +5C4022 4022 [Trivial] +5C4025 4025 [Trivial] +5C4027 NULL [SyntErr] +5C4030 4030 [Trivial] +5C403F 403F [Trivial] +5C4040 4040 [Trivial] +5C405A 405A [Trivial] +5C405C NULL [SyntErr] +5C405F 405F [Trivial] +5C4061 4061 [Trivial] +5C4062 4062 [Trivial] +5C406E 406E [Trivial] +5C4072 4072 [Trivial] +5C4074 4074 [Trivial] +5C407E 407E [Trivial] +5C407F 407F [Trivial] +5C4080 4080 [Trivial] +5C4081 4081 [Trivial] +5C409F 409F [Trivial] +5C40A0 40A0 [Trivial] +5C40A1 40A1 [Trivial] +5C40E0 40E0 [Trivial] +5C40EF 40EF [Trivial] +5C40F9 40F9 [Trivial] +5C40FA 40FA [Trivial] +5C40FC 40FC [Trivial] +5C40FD 40FD [Trivial] +5C40FE 40FE [Trivial] +5C40FF 40FF [Trivial] +5C5A00 1A00 [Regular] +5C5A08 1A08 [Regular] +5C5A09 1A09 [Regular] +5C5A0A 1A0A [Regular] +5C5A0D 1A0D [Regular] +5C5A1A 1A1A [Regular] +5C5A22 1A22 [Regular] +5C5A25 1A25 [Regular] +5C5A27 NULL [SyntErr] +5C5A30 1A30 [Regular] +5C5A3F 1A3F [Regular] +5C5A40 1A40 [Regular] +5C5A5A 1A5A [Regular] +5C5A5C NULL [SyntErr] +5C5A5F 1A5F [Regular] +5C5A61 1A61 [Regular] +5C5A62 1A62 [Regular] +5C5A6E 1A6E [Regular] +5C5A72 1A72 [Regular] +5C5A74 1A74 [Regular] +5C5A7E 1A7E [Regular] +5C5A7F 1A7F [Regular] +5C5A80 1A80 [Regular] +5C5A81 1A81 [Regular] +5C5A9F 1A9F [Regular] +5C5AA0 1AA0 [Regular] +5C5AA1 1AA1 [Regular] +5C5AE0 1AE0 [Regular] +5C5AEF 1AEF [Regular] +5C5AF9 1AF9 [Regular] +5C5AFA 1AFA [Regular] +5C5AFC 1AFC [Regular] +5C5AFD 1AFD [Regular] +5C5AFE 1AFE [Regular] +5C5AFF 1AFF [Regular] +5C5C00 5C00 [Regular] +5C5C08 5C08 [Regular] +5C5C09 5C09 [Regular] +5C5C0A 5C0A [Regular] +5C5C0D 5C0D [Regular] +5C5C1A 5C1A [Regular] +5C5C22 5C22 [Regular] +5C5C25 5C25 [Regular] +5C5C27 NULL [SyntErr] +5C5C30 5C30 [Regular] +5C5C3F 5C3F [Regular] +5C5C40 5C40 [Regular] +5C5C5A 5C5A [Regular] +5C5C5C NULL [SyntErr] +5C5C5F 5C5F [Regular] +5C5C61 5C61 [Regular] +5C5C62 5C62 [Regular] +5C5C6E 5C6E [Regular] +5C5C72 5C72 [Regular] +5C5C74 5C74 [Regular] +5C5C7E 5C7E [Regular] +5C5C7F 5C7F [Regular] +5C5C80 5C80 [Regular] +5C5C81 5C81 [Regular] +5C5C9F 5C9F [Regular] +5C5CA0 5CA0 [Regular] +5C5CA1 5CA1 [Regular] +5C5CE0 5CE0 [Regular] +5C5CEF 5CEF [Regular] +5C5CF9 5CF9 [Regular] +5C5CFA 5CFA [Regular] +5C5CFC 5CFC [Regular] +5C5CFD 5CFD [Regular] +5C5CFE 5CFE [Regular] +5C5CFF 5CFF [Regular] +5C5F00 5C5F00 [Preserve][LIKE] +5C5F08 5C5F08 [Preserve][LIKE] +5C5F09 5C5F09 [Preserve][LIKE] +5C5F0A 5C5F0A [Preserve][LIKE] +5C5F0D 5C5F0D [Preserve][LIKE] +5C5F1A 5C5F1A [Preserve][LIKE] 
+5C5F22 5C5F22 [Preserve][LIKE] +5C5F25 5C5F25 [Preserve][LIKE] +5C5F27 NULL [SyntErr] +5C5F30 5C5F30 [Preserve][LIKE] +5C5F3F 5C5F3F [Preserve][LIKE] +5C5F40 5C5F40 [Preserve][LIKE] +5C5F5A 5C5F5A [Preserve][LIKE] +5C5F5C NULL [SyntErr] +5C5F5F 5C5F5F [Preserve][LIKE] +5C5F61 5C5F61 [Preserve][LIKE] +5C5F62 5C5F62 [Preserve][LIKE] +5C5F6E 5C5F6E [Preserve][LIKE] +5C5F72 5C5F72 [Preserve][LIKE] +5C5F74 5C5F74 [Preserve][LIKE] +5C5F7E 5C5F7E [Preserve][LIKE] +5C5F7F 5C5F7F [Preserve][LIKE] +5C5F80 5C5F80 [Preserve][LIKE] +5C5F81 5C5F81 [Preserve][LIKE] +5C5F9F 5C5F9F [Preserve][LIKE] +5C5FA0 5C5FA0 [Preserve][LIKE] +5C5FA1 5C5FA1 [Preserve][LIKE] +5C5FE0 5C5FE0 [Preserve][LIKE] +5C5FEF 5C5FEF [Preserve][LIKE] +5C5FF9 5C5FF9 [Preserve][LIKE] +5C5FFA 5C5FFA [Preserve][LIKE] +5C5FFC 5C5FFC [Preserve][LIKE] +5C5FFD 5C5FFD [Preserve][LIKE] +5C5FFE 5C5FFE [Preserve][LIKE] +5C5FFF 5C5FFF [Preserve][LIKE] +5C6100 6100 [Trivial] +5C6108 6108 [Trivial] +5C6109 6109 [Trivial] +5C610A 610A [Trivial] +5C610D 610D [Trivial] +5C611A 611A [Trivial] +5C6122 6122 [Trivial] +5C6125 6125 [Trivial] +5C6127 NULL [SyntErr] +5C6130 6130 [Trivial] +5C613F 613F [Trivial] +5C6140 6140 [Trivial] +5C615A 615A [Trivial] +5C615C NULL [SyntErr] +5C615F 615F [Trivial] +5C6161 6161 [Trivial] +5C6162 6162 [Trivial] +5C616E 616E [Trivial] +5C6172 6172 [Trivial] +5C6174 6174 [Trivial] +5C617E 617E [Trivial] +5C617F 617F [Trivial] +5C6180 6180 [Trivial] +5C6181 6181 [Trivial] +5C619F 619F [Trivial] +5C61A0 61A0 [Trivial] +5C61A1 61A1 [Trivial] +5C61E0 61E0 [Trivial] +5C61EF 61EF [Trivial] +5C61F9 61F9 [Trivial] +5C61FA 61FA [Trivial] +5C61FC 61FC [Trivial] +5C61FD 61FD [Trivial] +5C61FE 61FE [Trivial] +5C61FF 61FF [Trivial] +5C6200 0800 [Regular] +5C6208 0808 [Regular] +5C6209 0809 [Regular] +5C620A 080A [Regular] +5C620D 080D [Regular] +5C621A 081A [Regular] +5C6222 0822 [Regular] +5C6225 0825 [Regular] +5C6227 NULL [SyntErr] +5C6230 0830 [Regular] +5C623F 083F [Regular] +5C6240 0840 [Regular] +5C625A 085A [Regular] +5C625C NULL [SyntErr] +5C625F 085F [Regular] +5C6261 0861 [Regular] +5C6262 0862 [Regular] +5C626E 086E [Regular] +5C6272 0872 [Regular] +5C6274 0874 [Regular] +5C627E 087E [Regular] +5C627F 087F [Regular] +5C6280 0880 [Regular] +5C6281 0881 [Regular] +5C629F 089F [Regular] +5C62A0 08A0 [Regular] +5C62A1 08A1 [Regular] +5C62E0 08E0 [Regular] +5C62EF 08EF [Regular] +5C62F9 08F9 [Regular] +5C62FA 08FA [Regular] +5C62FC 08FC [Regular] +5C62FD 08FD [Regular] +5C62FE 08FE [Regular] +5C62FF 08FF [Regular] +5C6E00 0A00 [Regular] +5C6E08 0A08 [Regular] +5C6E09 0A09 [Regular] +5C6E0A 0A0A [Regular] +5C6E0D 0A0D [Regular] +5C6E1A 0A1A [Regular] +5C6E22 0A22 [Regular] +5C6E25 0A25 [Regular] +5C6E27 NULL [SyntErr] +5C6E30 0A30 [Regular] +5C6E3F 0A3F [Regular] +5C6E40 0A40 [Regular] +5C6E5A 0A5A [Regular] +5C6E5C NULL [SyntErr] +5C6E5F 0A5F [Regular] +5C6E61 0A61 [Regular] +5C6E62 0A62 [Regular] +5C6E6E 0A6E [Regular] +5C6E72 0A72 [Regular] +5C6E74 0A74 [Regular] +5C6E7E 0A7E [Regular] +5C6E7F 0A7F [Regular] +5C6E80 0A80 [Regular] +5C6E81 0A81 [Regular] +5C6E9F 0A9F [Regular] +5C6EA0 0AA0 [Regular] +5C6EA1 0AA1 [Regular] +5C6EE0 0AE0 [Regular] +5C6EEF 0AEF [Regular] +5C6EF9 0AF9 [Regular] +5C6EFA 0AFA [Regular] +5C6EFC 0AFC [Regular] +5C6EFD 0AFD [Regular] +5C6EFE 0AFE [Regular] +5C6EFF 0AFF [Regular] +5C7200 0D00 [Regular] +5C7208 0D08 [Regular] +5C7209 0D09 [Regular] +5C720A 0D0A [Regular] +5C720D 0D0D [Regular] +5C721A 0D1A [Regular] +5C7222 0D22 [Regular] +5C7225 0D25 [Regular] +5C7227 NULL [SyntErr] +5C7230 0D30 [Regular] 
+5C723F 0D3F [Regular] +5C7240 0D40 [Regular] +5C725A 0D5A [Regular] +5C725C NULL [SyntErr] +5C725F 0D5F [Regular] +5C7261 0D61 [Regular] +5C7262 0D62 [Regular] +5C726E 0D6E [Regular] +5C7272 0D72 [Regular] +5C7274 0D74 [Regular] +5C727E 0D7E [Regular] +5C727F 0D7F [Regular] +5C7280 0D80 [Regular] +5C7281 0D81 [Regular] +5C729F 0D9F [Regular] +5C72A0 0DA0 [Regular] +5C72A1 0DA1 [Regular] +5C72E0 0DE0 [Regular] +5C72EF 0DEF [Regular] +5C72F9 0DF9 [Regular] +5C72FA 0DFA [Regular] +5C72FC 0DFC [Regular] +5C72FD 0DFD [Regular] +5C72FE 0DFE [Regular] +5C72FF 0DFF [Regular] +5C7400 0900 [Regular] +5C7408 0908 [Regular] +5C7409 0909 [Regular] +5C740A 090A [Regular] +5C740D 090D [Regular] +5C741A 091A [Regular] +5C7422 0922 [Regular] +5C7425 0925 [Regular] +5C7427 NULL [SyntErr] +5C7430 0930 [Regular] +5C743F 093F [Regular] +5C7440 0940 [Regular] +5C745A 095A [Regular] +5C745C NULL [SyntErr] +5C745F 095F [Regular] +5C7461 0961 [Regular] +5C7462 0962 [Regular] +5C746E 096E [Regular] +5C7472 0972 [Regular] +5C7474 0974 [Regular] +5C747E 097E [Regular] +5C747F 097F [Regular] +5C7480 0980 [Regular] +5C7481 0981 [Regular] +5C749F 099F [Regular] +5C74A0 09A0 [Regular] +5C74A1 09A1 [Regular] +5C74E0 09E0 [Regular] +5C74EF 09EF [Regular] +5C74F9 09F9 [Regular] +5C74FA 09FA [Regular] +5C74FC 09FC [Regular] +5C74FD 09FD [Regular] +5C74FE 09FE [Regular] +5C74FF 09FF [Regular] +5C7E00 7E00 [Trivial] +5C7E08 7E08 [Trivial] +5C7E09 7E09 [Trivial] +5C7E0A 7E0A [Trivial] +5C7E0D 7E0D [Trivial] +5C7E1A 7E1A [Trivial] +5C7E22 7E22 [Trivial] +5C7E25 7E25 [Trivial] +5C7E27 NULL [SyntErr] +5C7E30 7E30 [Trivial] +5C7E3F 7E3F [Trivial] +5C7E40 7E40 [Trivial] +5C7E5A 7E5A [Trivial] +5C7E5C NULL [SyntErr] +5C7E5F 7E5F [Trivial] +5C7E61 7E61 [Trivial] +5C7E62 7E62 [Trivial] +5C7E6E 7E6E [Trivial] +5C7E72 7E72 [Trivial] +5C7E74 7E74 [Trivial] +5C7E7E 7E7E [Trivial] +5C7E7F 7E7F [Trivial] +5C7E80 7E80 [Trivial] +5C7E81 7E81 [Trivial] +5C7E9F 7E9F [Trivial] +5C7EA0 7EA0 [Trivial] +5C7EA1 7EA1 [Trivial] +5C7EE0 7EE0 [Trivial] +5C7EEF 7EEF [Trivial] +5C7EF9 7EF9 [Trivial] +5C7EFA 7EFA [Trivial] +5C7EFC 7EFC [Trivial] +5C7EFD 7EFD [Trivial] +5C7EFE 7EFE [Trivial] +5C7EFF 7EFF [Trivial] +5C7F00 7F00 [Trivial] +5C7F08 7F08 [Trivial] +5C7F09 7F09 [Trivial] +5C7F0A 7F0A [Trivial] +5C7F0D 7F0D [Trivial] +5C7F1A 7F1A [Trivial] +5C7F22 7F22 [Trivial] +5C7F25 7F25 [Trivial] +5C7F27 NULL [SyntErr] +5C7F30 7F30 [Trivial] +5C7F3F 7F3F [Trivial] +5C7F40 7F40 [Trivial] +5C7F5A 7F5A [Trivial] +5C7F5C NULL [SyntErr] +5C7F5F 7F5F [Trivial] +5C7F61 7F61 [Trivial] +5C7F62 7F62 [Trivial] +5C7F6E 7F6E [Trivial] +5C7F72 7F72 [Trivial] +5C7F74 7F74 [Trivial] +5C7F7E 7F7E [Trivial] +5C7F7F 7F7F [Trivial] +5C7F80 7F80 [Trivial] +5C7F81 7F81 [Trivial] +5C7F9F 7F9F [Trivial] +5C7FA0 7FA0 [Trivial] +5C7FA1 7FA1 [Trivial] +5C7FE0 7FE0 [Trivial] +5C7FEF 7FEF [Trivial] +5C7FF9 7FF9 [Trivial] +5C7FFA 7FFA [Trivial] +5C7FFC 7FFC [Trivial] +5C7FFD 7FFD [Trivial] +5C7FFE 7FFE [Trivial] +5C7FFF 7FFF [Trivial] +5C8000 8000 [Trivial] +5C8008 8008 [Trivial] +5C8009 8009 [Trivial] +5C800A 800A [Trivial] +5C800D 800D [Trivial] +5C801A 801A [Trivial] +5C8022 8022 [Trivial] +5C8025 8025 [Trivial] +5C8027 NULL [SyntErr] +5C8030 8030 [Trivial] +5C803F 803F [Trivial] +5C8040 8040 [Trivial] +5C805A 805A [Trivial] +5C805C NULL [SyntErr][USER] +5C805F 805F [Trivial] +5C8061 8061 [Trivial] +5C8062 8062 [Trivial] +5C806E 806E [Trivial] +5C8072 8072 [Trivial] +5C8074 8074 [Trivial] +5C807E 807E [Trivial] +5C807F 807F [Trivial] +5C8080 8080 [Trivial] +5C8081 8081 [Trivial] 
+5C809F 809F [Trivial] +5C80A0 80A0 [Trivial] +5C80A1 80A1 [Trivial] +5C80E0 80E0 [Trivial] +5C80EF 80EF [Trivial] +5C80F9 80F9 [Trivial] +5C80FA 80FA [Trivial] +5C80FC 80FC [Trivial] +5C80FD 80FD [Trivial] +5C80FE 80FE [Trivial] +5C80FF 80FF [Trivial] +5C8100 8100 [Trivial] +5C8108 8108 [Trivial] +5C8109 8109 [Trivial] +5C810A 810A [Trivial] +5C810D 810D [Trivial] +5C811A 811A [Trivial] +5C8122 8122 [Trivial] +5C8125 8125 [Trivial] +5C8127 NULL [SyntErr] +5C8130 8130 [Trivial] +5C813F 813F [Trivial] +5C8140 8140 [Trivial] +5C815A 815A [Trivial] +5C815C NULL [SyntErr][USER] +5C815F 815F [Trivial] +5C8161 8161 [Trivial] +5C8162 8162 [Trivial] +5C816E 816E [Trivial] +5C8172 8172 [Trivial] +5C8174 8174 [Trivial] +5C817E 817E [Trivial] +5C817F 817F [Trivial] +5C8180 8180 [Trivial] +5C8181 8181 [Trivial] +5C819F 819F [Trivial] +5C81A0 81A0 [Trivial] +5C81A1 81A1 [Trivial] +5C81E0 81E0 [Trivial] +5C81EF 81EF [Trivial] +5C81F9 81F9 [Trivial] +5C81FA 81FA [Trivial] +5C81FC 81FC [Trivial] +5C81FD 81FD [Trivial] +5C81FE 81FE [Trivial] +5C81FF 81FF [Trivial] +5C9F00 9F00 [Trivial] +5C9F08 9F08 [Trivial] +5C9F09 9F09 [Trivial] +5C9F0A 9F0A [Trivial] +5C9F0D 9F0D [Trivial] +5C9F1A 9F1A [Trivial] +5C9F22 9F22 [Trivial] +5C9F25 9F25 [Trivial] +5C9F27 NULL [SyntErr] +5C9F30 9F30 [Trivial] +5C9F3F 9F3F [Trivial] +5C9F40 9F40 [Trivial] +5C9F5A 9F5A [Trivial] +5C9F5C NULL [SyntErr][USER] +5C9F5F 9F5F [Trivial] +5C9F61 9F61 [Trivial] +5C9F62 9F62 [Trivial] +5C9F6E 9F6E [Trivial] +5C9F72 9F72 [Trivial] +5C9F74 9F74 [Trivial] +5C9F7E 9F7E [Trivial] +5C9F7F 9F7F [Trivial] +5C9F80 9F80 [Trivial] +5C9F81 9F81 [Trivial] +5C9F9F 9F9F [Trivial] +5C9FA0 9FA0 [Trivial] +5C9FA1 9FA1 [Trivial] +5C9FE0 9FE0 [Trivial] +5C9FEF 9FEF [Trivial] +5C9FF9 9FF9 [Trivial] +5C9FFA 9FFA [Trivial] +5C9FFC 9FFC [Trivial] +5C9FFD 9FFD [Trivial] +5C9FFE 9FFE [Trivial] +5C9FFF 9FFF [Trivial] +5CA000 A000 [Trivial] +5CA008 A008 [Trivial] +5CA009 A009 [Trivial] +5CA00A A00A [Trivial] +5CA00D A00D [Trivial] +5CA01A A01A [Trivial] +5CA022 A022 [Trivial] +5CA025 A025 [Trivial] +5CA027 NULL [SyntErr] +5CA030 A030 [Trivial] +5CA03F A03F [Trivial] +5CA040 A040 [Trivial] +5CA05A A05A [Trivial] +5CA05C NULL [SyntErr][USER] +5CA05F A05F [Trivial] +5CA061 A061 [Trivial] +5CA062 A062 [Trivial] +5CA06E A06E [Trivial] +5CA072 A072 [Trivial] +5CA074 A074 [Trivial] +5CA07E A07E [Trivial] +5CA07F A07F [Trivial] +5CA080 A080 [Trivial] +5CA081 A081 [Trivial] +5CA09F A09F [Trivial] +5CA0A0 A0A0 [Trivial] +5CA0A1 A0A1 [Trivial] +5CA0E0 A0E0 [Trivial] +5CA0EF A0EF [Trivial] +5CA0F9 A0F9 [Trivial] +5CA0FA A0FA [Trivial] +5CA0FC A0FC [Trivial] +5CA0FD A0FD [Trivial] +5CA0FE A0FE [Trivial] +5CA0FF A0FF [Trivial] +5CA100 A100 [Trivial] +5CA108 A108 [Trivial] +5CA109 A109 [Trivial] +5CA10A A10A [Trivial] +5CA10D A10D [Trivial] +5CA11A A11A [Trivial] +5CA122 A122 [Trivial] +5CA125 A125 [Trivial] +5CA127 NULL [SyntErr] +5CA130 A130 [Trivial] +5CA13F A13F [Trivial] +5CA140 A140 [Trivial] +5CA15A A15A [Trivial] +5CA15C NULL [SyntErr][USER] +5CA15F A15F [Trivial] +5CA161 A161 [Trivial] +5CA162 A162 [Trivial] +5CA16E A16E [Trivial] +5CA172 A172 [Trivial] +5CA174 A174 [Trivial] +5CA17E A17E [Trivial] +5CA17F A17F [Trivial] +5CA180 A180 [Trivial] +5CA181 A181 [Trivial] +5CA19F A19F [Trivial] +5CA1A0 A1A0 [Trivial] +5CA1A1 A1A1 [Trivial] +5CA1E0 A1E0 [Trivial] +5CA1EF A1EF [Trivial] +5CA1F9 A1F9 [Trivial] +5CA1FA A1FA [Trivial] +5CA1FC A1FC [Trivial] +5CA1FD A1FD [Trivial] +5CA1FE A1FE [Trivial] +5CA1FF A1FF [Trivial] +5CE000 E000 [Trivial] +5CE008 E008 [Trivial] +5CE009 
E009 [Trivial] +5CE00A E00A [Trivial] +5CE00D E00D [Trivial] +5CE01A E01A [Trivial] +5CE022 E022 [Trivial] +5CE025 E025 [Trivial] +5CE027 NULL [SyntErr] +5CE030 E030 [Trivial] +5CE03F E03F [Trivial] +5CE040 E040 [Trivial] +5CE05A E05A [Trivial] +5CE05C NULL [SyntErr][USER] +5CE05F E05F [Trivial] +5CE061 E061 [Trivial] +5CE062 E062 [Trivial] +5CE06E E06E [Trivial] +5CE072 E072 [Trivial] +5CE074 E074 [Trivial] +5CE07E E07E [Trivial] +5CE07F E07F [Trivial] +5CE080 E080 [Trivial] +5CE081 E081 [Trivial] +5CE09F E09F [Trivial] +5CE0A0 E0A0 [Trivial] +5CE0A1 E0A1 [Trivial] +5CE0E0 E0E0 [Trivial] +5CE0EF E0EF [Trivial] +5CE0F9 E0F9 [Trivial] +5CE0FA E0FA [Trivial] +5CE0FC E0FC [Trivial] +5CE0FD E0FD [Trivial] +5CE0FE E0FE [Trivial] +5CE0FF E0FF [Trivial] +5CEF00 EF00 [Trivial] +5CEF08 EF08 [Trivial] +5CEF09 EF09 [Trivial] +5CEF0A EF0A [Trivial] +5CEF0D EF0D [Trivial] +5CEF1A EF1A [Trivial] +5CEF22 EF22 [Trivial] +5CEF25 EF25 [Trivial] +5CEF27 NULL [SyntErr] +5CEF30 EF30 [Trivial] +5CEF3F EF3F [Trivial] +5CEF40 EF40 [Trivial] +5CEF5A EF5A [Trivial] +5CEF5C NULL [SyntErr][USER] +5CEF5F EF5F [Trivial] +5CEF61 EF61 [Trivial] +5CEF62 EF62 [Trivial] +5CEF6E EF6E [Trivial] +5CEF72 EF72 [Trivial] +5CEF74 EF74 [Trivial] +5CEF7E EF7E [Trivial] +5CEF7F EF7F [Trivial] +5CEF80 EF80 [Trivial] +5CEF81 EF81 [Trivial] +5CEF9F EF9F [Trivial] +5CEFA0 EFA0 [Trivial] +5CEFA1 EFA1 [Trivial] +5CEFE0 EFE0 [Trivial] +5CEFEF EFEF [Trivial] +5CEFF9 EFF9 [Trivial] +5CEFFA EFFA [Trivial] +5CEFFC EFFC [Trivial] +5CEFFD EFFD [Trivial] +5CEFFE EFFE [Trivial] +5CEFFF EFFF [Trivial] +5CF900 F900 [Trivial] +5CF908 F908 [Trivial] +5CF909 F909 [Trivial] +5CF90A F90A [Trivial] +5CF90D F90D [Trivial] +5CF91A F91A [Trivial] +5CF922 F922 [Trivial] +5CF925 F925 [Trivial] +5CF927 NULL [SyntErr] +5CF930 F930 [Trivial] +5CF93F F93F [Trivial] +5CF940 F940 [Trivial] +5CF95A F95A [Trivial] +5CF95C NULL [SyntErr][USER] +5CF95F F95F [Trivial] +5CF961 F961 [Trivial] +5CF962 F962 [Trivial] +5CF96E F96E [Trivial] +5CF972 F972 [Trivial] +5CF974 F974 [Trivial] +5CF97E F97E [Trivial] +5CF97F F97F [Trivial] +5CF980 F980 [Trivial] +5CF981 F981 [Trivial] +5CF99F F99F [Trivial] +5CF9A0 F9A0 [Trivial] +5CF9A1 F9A1 [Trivial] +5CF9E0 F9E0 [Trivial] +5CF9EF F9EF [Trivial] +5CF9F9 F9F9 [Trivial] +5CF9FA F9FA [Trivial] +5CF9FC F9FC [Trivial] +5CF9FD F9FD [Trivial] +5CF9FE F9FE [Trivial] +5CF9FF F9FF [Trivial] +5CFA00 FA00 [Trivial] +5CFA08 FA08 [Trivial] +5CFA09 FA09 [Trivial] +5CFA0A FA0A [Trivial] +5CFA0D FA0D [Trivial] +5CFA1A FA1A [Trivial] +5CFA22 FA22 [Trivial] +5CFA25 FA25 [Trivial] +5CFA27 NULL [SyntErr] +5CFA30 FA30 [Trivial] +5CFA3F FA3F [Trivial] +5CFA40 FA40 [Trivial] +5CFA5A FA5A [Trivial] +5CFA5C NULL [SyntErr][USER] +5CFA5F FA5F [Trivial] +5CFA61 FA61 [Trivial] +5CFA62 FA62 [Trivial] +5CFA6E FA6E [Trivial] +5CFA72 FA72 [Trivial] +5CFA74 FA74 [Trivial] +5CFA7E FA7E [Trivial] +5CFA7F FA7F [Trivial] +5CFA80 FA80 [Trivial] +5CFA81 FA81 [Trivial] +5CFA9F FA9F [Trivial] +5CFAA0 FAA0 [Trivial] +5CFAA1 FAA1 [Trivial] +5CFAE0 FAE0 [Trivial] +5CFAEF FAEF [Trivial] +5CFAF9 FAF9 [Trivial] +5CFAFA FAFA [Trivial] +5CFAFC FAFC [Trivial] +5CFAFD FAFD [Trivial] +5CFAFE FAFE [Trivial] +5CFAFF FAFF [Trivial] +5CFC00 FC00 [Trivial] +5CFC08 FC08 [Trivial] +5CFC09 FC09 [Trivial] +5CFC0A FC0A [Trivial] +5CFC0D FC0D [Trivial] +5CFC1A FC1A [Trivial] +5CFC22 FC22 [Trivial] +5CFC25 FC25 [Trivial] +5CFC27 NULL [SyntErr] +5CFC30 FC30 [Trivial] +5CFC3F FC3F [Trivial] +5CFC40 FC40 [Trivial] +5CFC5A FC5A [Trivial] +5CFC5C NULL [SyntErr][USER] +5CFC5F FC5F [Trivial] +5CFC61 FC61 
[Trivial] +5CFC62 FC62 [Trivial] +5CFC6E FC6E [Trivial] +5CFC72 FC72 [Trivial] +5CFC74 FC74 [Trivial] +5CFC7E FC7E [Trivial] +5CFC7F FC7F [Trivial] +5CFC80 FC80 [Trivial] +5CFC81 FC81 [Trivial] +5CFC9F FC9F [Trivial] +5CFCA0 FCA0 [Trivial] +5CFCA1 FCA1 [Trivial] +5CFCE0 FCE0 [Trivial] +5CFCEF FCEF [Trivial] +5CFCF9 FCF9 [Trivial] +5CFCFA FCFA [Trivial] +5CFCFC FCFC [Trivial] +5CFCFD FCFD [Trivial] +5CFCFE FCFE [Trivial] +5CFCFF FCFF [Trivial] +5CFD00 FD00 [Trivial] +5CFD08 FD08 [Trivial] +5CFD09 FD09 [Trivial] +5CFD0A FD0A [Trivial] +5CFD0D FD0D [Trivial] +5CFD1A FD1A [Trivial] +5CFD22 FD22 [Trivial] +5CFD25 FD25 [Trivial] +5CFD27 NULL [SyntErr] +5CFD30 FD30 [Trivial] +5CFD3F FD3F [Trivial] +5CFD40 FD40 [Trivial] +5CFD5A FD5A [Trivial] +5CFD5C NULL [SyntErr][USER] +5CFD5F FD5F [Trivial] +5CFD61 FD61 [Trivial] +5CFD62 FD62 [Trivial] +5CFD6E FD6E [Trivial] +5CFD72 FD72 [Trivial] +5CFD74 FD74 [Trivial] +5CFD7E FD7E [Trivial] +5CFD7F FD7F [Trivial] +5CFD80 FD80 [Trivial] +5CFD81 FD81 [Trivial] +5CFD9F FD9F [Trivial] +5CFDA0 FDA0 [Trivial] +5CFDA1 FDA1 [Trivial] +5CFDE0 FDE0 [Trivial] +5CFDEF FDEF [Trivial] +5CFDF9 FDF9 [Trivial] +5CFDFA FDFA [Trivial] +5CFDFC FDFC [Trivial] +5CFDFD FDFD [Trivial] +5CFDFE FDFE [Trivial] +5CFDFF FDFF [Trivial] +5CFE00 FE00 [Trivial] +5CFE08 FE08 [Trivial] +5CFE09 FE09 [Trivial] +5CFE0A FE0A [Trivial] +5CFE0D FE0D [Trivial] +5CFE1A FE1A [Trivial] +5CFE22 FE22 [Trivial] +5CFE25 FE25 [Trivial] +5CFE27 NULL [SyntErr] +5CFE30 FE30 [Trivial] +5CFE3F FE3F [Trivial] +5CFE40 FE40 [Trivial] +5CFE5A FE5A [Trivial] +5CFE5C NULL [SyntErr][USER] +5CFE5F FE5F [Trivial] +5CFE61 FE61 [Trivial] +5CFE62 FE62 [Trivial] +5CFE6E FE6E [Trivial] +5CFE72 FE72 [Trivial] +5CFE74 FE74 [Trivial] +5CFE7E FE7E [Trivial] +5CFE7F FE7F [Trivial] +5CFE80 FE80 [Trivial] +5CFE81 FE81 [Trivial] +5CFE9F FE9F [Trivial] +5CFEA0 FEA0 [Trivial] +5CFEA1 FEA1 [Trivial] +5CFEE0 FEE0 [Trivial] +5CFEEF FEEF [Trivial] +5CFEF9 FEF9 [Trivial] +5CFEFA FEFA [Trivial] +5CFEFC FEFC [Trivial] +5CFEFD FEFD [Trivial] +5CFEFE FEFE [Trivial] +5CFEFF FEFF [Trivial] +5CFF00 FF00 [Trivial] +5CFF08 FF08 [Trivial] +5CFF09 FF09 [Trivial] +5CFF0A FF0A [Trivial] +5CFF0D FF0D [Trivial] +5CFF1A FF1A [Trivial] +5CFF22 FF22 [Trivial] +5CFF25 FF25 [Trivial] +5CFF27 NULL [SyntErr] +5CFF30 FF30 [Trivial] +5CFF3F FF3F [Trivial] +5CFF40 FF40 [Trivial] +5CFF5A FF5A [Trivial] +5CFF5C NULL [SyntErr][USER] +5CFF5F FF5F [Trivial] +5CFF61 FF61 [Trivial] +5CFF62 FF62 [Trivial] +5CFF6E FF6E [Trivial] +5CFF72 FF72 [Trivial] +5CFF74 FF74 [Trivial] +5CFF7E FF7E [Trivial] +5CFF7F FF7F [Trivial] +5CFF80 FF80 [Trivial] +5CFF81 FF81 [Trivial] +5CFF9F FF9F [Trivial] +5CFFA0 FFA0 [Trivial] +5CFFA1 FFA1 [Trivial] +5CFFE0 FFE0 [Trivial] +5CFFEF FFEF [Trivial] +5CFFF9 FFF9 [Trivial] +5CFFFA FFFA [Trivial] +5CFFFC FFFC [Trivial] +5CFFFD FFFD [Trivial] +5CFFFE FFFE [Trivial] +5CFFFF FFFF [Trivial] +5C005C00 0000 [Trivial] +5C005C08 0008 [Trivial] +5C005C09 0009 [Trivial] +5C005C0A 000A [Trivial] +5C005C0D 000D [Trivial] +5C005C1A 001A [Trivial] +5C005C22 0022 [Trivial] +5C005C25 005C25 [Regular] +5C005C27 0027 [Trivial] +5C005C30 0000 [Regular] +5C005C3F 003F [Trivial] +5C005C40 0040 [Trivial] +5C005C5A 001A [Regular] +5C005C5C 005C [Regular] +5C005C5F 005C5F [Regular] +5C005C61 0061 [Trivial] +5C005C62 0008 [Regular] +5C005C6E 000A [Regular] +5C005C72 000D [Regular] +5C005C74 0009 [Regular] +5C005C7E 007E [Trivial] +5C005C7F 007F [Trivial] +5C005C80 0080 [Trivial] +5C005C81 0081 [Trivial] +5C005C9F 009F [Trivial] +5C005CA0 00A0 [Trivial] +5C005CA1 00A1 
[Trivial] +5C005CE0 00E0 [Trivial] +5C005CEF 00EF [Trivial] +5C005CF9 00F9 [Trivial] +5C005CFA 00FA [Trivial] +5C005CFC 00FC [Trivial] +5C005CFD 00FD [Trivial] +5C005CFE 00FE [Trivial] +5C005CFF 00FF [Trivial] +5C085C00 0800 [Trivial] +5C085C08 0808 [Trivial] +5C085C09 0809 [Trivial] +5C085C0A 080A [Trivial] +5C085C0D 080D [Trivial] +5C085C1A 081A [Trivial] +5C085C22 0822 [Trivial] +5C085C25 085C25 [Regular] +5C085C27 0827 [Trivial] +5C085C30 0800 [Regular] +5C085C3F 083F [Trivial] +5C085C40 0840 [Trivial] +5C085C5A 081A [Regular] +5C085C5C 085C [Regular] +5C085C5F 085C5F [Regular] +5C085C61 0861 [Trivial] +5C085C62 0808 [Regular] +5C085C6E 080A [Regular] +5C085C72 080D [Regular] +5C085C74 0809 [Regular] +5C085C7E 087E [Trivial] +5C085C7F 087F [Trivial] +5C085C80 0880 [Trivial] +5C085C81 0881 [Trivial] +5C085C9F 089F [Trivial] +5C085CA0 08A0 [Trivial] +5C085CA1 08A1 [Trivial] +5C085CE0 08E0 [Trivial] +5C085CEF 08EF [Trivial] +5C085CF9 08F9 [Trivial] +5C085CFA 08FA [Trivial] +5C085CFC 08FC [Trivial] +5C085CFD 08FD [Trivial] +5C085CFE 08FE [Trivial] +5C085CFF 08FF [Trivial] +5C095C00 0900 [Trivial] +5C095C08 0908 [Trivial] +5C095C09 0909 [Trivial] +5C095C0A 090A [Trivial] +5C095C0D 090D [Trivial] +5C095C1A 091A [Trivial] +5C095C22 0922 [Trivial] +5C095C25 095C25 [Regular] +5C095C27 0927 [Trivial] +5C095C30 0900 [Regular] +5C095C3F 093F [Trivial] +5C095C40 0940 [Trivial] +5C095C5A 091A [Regular] +5C095C5C 095C [Regular] +5C095C5F 095C5F [Regular] +5C095C61 0961 [Trivial] +5C095C62 0908 [Regular] +5C095C6E 090A [Regular] +5C095C72 090D [Regular] +5C095C74 0909 [Regular] +5C095C7E 097E [Trivial] +5C095C7F 097F [Trivial] +5C095C80 0980 [Trivial] +5C095C81 0981 [Trivial] +5C095C9F 099F [Trivial] +5C095CA0 09A0 [Trivial] +5C095CA1 09A1 [Trivial] +5C095CE0 09E0 [Trivial] +5C095CEF 09EF [Trivial] +5C095CF9 09F9 [Trivial] +5C095CFA 09FA [Trivial] +5C095CFC 09FC [Trivial] +5C095CFD 09FD [Trivial] +5C095CFE 09FE [Trivial] +5C095CFF 09FF [Trivial] +5C0A5C00 0A00 [Trivial] +5C0A5C08 0A08 [Trivial] +5C0A5C09 0A09 [Trivial] +5C0A5C0A 0A0A [Trivial] +5C0A5C0D 0A0D [Trivial] +5C0A5C1A 0A1A [Trivial] +5C0A5C22 0A22 [Trivial] +5C0A5C25 0A5C25 [Regular] +5C0A5C27 0A27 [Trivial] +5C0A5C30 0A00 [Regular] +5C0A5C3F 0A3F [Trivial] +5C0A5C40 0A40 [Trivial] +5C0A5C5A 0A1A [Regular] +5C0A5C5C 0A5C [Regular] +5C0A5C5F 0A5C5F [Regular] +5C0A5C61 0A61 [Trivial] +5C0A5C62 0A08 [Regular] +5C0A5C6E 0A0A [Regular] +5C0A5C72 0A0D [Regular] +5C0A5C74 0A09 [Regular] +5C0A5C7E 0A7E [Trivial] +5C0A5C7F 0A7F [Trivial] +5C0A5C80 0A80 [Trivial] +5C0A5C81 0A81 [Trivial] +5C0A5C9F 0A9F [Trivial] +5C0A5CA0 0AA0 [Trivial] +5C0A5CA1 0AA1 [Trivial] +5C0A5CE0 0AE0 [Trivial] +5C0A5CEF 0AEF [Trivial] +5C0A5CF9 0AF9 [Trivial] +5C0A5CFA 0AFA [Trivial] +5C0A5CFC 0AFC [Trivial] +5C0A5CFD 0AFD [Trivial] +5C0A5CFE 0AFE [Trivial] +5C0A5CFF 0AFF [Trivial] +5C0D5C00 0D00 [Trivial] +5C0D5C08 0D08 [Trivial] +5C0D5C09 0D09 [Trivial] +5C0D5C0A 0D0A [Trivial] +5C0D5C0D 0D0D [Trivial] +5C0D5C1A 0D1A [Trivial] +5C0D5C22 0D22 [Trivial] +5C0D5C25 0D5C25 [Regular] +5C0D5C27 0D27 [Trivial] +5C0D5C30 0D00 [Regular] +5C0D5C3F 0D3F [Trivial] +5C0D5C40 0D40 [Trivial] +5C0D5C5A 0D1A [Regular] +5C0D5C5C 0D5C [Regular] +5C0D5C5F 0D5C5F [Regular] +5C0D5C61 0D61 [Trivial] +5C0D5C62 0D08 [Regular] +5C0D5C6E 0D0A [Regular] +5C0D5C72 0D0D [Regular] +5C0D5C74 0D09 [Regular] +5C0D5C7E 0D7E [Trivial] +5C0D5C7F 0D7F [Trivial] +5C0D5C80 0D80 [Trivial] +5C0D5C81 0D81 [Trivial] +5C0D5C9F 0D9F [Trivial] +5C0D5CA0 0DA0 [Trivial] +5C0D5CA1 0DA1 [Trivial] +5C0D5CE0 0DE0 [Trivial] 
+5C0D5CEF 0DEF [Trivial] +5C0D5CF9 0DF9 [Trivial] +5C0D5CFA 0DFA [Trivial] +5C0D5CFC 0DFC [Trivial] +5C0D5CFD 0DFD [Trivial] +5C0D5CFE 0DFE [Trivial] +5C0D5CFF 0DFF [Trivial] +5C1A5C00 1A00 [Trivial] +5C1A5C08 1A08 [Trivial] +5C1A5C09 1A09 [Trivial] +5C1A5C0A 1A0A [Trivial] +5C1A5C0D 1A0D [Trivial] +5C1A5C1A 1A1A [Trivial] +5C1A5C22 1A22 [Trivial] +5C1A5C25 1A5C25 [Regular] +5C1A5C27 1A27 [Trivial] +5C1A5C30 1A00 [Regular] +5C1A5C3F 1A3F [Trivial] +5C1A5C40 1A40 [Trivial] +5C1A5C5A 1A1A [Regular] +5C1A5C5C 1A5C [Regular] +5C1A5C5F 1A5C5F [Regular] +5C1A5C61 1A61 [Trivial] +5C1A5C62 1A08 [Regular] +5C1A5C6E 1A0A [Regular] +5C1A5C72 1A0D [Regular] +5C1A5C74 1A09 [Regular] +5C1A5C7E 1A7E [Trivial] +5C1A5C7F 1A7F [Trivial] +5C1A5C80 1A80 [Trivial] +5C1A5C81 1A81 [Trivial] +5C1A5C9F 1A9F [Trivial] +5C1A5CA0 1AA0 [Trivial] +5C1A5CA1 1AA1 [Trivial] +5C1A5CE0 1AE0 [Trivial] +5C1A5CEF 1AEF [Trivial] +5C1A5CF9 1AF9 [Trivial] +5C1A5CFA 1AFA [Trivial] +5C1A5CFC 1AFC [Trivial] +5C1A5CFD 1AFD [Trivial] +5C1A5CFE 1AFE [Trivial] +5C1A5CFF 1AFF [Trivial] +5C225C00 2200 [Trivial] +5C225C08 2208 [Trivial] +5C225C09 2209 [Trivial] +5C225C0A 220A [Trivial] +5C225C0D 220D [Trivial] +5C225C1A 221A [Trivial] +5C225C22 2222 [Trivial] +5C225C25 225C25 [Regular] +5C225C27 2227 [Trivial] +5C225C30 2200 [Regular] +5C225C3F 223F [Trivial] +5C225C40 2240 [Trivial] +5C225C5A 221A [Regular] +5C225C5C 225C [Regular] +5C225C5F 225C5F [Regular] +5C225C61 2261 [Trivial] +5C225C62 2208 [Regular] +5C225C6E 220A [Regular] +5C225C72 220D [Regular] +5C225C74 2209 [Regular] +5C225C7E 227E [Trivial] +5C225C7F 227F [Trivial] +5C225C80 2280 [Trivial] +5C225C81 2281 [Trivial] +5C225C9F 229F [Trivial] +5C225CA0 22A0 [Trivial] +5C225CA1 22A1 [Trivial] +5C225CE0 22E0 [Trivial] +5C225CEF 22EF [Trivial] +5C225CF9 22F9 [Trivial] +5C225CFA 22FA [Trivial] +5C225CFC 22FC [Trivial] +5C225CFD 22FD [Trivial] +5C225CFE 22FE [Trivial] +5C225CFF 22FF [Trivial] +5C255C00 5C2500 [Regular] +5C255C08 5C2508 [Regular] +5C255C09 5C2509 [Regular] +5C255C0A 5C250A [Regular] +5C255C0D 5C250D [Regular] +5C255C1A 5C251A [Regular] +5C255C22 5C2522 [Regular] +5C255C25 5C255C25 [Preserve][LIKE] +5C255C27 5C2527 [Regular] +5C255C30 5C2500 [Regular] +5C255C3F 5C253F [Regular] +5C255C40 5C2540 [Regular] +5C255C5A 5C251A [Regular] +5C255C5C 5C255C [Regular] +5C255C5F 5C255C5F [Preserve][LIKE] +5C255C61 5C2561 [Regular] +5C255C62 5C2508 [Regular] +5C255C6E 5C250A [Regular] +5C255C72 5C250D [Regular] +5C255C74 5C2509 [Regular] +5C255C7E 5C257E [Regular] +5C255C7F 5C257F [Regular] +5C255C80 5C2580 [Regular] +5C255C81 5C2581 [Regular] +5C255C9F 5C259F [Regular] +5C255CA0 5C25A0 [Regular] +5C255CA1 5C25A1 [Regular] +5C255CE0 5C25E0 [Regular] +5C255CEF 5C25EF [Regular] +5C255CF9 5C25F9 [Regular] +5C255CFA 5C25FA [Regular] +5C255CFC 5C25FC [Regular] +5C255CFD 5C25FD [Regular] +5C255CFE 5C25FE [Regular] +5C255CFF 5C25FF [Regular] +5C275C00 2700 [Trivial] +5C275C08 2708 [Trivial] +5C275C09 2709 [Trivial] +5C275C0A 270A [Trivial] +5C275C0D 270D [Trivial] +5C275C1A 271A [Trivial] +5C275C22 2722 [Trivial] +5C275C25 275C25 [Regular] +5C275C27 2727 [Trivial] +5C275C30 2700 [Regular] +5C275C3F 273F [Trivial] +5C275C40 2740 [Trivial] +5C275C5A 271A [Regular] +5C275C5C 275C [Regular] +5C275C5F 275C5F [Regular] +5C275C61 2761 [Trivial] +5C275C62 2708 [Regular] +5C275C6E 270A [Regular] +5C275C72 270D [Regular] +5C275C74 2709 [Regular] +5C275C7E 277E [Trivial] +5C275C7F 277F [Trivial] +5C275C80 2780 [Trivial] +5C275C81 2781 [Trivial] +5C275C9F 279F [Trivial] +5C275CA0 27A0 [Trivial] 
+5C275CA1 27A1 [Trivial] +5C275CE0 27E0 [Trivial] +5C275CEF 27EF [Trivial] +5C275CF9 27F9 [Trivial] +5C275CFA 27FA [Trivial] +5C275CFC 27FC [Trivial] +5C275CFD 27FD [Trivial] +5C275CFE 27FE [Trivial] +5C275CFF 27FF [Trivial] +5C305C00 0000 [Regular] +5C305C08 0008 [Regular] +5C305C09 0009 [Regular] +5C305C0A 000A [Regular] +5C305C0D 000D [Regular] +5C305C1A 001A [Regular] +5C305C22 0022 [Regular] +5C305C25 005C25 [Regular] +5C305C27 0027 [Regular] +5C305C30 0000 [Regular] +5C305C3F 003F [Regular] +5C305C40 0040 [Regular] +5C305C5A 001A [Regular] +5C305C5C 005C [Regular] +5C305C5F 005C5F [Regular] +5C305C61 0061 [Regular] +5C305C62 0008 [Regular] +5C305C6E 000A [Regular] +5C305C72 000D [Regular] +5C305C74 0009 [Regular] +5C305C7E 007E [Regular] +5C305C7F 007F [Regular] +5C305C80 0080 [Regular] +5C305C81 0081 [Regular] +5C305C9F 009F [Regular] +5C305CA0 00A0 [Regular] +5C305CA1 00A1 [Regular] +5C305CE0 00E0 [Regular] +5C305CEF 00EF [Regular] +5C305CF9 00F9 [Regular] +5C305CFA 00FA [Regular] +5C305CFC 00FC [Regular] +5C305CFD 00FD [Regular] +5C305CFE 00FE [Regular] +5C305CFF 00FF [Regular] +5C3F5C00 3F00 [Trivial] +5C3F5C08 3F08 [Trivial] +5C3F5C09 3F09 [Trivial] +5C3F5C0A 3F0A [Trivial] +5C3F5C0D 3F0D [Trivial] +5C3F5C1A 3F1A [Trivial] +5C3F5C22 3F22 [Trivial] +5C3F5C25 3F5C25 [Regular] +5C3F5C27 3F27 [Trivial] +5C3F5C30 3F00 [Regular] +5C3F5C3F 3F3F [Trivial] +5C3F5C40 3F40 [Trivial] +5C3F5C5A 3F1A [Regular] +5C3F5C5C 3F5C [Regular] +5C3F5C5F 3F5C5F [Regular] +5C3F5C61 3F61 [Trivial] +5C3F5C62 3F08 [Regular] +5C3F5C6E 3F0A [Regular] +5C3F5C72 3F0D [Regular] +5C3F5C74 3F09 [Regular] +5C3F5C7E 3F7E [Trivial] +5C3F5C7F 3F7F [Trivial] +5C3F5C80 3F80 [Trivial] +5C3F5C81 3F81 [Trivial] +5C3F5C9F 3F9F [Trivial] +5C3F5CA0 3FA0 [Trivial] +5C3F5CA1 3FA1 [Trivial] +5C3F5CE0 3FE0 [Trivial] +5C3F5CEF 3FEF [Trivial] +5C3F5CF9 3FF9 [Trivial] +5C3F5CFA 3FFA [Trivial] +5C3F5CFC 3FFC [Trivial] +5C3F5CFD 3FFD [Trivial] +5C3F5CFE 3FFE [Trivial] +5C3F5CFF 3FFF [Trivial] +5C405C00 4000 [Trivial] +5C405C08 4008 [Trivial] +5C405C09 4009 [Trivial] +5C405C0A 400A [Trivial] +5C405C0D 400D [Trivial] +5C405C1A 401A [Trivial] +5C405C22 4022 [Trivial] +5C405C25 405C25 [Regular] +5C405C27 4027 [Trivial] +5C405C30 4000 [Regular] +5C405C3F 403F [Trivial] +5C405C40 4040 [Trivial] +5C405C5A 401A [Regular] +5C405C5C 405C [Regular] +5C405C5F 405C5F [Regular] +5C405C61 4061 [Trivial] +5C405C62 4008 [Regular] +5C405C6E 400A [Regular] +5C405C72 400D [Regular] +5C405C74 4009 [Regular] +5C405C7E 407E [Trivial] +5C405C7F 407F [Trivial] +5C405C80 4080 [Trivial] +5C405C81 4081 [Trivial] +5C405C9F 409F [Trivial] +5C405CA0 40A0 [Trivial] +5C405CA1 40A1 [Trivial] +5C405CE0 40E0 [Trivial] +5C405CEF 40EF [Trivial] +5C405CF9 40F9 [Trivial] +5C405CFA 40FA [Trivial] +5C405CFC 40FC [Trivial] +5C405CFD 40FD [Trivial] +5C405CFE 40FE [Trivial] +5C405CFF 40FF [Trivial] +5C5A5C00 1A00 [Regular] +5C5A5C08 1A08 [Regular] +5C5A5C09 1A09 [Regular] +5C5A5C0A 1A0A [Regular] +5C5A5C0D 1A0D [Regular] +5C5A5C1A 1A1A [Regular] +5C5A5C22 1A22 [Regular] +5C5A5C25 1A5C25 [Regular] +5C5A5C27 1A27 [Regular] +5C5A5C30 1A00 [Regular] +5C5A5C3F 1A3F [Regular] +5C5A5C40 1A40 [Regular] +5C5A5C5A 1A1A [Regular] +5C5A5C5C 1A5C [Regular] +5C5A5C5F 1A5C5F [Regular] +5C5A5C61 1A61 [Regular] +5C5A5C62 1A08 [Regular] +5C5A5C6E 1A0A [Regular] +5C5A5C72 1A0D [Regular] +5C5A5C74 1A09 [Regular] +5C5A5C7E 1A7E [Regular] +5C5A5C7F 1A7F [Regular] +5C5A5C80 1A80 [Regular] +5C5A5C81 1A81 [Regular] +5C5A5C9F 1A9F [Regular] +5C5A5CA0 1AA0 [Regular] +5C5A5CA1 1AA1 [Regular] +5C5A5CE0 
1AE0 [Regular] +5C5A5CEF 1AEF [Regular] +5C5A5CF9 1AF9 [Regular] +5C5A5CFA 1AFA [Regular] +5C5A5CFC 1AFC [Regular] +5C5A5CFD 1AFD [Regular] +5C5A5CFE 1AFE [Regular] +5C5A5CFF 1AFF [Regular] +5C5C5C00 5C00 [Regular] +5C5C5C08 5C08 [Regular] +5C5C5C09 5C09 [Regular] +5C5C5C0A 5C0A [Regular] +5C5C5C0D 5C0D [Regular] +5C5C5C1A 5C1A [Regular] +5C5C5C22 5C22 [Regular] +5C5C5C25 5C5C25 [Regular] +5C5C5C27 5C27 [Regular] +5C5C5C30 5C00 [Regular] +5C5C5C3F 5C3F [Regular] +5C5C5C40 5C40 [Regular] +5C5C5C5A 5C1A [Regular] +5C5C5C5C 5C5C [Regular] +5C5C5C5F 5C5C5F [Regular] +5C5C5C61 5C61 [Regular] +5C5C5C62 5C08 [Regular] +5C5C5C6E 5C0A [Regular] +5C5C5C72 5C0D [Regular] +5C5C5C74 5C09 [Regular] +5C5C5C7E 5C7E [Regular] +5C5C5C7F 5C7F [Regular] +5C5C5C80 5C80 [Regular] +5C5C5C81 5C81 [Regular] +5C5C5C9F 5C9F [Regular] +5C5C5CA0 5CA0 [Regular] +5C5C5CA1 5CA1 [Regular] +5C5C5CE0 5CE0 [Regular] +5C5C5CEF 5CEF [Regular] +5C5C5CF9 5CF9 [Regular] +5C5C5CFA 5CFA [Regular] +5C5C5CFC 5CFC [Regular] +5C5C5CFD 5CFD [Regular] +5C5C5CFE 5CFE [Regular] +5C5C5CFF 5CFF [Regular] +5C5F5C00 5C5F00 [Regular] +5C5F5C08 5C5F08 [Regular] +5C5F5C09 5C5F09 [Regular] +5C5F5C0A 5C5F0A [Regular] +5C5F5C0D 5C5F0D [Regular] +5C5F5C1A 5C5F1A [Regular] +5C5F5C22 5C5F22 [Regular] +5C5F5C25 5C5F5C25 [Preserve][LIKE] +5C5F5C27 5C5F27 [Regular] +5C5F5C30 5C5F00 [Regular] +5C5F5C3F 5C5F3F [Regular] +5C5F5C40 5C5F40 [Regular] +5C5F5C5A 5C5F1A [Regular] +5C5F5C5C 5C5F5C [Regular] +5C5F5C5F 5C5F5C5F [Preserve][LIKE] +5C5F5C61 5C5F61 [Regular] +5C5F5C62 5C5F08 [Regular] +5C5F5C6E 5C5F0A [Regular] +5C5F5C72 5C5F0D [Regular] +5C5F5C74 5C5F09 [Regular] +5C5F5C7E 5C5F7E [Regular] +5C5F5C7F 5C5F7F [Regular] +5C5F5C80 5C5F80 [Regular] +5C5F5C81 5C5F81 [Regular] +5C5F5C9F 5C5F9F [Regular] +5C5F5CA0 5C5FA0 [Regular] +5C5F5CA1 5C5FA1 [Regular] +5C5F5CE0 5C5FE0 [Regular] +5C5F5CEF 5C5FEF [Regular] +5C5F5CF9 5C5FF9 [Regular] +5C5F5CFA 5C5FFA [Regular] +5C5F5CFC 5C5FFC [Regular] +5C5F5CFD 5C5FFD [Regular] +5C5F5CFE 5C5FFE [Regular] +5C5F5CFF 5C5FFF [Regular] +5C615C00 6100 [Trivial] +5C615C08 6108 [Trivial] +5C615C09 6109 [Trivial] +5C615C0A 610A [Trivial] +5C615C0D 610D [Trivial] +5C615C1A 611A [Trivial] +5C615C22 6122 [Trivial] +5C615C25 615C25 [Regular] +5C615C27 6127 [Trivial] +5C615C30 6100 [Regular] +5C615C3F 613F [Trivial] +5C615C40 6140 [Trivial] +5C615C5A 611A [Regular] +5C615C5C 615C [Regular] +5C615C5F 615C5F [Regular] +5C615C61 6161 [Trivial] +5C615C62 6108 [Regular] +5C615C6E 610A [Regular] +5C615C72 610D [Regular] +5C615C74 6109 [Regular] +5C615C7E 617E [Trivial] +5C615C7F 617F [Trivial] +5C615C80 6180 [Trivial] +5C615C81 6181 [Trivial] +5C615C9F 619F [Trivial] +5C615CA0 61A0 [Trivial] +5C615CA1 61A1 [Trivial] +5C615CE0 61E0 [Trivial] +5C615CEF 61EF [Trivial] +5C615CF9 61F9 [Trivial] +5C615CFA 61FA [Trivial] +5C615CFC 61FC [Trivial] +5C615CFD 61FD [Trivial] +5C615CFE 61FE [Trivial] +5C615CFF 61FF [Trivial] +5C625C00 0800 [Regular] +5C625C08 0808 [Regular] +5C625C09 0809 [Regular] +5C625C0A 080A [Regular] +5C625C0D 080D [Regular] +5C625C1A 081A [Regular] +5C625C22 0822 [Regular] +5C625C25 085C25 [Regular] +5C625C27 0827 [Regular] +5C625C30 0800 [Regular] +5C625C3F 083F [Regular] +5C625C40 0840 [Regular] +5C625C5A 081A [Regular] +5C625C5C 085C [Regular] +5C625C5F 085C5F [Regular] +5C625C61 0861 [Regular] +5C625C62 0808 [Regular] +5C625C6E 080A [Regular] +5C625C72 080D [Regular] +5C625C74 0809 [Regular] +5C625C7E 087E [Regular] +5C625C7F 087F [Regular] +5C625C80 0880 [Regular] +5C625C81 0881 [Regular] +5C625C9F 089F [Regular] +5C625CA0 08A0 
[Regular] +5C625CA1 08A1 [Regular] +5C625CE0 08E0 [Regular] +5C625CEF 08EF [Regular] +5C625CF9 08F9 [Regular] +5C625CFA 08FA [Regular] +5C625CFC 08FC [Regular] +5C625CFD 08FD [Regular] +5C625CFE 08FE [Regular] +5C625CFF 08FF [Regular] +5C6E5C00 0A00 [Regular] +5C6E5C08 0A08 [Regular] +5C6E5C09 0A09 [Regular] +5C6E5C0A 0A0A [Regular] +5C6E5C0D 0A0D [Regular] +5C6E5C1A 0A1A [Regular] +5C6E5C22 0A22 [Regular] +5C6E5C25 0A5C25 [Regular] +5C6E5C27 0A27 [Regular] +5C6E5C30 0A00 [Regular] +5C6E5C3F 0A3F [Regular] +5C6E5C40 0A40 [Regular] +5C6E5C5A 0A1A [Regular] +5C6E5C5C 0A5C [Regular] +5C6E5C5F 0A5C5F [Regular] +5C6E5C61 0A61 [Regular] +5C6E5C62 0A08 [Regular] +5C6E5C6E 0A0A [Regular] +5C6E5C72 0A0D [Regular] +5C6E5C74 0A09 [Regular] +5C6E5C7E 0A7E [Regular] +5C6E5C7F 0A7F [Regular] +5C6E5C80 0A80 [Regular] +5C6E5C81 0A81 [Regular] +5C6E5C9F 0A9F [Regular] +5C6E5CA0 0AA0 [Regular] +5C6E5CA1 0AA1 [Regular] +5C6E5CE0 0AE0 [Regular] +5C6E5CEF 0AEF [Regular] +5C6E5CF9 0AF9 [Regular] +5C6E5CFA 0AFA [Regular] +5C6E5CFC 0AFC [Regular] +5C6E5CFD 0AFD [Regular] +5C6E5CFE 0AFE [Regular] +5C6E5CFF 0AFF [Regular] +5C725C00 0D00 [Regular] +5C725C08 0D08 [Regular] +5C725C09 0D09 [Regular] +5C725C0A 0D0A [Regular] +5C725C0D 0D0D [Regular] +5C725C1A 0D1A [Regular] +5C725C22 0D22 [Regular] +5C725C25 0D5C25 [Regular] +5C725C27 0D27 [Regular] +5C725C30 0D00 [Regular] +5C725C3F 0D3F [Regular] +5C725C40 0D40 [Regular] +5C725C5A 0D1A [Regular] +5C725C5C 0D5C [Regular] +5C725C5F 0D5C5F [Regular] +5C725C61 0D61 [Regular] +5C725C62 0D08 [Regular] +5C725C6E 0D0A [Regular] +5C725C72 0D0D [Regular] +5C725C74 0D09 [Regular] +5C725C7E 0D7E [Regular] +5C725C7F 0D7F [Regular] +5C725C80 0D80 [Regular] +5C725C81 0D81 [Regular] +5C725C9F 0D9F [Regular] +5C725CA0 0DA0 [Regular] +5C725CA1 0DA1 [Regular] +5C725CE0 0DE0 [Regular] +5C725CEF 0DEF [Regular] +5C725CF9 0DF9 [Regular] +5C725CFA 0DFA [Regular] +5C725CFC 0DFC [Regular] +5C725CFD 0DFD [Regular] +5C725CFE 0DFE [Regular] +5C725CFF 0DFF [Regular] +5C745C00 0900 [Regular] +5C745C08 0908 [Regular] +5C745C09 0909 [Regular] +5C745C0A 090A [Regular] +5C745C0D 090D [Regular] +5C745C1A 091A [Regular] +5C745C22 0922 [Regular] +5C745C25 095C25 [Regular] +5C745C27 0927 [Regular] +5C745C30 0900 [Regular] +5C745C3F 093F [Regular] +5C745C40 0940 [Regular] +5C745C5A 091A [Regular] +5C745C5C 095C [Regular] +5C745C5F 095C5F [Regular] +5C745C61 0961 [Regular] +5C745C62 0908 [Regular] +5C745C6E 090A [Regular] +5C745C72 090D [Regular] +5C745C74 0909 [Regular] +5C745C7E 097E [Regular] +5C745C7F 097F [Regular] +5C745C80 0980 [Regular] +5C745C81 0981 [Regular] +5C745C9F 099F [Regular] +5C745CA0 09A0 [Regular] +5C745CA1 09A1 [Regular] +5C745CE0 09E0 [Regular] +5C745CEF 09EF [Regular] +5C745CF9 09F9 [Regular] +5C745CFA 09FA [Regular] +5C745CFC 09FC [Regular] +5C745CFD 09FD [Regular] +5C745CFE 09FE [Regular] +5C745CFF 09FF [Regular] +5C7E5C00 7E00 [Trivial] +5C7E5C08 7E08 [Trivial] +5C7E5C09 7E09 [Trivial] +5C7E5C0A 7E0A [Trivial] +5C7E5C0D 7E0D [Trivial] +5C7E5C1A 7E1A [Trivial] +5C7E5C22 7E22 [Trivial] +5C7E5C25 7E5C25 [Regular] +5C7E5C27 7E27 [Trivial] +5C7E5C30 7E00 [Regular] +5C7E5C3F 7E3F [Trivial] +5C7E5C40 7E40 [Trivial] +5C7E5C5A 7E1A [Regular] +5C7E5C5C 7E5C [Regular] +5C7E5C5F 7E5C5F [Regular] +5C7E5C61 7E61 [Trivial] +5C7E5C62 7E08 [Regular] +5C7E5C6E 7E0A [Regular] +5C7E5C72 7E0D [Regular] +5C7E5C74 7E09 [Regular] +5C7E5C7E 7E7E [Trivial] +5C7E5C7F 7E7F [Trivial] +5C7E5C80 7E80 [Trivial] +5C7E5C81 7E81 [Trivial] +5C7E5C9F 7E9F [Trivial] +5C7E5CA0 7EA0 [Trivial] +5C7E5CA1 7EA1 [Trivial] 
+5C7E5CE0 7EE0 [Trivial] +5C7E5CEF 7EEF [Trivial] +5C7E5CF9 7EF9 [Trivial] +5C7E5CFA 7EFA [Trivial] +5C7E5CFC 7EFC [Trivial] +5C7E5CFD 7EFD [Trivial] +5C7E5CFE 7EFE [Trivial] +5C7E5CFF 7EFF [Trivial] +5C7F5C00 7F00 [Trivial] +5C7F5C08 7F08 [Trivial] +5C7F5C09 7F09 [Trivial] +5C7F5C0A 7F0A [Trivial] +5C7F5C0D 7F0D [Trivial] +5C7F5C1A 7F1A [Trivial] +5C7F5C22 7F22 [Trivial] +5C7F5C25 7F5C25 [Regular] +5C7F5C27 7F27 [Trivial] +5C7F5C30 7F00 [Regular] +5C7F5C3F 7F3F [Trivial] +5C7F5C40 7F40 [Trivial] +5C7F5C5A 7F1A [Regular] +5C7F5C5C 7F5C [Regular] +5C7F5C5F 7F5C5F [Regular] +5C7F5C61 7F61 [Trivial] +5C7F5C62 7F08 [Regular] +5C7F5C6E 7F0A [Regular] +5C7F5C72 7F0D [Regular] +5C7F5C74 7F09 [Regular] +5C7F5C7E 7F7E [Trivial] +5C7F5C7F 7F7F [Trivial] +5C7F5C80 7F80 [Trivial] +5C7F5C81 7F81 [Trivial] +5C7F5C9F 7F9F [Trivial] +5C7F5CA0 7FA0 [Trivial] +5C7F5CA1 7FA1 [Trivial] +5C7F5CE0 7FE0 [Trivial] +5C7F5CEF 7FEF [Trivial] +5C7F5CF9 7FF9 [Trivial] +5C7F5CFA 7FFA [Trivial] +5C7F5CFC 7FFC [Trivial] +5C7F5CFD 7FFD [Trivial] +5C7F5CFE 7FFE [Trivial] +5C7F5CFF 7FFF [Trivial] +5C805C00 8000 [Trivial][USER] +5C805C08 8008 [Trivial][USER] +5C805C09 8009 [Trivial][USER] +5C805C0A 800A [Trivial][USER] +5C805C0D 800D [Trivial][USER] +5C805C1A 801A [Trivial][USER] +5C805C22 8022 [Trivial][USER] +5C805C25 805C25 [Regular] +5C805C27 8027 [Trivial][USER] +5C805C30 8000 [Regular] +5C805C3F 803F [Trivial][USER] +5C805C40 8040 [Trivial][USER] +5C805C5A 801A [Regular] +5C805C5C 805C [Regular][USER] +5C805C5F 805C5F [Regular] +5C805C61 8061 [Trivial][USER] +5C805C62 8008 [Regular][USER] +5C805C6E 800A [Regular] +5C805C72 800D [Regular] +5C805C74 8009 [Regular] +5C805C7E 807E [Trivial][USER] +5C805C7F 807F [Trivial][USER] +5C805C80 8080 [Trivial][USER] +5C805C81 8081 [Trivial][USER] +5C805C9F 809F [Trivial][USER] +5C805CA0 80A0 [Trivial][USER] +5C805CA1 80A1 [Trivial][USER] +5C805CE0 80E0 [Trivial][USER] +5C805CEF 80EF [Trivial][USER] +5C805CF9 80F9 [Trivial][USER] +5C805CFA 80FA [Trivial][USER] +5C805CFC 80FC [Trivial][USER] +5C805CFD 80FD [Trivial][USER] +5C805CFE 80FE [Trivial][USER] +5C805CFF 80FF [Trivial][USER] +5C815C00 8100 [Trivial][USER] +5C815C08 8108 [Trivial][USER] +5C815C09 8109 [Trivial][USER] +5C815C0A 810A [Trivial][USER] +5C815C0D 810D [Trivial][USER] +5C815C1A 811A [Trivial][USER] +5C815C22 8122 [Trivial][USER] +5C815C25 815C25 [Regular] +5C815C27 8127 [Trivial][USER] +5C815C30 8100 [Regular] +5C815C3F 813F [Trivial][USER] +5C815C40 8140 [Trivial][USER] +5C815C5A 811A [Regular] +5C815C5C 815C [Regular][USER] +5C815C5F 815C5F [Regular] +5C815C61 8161 [Trivial][USER] +5C815C62 8108 [Regular][USER] +5C815C6E 810A [Regular] +5C815C72 810D [Regular] +5C815C74 8109 [Regular] +5C815C7E 817E [Trivial][USER] +5C815C7F 817F [Trivial][USER] +5C815C80 8180 [Trivial][USER] +5C815C81 8181 [Trivial][USER] +5C815C9F 819F [Trivial][USER] +5C815CA0 81A0 [Trivial][USER] +5C815CA1 81A1 [Trivial][USER] +5C815CE0 81E0 [Trivial][USER] +5C815CEF 81EF [Trivial][USER] +5C815CF9 81F9 [Trivial][USER] +5C815CFA 81FA [Trivial][USER] +5C815CFC 81FC [Trivial][USER] +5C815CFD 81FD [Trivial][USER] +5C815CFE 81FE [Trivial][USER] +5C815CFF 81FF [Trivial][USER] +5C9F5C00 9F00 [Trivial][USER] +5C9F5C08 9F08 [Trivial][USER] +5C9F5C09 9F09 [Trivial][USER] +5C9F5C0A 9F0A [Trivial][USER] +5C9F5C0D 9F0D [Trivial][USER] +5C9F5C1A 9F1A [Trivial][USER] +5C9F5C22 9F22 [Trivial][USER] +5C9F5C25 9F5C25 [Regular] +5C9F5C27 9F27 [Trivial][USER] +5C9F5C30 9F00 [Regular] +5C9F5C3F 9F3F [Trivial][USER] +5C9F5C40 9F40 [Trivial][USER] +5C9F5C5A 9F1A 
[Regular] +5C9F5C5C 9F5C [Regular][USER] +5C9F5C5F 9F5C5F [Regular] +5C9F5C61 9F61 [Trivial][USER] +5C9F5C62 9F08 [Regular][USER] +5C9F5C6E 9F0A [Regular] +5C9F5C72 9F0D [Regular] +5C9F5C74 9F09 [Regular] +5C9F5C7E 9F7E [Trivial][USER] +5C9F5C7F 9F7F [Trivial][USER] +5C9F5C80 9F80 [Trivial][USER] +5C9F5C81 9F81 [Trivial][USER] +5C9F5C9F 9F9F [Trivial][USER] +5C9F5CA0 9FA0 [Trivial][USER] +5C9F5CA1 9FA1 [Trivial][USER] +5C9F5CE0 9FE0 [Trivial][USER] +5C9F5CEF 9FEF [Trivial][USER] +5C9F5CF9 9FF9 [Trivial][USER] +5C9F5CFA 9FFA [Trivial][USER] +5C9F5CFC 9FFC [Trivial][USER] +5C9F5CFD 9FFD [Trivial][USER] +5C9F5CFE 9FFE [Trivial][USER] +5C9F5CFF 9FFF [Trivial][USER] +5CA05C00 A000 [Trivial][USER] +5CA05C08 A008 [Trivial][USER] +5CA05C09 A009 [Trivial][USER] +5CA05C0A A00A [Trivial][USER] +5CA05C0D A00D [Trivial][USER] +5CA05C1A A01A [Trivial][USER] +5CA05C22 A022 [Trivial][USER] +5CA05C25 A05C25 [Regular] +5CA05C27 A027 [Trivial][USER] +5CA05C30 A000 [Regular] +5CA05C3F A03F [Trivial][USER] +5CA05C40 A040 [Trivial][USER] +5CA05C5A A01A [Regular] +5CA05C5C A05C [Regular][USER] +5CA05C5F A05C5F [Regular] +5CA05C61 A061 [Trivial][USER] +5CA05C62 A008 [Regular][USER] +5CA05C6E A00A [Regular] +5CA05C72 A00D [Regular] +5CA05C74 A009 [Regular] +5CA05C7E A07E [Trivial][USER] +5CA05C7F A07F [Trivial][USER] +5CA05C80 A080 [Trivial][USER] +5CA05C81 A081 [Trivial][USER] +5CA05C9F A09F [Trivial][USER] +5CA05CA0 A0A0 [Trivial][USER] +5CA05CA1 A0A1 [Trivial][USER] +5CA05CE0 A0E0 [Trivial][USER] +5CA05CEF A0EF [Trivial][USER] +5CA05CF9 A0F9 [Trivial][USER] +5CA05CFA A0FA [Trivial][USER] +5CA05CFC A0FC [Trivial][USER] +5CA05CFD A0FD [Trivial][USER] +5CA05CFE A0FE [Trivial][USER] +5CA05CFF A0FF [Trivial][USER] +5CA15C00 A100 [Trivial][USER] +5CA15C08 A108 [Trivial][USER] +5CA15C09 A109 [Trivial][USER] +5CA15C0A A10A [Trivial][USER] +5CA15C0D A10D [Trivial][USER] +5CA15C1A A11A [Trivial][USER] +5CA15C22 A122 [Trivial][USER] +5CA15C25 A15C25 [Regular] +5CA15C27 A127 [Trivial][USER] +5CA15C30 A100 [Regular] +5CA15C3F A13F [Trivial][USER] +5CA15C40 A140 [Trivial][USER] +5CA15C5A A11A [Regular] +5CA15C5C A15C [Regular][USER] +5CA15C5F A15C5F [Regular] +5CA15C61 A161 [Trivial][USER] +5CA15C62 A108 [Regular][USER] +5CA15C6E A10A [Regular] +5CA15C72 A10D [Regular] +5CA15C74 A109 [Regular] +5CA15C7E A17E [Trivial][USER] +5CA15C7F A17F [Trivial][USER] +5CA15C80 A180 [Trivial][USER] +5CA15C81 A181 [Trivial][USER] +5CA15C9F A19F [Trivial][USER] +5CA15CA0 A1A0 [Trivial][USER] +5CA15CA1 A1A1 [Trivial][USER] +5CA15CE0 A1E0 [Trivial][USER] +5CA15CEF A1EF [Trivial][USER] +5CA15CF9 A1F9 [Trivial][USER] +5CA15CFA A1FA [Trivial][USER] +5CA15CFC A1FC [Trivial][USER] +5CA15CFD A1FD [Trivial][USER] +5CA15CFE A1FE [Trivial][USER] +5CA15CFF A1FF [Trivial][USER] +5CE05C00 E000 [Trivial][USER] +5CE05C08 E008 [Trivial][USER] +5CE05C09 E009 [Trivial][USER] +5CE05C0A E00A [Trivial][USER] +5CE05C0D E00D [Trivial][USER] +5CE05C1A E01A [Trivial][USER] +5CE05C22 E022 [Trivial][USER] +5CE05C25 E05C25 [Regular] +5CE05C27 E027 [Trivial][USER] +5CE05C30 E000 [Regular] +5CE05C3F E03F [Trivial][USER] +5CE05C40 E040 [Trivial][USER] +5CE05C5A E01A [Regular] +5CE05C5C E05C [Regular][USER] +5CE05C5F E05C5F [Regular] +5CE05C61 E061 [Trivial][USER] +5CE05C62 E008 [Regular][USER] +5CE05C6E E00A [Regular] +5CE05C72 E00D [Regular] +5CE05C74 E009 [Regular] +5CE05C7E E07E [Trivial][USER] +5CE05C7F E07F [Trivial][USER] +5CE05C80 E080 [Trivial][USER] +5CE05C81 E081 [Trivial][USER] +5CE05C9F E09F [Trivial][USER] +5CE05CA0 E0A0 [Trivial][USER] +5CE05CA1 E0A1 
[Trivial][USER] +5CE05CE0 E0E0 [Trivial][USER] +5CE05CEF E0EF [Trivial][USER] +5CE05CF9 E0F9 [Trivial][USER] +5CE05CFA E0FA [Trivial][USER] +5CE05CFC E0FC [Trivial][USER] +5CE05CFD E0FD [Trivial][USER] +5CE05CFE E0FE [Trivial][USER] +5CE05CFF E0FF [Trivial][USER] +5CEF5C00 EF00 [Trivial][USER] +5CEF5C08 EF08 [Trivial][USER] +5CEF5C09 EF09 [Trivial][USER] +5CEF5C0A EF0A [Trivial][USER] +5CEF5C0D EF0D [Trivial][USER] +5CEF5C1A EF1A [Trivial][USER] +5CEF5C22 EF22 [Trivial][USER] +5CEF5C25 EF5C25 [Regular] +5CEF5C27 EF27 [Trivial][USER] +5CEF5C30 EF00 [Regular] +5CEF5C3F EF3F [Trivial][USER] +5CEF5C40 EF40 [Trivial][USER] +5CEF5C5A EF1A [Regular] +5CEF5C5C EF5C [Regular][USER] +5CEF5C5F EF5C5F [Regular] +5CEF5C61 EF61 [Trivial][USER] +5CEF5C62 EF08 [Regular][USER] +5CEF5C6E EF0A [Regular] +5CEF5C72 EF0D [Regular] +5CEF5C74 EF09 [Regular] +5CEF5C7E EF7E [Trivial][USER] +5CEF5C7F EF7F [Trivial][USER] +5CEF5C80 EF80 [Trivial][USER] +5CEF5C81 EF81 [Trivial][USER] +5CEF5C9F EF9F [Trivial][USER] +5CEF5CA0 EFA0 [Trivial][USER] +5CEF5CA1 EFA1 [Trivial][USER] +5CEF5CE0 EFE0 [Trivial][USER] +5CEF5CEF EFEF [Trivial][USER] +5CEF5CF9 EFF9 [Trivial][USER] +5CEF5CFA EFFA [Trivial][USER] +5CEF5CFC EFFC [Trivial][USER] +5CEF5CFD EFFD [Trivial][USER] +5CEF5CFE EFFE [Trivial][USER] +5CEF5CFF EFFF [Trivial][USER] +5CF95C00 F900 [Trivial][USER] +5CF95C08 F908 [Trivial][USER] +5CF95C09 F909 [Trivial][USER] +5CF95C0A F90A [Trivial][USER] +5CF95C0D F90D [Trivial][USER] +5CF95C1A F91A [Trivial][USER] +5CF95C22 F922 [Trivial][USER] +5CF95C25 F95C25 [Regular] +5CF95C27 F927 [Trivial][USER] +5CF95C30 F900 [Regular] +5CF95C3F F93F [Trivial][USER] +5CF95C40 F940 [Trivial][USER] +5CF95C5A F91A [Regular] +5CF95C5C F95C [Regular][USER] +5CF95C5F F95C5F [Regular] +5CF95C61 F961 [Trivial][USER] +5CF95C62 F908 [Regular][USER] +5CF95C6E F90A [Regular] +5CF95C72 F90D [Regular] +5CF95C74 F909 [Regular] +5CF95C7E F97E [Trivial][USER] +5CF95C7F F97F [Trivial][USER] +5CF95C80 F980 [Trivial][USER] +5CF95C81 F981 [Trivial][USER] +5CF95C9F F99F [Trivial][USER] +5CF95CA0 F9A0 [Trivial][USER] +5CF95CA1 F9A1 [Trivial][USER] +5CF95CE0 F9E0 [Trivial][USER] +5CF95CEF F9EF [Trivial][USER] +5CF95CF9 F9F9 [Trivial][USER] +5CF95CFA F9FA [Trivial][USER] +5CF95CFC F9FC [Trivial][USER] +5CF95CFD F9FD [Trivial][USER] +5CF95CFE F9FE [Trivial][USER] +5CF95CFF F9FF [Trivial][USER] +5CFA5C00 FA00 [Trivial][USER] +5CFA5C08 FA08 [Trivial][USER] +5CFA5C09 FA09 [Trivial][USER] +5CFA5C0A FA0A [Trivial][USER] +5CFA5C0D FA0D [Trivial][USER] +5CFA5C1A FA1A [Trivial][USER] +5CFA5C22 FA22 [Trivial][USER] +5CFA5C25 FA5C25 [Regular] +5CFA5C27 FA27 [Trivial][USER] +5CFA5C30 FA00 [Regular] +5CFA5C3F FA3F [Trivial][USER] +5CFA5C40 FA40 [Trivial][USER] +5CFA5C5A FA1A [Regular] +5CFA5C5C FA5C [Regular][USER] +5CFA5C5F FA5C5F [Regular] +5CFA5C61 FA61 [Trivial][USER] +5CFA5C62 FA08 [Regular][USER] +5CFA5C6E FA0A [Regular] +5CFA5C72 FA0D [Regular] +5CFA5C74 FA09 [Regular] +5CFA5C7E FA7E [Trivial][USER] +5CFA5C7F FA7F [Trivial][USER] +5CFA5C80 FA80 [Trivial][USER] +5CFA5C81 FA81 [Trivial][USER] +5CFA5C9F FA9F [Trivial][USER] +5CFA5CA0 FAA0 [Trivial][USER] +5CFA5CA1 FAA1 [Trivial][USER] +5CFA5CE0 FAE0 [Trivial][USER] +5CFA5CEF FAEF [Trivial][USER] +5CFA5CF9 FAF9 [Trivial][USER] +5CFA5CFA FAFA [Trivial][USER] +5CFA5CFC FAFC [Trivial][USER] +5CFA5CFD FAFD [Trivial][USER] +5CFA5CFE FAFE [Trivial][USER] +5CFA5CFF FAFF [Trivial][USER] +5CFC5C00 FC00 [Trivial][USER] +5CFC5C08 FC08 [Trivial][USER] +5CFC5C09 FC09 [Trivial][USER] +5CFC5C0A FC0A [Trivial][USER] +5CFC5C0D FC0D 
[Trivial][USER] +5CFC5C1A FC1A [Trivial][USER] +5CFC5C22 FC22 [Trivial][USER] +5CFC5C25 FC5C25 [Regular] +5CFC5C27 FC27 [Trivial][USER] +5CFC5C30 FC00 [Regular] +5CFC5C3F FC3F [Trivial][USER] +5CFC5C40 FC40 [Trivial][USER] +5CFC5C5A FC1A [Regular] +5CFC5C5C FC5C [Regular][USER] +5CFC5C5F FC5C5F [Regular] +5CFC5C61 FC61 [Trivial][USER] +5CFC5C62 FC08 [Regular][USER] +5CFC5C6E FC0A [Regular] +5CFC5C72 FC0D [Regular] +5CFC5C74 FC09 [Regular] +5CFC5C7E FC7E [Trivial][USER] +5CFC5C7F FC7F [Trivial][USER] +5CFC5C80 FC80 [Trivial][USER] +5CFC5C81 FC81 [Trivial][USER] +5CFC5C9F FC9F [Trivial][USER] +5CFC5CA0 FCA0 [Trivial][USER] +5CFC5CA1 FCA1 [Trivial][USER] +5CFC5CE0 FCE0 [Trivial][USER] +5CFC5CEF FCEF [Trivial][USER] +5CFC5CF9 FCF9 [Trivial][USER] +5CFC5CFA FCFA [Trivial][USER] +5CFC5CFC FCFC [Trivial][USER] +5CFC5CFD FCFD [Trivial][USER] +5CFC5CFE FCFE [Trivial][USER] +5CFC5CFF FCFF [Trivial][USER] +5CFD5C00 FD00 [Trivial][USER] +5CFD5C08 FD08 [Trivial][USER] +5CFD5C09 FD09 [Trivial][USER] +5CFD5C0A FD0A [Trivial][USER] +5CFD5C0D FD0D [Trivial][USER] +5CFD5C1A FD1A [Trivial][USER] +5CFD5C22 FD22 [Trivial][USER] +5CFD5C25 FD5C25 [Regular] +5CFD5C27 FD27 [Trivial][USER] +5CFD5C30 FD00 [Regular] +5CFD5C3F FD3F [Trivial][USER] +5CFD5C40 FD40 [Trivial][USER] +5CFD5C5A FD1A [Regular] +5CFD5C5C FD5C [Regular][USER] +5CFD5C5F FD5C5F [Regular] +5CFD5C61 FD61 [Trivial][USER] +5CFD5C62 FD08 [Regular][USER] +5CFD5C6E FD0A [Regular] +5CFD5C72 FD0D [Regular] +5CFD5C74 FD09 [Regular] +5CFD5C7E FD7E [Trivial][USER] +5CFD5C7F FD7F [Trivial][USER] +5CFD5C80 FD80 [Trivial][USER] +5CFD5C81 FD81 [Trivial][USER] +5CFD5C9F FD9F [Trivial][USER] +5CFD5CA0 FDA0 [Trivial][USER] +5CFD5CA1 FDA1 [Trivial][USER] +5CFD5CE0 FDE0 [Trivial][USER] +5CFD5CEF FDEF [Trivial][USER] +5CFD5CF9 FDF9 [Trivial][USER] +5CFD5CFA FDFA [Trivial][USER] +5CFD5CFC FDFC [Trivial][USER] +5CFD5CFD FDFD [Trivial][USER] +5CFD5CFE FDFE [Trivial][USER] +5CFD5CFF FDFF [Trivial][USER] +5CFE5C00 FE00 [Trivial][USER] +5CFE5C08 FE08 [Trivial][USER] +5CFE5C09 FE09 [Trivial][USER] +5CFE5C0A FE0A [Trivial][USER] +5CFE5C0D FE0D [Trivial][USER] +5CFE5C1A FE1A [Trivial][USER] +5CFE5C22 FE22 [Trivial][USER] +5CFE5C25 FE5C25 [Regular] +5CFE5C27 FE27 [Trivial][USER] +5CFE5C30 FE00 [Regular] +5CFE5C3F FE3F [Trivial][USER] +5CFE5C40 FE40 [Trivial][USER] +5CFE5C5A FE1A [Regular] +5CFE5C5C FE5C [Regular][USER] +5CFE5C5F FE5C5F [Regular] +5CFE5C61 FE61 [Trivial][USER] +5CFE5C62 FE08 [Regular][USER] +5CFE5C6E FE0A [Regular] +5CFE5C72 FE0D [Regular] +5CFE5C74 FE09 [Regular] +5CFE5C7E FE7E [Trivial][USER] +5CFE5C7F FE7F [Trivial][USER] +5CFE5C80 FE80 [Trivial][USER] +5CFE5C81 FE81 [Trivial][USER] +5CFE5C9F FE9F [Trivial][USER] +5CFE5CA0 FEA0 [Trivial][USER] +5CFE5CA1 FEA1 [Trivial][USER] +5CFE5CE0 FEE0 [Trivial][USER] +5CFE5CEF FEEF [Trivial][USER] +5CFE5CF9 FEF9 [Trivial][USER] +5CFE5CFA FEFA [Trivial][USER] +5CFE5CFC FEFC [Trivial][USER] +5CFE5CFD FEFD [Trivial][USER] +5CFE5CFE FEFE [Trivial][USER] +5CFE5CFF FEFF [Trivial][USER] +5CFF5C00 FF00 [Trivial][USER] +5CFF5C08 FF08 [Trivial][USER] +5CFF5C09 FF09 [Trivial][USER] +5CFF5C0A FF0A [Trivial][USER] +5CFF5C0D FF0D [Trivial][USER] +5CFF5C1A FF1A [Trivial][USER] +5CFF5C22 FF22 [Trivial][USER] +5CFF5C25 FF5C25 [Regular] +5CFF5C27 FF27 [Trivial][USER] +5CFF5C30 FF00 [Regular] +5CFF5C3F FF3F [Trivial][USER] +5CFF5C40 FF40 [Trivial][USER] +5CFF5C5A FF1A [Regular] +5CFF5C5C FF5C [Regular][USER] +5CFF5C5F FF5C5F [Regular] +5CFF5C61 FF61 [Trivial][USER] +5CFF5C62 FF08 [Regular][USER] +5CFF5C6E FF0A [Regular] +5CFF5C72 FF0D 
[Regular] +5CFF5C74 FF09 [Regular] +5CFF5C7E FF7E [Trivial][USER] +5CFF5C7F FF7F [Trivial][USER] +5CFF5C80 FF80 [Trivial][USER] +5CFF5C81 FF81 [Trivial][USER] +5CFF5C9F FF9F [Trivial][USER] +5CFF5CA0 FFA0 [Trivial][USER] +5CFF5CA1 FFA1 [Trivial][USER] +5CFF5CE0 FFE0 [Trivial][USER] +5CFF5CEF FFEF [Trivial][USER] +5CFF5CF9 FFF9 [Trivial][USER] +5CFF5CFA FFFA [Trivial][USER] +5CFF5CFC FFFC [Trivial][USER] +5CFF5CFD FFFD [Trivial][USER] +5CFF5CFE FFFE [Trivial][USER] +5CFF5CFF FFFF [Trivial][USER] +DROP TABLE t1; +DROP PROCEDURE p1; +DROP PROCEDURE p2; +DROP FUNCTION unescape; +DROP FUNCTION unescape_type; +DROP FUNCTION wellformedness; +DROP FUNCTION mysql_real_escape_string_generated; +DROP FUNCTION iswellformed; +DROP TABLE allbytes; +# End of ctype_backslash.inc +# +# End of 10.0 tests +# diff --git a/mysql-test/r/ctype_ucs.result b/mysql-test/r/ctype_ucs.result index 492c9877917..3cfc076b8a0 100644 --- a/mysql-test/r/ctype_ucs.result +++ b/mysql-test/r/ctype_ucs.result @@ -4508,6 +4508,39 @@ COALESCE(c1) DROP TABLE t1; # +# MDEV-5745 analyze MySQL fix for bug#12368495 +# +SELECT CHAR_LENGTH(TRIM(LEADING 0x000000 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(LEADING 0x000000 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(LEADING 0x00 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(LEADING 0x00 FROM _ucs2 0x0061)) +1 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x000000 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(TRAILING 0x000000 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _ucs2 0x0061)) +1 +SELECT CHAR_LENGTH(TRIM(BOTH 0x000000 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(BOTH 0x000000 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _ucs2 0x0061)) +2 +SELECT CHAR_LENGTH(TRIM(BOTH 0x61 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(BOTH 0x61 FROM _ucs2 0x0061)) +1 +SELECT CHAR_LENGTH(TRIM(BOTH 0x00 FROM _ucs2 0x0061)); +CHAR_LENGTH(TRIM(BOTH 0x00 FROM _ucs2 0x0061)) +1 +# # End of 5.5 tests # # @@ -5290,3 +5323,22 @@ DROP TABLE t1; # # End of 5.6 tests # +# +# Start of 10.0 tests +# +# +# MDEV-6661 PI() does not work well in UCS2/UTF16/UTF32 context +# +SELECT CONCAT(CONVERT('pi=' USING ucs2),PI()) AS PI; +PI +pi=3.141593 +# +# MDEV-6695 Bad column name for UCS2 string literals +# +SET NAMES utf8, character_set_connection=ucs2; +SELECT 'a','aa'; +a aa +a aa +# +# End of 10.0 tests +# diff --git a/mysql-test/r/ctype_ujis.result b/mysql-test/r/ctype_ujis.result index 77145fe2eb0..413ab4efe31 100644 --- a/mysql-test/r/ctype_ujis.result +++ b/mysql-test/r/ctype_ujis.result @@ -25928,3 +25928,20 @@ hex(weight_string(cast(0x8FA2C38FA2C38FA2C3 as char),25, 4, 0xC0)) # # End of 5.6 tests # +# +# Start of 10.0 tests +# +# +# MDEV-6776 ujis and eucjmps erroneously accept 0x8EA0 as a valid byte sequence +# +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET ujis); +INSERT INTO t1 VALUES (0x8EA0); +SELECT HEX(a), CHAR_LENGTH(a) FROM t1; +HEX(a) CHAR_LENGTH(a) + 0 +DROP TABLE t1; +SELECT _ujis 0x8EA0; +ERROR HY000: Invalid ujis character string: '8EA0' +# +# End of 10.0 tests +# diff --git a/mysql-test/r/ctype_upgrade.result b/mysql-test/r/ctype_upgrade.result index 0fc73203494..825ad8dac21 100644 --- a/mysql-test/r/ctype_upgrade.result +++ b/mysql-test/r/ctype_upgrade.result @@ 
-227,13 +227,8 @@ DROP TABLE mysql050614_xxx_croatian_ci; # Checking mysql_upgrade # # Running mysql_upgrade -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -263,6 +258,14 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test test.maria050313_ucs2_croatian_ci_def Needs upgrade @@ -276,17 +279,11 @@ test.maria050313_ucs2_croatian_ci_def OK test.maria050313_utf8_croatian_ci OK test.maria050533_xxx_croatian_ci OK test.maria100004_xxx_croatian_ci OK -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK # Running mysql_upgrade for the second time # This should report OK for all tables -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -316,6 +313,14 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test test.maria050313_ucs2_croatian_ci_def OK @@ -323,7 +328,6 @@ test.maria050313_utf8_croatian_ci OK test.maria050533_xxx_croatian_ci OK test.maria100004_xxx_croatian_ci OK test.mysql050614_xxx_croatian_ci OK -Phase 3/3: Running 'mysql_fix_privilege_tables'... 
OK SHOW CREATE TABLE maria050313_ucs2_croatian_ci_def; Table Create Table diff --git a/mysql-test/r/ctype_utf16.result b/mysql-test/r/ctype_utf16.result index 150700bf60d..074fc28a6b7 100644 --- a/mysql-test/r/ctype_utf16.result +++ b/mysql-test/r/ctype_utf16.result @@ -1549,7 +1549,7 @@ SELECT space(date_add(101, INTERVAL CHAR('1' USING utf16) hour_second)); space(date_add(101, INTERVAL CHAR('1' USING utf16) hour_second)) NULL Warnings: -Warning 1301 Result of repeat() was larger than max_allowed_packet (1048576) - truncated +Warning 1301 Result of space() was larger than max_allowed_packet (1048576) - truncated # # Bug#11750518 41090: ORDER BY TRUNCATES GROUP_CONCAT RESULT # @@ -2078,3 +2078,45 @@ DFFFFFDFFFFF9CFFFF9DFFFF9EFFFF # # End of 5.6 tests # +# +# Start of 10.0 tests +# +# +# MDEV-6661 PI() does not work well in UCS2/UTF16/UTF32 context +# +SELECT CONCAT(CONVERT('pi=' USING utf16),PI()) AS PI; +PI +pi=3.141593 +# +# MDEV-6666 Malformed result for CONCAT(utf8_column, binary_string) +# +SET NAMES utf8mb4; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET utf16); +INSERT INTO t1 VALUES ('a'); +SELECT CONCAT(a,0xD800) FROM t1; +ERROR HY000: Invalid utf16 character string: 'D800' +SELECT CONCAT(a,0xD800DC00) FROM t1; +CONCAT(a,0xD800DC00) +a𐀀 +SELECT CONCAT(a,0x00FF) FROM t1; +CONCAT(a,0x00FF) +aÿ +DROP TABLE t1; +SELECT CONCAT(_utf16'a' COLLATE utf16_unicode_ci, _binary 0xD800); +ERROR HY000: Invalid utf16 character string: 'D800' +PREPARE stmt FROM "SELECT CONCAT(_utf16'a' COLLATE utf16_unicode_ci, ?)"; +SET @arg00=_binary 0xD800; +EXECUTE stmt USING @arg00; +ERROR HY000: Invalid utf16 character string: 'D800' +SET @arg00=_binary 0xD800DC00; +EXECUTE stmt USING @arg00; +CONCAT(_utf16'a' COLLATE utf16_unicode_ci, ?) +a𐀀 +SET @arg00=_binary 0x00FF; +EXECUTE stmt USING @arg00; +CONCAT(_utf16'a' COLLATE utf16_unicode_ci, ?)
+aÿ +DEALLOCATE PREPARE stmt; +# +# End of 10.0 tests +# diff --git a/mysql-test/r/ctype_utf32.result b/mysql-test/r/ctype_utf32.result index 214ec9f9b1d..0ec89a50c0f 100644 --- a/mysql-test/r/ctype_utf32.result +++ b/mysql-test/r/ctype_utf32.result @@ -1626,6 +1626,39 @@ SELECT '2010-10-10 10:10:10' + INTERVAL GeometryType(GeomFromText('POINT(1 1)')) '2010-10-10 10:10:10' + INTERVAL GeometryType(GeomFromText('POINT(1 1)')) hour_second 2010-10-10 10:10:10 # +# MDEV-5745 analyze MySQL fix for bug#12368495 +# +SELECT CHAR_LENGTH(TRIM(LEADING 0x0000000000 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(LEADING 0x0000000000 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(LEADING 0x00 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(LEADING 0x00 FROM _utf32 0x00000061)) +1 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0000000000 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(TRAILING 0x0000000000 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _utf32 0x00000061)) +3 +SELECT CHAR_LENGTH(TRIM(BOTH 0x0000000000 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(BOTH 0x0000000000 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _utf32 0x00000061)) +4 +SELECT CHAR_LENGTH(TRIM(BOTH 0x61 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(BOTH 0x61 FROM _utf32 0x00000061)) +3 +SELECT CHAR_LENGTH(TRIM(BOTH 0x00 FROM _utf32 0x00000061)); +CHAR_LENGTH(TRIM(BOTH 0x00 FROM _utf32 0x00000061)) +1 +# # End of 5.5 tests # # @@ -2131,3 +2164,45 @@ DFFFFFDFFFFF9CFFFF9DFFFF9EFFFF # # End of 5.6 tests # +# +# Start of 10.0 tests +# +# +# MDEV-6661 PI() does not work well in UCS2/UTF16/UTF32 context +# +SELECT CONCAT(CONVERT('pi=' USING utf32),PI()) AS PI; +PI +pi=3.141593 +# +# MDEV-6666 Malformed result for CONCAT(utf8_column, binary_string) +# +SET NAMES utf8mb4; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET utf32); +INSERT INTO t1 VALUES ('a'); +SELECT CONCAT(a,0x20FFFF) FROM t1; +ERROR HY000: Invalid utf32 character string: '0020FF' +SELECT CONCAT(a,0x010000) FROM t1; +CONCAT(a,0x010000) +a𐀀 +SELECT CONCAT(a,0x00FF) FROM t1; +CONCAT(a,0x00FF) +aÿ +DROP TABLE t1; +SELECT CONCAT(_utf32'a' COLLATE utf32_unicode_ci, _binary 0x20FFFF); +ERROR HY000: Invalid utf32 character string: '0020FF' +PREPARE stmt FROM "SELECT CONCAT(_utf32'a' COLLATE utf32_unicode_ci, ?)"; +SET @arg00=_binary 0x20FFFF; +EXECUTE stmt USING @arg00; +ERROR HY000: Invalid utf32 character string: '0020FF' +SET @arg00=_binary 0x010000; +EXECUTE stmt USING @arg00; +CONCAT(_utf32'a' COLLATE utf32_unicode_ci, ?) +a𐀀 +SET @arg00=_binary 0x00FF; +EXECUTE stmt USING @arg00; +CONCAT(_utf32'a' COLLATE utf32_unicode_ci, ?)
+aÿ +DEALLOCATE PREPARE stmt; +# +# End of 10.0 tests +# diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result index a8aa4595ab4..43f3aa4b6c9 100644 --- a/mysql-test/r/ctype_utf8.result +++ b/mysql-test/r/ctype_utf8.result @@ -5933,3 +5933,3167 @@ set max_sort_length=default; # # End of 5.6 tests # +# +# Start of 10.0 tests +# +# +# MDEV-6666 Malformed result for CONCAT(utf8_column, binary_string) +# +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET utf8); +INSERT INTO t1 VALUES ('a'); +SELECT CONCAT(a,0xFF) FROM t1; +ERROR HY000: Invalid utf8 character string: 'FF' +SELECT CONCAT(a,0xC3BF) FROM t1; +CONCAT(a,0xC3BF) +aÿ +DROP TABLE t1; +SELECT CONCAT('a' COLLATE utf8_unicode_ci, _binary 0xFF); +ERROR HY000: Invalid utf8 character string: 'FF' +PREPARE stmt FROM "SELECT CONCAT('a' COLLATE utf8_unicode_ci, ?)"; +SET @arg00=_binary 0xFF; +EXECUTE stmt USING @arg00; +ERROR HY000: Invalid utf8 character string: 'FF' +DEALLOCATE PREPARE stmt; +SET NAMES latin1; +PREPARE stmt FROM "SELECT CONCAT(_utf8'a' COLLATE utf8_unicode_ci, ?)"; +EXECUTE stmt USING @no_such_var; +CONCAT(_utf8'a' COLLATE utf8_unicode_ci, ?) +NULL +DEALLOCATE PREPARE stmt; +SET NAMES utf8; +# +# MDEV-6679 Different optimizer plan for "a BETWEEN 'string' AND ?" and "a BETWEEN ? AND 'string'" +# +SET NAMES utf8, collation_connection=utf8_swedish_ci; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET utf8, b INT NOT NULL DEFAULT 0, key(a)); +INSERT INTO t1 (a) VALUES ('a'),('b'),('c'),('d'),('¢'); +SET @arg='¢'; +PREPARE stmt FROM "EXPLAIN SELECT * FROM t1 WHERE a BETWEEN _utf8'¢' and ?"; +EXECUTE stmt USING @arg; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range a a 33 NULL 1 Using index condition +PREPARE stmt FROM "EXPLAIN SELECT * FROM t1 WHERE a between ? and _utf8'¢'"; +EXECUTE stmt USING @arg; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range a a 33 NULL 1 Using index condition +DEALLOCATE PREPARE stmt; +DROP TABLE t1; +# +# MDEV-6683 A parameter and a string literal with the same values are not recognized as equal by the optimizer +# +SET NAMES utf8, collation_connection=utf8_swedish_ci; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET latin1, b INT NOT NULL DEFAULT 0, key(a)); +INSERT INTO t1 (a) VALUES ('a'),('b'),('c'),('d'),('¢'); +SET @arg='¢'; +PREPARE stmt FROM "EXPLAIN SELECT * FROM t1 WHERE a BETWEEN _utf8'¢' and ?"; +EXECUTE stmt USING @arg; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref a a 13 const 1 Using index condition +PREPARE stmt FROM "EXPLAIN SELECT * FROM t1 WHERE a between ?
and _utf8'¢'"; +EXECUTE stmt USING @arg; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref a a 13 const 1 Using index condition +DEALLOCATE PREPARE stmt; +DROP TABLE t1; +# +# MDEV-6688 Illegal mix of collation with bit string B'01100001' +# +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET latin1, b INT); +INSERT INTO t1 VALUES ('a',1); +SELECT CONCAT(a, IF(b>10, _utf8 X'61', _utf8 X'61')) FROM t1; +CONCAT(a, IF(b>10, _utf8 X'61', _utf8 X'61')) +aa +SELECT CONCAT(a, IF(b>10, _utf8 X'61', _utf8 B'01100001')) FROM t1; +CONCAT(a, IF(b>10, _utf8 X'61', _utf8 B'01100001')) +aa +DROP TABLE t1; +# +# MDEV-6694 Illegal mix of collation with a PS parameter +# +SET NAMES utf8; +CREATE TABLE t1 (a INT, b VARCHAR(10) CHARACTER SET latin1); +INSERT INTO t1 VALUES (1,'a'); +SELECT CONCAT(b,IF(a,'b','b')) FROM t1; +CONCAT(b,IF(a,'b','b')) +ab +PREPARE stmt FROM "SELECT CONCAT(b,IF(a,?,?)) FROM t1"; +SET @b='b'; +EXECUTE stmt USING @b,@b; +CONCAT(b,IF(a,?,?)) +ab +SET @b=''; +EXECUTE stmt USING @b,@b; +CONCAT(b,IF(a,?,?)) +a +SET @b='Ñ'; +EXECUTE stmt USING @b,@b; +ERROR HY000: Illegal mix of collations (latin1_swedish_ci,IMPLICIT) and (utf8_general_ci,COERCIBLE) for operation 'concat' +DEALLOCATE PREPARE stmt; +DROP TABLE t1; +# Start of ctype_unescape.inc +SET @query=_binary'SELECT CHARSET(\'test\'),@@character_set_client,@@character_set_connection'; +PREPARE stmt FROM @query; +EXECUTE stmt; +CHARSET('test') @@character_set_client @@character_set_connection +utf8 utf8 utf8 +DEALLOCATE PREPARE stmt; +CREATE TABLE allbytes (a VARBINARY(10)); +# Using selected bytes combinations +CREATE TABLE halfs (a INT); +INSERT INTO halfs VALUES (0x00),(0x01),(0x02),(0x03),(0x04),(0x05),(0x06),(0x07); +INSERT INTO halfs VALUES (0x08),(0x09),(0x0A),(0x0B),(0x0C),(0x0D),(0x0E),(0x0F); +CREATE TEMPORARY TABLE bytes (a BINARY(1), KEY(a)) ENGINE=MyISAM; +INSERT INTO bytes SELECT CHAR((t1.a << 4) | t2.a USING BINARY) FROM halfs t1, halfs t2; +DROP TABLE halfs; +CREATE TABLE selected_bytes (a VARBINARY(10)); +INSERT INTO selected_bytes (a) VALUES ('\0'),('\b'),('\t'),('\r'),('\n'),('\Z'); +INSERT INTO selected_bytes (a) VALUES ('0'),('b'),('t'),('r'),('n'),('Z'); +INSERT INTO selected_bytes (a) VALUES ('\\'),('_'),('%'),(0x22),(0x27); +INSERT INTO selected_bytes (a) VALUES ('a'); +INSERT INTO selected_bytes (a) VALUES +(0x3F), # 7bit +(0x40), # 7bit mbtail +(0x7E), # 7bit mbtail nonascii-8bit +(0x7F), # 7bit nonascii-8bit +(0x80), # mbtail bad-mb +(0x81), # mbhead mbtail +(0x9F), # mbhead mbtail bad-mb +(0xA0), # mbhead mbtail bad-mb +(0xA1), # mbhead mbtail nonascii-8bit +(0xE0), # mbhead mbtai +(0xEF), # mbhead mbtail +(0xF9), # mbhead mbtail +(0xFA), # mbhead mbtail bad-mb +(0xFC), # mbhead mbtail bad-mb +(0xFD), # mbhead mbtail bad-mb +(0xFE), # mbhead mbtial bad-mb +(0xFF); +INSERT INTO allbytes (a) SELECT a FROM bytes; +INSERT INTO allbytes (a) SELECT CONCAT(t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,t2.a) FROM selected_bytes t1,selected_bytes t2; +INSERT INTO allbytes (a) SELECT CONCAT(0x5C,t1.a,0x5C,t2.a) FROM selected_bytes t1,selected_bytes t2; +DROP TABLE selected_bytes; +DELETE FROM allbytes WHERE +OCTET_LENGTH(a)>1 AND +LOCATE(0x5C,a)=0 AND +a NOT LIKE '%\'%' AND + a NOT LIKE '%"%'; +CREATE PROCEDURE p1(val VARBINARY(10)) +BEGIN +DECLARE EXIT HANDLER FOR SQLSTATE '42000' INSERT INTO t1 (a,b) VALUES(val,NULL); +SET @query=CONCAT(_binary"INSERT INTO t1 (a,b) VALUES (0x",HEX(val),",'",val,"')"); +PREPARE stmt FROM @query; 
+EXECUTE stmt; +DEALLOCATE PREPARE stmt; +END// +CREATE PROCEDURE p2() +BEGIN +DECLARE val VARBINARY(10); +DECLARE done INT DEFAULT FALSE; +DECLARE stmt CURSOR FOR SELECT a FROM allbytes; +DECLARE CONTINUE HANDLER FOR NOT FOUND SET done=TRUE; +OPEN stmt; +read_loop1: LOOP +FETCH stmt INTO val; +IF done THEN +LEAVE read_loop1; +END IF; +CALL p1(val); +END LOOP; +CLOSE stmt; +END// +CREATE FUNCTION iswellformed(a VARBINARY(256)) RETURNS INT RETURN a=BINARY CONVERT(a USING utf8);// +CREATE FUNCTION unescape(a VARBINARY(256)) RETURNS VARBINARY(256) +BEGIN +# We need to do it in a way to avoid producing new escape sequences +# First, enclose all known escsape sequences to '{{xx}}' + # - Backslash not followed by a LIKE pattern characters _ and % +# - Double escapes +# This uses PCRE Branch Reset Groups: (?|(alt1)|(alt2)|(alt3)). +# So '\\1' in the last argument always means the match, no matter +# which alternative it came from. +SET a=REGEXP_REPLACE(a,'(?|(\\\\[^_%])|(\\x{27}\\x{27}))','{{\\1}}'); +# Now unescape all enclosed standard escape sequences +SET a=REPLACE(a,'{{\\0}}', '\0'); +SET a=REPLACE(a,'{{\\b}}', '\b'); +SET a=REPLACE(a,'{{\\t}}', '\t'); +SET a=REPLACE(a,'{{\\r}}', '\r'); +SET a=REPLACE(a,'{{\\n}}', '\n'); +SET a=REPLACE(a,'{{\\Z}}', '\Z'); +SET a=REPLACE(a,'{{\\\'}}', '\''); +# Unescape double quotes +SET a=REPLACE(a,'{{\'\'}}', '\''); + # Unescape the rest: all other \x sequences mean just 'x' + SET a=REGEXP_REPLACE(a, '{{\\\\(.|\\R)}}', '\\1'); + RETURN a; +END// +CREATE FUNCTION unescape_type(a VARBINARY(256),b VARBINARY(256)) RETURNS VARBINARY(256) +BEGIN +RETURN CASE +WHEN b IS NULL THEN '[SyntErr]' + WHEN a=b THEN CASE +WHEN OCTET_LENGTH(a)=1 THEN '[Preserve]' + WHEN a RLIKE '\\\\[_%]' THEN '[Preserve][LIKE]' + WHEN a RLIKE '^[[:ascii:]]+$' THEN '[Preserve][ASCII]' + ELSE '[Preserv][MB]' END +WHEN REPLACE(a,0x5C,'')=b THEN '[Trivial]' + WHEN UNESCAPE(a)=b THEN '[Regular]' + ELSE '[Special]' END; +END// +CREATE FUNCTION wellformedness(a VARBINARY(256), b VARBINARY(256)) +RETURNS VARBINARY(256) +BEGIN +RETURN CASE +WHEN b IS NULL THEN '' + WHEN NOT iswellformed(a) AND iswellformed(b) THEN '[FIXED]' + WHEN iswellformed(a) AND NOT iswellformed(b) THEN '[BROKE]' + WHEN NOT iswellformed(a) AND NOT iswellformed(b) THEN '[ILSEQ]' + ELSE '' + END; +END// +CREATE FUNCTION mysql_real_escape_string_generated(a VARBINARY(256)) +RETURNS VARBINARY(256) +BEGIN +DECLARE a1 BINARY(1) DEFAULT SUBSTR(a,1,1); +DECLARE a2 BINARY(1) DEFAULT SUBSTR(a,2,1); +DECLARE a3 BINARY(1) DEFAULT SUBSTR(a,3,1); +DECLARE a4 BINARY(1) DEFAULT SUBSTR(a,4,1); +DECLARE a2a4 BINARY(2) DEFAULT CONCAT(a2,a4); +RETURN CASE +WHEN (a1=0x5C) AND +(a3=0x5C) AND +(a2>0x7F) AND +(a4 NOT IN ('_','%','0','t','r','n','Z')) AND +iswellformed(a2a4) THEN '[USER]' + ELSE '' + END; +END// +CREATE TABLE t1 (a VARBINARY(10),b VARBINARY(10)); +CALL p2(); +SELECT HEX(a),HEX(b), +CONCAT(unescape_type(a,b), +wellformedness(a,b), +mysql_real_escape_string_generated(a), +IF(UNESCAPE(a)<>b,CONCAT('[BAD',HEX(UNESCAPE(a)),']'),'')) AS comment +FROM t1 ORDER BY LENGTH(a),a; +HEX(a) HEX(b) comment +00 00 [Preserve] +01 01 [Preserve] +02 02 [Preserve] +03 03 [Preserve] +04 04 [Preserve] +05 05 [Preserve] +06 06 [Preserve] +07 07 [Preserve] +08 08 [Preserve] +09 09 [Preserve] +0A 0A [Preserve] +0B 0B [Preserve] +0C 0C [Preserve] +0D 0D [Preserve] +0E 0E [Preserve] +0F 0F [Preserve] +10 10 [Preserve] +11 11 [Preserve] +12 12 [Preserve] +13 13 [Preserve] +14 14 [Preserve] +15 15 [Preserve] +16 16 [Preserve] +17 17 [Preserve] +18 18 [Preserve] 
+19 19 [Preserve] +1A 1A [Preserve] +1B 1B [Preserve] +1C 1C [Preserve] +1D 1D [Preserve] +1E 1E [Preserve] +1F 1F [Preserve] +20 20 [Preserve] +21 21 [Preserve] +22 22 [Preserve] +23 23 [Preserve] +24 24 [Preserve] +25 25 [Preserve] +26 26 [Preserve] +27 NULL [SyntErr] +28 28 [Preserve] +29 29 [Preserve] +2A 2A [Preserve] +2B 2B [Preserve] +2C 2C [Preserve] +2D 2D [Preserve] +2E 2E [Preserve] +2F 2F [Preserve] +30 30 [Preserve] +31 31 [Preserve] +32 32 [Preserve] +33 33 [Preserve] +34 34 [Preserve] +35 35 [Preserve] +36 36 [Preserve] +37 37 [Preserve] +38 38 [Preserve] +39 39 [Preserve] +3A 3A [Preserve] +3B 3B [Preserve] +3C 3C [Preserve] +3D 3D [Preserve] +3E 3E [Preserve] +3F 3F [Preserve] +40 40 [Preserve] +41 41 [Preserve] +42 42 [Preserve] +43 43 [Preserve] +44 44 [Preserve] +45 45 [Preserve] +46 46 [Preserve] +47 47 [Preserve] +48 48 [Preserve] +49 49 [Preserve] +4A 4A [Preserve] +4B 4B [Preserve] +4C 4C [Preserve] +4D 4D [Preserve] +4E 4E [Preserve] +4F 4F [Preserve] +50 50 [Preserve] +51 51 [Preserve] +52 52 [Preserve] +53 53 [Preserve] +54 54 [Preserve] +55 55 [Preserve] +56 56 [Preserve] +57 57 [Preserve] +58 58 [Preserve] +59 59 [Preserve] +5A 5A [Preserve] +5B 5B [Preserve] +5C NULL [SyntErr] +5D 5D [Preserve] +5E 5E [Preserve] +5F 5F [Preserve] +60 60 [Preserve] +61 61 [Preserve] +62 62 [Preserve] +63 63 [Preserve] +64 64 [Preserve] +65 65 [Preserve] +66 66 [Preserve] +67 67 [Preserve] +68 68 [Preserve] +69 69 [Preserve] +6A 6A [Preserve] +6B 6B [Preserve] +6C 6C [Preserve] +6D 6D [Preserve] +6E 6E [Preserve] +6F 6F [Preserve] +70 70 [Preserve] +71 71 [Preserve] +72 72 [Preserve] +73 73 [Preserve] +74 74 [Preserve] +75 75 [Preserve] +76 76 [Preserve] +77 77 [Preserve] +78 78 [Preserve] +79 79 [Preserve] +7A 7A [Preserve] +7B 7B [Preserve] +7C 7C [Preserve] +7D 7D [Preserve] +7E 7E [Preserve] +7F 7F [Preserve] +80 80 [Preserve][ILSEQ] +81 81 [Preserve][ILSEQ] +82 82 [Preserve][ILSEQ] +83 83 [Preserve][ILSEQ] +84 84 [Preserve][ILSEQ] +85 85 [Preserve][ILSEQ] +86 86 [Preserve][ILSEQ] +87 87 [Preserve][ILSEQ] +88 88 [Preserve][ILSEQ] +89 89 [Preserve][ILSEQ] +8A 8A [Preserve][ILSEQ] +8B 8B [Preserve][ILSEQ] +8C 8C [Preserve][ILSEQ] +8D 8D [Preserve][ILSEQ] +8E 8E [Preserve][ILSEQ] +8F 8F [Preserve][ILSEQ] +90 90 [Preserve][ILSEQ] +91 91 [Preserve][ILSEQ] +92 92 [Preserve][ILSEQ] +93 93 [Preserve][ILSEQ] +94 94 [Preserve][ILSEQ] +95 95 [Preserve][ILSEQ] +96 96 [Preserve][ILSEQ] +97 97 [Preserve][ILSEQ] +98 98 [Preserve][ILSEQ] +99 99 [Preserve][ILSEQ] +9A 9A [Preserve][ILSEQ] +9B 9B [Preserve][ILSEQ] +9C 9C [Preserve][ILSEQ] +9D 9D [Preserve][ILSEQ] +9E 9E [Preserve][ILSEQ] +9F 9F [Preserve][ILSEQ] +A0 A0 [Preserve][ILSEQ] +A1 A1 [Preserve][ILSEQ] +A2 A2 [Preserve][ILSEQ] +A3 A3 [Preserve][ILSEQ] +A4 A4 [Preserve][ILSEQ] +A5 A5 [Preserve][ILSEQ] +A6 A6 [Preserve][ILSEQ] +A7 A7 [Preserve][ILSEQ] +A8 A8 [Preserve][ILSEQ] +A9 A9 [Preserve][ILSEQ] +AA AA [Preserve][ILSEQ] +AB AB [Preserve][ILSEQ] +AC AC [Preserve][ILSEQ] +AD AD [Preserve][ILSEQ] +AE AE [Preserve][ILSEQ] +AF AF [Preserve][ILSEQ] +B0 B0 [Preserve][ILSEQ] +B1 B1 [Preserve][ILSEQ] +B2 B2 [Preserve][ILSEQ] +B3 B3 [Preserve][ILSEQ] +B4 B4 [Preserve][ILSEQ] +B5 B5 [Preserve][ILSEQ] +B6 B6 [Preserve][ILSEQ] +B7 B7 [Preserve][ILSEQ] +B8 B8 [Preserve][ILSEQ] +B9 B9 [Preserve][ILSEQ] +BA BA [Preserve][ILSEQ] +BB BB [Preserve][ILSEQ] +BC BC [Preserve][ILSEQ] +BD BD [Preserve][ILSEQ] +BE BE [Preserve][ILSEQ] +BF BF [Preserve][ILSEQ] +C0 C0 [Preserve][ILSEQ] +C1 C1 [Preserve][ILSEQ] +C2 C2 [Preserve][ILSEQ] +C3 C3 
[Preserve][ILSEQ] +C4 C4 [Preserve][ILSEQ] +C5 C5 [Preserve][ILSEQ] +C6 C6 [Preserve][ILSEQ] +C7 C7 [Preserve][ILSEQ] +C8 C8 [Preserve][ILSEQ] +C9 C9 [Preserve][ILSEQ] +CA CA [Preserve][ILSEQ] +CB CB [Preserve][ILSEQ] +CC CC [Preserve][ILSEQ] +CD CD [Preserve][ILSEQ] +CE CE [Preserve][ILSEQ] +CF CF [Preserve][ILSEQ] +D0 D0 [Preserve][ILSEQ] +D1 D1 [Preserve][ILSEQ] +D2 D2 [Preserve][ILSEQ] +D3 D3 [Preserve][ILSEQ] +D4 D4 [Preserve][ILSEQ] +D5 D5 [Preserve][ILSEQ] +D6 D6 [Preserve][ILSEQ] +D7 D7 [Preserve][ILSEQ] +D8 D8 [Preserve][ILSEQ] +D9 D9 [Preserve][ILSEQ] +DA DA [Preserve][ILSEQ] +DB DB [Preserve][ILSEQ] +DC DC [Preserve][ILSEQ] +DD DD [Preserve][ILSEQ] +DE DE [Preserve][ILSEQ] +DF DF [Preserve][ILSEQ] +E0 E0 [Preserve][ILSEQ] +E1 E1 [Preserve][ILSEQ] +E2 E2 [Preserve][ILSEQ] +E3 E3 [Preserve][ILSEQ] +E4 E4 [Preserve][ILSEQ] +E5 E5 [Preserve][ILSEQ] +E6 E6 [Preserve][ILSEQ] +E7 E7 [Preserve][ILSEQ] +E8 E8 [Preserve][ILSEQ] +E9 E9 [Preserve][ILSEQ] +EA EA [Preserve][ILSEQ] +EB EB [Preserve][ILSEQ] +EC EC [Preserve][ILSEQ] +ED ED [Preserve][ILSEQ] +EE EE [Preserve][ILSEQ] +EF EF [Preserve][ILSEQ] +F0 F0 [Preserve][ILSEQ] +F1 F1 [Preserve][ILSEQ] +F2 F2 [Preserve][ILSEQ] +F3 F3 [Preserve][ILSEQ] +F4 F4 [Preserve][ILSEQ] +F5 F5 [Preserve][ILSEQ] +F6 F6 [Preserve][ILSEQ] +F7 F7 [Preserve][ILSEQ] +F8 F8 [Preserve][ILSEQ] +F9 F9 [Preserve][ILSEQ] +FA FA [Preserve][ILSEQ] +FB FB [Preserve][ILSEQ] +FC FC [Preserve][ILSEQ] +FD FD [Preserve][ILSEQ] +FE FE [Preserve][ILSEQ] +FF FF [Preserve][ILSEQ] +0022 0022 [Preserve][ASCII] +0027 NULL [SyntErr] +005C NULL [SyntErr] +0822 0822 [Preserve][ASCII] +0827 NULL [SyntErr] +085C NULL [SyntErr] +0922 0922 [Preserve][ASCII] +0927 NULL [SyntErr] +095C NULL [SyntErr] +0A22 0A22 [Preserve][ASCII] +0A27 NULL [SyntErr] +0A5C NULL [SyntErr] +0D22 0D22 [Preserve][ASCII] +0D27 NULL [SyntErr] +0D5C NULL [SyntErr] +1A22 1A22 [Preserve][ASCII] +1A27 NULL [SyntErr] +1A5C NULL [SyntErr] +2200 2200 [Preserve][ASCII] +2208 2208 [Preserve][ASCII] +2209 2209 [Preserve][ASCII] +220A 220A [Preserve][ASCII] +220D 220D [Preserve][ASCII] +221A 221A [Preserve][ASCII] +2222 2222 [Preserve][ASCII] +2225 2225 [Preserve][ASCII] +2227 NULL [SyntErr] +2230 2230 [Preserve][ASCII] +223F 223F [Preserve][ASCII] +2240 2240 [Preserve][ASCII] +225A 225A [Preserve][ASCII] +225C NULL [SyntErr] +225F 225F [Preserve][ASCII] +2261 2261 [Preserve][ASCII] +2262 2262 [Preserve][ASCII] +226E 226E [Preserve][ASCII] +2272 2272 [Preserve][ASCII] +2274 2274 [Preserve][ASCII] +227E 227E [Preserve][ASCII] +227F 227F [Preserve][ASCII] +2280 2280 [Preserv][MB][ILSEQ] +2281 2281 [Preserv][MB][ILSEQ] +229F 229F [Preserv][MB][ILSEQ] +22A0 22A0 [Preserv][MB][ILSEQ] +22A1 22A1 [Preserv][MB][ILSEQ] +22E0 22E0 [Preserv][MB][ILSEQ] +22EF 22EF [Preserv][MB][ILSEQ] +22F9 22F9 [Preserv][MB][ILSEQ] +22FA 22FA [Preserv][MB][ILSEQ] +22FC 22FC [Preserv][MB][ILSEQ] +22FD 22FD [Preserv][MB][ILSEQ] +22FE 22FE [Preserv][MB][ILSEQ] +22FF 22FF [Preserv][MB][ILSEQ] +2522 2522 [Preserve][ASCII] +2527 NULL [SyntErr] +255C NULL [SyntErr] +2700 NULL [SyntErr] +2708 NULL [SyntErr] +2709 NULL [SyntErr] +270A NULL [SyntErr] +270D NULL [SyntErr] +271A NULL [SyntErr] +2722 NULL [SyntErr] +2725 NULL [SyntErr] +2727 27 [Regular] +2730 NULL [SyntErr] +273F NULL [SyntErr] +2740 NULL [SyntErr] +275A NULL [SyntErr] +275C NULL [SyntErr] +275F NULL [SyntErr] +2761 NULL [SyntErr] +2762 NULL [SyntErr] +276E NULL [SyntErr] +2772 NULL [SyntErr] +2774 NULL [SyntErr] +277E NULL [SyntErr] +277F NULL [SyntErr] +2780 NULL [SyntErr] +2781 NULL [SyntErr] 
+279F NULL [SyntErr] +27A0 NULL [SyntErr] +27A1 NULL [SyntErr] +27E0 NULL [SyntErr] +27EF NULL [SyntErr] +27F9 NULL [SyntErr] +27FA NULL [SyntErr] +27FC NULL [SyntErr] +27FD NULL [SyntErr] +27FE NULL [SyntErr] +27FF NULL [SyntErr] +3022 3022 [Preserve][ASCII] +3027 NULL [SyntErr] +305C NULL [SyntErr] +3F22 3F22 [Preserve][ASCII] +3F27 NULL [SyntErr] +3F5C NULL [SyntErr] +4022 4022 [Preserve][ASCII] +4027 NULL [SyntErr] +405C NULL [SyntErr] +5A22 5A22 [Preserve][ASCII] +5A27 NULL [SyntErr] +5A5C NULL [SyntErr] +5C00 00 [Trivial] +5C08 08 [Trivial] +5C09 09 [Trivial] +5C0A 0A [Trivial] +5C0D 0D [Trivial] +5C1A 1A [Trivial] +5C22 22 [Trivial] +5C25 5C25 [Preserve][LIKE] +5C27 27 [Trivial] +5C30 00 [Regular] +5C3F 3F [Trivial] +5C40 40 [Trivial] +5C5A 1A [Regular] +5C5C 5C [Regular] +5C5F 5C5F [Preserve][LIKE] +5C61 61 [Trivial] +5C62 08 [Regular] +5C6E 0A [Regular] +5C72 0D [Regular] +5C74 09 [Regular] +5C7E 7E [Trivial] +5C7F 7F [Trivial] +5C80 80 [Trivial][ILSEQ] +5C81 81 [Trivial][ILSEQ] +5C9F 9F [Trivial][ILSEQ] +5CA0 A0 [Trivial][ILSEQ] +5CA1 A1 [Trivial][ILSEQ] +5CE0 E0 [Trivial][ILSEQ] +5CEF EF [Trivial][ILSEQ] +5CF9 F9 [Trivial][ILSEQ] +5CFA FA [Trivial][ILSEQ] +5CFC FC [Trivial][ILSEQ] +5CFD FD [Trivial][ILSEQ] +5CFE FE [Trivial][ILSEQ] +5CFF FF [Trivial][ILSEQ] +5F22 5F22 [Preserve][ASCII] +5F27 NULL [SyntErr] +5F5C NULL [SyntErr] +6122 6122 [Preserve][ASCII] +6127 NULL [SyntErr] +615C NULL [SyntErr] +6222 6222 [Preserve][ASCII] +6227 NULL [SyntErr] +625C NULL [SyntErr] +6E22 6E22 [Preserve][ASCII] +6E27 NULL [SyntErr] +6E5C NULL [SyntErr] +7222 7222 [Preserve][ASCII] +7227 NULL [SyntErr] +725C NULL [SyntErr] +7422 7422 [Preserve][ASCII] +7427 NULL [SyntErr] +745C NULL [SyntErr] +7E22 7E22 [Preserve][ASCII] +7E27 NULL [SyntErr] +7E5C NULL [SyntErr] +7F22 7F22 [Preserve][ASCII] +7F27 NULL [SyntErr] +7F5C NULL [SyntErr] +8022 8022 [Preserv][MB][ILSEQ] +8027 NULL [SyntErr] +805C NULL [SyntErr] +8122 8122 [Preserv][MB][ILSEQ] +8127 NULL [SyntErr] +815C NULL [SyntErr] +9F22 9F22 [Preserv][MB][ILSEQ] +9F27 NULL [SyntErr] +9F5C NULL [SyntErr] +A022 A022 [Preserv][MB][ILSEQ] +A027 NULL [SyntErr] +A05C NULL [SyntErr] +A122 A122 [Preserv][MB][ILSEQ] +A127 NULL [SyntErr] +A15C NULL [SyntErr] +E022 E022 [Preserv][MB][ILSEQ] +E027 NULL [SyntErr] +E05C NULL [SyntErr] +EF22 EF22 [Preserv][MB][ILSEQ] +EF27 NULL [SyntErr] +EF5C NULL [SyntErr] +F922 F922 [Preserv][MB][ILSEQ] +F927 NULL [SyntErr] +F95C NULL [SyntErr] +FA22 FA22 [Preserv][MB][ILSEQ] +FA27 NULL [SyntErr] +FA5C NULL [SyntErr] +FC22 FC22 [Preserv][MB][ILSEQ] +FC27 NULL [SyntErr] +FC5C NULL [SyntErr] +FD22 FD22 [Preserv][MB][ILSEQ] +FD27 NULL [SyntErr] +FD5C NULL [SyntErr] +FE22 FE22 [Preserv][MB][ILSEQ] +FE27 NULL [SyntErr] +FE5C NULL [SyntErr] +FF22 FF22 [Preserv][MB][ILSEQ] +FF27 NULL [SyntErr] +FF5C NULL [SyntErr] +5C0000 0000 [Trivial] +5C0008 0008 [Trivial] +5C0009 0009 [Trivial] +5C000A 000A [Trivial] +5C000D 000D [Trivial] +5C001A 001A [Trivial] +5C0022 0022 [Trivial] +5C0025 0025 [Trivial] +5C0027 NULL [SyntErr] +5C0030 0030 [Trivial] +5C003F 003F [Trivial] +5C0040 0040 [Trivial] +5C005A 005A [Trivial] +5C005C NULL [SyntErr] +5C005F 005F [Trivial] +5C0061 0061 [Trivial] +5C0062 0062 [Trivial] +5C006E 006E [Trivial] +5C0072 0072 [Trivial] +5C0074 0074 [Trivial] +5C007E 007E [Trivial] +5C007F 007F [Trivial] +5C0080 0080 [Trivial][ILSEQ] +5C0081 0081 [Trivial][ILSEQ] +5C009F 009F [Trivial][ILSEQ] +5C00A0 00A0 [Trivial][ILSEQ] +5C00A1 00A1 [Trivial][ILSEQ] +5C00E0 00E0 [Trivial][ILSEQ] +5C00EF 00EF [Trivial][ILSEQ] +5C00F9 00F9 
[Trivial][ILSEQ] +5C00FA 00FA [Trivial][ILSEQ] +5C00FC 00FC [Trivial][ILSEQ] +5C00FD 00FD [Trivial][ILSEQ] +5C00FE 00FE [Trivial][ILSEQ] +5C00FF 00FF [Trivial][ILSEQ] +5C0800 0800 [Trivial] +5C0808 0808 [Trivial] +5C0809 0809 [Trivial] +5C080A 080A [Trivial] +5C080D 080D [Trivial] +5C081A 081A [Trivial] +5C0822 0822 [Trivial] +5C0825 0825 [Trivial] +5C0827 NULL [SyntErr] +5C0830 0830 [Trivial] +5C083F 083F [Trivial] +5C0840 0840 [Trivial] +5C085A 085A [Trivial] +5C085C NULL [SyntErr] +5C085F 085F [Trivial] +5C0861 0861 [Trivial] +5C0862 0862 [Trivial] +5C086E 086E [Trivial] +5C0872 0872 [Trivial] +5C0874 0874 [Trivial] +5C087E 087E [Trivial] +5C087F 087F [Trivial] +5C0880 0880 [Trivial][ILSEQ] +5C0881 0881 [Trivial][ILSEQ] +5C089F 089F [Trivial][ILSEQ] +5C08A0 08A0 [Trivial][ILSEQ] +5C08A1 08A1 [Trivial][ILSEQ] +5C08E0 08E0 [Trivial][ILSEQ] +5C08EF 08EF [Trivial][ILSEQ] +5C08F9 08F9 [Trivial][ILSEQ] +5C08FA 08FA [Trivial][ILSEQ] +5C08FC 08FC [Trivial][ILSEQ] +5C08FD 08FD [Trivial][ILSEQ] +5C08FE 08FE [Trivial][ILSEQ] +5C08FF 08FF [Trivial][ILSEQ] +5C0900 0900 [Trivial] +5C0908 0908 [Trivial] +5C0909 0909 [Trivial] +5C090A 090A [Trivial] +5C090D 090D [Trivial] +5C091A 091A [Trivial] +5C0922 0922 [Trivial] +5C0925 0925 [Trivial] +5C0927 NULL [SyntErr] +5C0930 0930 [Trivial] +5C093F 093F [Trivial] +5C0940 0940 [Trivial] +5C095A 095A [Trivial] +5C095C NULL [SyntErr] +5C095F 095F [Trivial] +5C0961 0961 [Trivial] +5C0962 0962 [Trivial] +5C096E 096E [Trivial] +5C0972 0972 [Trivial] +5C0974 0974 [Trivial] +5C097E 097E [Trivial] +5C097F 097F [Trivial] +5C0980 0980 [Trivial][ILSEQ] +5C0981 0981 [Trivial][ILSEQ] +5C099F 099F [Trivial][ILSEQ] +5C09A0 09A0 [Trivial][ILSEQ] +5C09A1 09A1 [Trivial][ILSEQ] +5C09E0 09E0 [Trivial][ILSEQ] +5C09EF 09EF [Trivial][ILSEQ] +5C09F9 09F9 [Trivial][ILSEQ] +5C09FA 09FA [Trivial][ILSEQ] +5C09FC 09FC [Trivial][ILSEQ] +5C09FD 09FD [Trivial][ILSEQ] +5C09FE 09FE [Trivial][ILSEQ] +5C09FF 09FF [Trivial][ILSEQ] +5C0A00 0A00 [Trivial] +5C0A08 0A08 [Trivial] +5C0A09 0A09 [Trivial] +5C0A0A 0A0A [Trivial] +5C0A0D 0A0D [Trivial] +5C0A1A 0A1A [Trivial] +5C0A22 0A22 [Trivial] +5C0A25 0A25 [Trivial] +5C0A27 NULL [SyntErr] +5C0A30 0A30 [Trivial] +5C0A3F 0A3F [Trivial] +5C0A40 0A40 [Trivial] +5C0A5A 0A5A [Trivial] +5C0A5C NULL [SyntErr] +5C0A5F 0A5F [Trivial] +5C0A61 0A61 [Trivial] +5C0A62 0A62 [Trivial] +5C0A6E 0A6E [Trivial] +5C0A72 0A72 [Trivial] +5C0A74 0A74 [Trivial] +5C0A7E 0A7E [Trivial] +5C0A7F 0A7F [Trivial] +5C0A80 0A80 [Trivial][ILSEQ] +5C0A81 0A81 [Trivial][ILSEQ] +5C0A9F 0A9F [Trivial][ILSEQ] +5C0AA0 0AA0 [Trivial][ILSEQ] +5C0AA1 0AA1 [Trivial][ILSEQ] +5C0AE0 0AE0 [Trivial][ILSEQ] +5C0AEF 0AEF [Trivial][ILSEQ] +5C0AF9 0AF9 [Trivial][ILSEQ] +5C0AFA 0AFA [Trivial][ILSEQ] +5C0AFC 0AFC [Trivial][ILSEQ] +5C0AFD 0AFD [Trivial][ILSEQ] +5C0AFE 0AFE [Trivial][ILSEQ] +5C0AFF 0AFF [Trivial][ILSEQ] +5C0D00 0D00 [Trivial] +5C0D08 0D08 [Trivial] +5C0D09 0D09 [Trivial] +5C0D0A 0D0A [Trivial] +5C0D0D 0D0D [Trivial] +5C0D1A 0D1A [Trivial] +5C0D22 0D22 [Trivial] +5C0D25 0D25 [Trivial] +5C0D27 NULL [SyntErr] +5C0D30 0D30 [Trivial] +5C0D3F 0D3F [Trivial] +5C0D40 0D40 [Trivial] +5C0D5A 0D5A [Trivial] +5C0D5C NULL [SyntErr] +5C0D5F 0D5F [Trivial] +5C0D61 0D61 [Trivial] +5C0D62 0D62 [Trivial] +5C0D6E 0D6E [Trivial] +5C0D72 0D72 [Trivial] +5C0D74 0D74 [Trivial] +5C0D7E 0D7E [Trivial] +5C0D7F 0D7F [Trivial] +5C0D80 0D80 [Trivial][ILSEQ] +5C0D81 0D81 [Trivial][ILSEQ] +5C0D9F 0D9F [Trivial][ILSEQ] +5C0DA0 0DA0 [Trivial][ILSEQ] +5C0DA1 0DA1 [Trivial][ILSEQ] +5C0DE0 0DE0 [Trivial][ILSEQ] +5C0DEF 0DEF 
[Trivial][ILSEQ] +5C0DF9 0DF9 [Trivial][ILSEQ] +5C0DFA 0DFA [Trivial][ILSEQ] +5C0DFC 0DFC [Trivial][ILSEQ] +5C0DFD 0DFD [Trivial][ILSEQ] +5C0DFE 0DFE [Trivial][ILSEQ] +5C0DFF 0DFF [Trivial][ILSEQ] +5C1A00 1A00 [Trivial] +5C1A08 1A08 [Trivial] +5C1A09 1A09 [Trivial] +5C1A0A 1A0A [Trivial] +5C1A0D 1A0D [Trivial] +5C1A1A 1A1A [Trivial] +5C1A22 1A22 [Trivial] +5C1A25 1A25 [Trivial] +5C1A27 NULL [SyntErr] +5C1A30 1A30 [Trivial] +5C1A3F 1A3F [Trivial] +5C1A40 1A40 [Trivial] +5C1A5A 1A5A [Trivial] +5C1A5C NULL [SyntErr] +5C1A5F 1A5F [Trivial] +5C1A61 1A61 [Trivial] +5C1A62 1A62 [Trivial] +5C1A6E 1A6E [Trivial] +5C1A72 1A72 [Trivial] +5C1A74 1A74 [Trivial] +5C1A7E 1A7E [Trivial] +5C1A7F 1A7F [Trivial] +5C1A80 1A80 [Trivial][ILSEQ] +5C1A81 1A81 [Trivial][ILSEQ] +5C1A9F 1A9F [Trivial][ILSEQ] +5C1AA0 1AA0 [Trivial][ILSEQ] +5C1AA1 1AA1 [Trivial][ILSEQ] +5C1AE0 1AE0 [Trivial][ILSEQ] +5C1AEF 1AEF [Trivial][ILSEQ] +5C1AF9 1AF9 [Trivial][ILSEQ] +5C1AFA 1AFA [Trivial][ILSEQ] +5C1AFC 1AFC [Trivial][ILSEQ] +5C1AFD 1AFD [Trivial][ILSEQ] +5C1AFE 1AFE [Trivial][ILSEQ] +5C1AFF 1AFF [Trivial][ILSEQ] +5C2200 2200 [Trivial] +5C2208 2208 [Trivial] +5C2209 2209 [Trivial] +5C220A 220A [Trivial] +5C220D 220D [Trivial] +5C221A 221A [Trivial] +5C2222 2222 [Trivial] +5C2225 2225 [Trivial] +5C2227 NULL [SyntErr] +5C2230 2230 [Trivial] +5C223F 223F [Trivial] +5C2240 2240 [Trivial] +5C225A 225A [Trivial] +5C225C NULL [SyntErr] +5C225F 225F [Trivial] +5C2261 2261 [Trivial] +5C2262 2262 [Trivial] +5C226E 226E [Trivial] +5C2272 2272 [Trivial] +5C2274 2274 [Trivial] +5C227E 227E [Trivial] +5C227F 227F [Trivial] +5C2280 2280 [Trivial][ILSEQ] +5C2281 2281 [Trivial][ILSEQ] +5C229F 229F [Trivial][ILSEQ] +5C22A0 22A0 [Trivial][ILSEQ] +5C22A1 22A1 [Trivial][ILSEQ] +5C22E0 22E0 [Trivial][ILSEQ] +5C22EF 22EF [Trivial][ILSEQ] +5C22F9 22F9 [Trivial][ILSEQ] +5C22FA 22FA [Trivial][ILSEQ] +5C22FC 22FC [Trivial][ILSEQ] +5C22FD 22FD [Trivial][ILSEQ] +5C22FE 22FE [Trivial][ILSEQ] +5C22FF 22FF [Trivial][ILSEQ] +5C2500 5C2500 [Preserve][LIKE] +5C2508 5C2508 [Preserve][LIKE] +5C2509 5C2509 [Preserve][LIKE] +5C250A 5C250A [Preserve][LIKE] +5C250D 5C250D [Preserve][LIKE] +5C251A 5C251A [Preserve][LIKE] +5C2522 5C2522 [Preserve][LIKE] +5C2525 5C2525 [Preserve][LIKE] +5C2527 NULL [SyntErr] +5C2530 5C2530 [Preserve][LIKE] +5C253F 5C253F [Preserve][LIKE] +5C2540 5C2540 [Preserve][LIKE] +5C255A 5C255A [Preserve][LIKE] +5C255C NULL [SyntErr] +5C255F 5C255F [Preserve][LIKE] +5C2561 5C2561 [Preserve][LIKE] +5C2562 5C2562 [Preserve][LIKE] +5C256E 5C256E [Preserve][LIKE] +5C2572 5C2572 [Preserve][LIKE] +5C2574 5C2574 [Preserve][LIKE] +5C257E 5C257E [Preserve][LIKE] +5C257F 5C257F [Preserve][LIKE] +5C2580 5C2580 [Preserve][LIKE][ILSEQ] +5C2581 5C2581 [Preserve][LIKE][ILSEQ] +5C259F 5C259F [Preserve][LIKE][ILSEQ] +5C25A0 5C25A0 [Preserve][LIKE][ILSEQ] +5C25A1 5C25A1 [Preserve][LIKE][ILSEQ] +5C25E0 5C25E0 [Preserve][LIKE][ILSEQ] +5C25EF 5C25EF [Preserve][LIKE][ILSEQ] +5C25F9 5C25F9 [Preserve][LIKE][ILSEQ] +5C25FA 5C25FA [Preserve][LIKE][ILSEQ] +5C25FC 5C25FC [Preserve][LIKE][ILSEQ] +5C25FD 5C25FD [Preserve][LIKE][ILSEQ] +5C25FE 5C25FE [Preserve][LIKE][ILSEQ] +5C25FF 5C25FF [Preserve][LIKE][ILSEQ] +5C2700 2700 [Trivial] +5C2708 2708 [Trivial] +5C2709 2709 [Trivial] +5C270A 270A [Trivial] +5C270D 270D [Trivial] +5C271A 271A [Trivial] +5C2722 2722 [Trivial] +5C2725 2725 [Trivial] +5C2727 NULL [SyntErr] +5C2730 2730 [Trivial] +5C273F 273F [Trivial] +5C2740 2740 [Trivial] +5C275A 275A [Trivial] +5C275C NULL [SyntErr] +5C275F 275F [Trivial] +5C2761 2761 [Trivial] 
+5C2762 2762 [Trivial] +5C276E 276E [Trivial] +5C2772 2772 [Trivial] +5C2774 2774 [Trivial] +5C277E 277E [Trivial] +5C277F 277F [Trivial] +5C2780 2780 [Trivial][ILSEQ] +5C2781 2781 [Trivial][ILSEQ] +5C279F 279F [Trivial][ILSEQ] +5C27A0 27A0 [Trivial][ILSEQ] +5C27A1 27A1 [Trivial][ILSEQ] +5C27E0 27E0 [Trivial][ILSEQ] +5C27EF 27EF [Trivial][ILSEQ] +5C27F9 27F9 [Trivial][ILSEQ] +5C27FA 27FA [Trivial][ILSEQ] +5C27FC 27FC [Trivial][ILSEQ] +5C27FD 27FD [Trivial][ILSEQ] +5C27FE 27FE [Trivial][ILSEQ] +5C27FF 27FF [Trivial][ILSEQ] +5C3000 0000 [Regular] +5C3008 0008 [Regular] +5C3009 0009 [Regular] +5C300A 000A [Regular] +5C300D 000D [Regular] +5C301A 001A [Regular] +5C3022 0022 [Regular] +5C3025 0025 [Regular] +5C3027 NULL [SyntErr] +5C3030 0030 [Regular] +5C303F 003F [Regular] +5C3040 0040 [Regular] +5C305A 005A [Regular] +5C305C NULL [SyntErr] +5C305F 005F [Regular] +5C3061 0061 [Regular] +5C3062 0062 [Regular] +5C306E 006E [Regular] +5C3072 0072 [Regular] +5C3074 0074 [Regular] +5C307E 007E [Regular] +5C307F 007F [Regular] +5C3080 0080 [Regular][ILSEQ] +5C3081 0081 [Regular][ILSEQ] +5C309F 009F [Regular][ILSEQ] +5C30A0 00A0 [Regular][ILSEQ] +5C30A1 00A1 [Regular][ILSEQ] +5C30E0 00E0 [Regular][ILSEQ] +5C30EF 00EF [Regular][ILSEQ] +5C30F9 00F9 [Regular][ILSEQ] +5C30FA 00FA [Regular][ILSEQ] +5C30FC 00FC [Regular][ILSEQ] +5C30FD 00FD [Regular][ILSEQ] +5C30FE 00FE [Regular][ILSEQ] +5C30FF 00FF [Regular][ILSEQ] +5C3F00 3F00 [Trivial] +5C3F08 3F08 [Trivial] +5C3F09 3F09 [Trivial] +5C3F0A 3F0A [Trivial] +5C3F0D 3F0D [Trivial] +5C3F1A 3F1A [Trivial] +5C3F22 3F22 [Trivial] +5C3F25 3F25 [Trivial] +5C3F27 NULL [SyntErr] +5C3F30 3F30 [Trivial] +5C3F3F 3F3F [Trivial] +5C3F40 3F40 [Trivial] +5C3F5A 3F5A [Trivial] +5C3F5C NULL [SyntErr] +5C3F5F 3F5F [Trivial] +5C3F61 3F61 [Trivial] +5C3F62 3F62 [Trivial] +5C3F6E 3F6E [Trivial] +5C3F72 3F72 [Trivial] +5C3F74 3F74 [Trivial] +5C3F7E 3F7E [Trivial] +5C3F7F 3F7F [Trivial] +5C3F80 3F80 [Trivial][ILSEQ] +5C3F81 3F81 [Trivial][ILSEQ] +5C3F9F 3F9F [Trivial][ILSEQ] +5C3FA0 3FA0 [Trivial][ILSEQ] +5C3FA1 3FA1 [Trivial][ILSEQ] +5C3FE0 3FE0 [Trivial][ILSEQ] +5C3FEF 3FEF [Trivial][ILSEQ] +5C3FF9 3FF9 [Trivial][ILSEQ] +5C3FFA 3FFA [Trivial][ILSEQ] +5C3FFC 3FFC [Trivial][ILSEQ] +5C3FFD 3FFD [Trivial][ILSEQ] +5C3FFE 3FFE [Trivial][ILSEQ] +5C3FFF 3FFF [Trivial][ILSEQ] +5C4000 4000 [Trivial] +5C4008 4008 [Trivial] +5C4009 4009 [Trivial] +5C400A 400A [Trivial] +5C400D 400D [Trivial] +5C401A 401A [Trivial] +5C4022 4022 [Trivial] +5C4025 4025 [Trivial] +5C4027 NULL [SyntErr] +5C4030 4030 [Trivial] +5C403F 403F [Trivial] +5C4040 4040 [Trivial] +5C405A 405A [Trivial] +5C405C NULL [SyntErr] +5C405F 405F [Trivial] +5C4061 4061 [Trivial] +5C4062 4062 [Trivial] +5C406E 406E [Trivial] +5C4072 4072 [Trivial] +5C4074 4074 [Trivial] +5C407E 407E [Trivial] +5C407F 407F [Trivial] +5C4080 4080 [Trivial][ILSEQ] +5C4081 4081 [Trivial][ILSEQ] +5C409F 409F [Trivial][ILSEQ] +5C40A0 40A0 [Trivial][ILSEQ] +5C40A1 40A1 [Trivial][ILSEQ] +5C40E0 40E0 [Trivial][ILSEQ] +5C40EF 40EF [Trivial][ILSEQ] +5C40F9 40F9 [Trivial][ILSEQ] +5C40FA 40FA [Trivial][ILSEQ] +5C40FC 40FC [Trivial][ILSEQ] +5C40FD 40FD [Trivial][ILSEQ] +5C40FE 40FE [Trivial][ILSEQ] +5C40FF 40FF [Trivial][ILSEQ] +5C5A00 1A00 [Regular] +5C5A08 1A08 [Regular] +5C5A09 1A09 [Regular] +5C5A0A 1A0A [Regular] +5C5A0D 1A0D [Regular] +5C5A1A 1A1A [Regular] +5C5A22 1A22 [Regular] +5C5A25 1A25 [Regular] +5C5A27 NULL [SyntErr] +5C5A30 1A30 [Regular] +5C5A3F 1A3F [Regular] +5C5A40 1A40 [Regular] +5C5A5A 1A5A [Regular] +5C5A5C NULL [SyntErr] +5C5A5F 1A5F 
[Regular] +5C5A61 1A61 [Regular] +5C5A62 1A62 [Regular] +5C5A6E 1A6E [Regular] +5C5A72 1A72 [Regular] +5C5A74 1A74 [Regular] +5C5A7E 1A7E [Regular] +5C5A7F 1A7F [Regular] +5C5A80 1A80 [Regular][ILSEQ] +5C5A81 1A81 [Regular][ILSEQ] +5C5A9F 1A9F [Regular][ILSEQ] +5C5AA0 1AA0 [Regular][ILSEQ] +5C5AA1 1AA1 [Regular][ILSEQ] +5C5AE0 1AE0 [Regular][ILSEQ] +5C5AEF 1AEF [Regular][ILSEQ] +5C5AF9 1AF9 [Regular][ILSEQ] +5C5AFA 1AFA [Regular][ILSEQ] +5C5AFC 1AFC [Regular][ILSEQ] +5C5AFD 1AFD [Regular][ILSEQ] +5C5AFE 1AFE [Regular][ILSEQ] +5C5AFF 1AFF [Regular][ILSEQ] +5C5C00 5C00 [Regular] +5C5C08 5C08 [Regular] +5C5C09 5C09 [Regular] +5C5C0A 5C0A [Regular] +5C5C0D 5C0D [Regular] +5C5C1A 5C1A [Regular] +5C5C22 5C22 [Regular] +5C5C25 5C25 [Regular] +5C5C27 NULL [SyntErr] +5C5C30 5C30 [Regular] +5C5C3F 5C3F [Regular] +5C5C40 5C40 [Regular] +5C5C5A 5C5A [Regular] +5C5C5C NULL [SyntErr] +5C5C5F 5C5F [Regular] +5C5C61 5C61 [Regular] +5C5C62 5C62 [Regular] +5C5C6E 5C6E [Regular] +5C5C72 5C72 [Regular] +5C5C74 5C74 [Regular] +5C5C7E 5C7E [Regular] +5C5C7F 5C7F [Regular] +5C5C80 5C80 [Regular][ILSEQ] +5C5C81 5C81 [Regular][ILSEQ] +5C5C9F 5C9F [Regular][ILSEQ] +5C5CA0 5CA0 [Regular][ILSEQ] +5C5CA1 5CA1 [Regular][ILSEQ] +5C5CE0 5CE0 [Regular][ILSEQ] +5C5CEF 5CEF [Regular][ILSEQ] +5C5CF9 5CF9 [Regular][ILSEQ] +5C5CFA 5CFA [Regular][ILSEQ] +5C5CFC 5CFC [Regular][ILSEQ] +5C5CFD 5CFD [Regular][ILSEQ] +5C5CFE 5CFE [Regular][ILSEQ] +5C5CFF 5CFF [Regular][ILSEQ] +5C5F00 5C5F00 [Preserve][LIKE] +5C5F08 5C5F08 [Preserve][LIKE] +5C5F09 5C5F09 [Preserve][LIKE] +5C5F0A 5C5F0A [Preserve][LIKE] +5C5F0D 5C5F0D [Preserve][LIKE] +5C5F1A 5C5F1A [Preserve][LIKE] +5C5F22 5C5F22 [Preserve][LIKE] +5C5F25 5C5F25 [Preserve][LIKE] +5C5F27 NULL [SyntErr] +5C5F30 5C5F30 [Preserve][LIKE] +5C5F3F 5C5F3F [Preserve][LIKE] +5C5F40 5C5F40 [Preserve][LIKE] +5C5F5A 5C5F5A [Preserve][LIKE] +5C5F5C NULL [SyntErr] +5C5F5F 5C5F5F [Preserve][LIKE] +5C5F61 5C5F61 [Preserve][LIKE] +5C5F62 5C5F62 [Preserve][LIKE] +5C5F6E 5C5F6E [Preserve][LIKE] +5C5F72 5C5F72 [Preserve][LIKE] +5C5F74 5C5F74 [Preserve][LIKE] +5C5F7E 5C5F7E [Preserve][LIKE] +5C5F7F 5C5F7F [Preserve][LIKE] +5C5F80 5C5F80 [Preserve][LIKE][ILSEQ] +5C5F81 5C5F81 [Preserve][LIKE][ILSEQ] +5C5F9F 5C5F9F [Preserve][LIKE][ILSEQ] +5C5FA0 5C5FA0 [Preserve][LIKE][ILSEQ] +5C5FA1 5C5FA1 [Preserve][LIKE][ILSEQ] +5C5FE0 5C5FE0 [Preserve][LIKE][ILSEQ] +5C5FEF 5C5FEF [Preserve][LIKE][ILSEQ] +5C5FF9 5C5FF9 [Preserve][LIKE][ILSEQ] +5C5FFA 5C5FFA [Preserve][LIKE][ILSEQ] +5C5FFC 5C5FFC [Preserve][LIKE][ILSEQ] +5C5FFD 5C5FFD [Preserve][LIKE][ILSEQ] +5C5FFE 5C5FFE [Preserve][LIKE][ILSEQ] +5C5FFF 5C5FFF [Preserve][LIKE][ILSEQ] +5C6100 6100 [Trivial] +5C6108 6108 [Trivial] +5C6109 6109 [Trivial] +5C610A 610A [Trivial] +5C610D 610D [Trivial] +5C611A 611A [Trivial] +5C6122 6122 [Trivial] +5C6125 6125 [Trivial] +5C6127 NULL [SyntErr] +5C6130 6130 [Trivial] +5C613F 613F [Trivial] +5C6140 6140 [Trivial] +5C615A 615A [Trivial] +5C615C NULL [SyntErr] +5C615F 615F [Trivial] +5C6161 6161 [Trivial] +5C6162 6162 [Trivial] +5C616E 616E [Trivial] +5C6172 6172 [Trivial] +5C6174 6174 [Trivial] +5C617E 617E [Trivial] +5C617F 617F [Trivial] +5C6180 6180 [Trivial][ILSEQ] +5C6181 6181 [Trivial][ILSEQ] +5C619F 619F [Trivial][ILSEQ] +5C61A0 61A0 [Trivial][ILSEQ] +5C61A1 61A1 [Trivial][ILSEQ] +5C61E0 61E0 [Trivial][ILSEQ] +5C61EF 61EF [Trivial][ILSEQ] +5C61F9 61F9 [Trivial][ILSEQ] +5C61FA 61FA [Trivial][ILSEQ] +5C61FC 61FC [Trivial][ILSEQ] +5C61FD 61FD [Trivial][ILSEQ] +5C61FE 61FE [Trivial][ILSEQ] +5C61FF 61FF [Trivial][ILSEQ] +5C6200 
0800 [Regular] +5C6208 0808 [Regular] +5C6209 0809 [Regular] +5C620A 080A [Regular] +5C620D 080D [Regular] +5C621A 081A [Regular] +5C6222 0822 [Regular] +5C6225 0825 [Regular] +5C6227 NULL [SyntErr] +5C6230 0830 [Regular] +5C623F 083F [Regular] +5C6240 0840 [Regular] +5C625A 085A [Regular] +5C625C NULL [SyntErr] +5C625F 085F [Regular] +5C6261 0861 [Regular] +5C6262 0862 [Regular] +5C626E 086E [Regular] +5C6272 0872 [Regular] +5C6274 0874 [Regular] +5C627E 087E [Regular] +5C627F 087F [Regular] +5C6280 0880 [Regular][ILSEQ] +5C6281 0881 [Regular][ILSEQ] +5C629F 089F [Regular][ILSEQ] +5C62A0 08A0 [Regular][ILSEQ] +5C62A1 08A1 [Regular][ILSEQ] +5C62E0 08E0 [Regular][ILSEQ] +5C62EF 08EF [Regular][ILSEQ] +5C62F9 08F9 [Regular][ILSEQ] +5C62FA 08FA [Regular][ILSEQ] +5C62FC 08FC [Regular][ILSEQ] +5C62FD 08FD [Regular][ILSEQ] +5C62FE 08FE [Regular][ILSEQ] +5C62FF 08FF [Regular][ILSEQ] +5C6E00 0A00 [Regular] +5C6E08 0A08 [Regular] +5C6E09 0A09 [Regular] +5C6E0A 0A0A [Regular] +5C6E0D 0A0D [Regular] +5C6E1A 0A1A [Regular] +5C6E22 0A22 [Regular] +5C6E25 0A25 [Regular] +5C6E27 NULL [SyntErr] +5C6E30 0A30 [Regular] +5C6E3F 0A3F [Regular] +5C6E40 0A40 [Regular] +5C6E5A 0A5A [Regular] +5C6E5C NULL [SyntErr] +5C6E5F 0A5F [Regular] +5C6E61 0A61 [Regular] +5C6E62 0A62 [Regular] +5C6E6E 0A6E [Regular] +5C6E72 0A72 [Regular] +5C6E74 0A74 [Regular] +5C6E7E 0A7E [Regular] +5C6E7F 0A7F [Regular] +5C6E80 0A80 [Regular][ILSEQ] +5C6E81 0A81 [Regular][ILSEQ] +5C6E9F 0A9F [Regular][ILSEQ] +5C6EA0 0AA0 [Regular][ILSEQ] +5C6EA1 0AA1 [Regular][ILSEQ] +5C6EE0 0AE0 [Regular][ILSEQ] +5C6EEF 0AEF [Regular][ILSEQ] +5C6EF9 0AF9 [Regular][ILSEQ] +5C6EFA 0AFA [Regular][ILSEQ] +5C6EFC 0AFC [Regular][ILSEQ] +5C6EFD 0AFD [Regular][ILSEQ] +5C6EFE 0AFE [Regular][ILSEQ] +5C6EFF 0AFF [Regular][ILSEQ] +5C7200 0D00 [Regular] +5C7208 0D08 [Regular] +5C7209 0D09 [Regular] +5C720A 0D0A [Regular] +5C720D 0D0D [Regular] +5C721A 0D1A [Regular] +5C7222 0D22 [Regular] +5C7225 0D25 [Regular] +5C7227 NULL [SyntErr] +5C7230 0D30 [Regular] +5C723F 0D3F [Regular] +5C7240 0D40 [Regular] +5C725A 0D5A [Regular] +5C725C NULL [SyntErr] +5C725F 0D5F [Regular] +5C7261 0D61 [Regular] +5C7262 0D62 [Regular] +5C726E 0D6E [Regular] +5C7272 0D72 [Regular] +5C7274 0D74 [Regular] +5C727E 0D7E [Regular] +5C727F 0D7F [Regular] +5C7280 0D80 [Regular][ILSEQ] +5C7281 0D81 [Regular][ILSEQ] +5C729F 0D9F [Regular][ILSEQ] +5C72A0 0DA0 [Regular][ILSEQ] +5C72A1 0DA1 [Regular][ILSEQ] +5C72E0 0DE0 [Regular][ILSEQ] +5C72EF 0DEF [Regular][ILSEQ] +5C72F9 0DF9 [Regular][ILSEQ] +5C72FA 0DFA [Regular][ILSEQ] +5C72FC 0DFC [Regular][ILSEQ] +5C72FD 0DFD [Regular][ILSEQ] +5C72FE 0DFE [Regular][ILSEQ] +5C72FF 0DFF [Regular][ILSEQ] +5C7400 0900 [Regular] +5C7408 0908 [Regular] +5C7409 0909 [Regular] +5C740A 090A [Regular] +5C740D 090D [Regular] +5C741A 091A [Regular] +5C7422 0922 [Regular] +5C7425 0925 [Regular] +5C7427 NULL [SyntErr] +5C7430 0930 [Regular] +5C743F 093F [Regular] +5C7440 0940 [Regular] +5C745A 095A [Regular] +5C745C NULL [SyntErr] +5C745F 095F [Regular] +5C7461 0961 [Regular] +5C7462 0962 [Regular] +5C746E 096E [Regular] +5C7472 0972 [Regular] +5C7474 0974 [Regular] +5C747E 097E [Regular] +5C747F 097F [Regular] +5C7480 0980 [Regular][ILSEQ] +5C7481 0981 [Regular][ILSEQ] +5C749F 099F [Regular][ILSEQ] +5C74A0 09A0 [Regular][ILSEQ] +5C74A1 09A1 [Regular][ILSEQ] +5C74E0 09E0 [Regular][ILSEQ] +5C74EF 09EF [Regular][ILSEQ] +5C74F9 09F9 [Regular][ILSEQ] +5C74FA 09FA [Regular][ILSEQ] +5C74FC 09FC [Regular][ILSEQ] +5C74FD 09FD [Regular][ILSEQ] +5C74FE 09FE [Regular][ILSEQ] +5C74FF 
09FF [Regular][ILSEQ] +5C7E00 7E00 [Trivial] +5C7E08 7E08 [Trivial] +5C7E09 7E09 [Trivial] +5C7E0A 7E0A [Trivial] +5C7E0D 7E0D [Trivial] +5C7E1A 7E1A [Trivial] +5C7E22 7E22 [Trivial] +5C7E25 7E25 [Trivial] +5C7E27 NULL [SyntErr] +5C7E30 7E30 [Trivial] +5C7E3F 7E3F [Trivial] +5C7E40 7E40 [Trivial] +5C7E5A 7E5A [Trivial] +5C7E5C NULL [SyntErr] +5C7E5F 7E5F [Trivial] +5C7E61 7E61 [Trivial] +5C7E62 7E62 [Trivial] +5C7E6E 7E6E [Trivial] +5C7E72 7E72 [Trivial] +5C7E74 7E74 [Trivial] +5C7E7E 7E7E [Trivial] +5C7E7F 7E7F [Trivial] +5C7E80 7E80 [Trivial][ILSEQ] +5C7E81 7E81 [Trivial][ILSEQ] +5C7E9F 7E9F [Trivial][ILSEQ] +5C7EA0 7EA0 [Trivial][ILSEQ] +5C7EA1 7EA1 [Trivial][ILSEQ] +5C7EE0 7EE0 [Trivial][ILSEQ] +5C7EEF 7EEF [Trivial][ILSEQ] +5C7EF9 7EF9 [Trivial][ILSEQ] +5C7EFA 7EFA [Trivial][ILSEQ] +5C7EFC 7EFC [Trivial][ILSEQ] +5C7EFD 7EFD [Trivial][ILSEQ] +5C7EFE 7EFE [Trivial][ILSEQ] +5C7EFF 7EFF [Trivial][ILSEQ] +5C7F00 7F00 [Trivial] +5C7F08 7F08 [Trivial] +5C7F09 7F09 [Trivial] +5C7F0A 7F0A [Trivial] +5C7F0D 7F0D [Trivial] +5C7F1A 7F1A [Trivial] +5C7F22 7F22 [Trivial] +5C7F25 7F25 [Trivial] +5C7F27 NULL [SyntErr] +5C7F30 7F30 [Trivial] +5C7F3F 7F3F [Trivial] +5C7F40 7F40 [Trivial] +5C7F5A 7F5A [Trivial] +5C7F5C NULL [SyntErr] +5C7F5F 7F5F [Trivial] +5C7F61 7F61 [Trivial] +5C7F62 7F62 [Trivial] +5C7F6E 7F6E [Trivial] +5C7F72 7F72 [Trivial] +5C7F74 7F74 [Trivial] +5C7F7E 7F7E [Trivial] +5C7F7F 7F7F [Trivial] +5C7F80 7F80 [Trivial][ILSEQ] +5C7F81 7F81 [Trivial][ILSEQ] +5C7F9F 7F9F [Trivial][ILSEQ] +5C7FA0 7FA0 [Trivial][ILSEQ] +5C7FA1 7FA1 [Trivial][ILSEQ] +5C7FE0 7FE0 [Trivial][ILSEQ] +5C7FEF 7FEF [Trivial][ILSEQ] +5C7FF9 7FF9 [Trivial][ILSEQ] +5C7FFA 7FFA [Trivial][ILSEQ] +5C7FFC 7FFC [Trivial][ILSEQ] +5C7FFD 7FFD [Trivial][ILSEQ] +5C7FFE 7FFE [Trivial][ILSEQ] +5C7FFF 7FFF [Trivial][ILSEQ] +5C8000 8000 [Trivial][ILSEQ] +5C8008 8008 [Trivial][ILSEQ] +5C8009 8009 [Trivial][ILSEQ] +5C800A 800A [Trivial][ILSEQ] +5C800D 800D [Trivial][ILSEQ] +5C801A 801A [Trivial][ILSEQ] +5C8022 8022 [Trivial][ILSEQ] +5C8025 8025 [Trivial][ILSEQ] +5C8027 NULL [SyntErr] +5C8030 8030 [Trivial][ILSEQ] +5C803F 803F [Trivial][ILSEQ] +5C8040 8040 [Trivial][ILSEQ] +5C805A 805A [Trivial][ILSEQ] +5C805C NULL [SyntErr] +5C805F 805F [Trivial][ILSEQ] +5C8061 8061 [Trivial][ILSEQ] +5C8062 8062 [Trivial][ILSEQ] +5C806E 806E [Trivial][ILSEQ] +5C8072 8072 [Trivial][ILSEQ] +5C8074 8074 [Trivial][ILSEQ] +5C807E 807E [Trivial][ILSEQ] +5C807F 807F [Trivial][ILSEQ] +5C8080 8080 [Trivial][ILSEQ] +5C8081 8081 [Trivial][ILSEQ] +5C809F 809F [Trivial][ILSEQ] +5C80A0 80A0 [Trivial][ILSEQ] +5C80A1 80A1 [Trivial][ILSEQ] +5C80E0 80E0 [Trivial][ILSEQ] +5C80EF 80EF [Trivial][ILSEQ] +5C80F9 80F9 [Trivial][ILSEQ] +5C80FA 80FA [Trivial][ILSEQ] +5C80FC 80FC [Trivial][ILSEQ] +5C80FD 80FD [Trivial][ILSEQ] +5C80FE 80FE [Trivial][ILSEQ] +5C80FF 80FF [Trivial][ILSEQ] +5C8100 8100 [Trivial][ILSEQ] +5C8108 8108 [Trivial][ILSEQ] +5C8109 8109 [Trivial][ILSEQ] +5C810A 810A [Trivial][ILSEQ] +5C810D 810D [Trivial][ILSEQ] +5C811A 811A [Trivial][ILSEQ] +5C8122 8122 [Trivial][ILSEQ] +5C8125 8125 [Trivial][ILSEQ] +5C8127 NULL [SyntErr] +5C8130 8130 [Trivial][ILSEQ] +5C813F 813F [Trivial][ILSEQ] +5C8140 8140 [Trivial][ILSEQ] +5C815A 815A [Trivial][ILSEQ] +5C815C NULL [SyntErr] +5C815F 815F [Trivial][ILSEQ] +5C8161 8161 [Trivial][ILSEQ] +5C8162 8162 [Trivial][ILSEQ] +5C816E 816E [Trivial][ILSEQ] +5C8172 8172 [Trivial][ILSEQ] +5C8174 8174 [Trivial][ILSEQ] +5C817E 817E [Trivial][ILSEQ] +5C817F 817F [Trivial][ILSEQ] +5C8180 8180 [Trivial][ILSEQ] +5C8181 8181 
[Trivial][ILSEQ] +5C819F 819F [Trivial][ILSEQ] +5C81A0 81A0 [Trivial][ILSEQ] +5C81A1 81A1 [Trivial][ILSEQ] +5C81E0 81E0 [Trivial][ILSEQ] +5C81EF 81EF [Trivial][ILSEQ] +5C81F9 81F9 [Trivial][ILSEQ] +5C81FA 81FA [Trivial][ILSEQ] +5C81FC 81FC [Trivial][ILSEQ] +5C81FD 81FD [Trivial][ILSEQ] +5C81FE 81FE [Trivial][ILSEQ] +5C81FF 81FF [Trivial][ILSEQ] +5C9F00 9F00 [Trivial][ILSEQ] +5C9F08 9F08 [Trivial][ILSEQ] +5C9F09 9F09 [Trivial][ILSEQ] +5C9F0A 9F0A [Trivial][ILSEQ] +5C9F0D 9F0D [Trivial][ILSEQ] +5C9F1A 9F1A [Trivial][ILSEQ] +5C9F22 9F22 [Trivial][ILSEQ] +5C9F25 9F25 [Trivial][ILSEQ] +5C9F27 NULL [SyntErr] +5C9F30 9F30 [Trivial][ILSEQ] +5C9F3F 9F3F [Trivial][ILSEQ] +5C9F40 9F40 [Trivial][ILSEQ] +5C9F5A 9F5A [Trivial][ILSEQ] +5C9F5C NULL [SyntErr] +5C9F5F 9F5F [Trivial][ILSEQ] +5C9F61 9F61 [Trivial][ILSEQ] +5C9F62 9F62 [Trivial][ILSEQ] +5C9F6E 9F6E [Trivial][ILSEQ] +5C9F72 9F72 [Trivial][ILSEQ] +5C9F74 9F74 [Trivial][ILSEQ] +5C9F7E 9F7E [Trivial][ILSEQ] +5C9F7F 9F7F [Trivial][ILSEQ] +5C9F80 9F80 [Trivial][ILSEQ] +5C9F81 9F81 [Trivial][ILSEQ] +5C9F9F 9F9F [Trivial][ILSEQ] +5C9FA0 9FA0 [Trivial][ILSEQ] +5C9FA1 9FA1 [Trivial][ILSEQ] +5C9FE0 9FE0 [Trivial][ILSEQ] +5C9FEF 9FEF [Trivial][ILSEQ] +5C9FF9 9FF9 [Trivial][ILSEQ] +5C9FFA 9FFA [Trivial][ILSEQ] +5C9FFC 9FFC [Trivial][ILSEQ] +5C9FFD 9FFD [Trivial][ILSEQ] +5C9FFE 9FFE [Trivial][ILSEQ] +5C9FFF 9FFF [Trivial][ILSEQ] +5CA000 A000 [Trivial][ILSEQ] +5CA008 A008 [Trivial][ILSEQ] +5CA009 A009 [Trivial][ILSEQ] +5CA00A A00A [Trivial][ILSEQ] +5CA00D A00D [Trivial][ILSEQ] +5CA01A A01A [Trivial][ILSEQ] +5CA022 A022 [Trivial][ILSEQ] +5CA025 A025 [Trivial][ILSEQ] +5CA027 NULL [SyntErr] +5CA030 A030 [Trivial][ILSEQ] +5CA03F A03F [Trivial][ILSEQ] +5CA040 A040 [Trivial][ILSEQ] +5CA05A A05A [Trivial][ILSEQ] +5CA05C NULL [SyntErr] +5CA05F A05F [Trivial][ILSEQ] +5CA061 A061 [Trivial][ILSEQ] +5CA062 A062 [Trivial][ILSEQ] +5CA06E A06E [Trivial][ILSEQ] +5CA072 A072 [Trivial][ILSEQ] +5CA074 A074 [Trivial][ILSEQ] +5CA07E A07E [Trivial][ILSEQ] +5CA07F A07F [Trivial][ILSEQ] +5CA080 A080 [Trivial][ILSEQ] +5CA081 A081 [Trivial][ILSEQ] +5CA09F A09F [Trivial][ILSEQ] +5CA0A0 A0A0 [Trivial][ILSEQ] +5CA0A1 A0A1 [Trivial][ILSEQ] +5CA0E0 A0E0 [Trivial][ILSEQ] +5CA0EF A0EF [Trivial][ILSEQ] +5CA0F9 A0F9 [Trivial][ILSEQ] +5CA0FA A0FA [Trivial][ILSEQ] +5CA0FC A0FC [Trivial][ILSEQ] +5CA0FD A0FD [Trivial][ILSEQ] +5CA0FE A0FE [Trivial][ILSEQ] +5CA0FF A0FF [Trivial][ILSEQ] +5CA100 A100 [Trivial][ILSEQ] +5CA108 A108 [Trivial][ILSEQ] +5CA109 A109 [Trivial][ILSEQ] +5CA10A A10A [Trivial][ILSEQ] +5CA10D A10D [Trivial][ILSEQ] +5CA11A A11A [Trivial][ILSEQ] +5CA122 A122 [Trivial][ILSEQ] +5CA125 A125 [Trivial][ILSEQ] +5CA127 NULL [SyntErr] +5CA130 A130 [Trivial][ILSEQ] +5CA13F A13F [Trivial][ILSEQ] +5CA140 A140 [Trivial][ILSEQ] +5CA15A A15A [Trivial][ILSEQ] +5CA15C NULL [SyntErr] +5CA15F A15F [Trivial][ILSEQ] +5CA161 A161 [Trivial][ILSEQ] +5CA162 A162 [Trivial][ILSEQ] +5CA16E A16E [Trivial][ILSEQ] +5CA172 A172 [Trivial][ILSEQ] +5CA174 A174 [Trivial][ILSEQ] +5CA17E A17E [Trivial][ILSEQ] +5CA17F A17F [Trivial][ILSEQ] +5CA180 A180 [Trivial][ILSEQ] +5CA181 A181 [Trivial][ILSEQ] +5CA19F A19F [Trivial][ILSEQ] +5CA1A0 A1A0 [Trivial][ILSEQ] +5CA1A1 A1A1 [Trivial][ILSEQ] +5CA1E0 A1E0 [Trivial][ILSEQ] +5CA1EF A1EF [Trivial][ILSEQ] +5CA1F9 A1F9 [Trivial][ILSEQ] +5CA1FA A1FA [Trivial][ILSEQ] +5CA1FC A1FC [Trivial][ILSEQ] +5CA1FD A1FD [Trivial][ILSEQ] +5CA1FE A1FE [Trivial][ILSEQ] +5CA1FF A1FF [Trivial][ILSEQ] +5CE000 E000 [Trivial][ILSEQ] +5CE008 E008 [Trivial][ILSEQ] +5CE009 E009 [Trivial][ILSEQ] +5CE00A 
E00A [Trivial][ILSEQ] +5CE00D E00D [Trivial][ILSEQ] +5CE01A E01A [Trivial][ILSEQ] +5CE022 E022 [Trivial][ILSEQ] +5CE025 E025 [Trivial][ILSEQ] +5CE027 NULL [SyntErr] +5CE030 E030 [Trivial][ILSEQ] +5CE03F E03F [Trivial][ILSEQ] +5CE040 E040 [Trivial][ILSEQ] +5CE05A E05A [Trivial][ILSEQ] +5CE05C NULL [SyntErr] +5CE05F E05F [Trivial][ILSEQ] +5CE061 E061 [Trivial][ILSEQ] +5CE062 E062 [Trivial][ILSEQ] +5CE06E E06E [Trivial][ILSEQ] +5CE072 E072 [Trivial][ILSEQ] +5CE074 E074 [Trivial][ILSEQ] +5CE07E E07E [Trivial][ILSEQ] +5CE07F E07F [Trivial][ILSEQ] +5CE080 E080 [Trivial][ILSEQ] +5CE081 E081 [Trivial][ILSEQ] +5CE09F E09F [Trivial][ILSEQ] +5CE0A0 E0A0 [Trivial][ILSEQ] +5CE0A1 E0A1 [Trivial][ILSEQ] +5CE0E0 E0E0 [Trivial][ILSEQ] +5CE0EF E0EF [Trivial][ILSEQ] +5CE0F9 E0F9 [Trivial][ILSEQ] +5CE0FA E0FA [Trivial][ILSEQ] +5CE0FC E0FC [Trivial][ILSEQ] +5CE0FD E0FD [Trivial][ILSEQ] +5CE0FE E0FE [Trivial][ILSEQ] +5CE0FF E0FF [Trivial][ILSEQ] +5CEF00 EF00 [Trivial][ILSEQ] +5CEF08 EF08 [Trivial][ILSEQ] +5CEF09 EF09 [Trivial][ILSEQ] +5CEF0A EF0A [Trivial][ILSEQ] +5CEF0D EF0D [Trivial][ILSEQ] +5CEF1A EF1A [Trivial][ILSEQ] +5CEF22 EF22 [Trivial][ILSEQ] +5CEF25 EF25 [Trivial][ILSEQ] +5CEF27 NULL [SyntErr] +5CEF30 EF30 [Trivial][ILSEQ] +5CEF3F EF3F [Trivial][ILSEQ] +5CEF40 EF40 [Trivial][ILSEQ] +5CEF5A EF5A [Trivial][ILSEQ] +5CEF5C NULL [SyntErr] +5CEF5F EF5F [Trivial][ILSEQ] +5CEF61 EF61 [Trivial][ILSEQ] +5CEF62 EF62 [Trivial][ILSEQ] +5CEF6E EF6E [Trivial][ILSEQ] +5CEF72 EF72 [Trivial][ILSEQ] +5CEF74 EF74 [Trivial][ILSEQ] +5CEF7E EF7E [Trivial][ILSEQ] +5CEF7F EF7F [Trivial][ILSEQ] +5CEF80 EF80 [Trivial][ILSEQ] +5CEF81 EF81 [Trivial][ILSEQ] +5CEF9F EF9F [Trivial][ILSEQ] +5CEFA0 EFA0 [Trivial][ILSEQ] +5CEFA1 EFA1 [Trivial][ILSEQ] +5CEFE0 EFE0 [Trivial][ILSEQ] +5CEFEF EFEF [Trivial][ILSEQ] +5CEFF9 EFF9 [Trivial][ILSEQ] +5CEFFA EFFA [Trivial][ILSEQ] +5CEFFC EFFC [Trivial][ILSEQ] +5CEFFD EFFD [Trivial][ILSEQ] +5CEFFE EFFE [Trivial][ILSEQ] +5CEFFF EFFF [Trivial][ILSEQ] +5CF900 F900 [Trivial][ILSEQ] +5CF908 F908 [Trivial][ILSEQ] +5CF909 F909 [Trivial][ILSEQ] +5CF90A F90A [Trivial][ILSEQ] +5CF90D F90D [Trivial][ILSEQ] +5CF91A F91A [Trivial][ILSEQ] +5CF922 F922 [Trivial][ILSEQ] +5CF925 F925 [Trivial][ILSEQ] +5CF927 NULL [SyntErr] +5CF930 F930 [Trivial][ILSEQ] +5CF93F F93F [Trivial][ILSEQ] +5CF940 F940 [Trivial][ILSEQ] +5CF95A F95A [Trivial][ILSEQ] +5CF95C NULL [SyntErr] +5CF95F F95F [Trivial][ILSEQ] +5CF961 F961 [Trivial][ILSEQ] +5CF962 F962 [Trivial][ILSEQ] +5CF96E F96E [Trivial][ILSEQ] +5CF972 F972 [Trivial][ILSEQ] +5CF974 F974 [Trivial][ILSEQ] +5CF97E F97E [Trivial][ILSEQ] +5CF97F F97F [Trivial][ILSEQ] +5CF980 F980 [Trivial][ILSEQ] +5CF981 F981 [Trivial][ILSEQ] +5CF99F F99F [Trivial][ILSEQ] +5CF9A0 F9A0 [Trivial][ILSEQ] +5CF9A1 F9A1 [Trivial][ILSEQ] +5CF9E0 F9E0 [Trivial][ILSEQ] +5CF9EF F9EF [Trivial][ILSEQ] +5CF9F9 F9F9 [Trivial][ILSEQ] +5CF9FA F9FA [Trivial][ILSEQ] +5CF9FC F9FC [Trivial][ILSEQ] +5CF9FD F9FD [Trivial][ILSEQ] +5CF9FE F9FE [Trivial][ILSEQ] +5CF9FF F9FF [Trivial][ILSEQ] +5CFA00 FA00 [Trivial][ILSEQ] +5CFA08 FA08 [Trivial][ILSEQ] +5CFA09 FA09 [Trivial][ILSEQ] +5CFA0A FA0A [Trivial][ILSEQ] +5CFA0D FA0D [Trivial][ILSEQ] +5CFA1A FA1A [Trivial][ILSEQ] +5CFA22 FA22 [Trivial][ILSEQ] +5CFA25 FA25 [Trivial][ILSEQ] +5CFA27 NULL [SyntErr] +5CFA30 FA30 [Trivial][ILSEQ] +5CFA3F FA3F [Trivial][ILSEQ] +5CFA40 FA40 [Trivial][ILSEQ] +5CFA5A FA5A [Trivial][ILSEQ] +5CFA5C NULL [SyntErr] +5CFA5F FA5F [Trivial][ILSEQ] +5CFA61 FA61 [Trivial][ILSEQ] +5CFA62 FA62 [Trivial][ILSEQ] +5CFA6E FA6E [Trivial][ILSEQ] +5CFA72 FA72 
[Trivial][ILSEQ] +5CFA74 FA74 [Trivial][ILSEQ] +5CFA7E FA7E [Trivial][ILSEQ] +5CFA7F FA7F [Trivial][ILSEQ] +5CFA80 FA80 [Trivial][ILSEQ] +5CFA81 FA81 [Trivial][ILSEQ] +5CFA9F FA9F [Trivial][ILSEQ] +5CFAA0 FAA0 [Trivial][ILSEQ] +5CFAA1 FAA1 [Trivial][ILSEQ] +5CFAE0 FAE0 [Trivial][ILSEQ] +5CFAEF FAEF [Trivial][ILSEQ] +5CFAF9 FAF9 [Trivial][ILSEQ] +5CFAFA FAFA [Trivial][ILSEQ] +5CFAFC FAFC [Trivial][ILSEQ] +5CFAFD FAFD [Trivial][ILSEQ] +5CFAFE FAFE [Trivial][ILSEQ] +5CFAFF FAFF [Trivial][ILSEQ] +5CFC00 FC00 [Trivial][ILSEQ] +5CFC08 FC08 [Trivial][ILSEQ] +5CFC09 FC09 [Trivial][ILSEQ] +5CFC0A FC0A [Trivial][ILSEQ] +5CFC0D FC0D [Trivial][ILSEQ] +5CFC1A FC1A [Trivial][ILSEQ] +5CFC22 FC22 [Trivial][ILSEQ] +5CFC25 FC25 [Trivial][ILSEQ] +5CFC27 NULL [SyntErr] +5CFC30 FC30 [Trivial][ILSEQ] +5CFC3F FC3F [Trivial][ILSEQ] +5CFC40 FC40 [Trivial][ILSEQ] +5CFC5A FC5A [Trivial][ILSEQ] +5CFC5C NULL [SyntErr] +5CFC5F FC5F [Trivial][ILSEQ] +5CFC61 FC61 [Trivial][ILSEQ] +5CFC62 FC62 [Trivial][ILSEQ] +5CFC6E FC6E [Trivial][ILSEQ] +5CFC72 FC72 [Trivial][ILSEQ] +5CFC74 FC74 [Trivial][ILSEQ] +5CFC7E FC7E [Trivial][ILSEQ] +5CFC7F FC7F [Trivial][ILSEQ] +5CFC80 FC80 [Trivial][ILSEQ] +5CFC81 FC81 [Trivial][ILSEQ] +5CFC9F FC9F [Trivial][ILSEQ] +5CFCA0 FCA0 [Trivial][ILSEQ] +5CFCA1 FCA1 [Trivial][ILSEQ] +5CFCE0 FCE0 [Trivial][ILSEQ] +5CFCEF FCEF [Trivial][ILSEQ] +5CFCF9 FCF9 [Trivial][ILSEQ] +5CFCFA FCFA [Trivial][ILSEQ] +5CFCFC FCFC [Trivial][ILSEQ] +5CFCFD FCFD [Trivial][ILSEQ] +5CFCFE FCFE [Trivial][ILSEQ] +5CFCFF FCFF [Trivial][ILSEQ] +5CFD00 FD00 [Trivial][ILSEQ] +5CFD08 FD08 [Trivial][ILSEQ] +5CFD09 FD09 [Trivial][ILSEQ] +5CFD0A FD0A [Trivial][ILSEQ] +5CFD0D FD0D [Trivial][ILSEQ] +5CFD1A FD1A [Trivial][ILSEQ] +5CFD22 FD22 [Trivial][ILSEQ] +5CFD25 FD25 [Trivial][ILSEQ] +5CFD27 NULL [SyntErr] +5CFD30 FD30 [Trivial][ILSEQ] +5CFD3F FD3F [Trivial][ILSEQ] +5CFD40 FD40 [Trivial][ILSEQ] +5CFD5A FD5A [Trivial][ILSEQ] +5CFD5C NULL [SyntErr] +5CFD5F FD5F [Trivial][ILSEQ] +5CFD61 FD61 [Trivial][ILSEQ] +5CFD62 FD62 [Trivial][ILSEQ] +5CFD6E FD6E [Trivial][ILSEQ] +5CFD72 FD72 [Trivial][ILSEQ] +5CFD74 FD74 [Trivial][ILSEQ] +5CFD7E FD7E [Trivial][ILSEQ] +5CFD7F FD7F [Trivial][ILSEQ] +5CFD80 FD80 [Trivial][ILSEQ] +5CFD81 FD81 [Trivial][ILSEQ] +5CFD9F FD9F [Trivial][ILSEQ] +5CFDA0 FDA0 [Trivial][ILSEQ] +5CFDA1 FDA1 [Trivial][ILSEQ] +5CFDE0 FDE0 [Trivial][ILSEQ] +5CFDEF FDEF [Trivial][ILSEQ] +5CFDF9 FDF9 [Trivial][ILSEQ] +5CFDFA FDFA [Trivial][ILSEQ] +5CFDFC FDFC [Trivial][ILSEQ] +5CFDFD FDFD [Trivial][ILSEQ] +5CFDFE FDFE [Trivial][ILSEQ] +5CFDFF FDFF [Trivial][ILSEQ] +5CFE00 FE00 [Trivial][ILSEQ] +5CFE08 FE08 [Trivial][ILSEQ] +5CFE09 FE09 [Trivial][ILSEQ] +5CFE0A FE0A [Trivial][ILSEQ] +5CFE0D FE0D [Trivial][ILSEQ] +5CFE1A FE1A [Trivial][ILSEQ] +5CFE22 FE22 [Trivial][ILSEQ] +5CFE25 FE25 [Trivial][ILSEQ] +5CFE27 NULL [SyntErr] +5CFE30 FE30 [Trivial][ILSEQ] +5CFE3F FE3F [Trivial][ILSEQ] +5CFE40 FE40 [Trivial][ILSEQ] +5CFE5A FE5A [Trivial][ILSEQ] +5CFE5C NULL [SyntErr] +5CFE5F FE5F [Trivial][ILSEQ] +5CFE61 FE61 [Trivial][ILSEQ] +5CFE62 FE62 [Trivial][ILSEQ] +5CFE6E FE6E [Trivial][ILSEQ] +5CFE72 FE72 [Trivial][ILSEQ] +5CFE74 FE74 [Trivial][ILSEQ] +5CFE7E FE7E [Trivial][ILSEQ] +5CFE7F FE7F [Trivial][ILSEQ] +5CFE80 FE80 [Trivial][ILSEQ] +5CFE81 FE81 [Trivial][ILSEQ] +5CFE9F FE9F [Trivial][ILSEQ] +5CFEA0 FEA0 [Trivial][ILSEQ] +5CFEA1 FEA1 [Trivial][ILSEQ] +5CFEE0 FEE0 [Trivial][ILSEQ] +5CFEEF FEEF [Trivial][ILSEQ] +5CFEF9 FEF9 [Trivial][ILSEQ] +5CFEFA FEFA [Trivial][ILSEQ] +5CFEFC FEFC [Trivial][ILSEQ] +5CFEFD FEFD [Trivial][ILSEQ] +5CFEFE 
FEFE [Trivial][ILSEQ] +5CFEFF FEFF [Trivial][ILSEQ] +5CFF00 FF00 [Trivial][ILSEQ] +5CFF08 FF08 [Trivial][ILSEQ] +5CFF09 FF09 [Trivial][ILSEQ] +5CFF0A FF0A [Trivial][ILSEQ] +5CFF0D FF0D [Trivial][ILSEQ] +5CFF1A FF1A [Trivial][ILSEQ] +5CFF22 FF22 [Trivial][ILSEQ] +5CFF25 FF25 [Trivial][ILSEQ] +5CFF27 NULL [SyntErr] +5CFF30 FF30 [Trivial][ILSEQ] +5CFF3F FF3F [Trivial][ILSEQ] +5CFF40 FF40 [Trivial][ILSEQ] +5CFF5A FF5A [Trivial][ILSEQ] +5CFF5C NULL [SyntErr] +5CFF5F FF5F [Trivial][ILSEQ] +5CFF61 FF61 [Trivial][ILSEQ] +5CFF62 FF62 [Trivial][ILSEQ] +5CFF6E FF6E [Trivial][ILSEQ] +5CFF72 FF72 [Trivial][ILSEQ] +5CFF74 FF74 [Trivial][ILSEQ] +5CFF7E FF7E [Trivial][ILSEQ] +5CFF7F FF7F [Trivial][ILSEQ] +5CFF80 FF80 [Trivial][ILSEQ] +5CFF81 FF81 [Trivial][ILSEQ] +5CFF9F FF9F [Trivial][ILSEQ] +5CFFA0 FFA0 [Trivial][ILSEQ] +5CFFA1 FFA1 [Trivial][ILSEQ] +5CFFE0 FFE0 [Trivial][ILSEQ] +5CFFEF FFEF [Trivial][ILSEQ] +5CFFF9 FFF9 [Trivial][ILSEQ] +5CFFFA FFFA [Trivial][ILSEQ] +5CFFFC FFFC [Trivial][ILSEQ] +5CFFFD FFFD [Trivial][ILSEQ] +5CFFFE FFFE [Trivial][ILSEQ] +5CFFFF FFFF [Trivial][ILSEQ] +5C005C00 0000 [Trivial] +5C005C08 0008 [Trivial] +5C005C09 0009 [Trivial] +5C005C0A 000A [Trivial] +5C005C0D 000D [Trivial] +5C005C1A 001A [Trivial] +5C005C22 0022 [Trivial] +5C005C25 005C25 [Regular] +5C005C27 0027 [Trivial] +5C005C30 0000 [Regular] +5C005C3F 003F [Trivial] +5C005C40 0040 [Trivial] +5C005C5A 001A [Regular] +5C005C5C 005C [Regular] +5C005C5F 005C5F [Regular] +5C005C61 0061 [Trivial] +5C005C62 0008 [Regular] +5C005C6E 000A [Regular] +5C005C72 000D [Regular] +5C005C74 0009 [Regular] +5C005C7E 007E [Trivial] +5C005C7F 007F [Trivial] +5C005C80 0080 [Trivial][ILSEQ] +5C005C81 0081 [Trivial][ILSEQ] +5C005C9F 009F [Trivial][ILSEQ] +5C005CA0 00A0 [Trivial][ILSEQ] +5C005CA1 00A1 [Trivial][ILSEQ] +5C005CE0 00E0 [Trivial][ILSEQ] +5C005CEF 00EF [Trivial][ILSEQ] +5C005CF9 00F9 [Trivial][ILSEQ] +5C005CFA 00FA [Trivial][ILSEQ] +5C005CFC 00FC [Trivial][ILSEQ] +5C005CFD 00FD [Trivial][ILSEQ] +5C005CFE 00FE [Trivial][ILSEQ] +5C005CFF 00FF [Trivial][ILSEQ] +5C085C00 0800 [Trivial] +5C085C08 0808 [Trivial] +5C085C09 0809 [Trivial] +5C085C0A 080A [Trivial] +5C085C0D 080D [Trivial] +5C085C1A 081A [Trivial] +5C085C22 0822 [Trivial] +5C085C25 085C25 [Regular] +5C085C27 0827 [Trivial] +5C085C30 0800 [Regular] +5C085C3F 083F [Trivial] +5C085C40 0840 [Trivial] +5C085C5A 081A [Regular] +5C085C5C 085C [Regular] +5C085C5F 085C5F [Regular] +5C085C61 0861 [Trivial] +5C085C62 0808 [Regular] +5C085C6E 080A [Regular] +5C085C72 080D [Regular] +5C085C74 0809 [Regular] +5C085C7E 087E [Trivial] +5C085C7F 087F [Trivial] +5C085C80 0880 [Trivial][ILSEQ] +5C085C81 0881 [Trivial][ILSEQ] +5C085C9F 089F [Trivial][ILSEQ] +5C085CA0 08A0 [Trivial][ILSEQ] +5C085CA1 08A1 [Trivial][ILSEQ] +5C085CE0 08E0 [Trivial][ILSEQ] +5C085CEF 08EF [Trivial][ILSEQ] +5C085CF9 08F9 [Trivial][ILSEQ] +5C085CFA 08FA [Trivial][ILSEQ] +5C085CFC 08FC [Trivial][ILSEQ] +5C085CFD 08FD [Trivial][ILSEQ] +5C085CFE 08FE [Trivial][ILSEQ] +5C085CFF 08FF [Trivial][ILSEQ] +5C095C00 0900 [Trivial] +5C095C08 0908 [Trivial] +5C095C09 0909 [Trivial] +5C095C0A 090A [Trivial] +5C095C0D 090D [Trivial] +5C095C1A 091A [Trivial] +5C095C22 0922 [Trivial] +5C095C25 095C25 [Regular] +5C095C27 0927 [Trivial] +5C095C30 0900 [Regular] +5C095C3F 093F [Trivial] +5C095C40 0940 [Trivial] +5C095C5A 091A [Regular] +5C095C5C 095C [Regular] +5C095C5F 095C5F [Regular] +5C095C61 0961 [Trivial] +5C095C62 0908 [Regular] +5C095C6E 090A [Regular] +5C095C72 090D [Regular] +5C095C74 0909 [Regular] +5C095C7E 097E 
[Trivial] +5C095C7F 097F [Trivial] +5C095C80 0980 [Trivial][ILSEQ] +5C095C81 0981 [Trivial][ILSEQ] +5C095C9F 099F [Trivial][ILSEQ] +5C095CA0 09A0 [Trivial][ILSEQ] +5C095CA1 09A1 [Trivial][ILSEQ] +5C095CE0 09E0 [Trivial][ILSEQ] +5C095CEF 09EF [Trivial][ILSEQ] +5C095CF9 09F9 [Trivial][ILSEQ] +5C095CFA 09FA [Trivial][ILSEQ] +5C095CFC 09FC [Trivial][ILSEQ] +5C095CFD 09FD [Trivial][ILSEQ] +5C095CFE 09FE [Trivial][ILSEQ] +5C095CFF 09FF [Trivial][ILSEQ] +5C0A5C00 0A00 [Trivial] +5C0A5C08 0A08 [Trivial] +5C0A5C09 0A09 [Trivial] +5C0A5C0A 0A0A [Trivial] +5C0A5C0D 0A0D [Trivial] +5C0A5C1A 0A1A [Trivial] +5C0A5C22 0A22 [Trivial] +5C0A5C25 0A5C25 [Regular] +5C0A5C27 0A27 [Trivial] +5C0A5C30 0A00 [Regular] +5C0A5C3F 0A3F [Trivial] +5C0A5C40 0A40 [Trivial] +5C0A5C5A 0A1A [Regular] +5C0A5C5C 0A5C [Regular] +5C0A5C5F 0A5C5F [Regular] +5C0A5C61 0A61 [Trivial] +5C0A5C62 0A08 [Regular] +5C0A5C6E 0A0A [Regular] +5C0A5C72 0A0D [Regular] +5C0A5C74 0A09 [Regular] +5C0A5C7E 0A7E [Trivial] +5C0A5C7F 0A7F [Trivial] +5C0A5C80 0A80 [Trivial][ILSEQ] +5C0A5C81 0A81 [Trivial][ILSEQ] +5C0A5C9F 0A9F [Trivial][ILSEQ] +5C0A5CA0 0AA0 [Trivial][ILSEQ] +5C0A5CA1 0AA1 [Trivial][ILSEQ] +5C0A5CE0 0AE0 [Trivial][ILSEQ] +5C0A5CEF 0AEF [Trivial][ILSEQ] +5C0A5CF9 0AF9 [Trivial][ILSEQ] +5C0A5CFA 0AFA [Trivial][ILSEQ] +5C0A5CFC 0AFC [Trivial][ILSEQ] +5C0A5CFD 0AFD [Trivial][ILSEQ] +5C0A5CFE 0AFE [Trivial][ILSEQ] +5C0A5CFF 0AFF [Trivial][ILSEQ] +5C0D5C00 0D00 [Trivial] +5C0D5C08 0D08 [Trivial] +5C0D5C09 0D09 [Trivial] +5C0D5C0A 0D0A [Trivial] +5C0D5C0D 0D0D [Trivial] +5C0D5C1A 0D1A [Trivial] +5C0D5C22 0D22 [Trivial] +5C0D5C25 0D5C25 [Regular] +5C0D5C27 0D27 [Trivial] +5C0D5C30 0D00 [Regular] +5C0D5C3F 0D3F [Trivial] +5C0D5C40 0D40 [Trivial] +5C0D5C5A 0D1A [Regular] +5C0D5C5C 0D5C [Regular] +5C0D5C5F 0D5C5F [Regular] +5C0D5C61 0D61 [Trivial] +5C0D5C62 0D08 [Regular] +5C0D5C6E 0D0A [Regular] +5C0D5C72 0D0D [Regular] +5C0D5C74 0D09 [Regular] +5C0D5C7E 0D7E [Trivial] +5C0D5C7F 0D7F [Trivial] +5C0D5C80 0D80 [Trivial][ILSEQ] +5C0D5C81 0D81 [Trivial][ILSEQ] +5C0D5C9F 0D9F [Trivial][ILSEQ] +5C0D5CA0 0DA0 [Trivial][ILSEQ] +5C0D5CA1 0DA1 [Trivial][ILSEQ] +5C0D5CE0 0DE0 [Trivial][ILSEQ] +5C0D5CEF 0DEF [Trivial][ILSEQ] +5C0D5CF9 0DF9 [Trivial][ILSEQ] +5C0D5CFA 0DFA [Trivial][ILSEQ] +5C0D5CFC 0DFC [Trivial][ILSEQ] +5C0D5CFD 0DFD [Trivial][ILSEQ] +5C0D5CFE 0DFE [Trivial][ILSEQ] +5C0D5CFF 0DFF [Trivial][ILSEQ] +5C1A5C00 1A00 [Trivial] +5C1A5C08 1A08 [Trivial] +5C1A5C09 1A09 [Trivial] +5C1A5C0A 1A0A [Trivial] +5C1A5C0D 1A0D [Trivial] +5C1A5C1A 1A1A [Trivial] +5C1A5C22 1A22 [Trivial] +5C1A5C25 1A5C25 [Regular] +5C1A5C27 1A27 [Trivial] +5C1A5C30 1A00 [Regular] +5C1A5C3F 1A3F [Trivial] +5C1A5C40 1A40 [Trivial] +5C1A5C5A 1A1A [Regular] +5C1A5C5C 1A5C [Regular] +5C1A5C5F 1A5C5F [Regular] +5C1A5C61 1A61 [Trivial] +5C1A5C62 1A08 [Regular] +5C1A5C6E 1A0A [Regular] +5C1A5C72 1A0D [Regular] +5C1A5C74 1A09 [Regular] +5C1A5C7E 1A7E [Trivial] +5C1A5C7F 1A7F [Trivial] +5C1A5C80 1A80 [Trivial][ILSEQ] +5C1A5C81 1A81 [Trivial][ILSEQ] +5C1A5C9F 1A9F [Trivial][ILSEQ] +5C1A5CA0 1AA0 [Trivial][ILSEQ] +5C1A5CA1 1AA1 [Trivial][ILSEQ] +5C1A5CE0 1AE0 [Trivial][ILSEQ] +5C1A5CEF 1AEF [Trivial][ILSEQ] +5C1A5CF9 1AF9 [Trivial][ILSEQ] +5C1A5CFA 1AFA [Trivial][ILSEQ] +5C1A5CFC 1AFC [Trivial][ILSEQ] +5C1A5CFD 1AFD [Trivial][ILSEQ] +5C1A5CFE 1AFE [Trivial][ILSEQ] +5C1A5CFF 1AFF [Trivial][ILSEQ] +5C225C00 2200 [Trivial] +5C225C08 2208 [Trivial] +5C225C09 2209 [Trivial] +5C225C0A 220A [Trivial] +5C225C0D 220D [Trivial] +5C225C1A 221A [Trivial] +5C225C22 2222 [Trivial] +5C225C25 225C25 
[Regular] +5C225C27 2227 [Trivial] +5C225C30 2200 [Regular] +5C225C3F 223F [Trivial] +5C225C40 2240 [Trivial] +5C225C5A 221A [Regular] +5C225C5C 225C [Regular] +5C225C5F 225C5F [Regular] +5C225C61 2261 [Trivial] +5C225C62 2208 [Regular] +5C225C6E 220A [Regular] +5C225C72 220D [Regular] +5C225C74 2209 [Regular] +5C225C7E 227E [Trivial] +5C225C7F 227F [Trivial] +5C225C80 2280 [Trivial][ILSEQ] +5C225C81 2281 [Trivial][ILSEQ] +5C225C9F 229F [Trivial][ILSEQ] +5C225CA0 22A0 [Trivial][ILSEQ] +5C225CA1 22A1 [Trivial][ILSEQ] +5C225CE0 22E0 [Trivial][ILSEQ] +5C225CEF 22EF [Trivial][ILSEQ] +5C225CF9 22F9 [Trivial][ILSEQ] +5C225CFA 22FA [Trivial][ILSEQ] +5C225CFC 22FC [Trivial][ILSEQ] +5C225CFD 22FD [Trivial][ILSEQ] +5C225CFE 22FE [Trivial][ILSEQ] +5C225CFF 22FF [Trivial][ILSEQ] +5C255C00 5C2500 [Regular] +5C255C08 5C2508 [Regular] +5C255C09 5C2509 [Regular] +5C255C0A 5C250A [Regular] +5C255C0D 5C250D [Regular] +5C255C1A 5C251A [Regular] +5C255C22 5C2522 [Regular] +5C255C25 5C255C25 [Preserve][LIKE] +5C255C27 5C2527 [Regular] +5C255C30 5C2500 [Regular] +5C255C3F 5C253F [Regular] +5C255C40 5C2540 [Regular] +5C255C5A 5C251A [Regular] +5C255C5C 5C255C [Regular] +5C255C5F 5C255C5F [Preserve][LIKE] +5C255C61 5C2561 [Regular] +5C255C62 5C2508 [Regular] +5C255C6E 5C250A [Regular] +5C255C72 5C250D [Regular] +5C255C74 5C2509 [Regular] +5C255C7E 5C257E [Regular] +5C255C7F 5C257F [Regular] +5C255C80 5C2580 [Regular][ILSEQ] +5C255C81 5C2581 [Regular][ILSEQ] +5C255C9F 5C259F [Regular][ILSEQ] +5C255CA0 5C25A0 [Regular][ILSEQ] +5C255CA1 5C25A1 [Regular][ILSEQ] +5C255CE0 5C25E0 [Regular][ILSEQ] +5C255CEF 5C25EF [Regular][ILSEQ] +5C255CF9 5C25F9 [Regular][ILSEQ] +5C255CFA 5C25FA [Regular][ILSEQ] +5C255CFC 5C25FC [Regular][ILSEQ] +5C255CFD 5C25FD [Regular][ILSEQ] +5C255CFE 5C25FE [Regular][ILSEQ] +5C255CFF 5C25FF [Regular][ILSEQ] +5C275C00 2700 [Trivial] +5C275C08 2708 [Trivial] +5C275C09 2709 [Trivial] +5C275C0A 270A [Trivial] +5C275C0D 270D [Trivial] +5C275C1A 271A [Trivial] +5C275C22 2722 [Trivial] +5C275C25 275C25 [Regular] +5C275C27 2727 [Trivial] +5C275C30 2700 [Regular] +5C275C3F 273F [Trivial] +5C275C40 2740 [Trivial] +5C275C5A 271A [Regular] +5C275C5C 275C [Regular] +5C275C5F 275C5F [Regular] +5C275C61 2761 [Trivial] +5C275C62 2708 [Regular] +5C275C6E 270A [Regular] +5C275C72 270D [Regular] +5C275C74 2709 [Regular] +5C275C7E 277E [Trivial] +5C275C7F 277F [Trivial] +5C275C80 2780 [Trivial][ILSEQ] +5C275C81 2781 [Trivial][ILSEQ] +5C275C9F 279F [Trivial][ILSEQ] +5C275CA0 27A0 [Trivial][ILSEQ] +5C275CA1 27A1 [Trivial][ILSEQ] +5C275CE0 27E0 [Trivial][ILSEQ] +5C275CEF 27EF [Trivial][ILSEQ] +5C275CF9 27F9 [Trivial][ILSEQ] +5C275CFA 27FA [Trivial][ILSEQ] +5C275CFC 27FC [Trivial][ILSEQ] +5C275CFD 27FD [Trivial][ILSEQ] +5C275CFE 27FE [Trivial][ILSEQ] +5C275CFF 27FF [Trivial][ILSEQ] +5C305C00 0000 [Regular] +5C305C08 0008 [Regular] +5C305C09 0009 [Regular] +5C305C0A 000A [Regular] +5C305C0D 000D [Regular] +5C305C1A 001A [Regular] +5C305C22 0022 [Regular] +5C305C25 005C25 [Regular] +5C305C27 0027 [Regular] +5C305C30 0000 [Regular] +5C305C3F 003F [Regular] +5C305C40 0040 [Regular] +5C305C5A 001A [Regular] +5C305C5C 005C [Regular] +5C305C5F 005C5F [Regular] +5C305C61 0061 [Regular] +5C305C62 0008 [Regular] +5C305C6E 000A [Regular] +5C305C72 000D [Regular] +5C305C74 0009 [Regular] +5C305C7E 007E [Regular] +5C305C7F 007F [Regular] +5C305C80 0080 [Regular][ILSEQ] +5C305C81 0081 [Regular][ILSEQ] +5C305C9F 009F [Regular][ILSEQ] +5C305CA0 00A0 [Regular][ILSEQ] +5C305CA1 00A1 [Regular][ILSEQ] +5C305CE0 00E0 [Regular][ILSEQ] 
+5C305CEF 00EF [Regular][ILSEQ] +5C305CF9 00F9 [Regular][ILSEQ] +5C305CFA 00FA [Regular][ILSEQ] +5C305CFC 00FC [Regular][ILSEQ] +5C305CFD 00FD [Regular][ILSEQ] +5C305CFE 00FE [Regular][ILSEQ] +5C305CFF 00FF [Regular][ILSEQ] +5C3F5C00 3F00 [Trivial] +5C3F5C08 3F08 [Trivial] +5C3F5C09 3F09 [Trivial] +5C3F5C0A 3F0A [Trivial] +5C3F5C0D 3F0D [Trivial] +5C3F5C1A 3F1A [Trivial] +5C3F5C22 3F22 [Trivial] +5C3F5C25 3F5C25 [Regular] +5C3F5C27 3F27 [Trivial] +5C3F5C30 3F00 [Regular] +5C3F5C3F 3F3F [Trivial] +5C3F5C40 3F40 [Trivial] +5C3F5C5A 3F1A [Regular] +5C3F5C5C 3F5C [Regular] +5C3F5C5F 3F5C5F [Regular] +5C3F5C61 3F61 [Trivial] +5C3F5C62 3F08 [Regular] +5C3F5C6E 3F0A [Regular] +5C3F5C72 3F0D [Regular] +5C3F5C74 3F09 [Regular] +5C3F5C7E 3F7E [Trivial] +5C3F5C7F 3F7F [Trivial] +5C3F5C80 3F80 [Trivial][ILSEQ] +5C3F5C81 3F81 [Trivial][ILSEQ] +5C3F5C9F 3F9F [Trivial][ILSEQ] +5C3F5CA0 3FA0 [Trivial][ILSEQ] +5C3F5CA1 3FA1 [Trivial][ILSEQ] +5C3F5CE0 3FE0 [Trivial][ILSEQ] +5C3F5CEF 3FEF [Trivial][ILSEQ] +5C3F5CF9 3FF9 [Trivial][ILSEQ] +5C3F5CFA 3FFA [Trivial][ILSEQ] +5C3F5CFC 3FFC [Trivial][ILSEQ] +5C3F5CFD 3FFD [Trivial][ILSEQ] +5C3F5CFE 3FFE [Trivial][ILSEQ] +5C3F5CFF 3FFF [Trivial][ILSEQ] +5C405C00 4000 [Trivial] +5C405C08 4008 [Trivial] +5C405C09 4009 [Trivial] +5C405C0A 400A [Trivial] +5C405C0D 400D [Trivial] +5C405C1A 401A [Trivial] +5C405C22 4022 [Trivial] +5C405C25 405C25 [Regular] +5C405C27 4027 [Trivial] +5C405C30 4000 [Regular] +5C405C3F 403F [Trivial] +5C405C40 4040 [Trivial] +5C405C5A 401A [Regular] +5C405C5C 405C [Regular] +5C405C5F 405C5F [Regular] +5C405C61 4061 [Trivial] +5C405C62 4008 [Regular] +5C405C6E 400A [Regular] +5C405C72 400D [Regular] +5C405C74 4009 [Regular] +5C405C7E 407E [Trivial] +5C405C7F 407F [Trivial] +5C405C80 4080 [Trivial][ILSEQ] +5C405C81 4081 [Trivial][ILSEQ] +5C405C9F 409F [Trivial][ILSEQ] +5C405CA0 40A0 [Trivial][ILSEQ] +5C405CA1 40A1 [Trivial][ILSEQ] +5C405CE0 40E0 [Trivial][ILSEQ] +5C405CEF 40EF [Trivial][ILSEQ] +5C405CF9 40F9 [Trivial][ILSEQ] +5C405CFA 40FA [Trivial][ILSEQ] +5C405CFC 40FC [Trivial][ILSEQ] +5C405CFD 40FD [Trivial][ILSEQ] +5C405CFE 40FE [Trivial][ILSEQ] +5C405CFF 40FF [Trivial][ILSEQ] +5C5A5C00 1A00 [Regular] +5C5A5C08 1A08 [Regular] +5C5A5C09 1A09 [Regular] +5C5A5C0A 1A0A [Regular] +5C5A5C0D 1A0D [Regular] +5C5A5C1A 1A1A [Regular] +5C5A5C22 1A22 [Regular] +5C5A5C25 1A5C25 [Regular] +5C5A5C27 1A27 [Regular] +5C5A5C30 1A00 [Regular] +5C5A5C3F 1A3F [Regular] +5C5A5C40 1A40 [Regular] +5C5A5C5A 1A1A [Regular] +5C5A5C5C 1A5C [Regular] +5C5A5C5F 1A5C5F [Regular] +5C5A5C61 1A61 [Regular] +5C5A5C62 1A08 [Regular] +5C5A5C6E 1A0A [Regular] +5C5A5C72 1A0D [Regular] +5C5A5C74 1A09 [Regular] +5C5A5C7E 1A7E [Regular] +5C5A5C7F 1A7F [Regular] +5C5A5C80 1A80 [Regular][ILSEQ] +5C5A5C81 1A81 [Regular][ILSEQ] +5C5A5C9F 1A9F [Regular][ILSEQ] +5C5A5CA0 1AA0 [Regular][ILSEQ] +5C5A5CA1 1AA1 [Regular][ILSEQ] +5C5A5CE0 1AE0 [Regular][ILSEQ] +5C5A5CEF 1AEF [Regular][ILSEQ] +5C5A5CF9 1AF9 [Regular][ILSEQ] +5C5A5CFA 1AFA [Regular][ILSEQ] +5C5A5CFC 1AFC [Regular][ILSEQ] +5C5A5CFD 1AFD [Regular][ILSEQ] +5C5A5CFE 1AFE [Regular][ILSEQ] +5C5A5CFF 1AFF [Regular][ILSEQ] +5C5C5C00 5C00 [Regular] +5C5C5C08 5C08 [Regular] +5C5C5C09 5C09 [Regular] +5C5C5C0A 5C0A [Regular] +5C5C5C0D 5C0D [Regular] +5C5C5C1A 5C1A [Regular] +5C5C5C22 5C22 [Regular] +5C5C5C25 5C5C25 [Regular] +5C5C5C27 5C27 [Regular] +5C5C5C30 5C00 [Regular] +5C5C5C3F 5C3F [Regular] +5C5C5C40 5C40 [Regular] +5C5C5C5A 5C1A [Regular] +5C5C5C5C 5C5C [Regular] +5C5C5C5F 5C5C5F [Regular] +5C5C5C61 5C61 [Regular] +5C5C5C62 5C08 
[Regular] +5C5C5C6E 5C0A [Regular] +5C5C5C72 5C0D [Regular] +5C5C5C74 5C09 [Regular] +5C5C5C7E 5C7E [Regular] +5C5C5C7F 5C7F [Regular] +5C5C5C80 5C80 [Regular][ILSEQ] +5C5C5C81 5C81 [Regular][ILSEQ] +5C5C5C9F 5C9F [Regular][ILSEQ] +5C5C5CA0 5CA0 [Regular][ILSEQ] +5C5C5CA1 5CA1 [Regular][ILSEQ] +5C5C5CE0 5CE0 [Regular][ILSEQ] +5C5C5CEF 5CEF [Regular][ILSEQ] +5C5C5CF9 5CF9 [Regular][ILSEQ] +5C5C5CFA 5CFA [Regular][ILSEQ] +5C5C5CFC 5CFC [Regular][ILSEQ] +5C5C5CFD 5CFD [Regular][ILSEQ] +5C5C5CFE 5CFE [Regular][ILSEQ] +5C5C5CFF 5CFF [Regular][ILSEQ] +5C5F5C00 5C5F00 [Regular] +5C5F5C08 5C5F08 [Regular] +5C5F5C09 5C5F09 [Regular] +5C5F5C0A 5C5F0A [Regular] +5C5F5C0D 5C5F0D [Regular] +5C5F5C1A 5C5F1A [Regular] +5C5F5C22 5C5F22 [Regular] +5C5F5C25 5C5F5C25 [Preserve][LIKE] +5C5F5C27 5C5F27 [Regular] +5C5F5C30 5C5F00 [Regular] +5C5F5C3F 5C5F3F [Regular] +5C5F5C40 5C5F40 [Regular] +5C5F5C5A 5C5F1A [Regular] +5C5F5C5C 5C5F5C [Regular] +5C5F5C5F 5C5F5C5F [Preserve][LIKE] +5C5F5C61 5C5F61 [Regular] +5C5F5C62 5C5F08 [Regular] +5C5F5C6E 5C5F0A [Regular] +5C5F5C72 5C5F0D [Regular] +5C5F5C74 5C5F09 [Regular] +5C5F5C7E 5C5F7E [Regular] +5C5F5C7F 5C5F7F [Regular] +5C5F5C80 5C5F80 [Regular][ILSEQ] +5C5F5C81 5C5F81 [Regular][ILSEQ] +5C5F5C9F 5C5F9F [Regular][ILSEQ] +5C5F5CA0 5C5FA0 [Regular][ILSEQ] +5C5F5CA1 5C5FA1 [Regular][ILSEQ] +5C5F5CE0 5C5FE0 [Regular][ILSEQ] +5C5F5CEF 5C5FEF [Regular][ILSEQ] +5C5F5CF9 5C5FF9 [Regular][ILSEQ] +5C5F5CFA 5C5FFA [Regular][ILSEQ] +5C5F5CFC 5C5FFC [Regular][ILSEQ] +5C5F5CFD 5C5FFD [Regular][ILSEQ] +5C5F5CFE 5C5FFE [Regular][ILSEQ] +5C5F5CFF 5C5FFF [Regular][ILSEQ] +5C615C00 6100 [Trivial] +5C615C08 6108 [Trivial] +5C615C09 6109 [Trivial] +5C615C0A 610A [Trivial] +5C615C0D 610D [Trivial] +5C615C1A 611A [Trivial] +5C615C22 6122 [Trivial] +5C615C25 615C25 [Regular] +5C615C27 6127 [Trivial] +5C615C30 6100 [Regular] +5C615C3F 613F [Trivial] +5C615C40 6140 [Trivial] +5C615C5A 611A [Regular] +5C615C5C 615C [Regular] +5C615C5F 615C5F [Regular] +5C615C61 6161 [Trivial] +5C615C62 6108 [Regular] +5C615C6E 610A [Regular] +5C615C72 610D [Regular] +5C615C74 6109 [Regular] +5C615C7E 617E [Trivial] +5C615C7F 617F [Trivial] +5C615C80 6180 [Trivial][ILSEQ] +5C615C81 6181 [Trivial][ILSEQ] +5C615C9F 619F [Trivial][ILSEQ] +5C615CA0 61A0 [Trivial][ILSEQ] +5C615CA1 61A1 [Trivial][ILSEQ] +5C615CE0 61E0 [Trivial][ILSEQ] +5C615CEF 61EF [Trivial][ILSEQ] +5C615CF9 61F9 [Trivial][ILSEQ] +5C615CFA 61FA [Trivial][ILSEQ] +5C615CFC 61FC [Trivial][ILSEQ] +5C615CFD 61FD [Trivial][ILSEQ] +5C615CFE 61FE [Trivial][ILSEQ] +5C615CFF 61FF [Trivial][ILSEQ] +5C625C00 0800 [Regular] +5C625C08 0808 [Regular] +5C625C09 0809 [Regular] +5C625C0A 080A [Regular] +5C625C0D 080D [Regular] +5C625C1A 081A [Regular] +5C625C22 0822 [Regular] +5C625C25 085C25 [Regular] +5C625C27 0827 [Regular] +5C625C30 0800 [Regular] +5C625C3F 083F [Regular] +5C625C40 0840 [Regular] +5C625C5A 081A [Regular] +5C625C5C 085C [Regular] +5C625C5F 085C5F [Regular] +5C625C61 0861 [Regular] +5C625C62 0808 [Regular] +5C625C6E 080A [Regular] +5C625C72 080D [Regular] +5C625C74 0809 [Regular] +5C625C7E 087E [Regular] +5C625C7F 087F [Regular] +5C625C80 0880 [Regular][ILSEQ] +5C625C81 0881 [Regular][ILSEQ] +5C625C9F 089F [Regular][ILSEQ] +5C625CA0 08A0 [Regular][ILSEQ] +5C625CA1 08A1 [Regular][ILSEQ] +5C625CE0 08E0 [Regular][ILSEQ] +5C625CEF 08EF [Regular][ILSEQ] +5C625CF9 08F9 [Regular][ILSEQ] +5C625CFA 08FA [Regular][ILSEQ] +5C625CFC 08FC [Regular][ILSEQ] +5C625CFD 08FD [Regular][ILSEQ] +5C625CFE 08FE [Regular][ILSEQ] +5C625CFF 08FF [Regular][ILSEQ] +5C6E5C00 
0A00 [Regular] +5C6E5C08 0A08 [Regular] +5C6E5C09 0A09 [Regular] +5C6E5C0A 0A0A [Regular] +5C6E5C0D 0A0D [Regular] +5C6E5C1A 0A1A [Regular] +5C6E5C22 0A22 [Regular] +5C6E5C25 0A5C25 [Regular] +5C6E5C27 0A27 [Regular] +5C6E5C30 0A00 [Regular] +5C6E5C3F 0A3F [Regular] +5C6E5C40 0A40 [Regular] +5C6E5C5A 0A1A [Regular] +5C6E5C5C 0A5C [Regular] +5C6E5C5F 0A5C5F [Regular] +5C6E5C61 0A61 [Regular] +5C6E5C62 0A08 [Regular] +5C6E5C6E 0A0A [Regular] +5C6E5C72 0A0D [Regular] +5C6E5C74 0A09 [Regular] +5C6E5C7E 0A7E [Regular] +5C6E5C7F 0A7F [Regular] +5C6E5C80 0A80 [Regular][ILSEQ] +5C6E5C81 0A81 [Regular][ILSEQ] +5C6E5C9F 0A9F [Regular][ILSEQ] +5C6E5CA0 0AA0 [Regular][ILSEQ] +5C6E5CA1 0AA1 [Regular][ILSEQ] +5C6E5CE0 0AE0 [Regular][ILSEQ] +5C6E5CEF 0AEF [Regular][ILSEQ] +5C6E5CF9 0AF9 [Regular][ILSEQ] +5C6E5CFA 0AFA [Regular][ILSEQ] +5C6E5CFC 0AFC [Regular][ILSEQ] +5C6E5CFD 0AFD [Regular][ILSEQ] +5C6E5CFE 0AFE [Regular][ILSEQ] +5C6E5CFF 0AFF [Regular][ILSEQ] +5C725C00 0D00 [Regular] +5C725C08 0D08 [Regular] +5C725C09 0D09 [Regular] +5C725C0A 0D0A [Regular] +5C725C0D 0D0D [Regular] +5C725C1A 0D1A [Regular] +5C725C22 0D22 [Regular] +5C725C25 0D5C25 [Regular] +5C725C27 0D27 [Regular] +5C725C30 0D00 [Regular] +5C725C3F 0D3F [Regular] +5C725C40 0D40 [Regular] +5C725C5A 0D1A [Regular] +5C725C5C 0D5C [Regular] +5C725C5F 0D5C5F [Regular] +5C725C61 0D61 [Regular] +5C725C62 0D08 [Regular] +5C725C6E 0D0A [Regular] +5C725C72 0D0D [Regular] +5C725C74 0D09 [Regular] +5C725C7E 0D7E [Regular] +5C725C7F 0D7F [Regular] +5C725C80 0D80 [Regular][ILSEQ] +5C725C81 0D81 [Regular][ILSEQ] +5C725C9F 0D9F [Regular][ILSEQ] +5C725CA0 0DA0 [Regular][ILSEQ] +5C725CA1 0DA1 [Regular][ILSEQ] +5C725CE0 0DE0 [Regular][ILSEQ] +5C725CEF 0DEF [Regular][ILSEQ] +5C725CF9 0DF9 [Regular][ILSEQ] +5C725CFA 0DFA [Regular][ILSEQ] +5C725CFC 0DFC [Regular][ILSEQ] +5C725CFD 0DFD [Regular][ILSEQ] +5C725CFE 0DFE [Regular][ILSEQ] +5C725CFF 0DFF [Regular][ILSEQ] +5C745C00 0900 [Regular] +5C745C08 0908 [Regular] +5C745C09 0909 [Regular] +5C745C0A 090A [Regular] +5C745C0D 090D [Regular] +5C745C1A 091A [Regular] +5C745C22 0922 [Regular] +5C745C25 095C25 [Regular] +5C745C27 0927 [Regular] +5C745C30 0900 [Regular] +5C745C3F 093F [Regular] +5C745C40 0940 [Regular] +5C745C5A 091A [Regular] +5C745C5C 095C [Regular] +5C745C5F 095C5F [Regular] +5C745C61 0961 [Regular] +5C745C62 0908 [Regular] +5C745C6E 090A [Regular] +5C745C72 090D [Regular] +5C745C74 0909 [Regular] +5C745C7E 097E [Regular] +5C745C7F 097F [Regular] +5C745C80 0980 [Regular][ILSEQ] +5C745C81 0981 [Regular][ILSEQ] +5C745C9F 099F [Regular][ILSEQ] +5C745CA0 09A0 [Regular][ILSEQ] +5C745CA1 09A1 [Regular][ILSEQ] +5C745CE0 09E0 [Regular][ILSEQ] +5C745CEF 09EF [Regular][ILSEQ] +5C745CF9 09F9 [Regular][ILSEQ] +5C745CFA 09FA [Regular][ILSEQ] +5C745CFC 09FC [Regular][ILSEQ] +5C745CFD 09FD [Regular][ILSEQ] +5C745CFE 09FE [Regular][ILSEQ] +5C745CFF 09FF [Regular][ILSEQ] +5C7E5C00 7E00 [Trivial] +5C7E5C08 7E08 [Trivial] +5C7E5C09 7E09 [Trivial] +5C7E5C0A 7E0A [Trivial] +5C7E5C0D 7E0D [Trivial] +5C7E5C1A 7E1A [Trivial] +5C7E5C22 7E22 [Trivial] +5C7E5C25 7E5C25 [Regular] +5C7E5C27 7E27 [Trivial] +5C7E5C30 7E00 [Regular] +5C7E5C3F 7E3F [Trivial] +5C7E5C40 7E40 [Trivial] +5C7E5C5A 7E1A [Regular] +5C7E5C5C 7E5C [Regular] +5C7E5C5F 7E5C5F [Regular] +5C7E5C61 7E61 [Trivial] +5C7E5C62 7E08 [Regular] +5C7E5C6E 7E0A [Regular] +5C7E5C72 7E0D [Regular] +5C7E5C74 7E09 [Regular] +5C7E5C7E 7E7E [Trivial] +5C7E5C7F 7E7F [Trivial] +5C7E5C80 7E80 [Trivial][ILSEQ] +5C7E5C81 7E81 [Trivial][ILSEQ] +5C7E5C9F 7E9F [Trivial][ILSEQ] 
+5C7E5CA0 7EA0 [Trivial][ILSEQ] +5C7E5CA1 7EA1 [Trivial][ILSEQ] +5C7E5CE0 7EE0 [Trivial][ILSEQ] +5C7E5CEF 7EEF [Trivial][ILSEQ] +5C7E5CF9 7EF9 [Trivial][ILSEQ] +5C7E5CFA 7EFA [Trivial][ILSEQ] +5C7E5CFC 7EFC [Trivial][ILSEQ] +5C7E5CFD 7EFD [Trivial][ILSEQ] +5C7E5CFE 7EFE [Trivial][ILSEQ] +5C7E5CFF 7EFF [Trivial][ILSEQ] +5C7F5C00 7F00 [Trivial] +5C7F5C08 7F08 [Trivial] +5C7F5C09 7F09 [Trivial] +5C7F5C0A 7F0A [Trivial] +5C7F5C0D 7F0D [Trivial] +5C7F5C1A 7F1A [Trivial] +5C7F5C22 7F22 [Trivial] +5C7F5C25 7F5C25 [Regular] +5C7F5C27 7F27 [Trivial] +5C7F5C30 7F00 [Regular] +5C7F5C3F 7F3F [Trivial] +5C7F5C40 7F40 [Trivial] +5C7F5C5A 7F1A [Regular] +5C7F5C5C 7F5C [Regular] +5C7F5C5F 7F5C5F [Regular] +5C7F5C61 7F61 [Trivial] +5C7F5C62 7F08 [Regular] +5C7F5C6E 7F0A [Regular] +5C7F5C72 7F0D [Regular] +5C7F5C74 7F09 [Regular] +5C7F5C7E 7F7E [Trivial] +5C7F5C7F 7F7F [Trivial] +5C7F5C80 7F80 [Trivial][ILSEQ] +5C7F5C81 7F81 [Trivial][ILSEQ] +5C7F5C9F 7F9F [Trivial][ILSEQ] +5C7F5CA0 7FA0 [Trivial][ILSEQ] +5C7F5CA1 7FA1 [Trivial][ILSEQ] +5C7F5CE0 7FE0 [Trivial][ILSEQ] +5C7F5CEF 7FEF [Trivial][ILSEQ] +5C7F5CF9 7FF9 [Trivial][ILSEQ] +5C7F5CFA 7FFA [Trivial][ILSEQ] +5C7F5CFC 7FFC [Trivial][ILSEQ] +5C7F5CFD 7FFD [Trivial][ILSEQ] +5C7F5CFE 7FFE [Trivial][ILSEQ] +5C7F5CFF 7FFF [Trivial][ILSEQ] +5C805C00 8000 [Trivial][ILSEQ] +5C805C08 8008 [Trivial][ILSEQ] +5C805C09 8009 [Trivial][ILSEQ] +5C805C0A 800A [Trivial][ILSEQ] +5C805C0D 800D [Trivial][ILSEQ] +5C805C1A 801A [Trivial][ILSEQ] +5C805C22 8022 [Trivial][ILSEQ] +5C805C25 805C25 [Regular][ILSEQ] +5C805C27 8027 [Trivial][ILSEQ] +5C805C30 8000 [Regular][ILSEQ] +5C805C3F 803F [Trivial][ILSEQ] +5C805C40 8040 [Trivial][ILSEQ] +5C805C5A 801A [Regular][ILSEQ] +5C805C5C 805C [Regular][ILSEQ] +5C805C5F 805C5F [Regular][ILSEQ] +5C805C61 8061 [Trivial][ILSEQ] +5C805C62 8008 [Regular][ILSEQ] +5C805C6E 800A [Regular][ILSEQ] +5C805C72 800D [Regular][ILSEQ] +5C805C74 8009 [Regular][ILSEQ] +5C805C7E 807E [Trivial][ILSEQ] +5C805C7F 807F [Trivial][ILSEQ] +5C805C80 8080 [Trivial][ILSEQ] +5C805C81 8081 [Trivial][ILSEQ] +5C805C9F 809F [Trivial][ILSEQ] +5C805CA0 80A0 [Trivial][ILSEQ] +5C805CA1 80A1 [Trivial][ILSEQ] +5C805CE0 80E0 [Trivial][ILSEQ] +5C805CEF 80EF [Trivial][ILSEQ] +5C805CF9 80F9 [Trivial][ILSEQ] +5C805CFA 80FA [Trivial][ILSEQ] +5C805CFC 80FC [Trivial][ILSEQ] +5C805CFD 80FD [Trivial][ILSEQ] +5C805CFE 80FE [Trivial][ILSEQ] +5C805CFF 80FF [Trivial][ILSEQ] +5C815C00 8100 [Trivial][ILSEQ] +5C815C08 8108 [Trivial][ILSEQ] +5C815C09 8109 [Trivial][ILSEQ] +5C815C0A 810A [Trivial][ILSEQ] +5C815C0D 810D [Trivial][ILSEQ] +5C815C1A 811A [Trivial][ILSEQ] +5C815C22 8122 [Trivial][ILSEQ] +5C815C25 815C25 [Regular][ILSEQ] +5C815C27 8127 [Trivial][ILSEQ] +5C815C30 8100 [Regular][ILSEQ] +5C815C3F 813F [Trivial][ILSEQ] +5C815C40 8140 [Trivial][ILSEQ] +5C815C5A 811A [Regular][ILSEQ] +5C815C5C 815C [Regular][ILSEQ] +5C815C5F 815C5F [Regular][ILSEQ] +5C815C61 8161 [Trivial][ILSEQ] +5C815C62 8108 [Regular][ILSEQ] +5C815C6E 810A [Regular][ILSEQ] +5C815C72 810D [Regular][ILSEQ] +5C815C74 8109 [Regular][ILSEQ] +5C815C7E 817E [Trivial][ILSEQ] +5C815C7F 817F [Trivial][ILSEQ] +5C815C80 8180 [Trivial][ILSEQ] +5C815C81 8181 [Trivial][ILSEQ] +5C815C9F 819F [Trivial][ILSEQ] +5C815CA0 81A0 [Trivial][ILSEQ] +5C815CA1 81A1 [Trivial][ILSEQ] +5C815CE0 81E0 [Trivial][ILSEQ] +5C815CEF 81EF [Trivial][ILSEQ] +5C815CF9 81F9 [Trivial][ILSEQ] +5C815CFA 81FA [Trivial][ILSEQ] +5C815CFC 81FC [Trivial][ILSEQ] +5C815CFD 81FD [Trivial][ILSEQ] +5C815CFE 81FE [Trivial][ILSEQ] +5C815CFF 81FF [Trivial][ILSEQ] +5C9F5C00 9F00 
[Trivial][ILSEQ] +5C9F5C08 9F08 [Trivial][ILSEQ] +5C9F5C09 9F09 [Trivial][ILSEQ] +5C9F5C0A 9F0A [Trivial][ILSEQ] +5C9F5C0D 9F0D [Trivial][ILSEQ] +5C9F5C1A 9F1A [Trivial][ILSEQ] +5C9F5C22 9F22 [Trivial][ILSEQ] +5C9F5C25 9F5C25 [Regular][ILSEQ] +5C9F5C27 9F27 [Trivial][ILSEQ] +5C9F5C30 9F00 [Regular][ILSEQ] +5C9F5C3F 9F3F [Trivial][ILSEQ] +5C9F5C40 9F40 [Trivial][ILSEQ] +5C9F5C5A 9F1A [Regular][ILSEQ] +5C9F5C5C 9F5C [Regular][ILSEQ] +5C9F5C5F 9F5C5F [Regular][ILSEQ] +5C9F5C61 9F61 [Trivial][ILSEQ] +5C9F5C62 9F08 [Regular][ILSEQ] +5C9F5C6E 9F0A [Regular][ILSEQ] +5C9F5C72 9F0D [Regular][ILSEQ] +5C9F5C74 9F09 [Regular][ILSEQ] +5C9F5C7E 9F7E [Trivial][ILSEQ] +5C9F5C7F 9F7F [Trivial][ILSEQ] +5C9F5C80 9F80 [Trivial][ILSEQ] +5C9F5C81 9F81 [Trivial][ILSEQ] +5C9F5C9F 9F9F [Trivial][ILSEQ] +5C9F5CA0 9FA0 [Trivial][ILSEQ] +5C9F5CA1 9FA1 [Trivial][ILSEQ] +5C9F5CE0 9FE0 [Trivial][ILSEQ] +5C9F5CEF 9FEF [Trivial][ILSEQ] +5C9F5CF9 9FF9 [Trivial][ILSEQ] +5C9F5CFA 9FFA [Trivial][ILSEQ] +5C9F5CFC 9FFC [Trivial][ILSEQ] +5C9F5CFD 9FFD [Trivial][ILSEQ] +5C9F5CFE 9FFE [Trivial][ILSEQ] +5C9F5CFF 9FFF [Trivial][ILSEQ] +5CA05C00 A000 [Trivial][ILSEQ] +5CA05C08 A008 [Trivial][ILSEQ] +5CA05C09 A009 [Trivial][ILSEQ] +5CA05C0A A00A [Trivial][ILSEQ] +5CA05C0D A00D [Trivial][ILSEQ] +5CA05C1A A01A [Trivial][ILSEQ] +5CA05C22 A022 [Trivial][ILSEQ] +5CA05C25 A05C25 [Regular][ILSEQ] +5CA05C27 A027 [Trivial][ILSEQ] +5CA05C30 A000 [Regular][ILSEQ] +5CA05C3F A03F [Trivial][ILSEQ] +5CA05C40 A040 [Trivial][ILSEQ] +5CA05C5A A01A [Regular][ILSEQ] +5CA05C5C A05C [Regular][ILSEQ] +5CA05C5F A05C5F [Regular][ILSEQ] +5CA05C61 A061 [Trivial][ILSEQ] +5CA05C62 A008 [Regular][ILSEQ] +5CA05C6E A00A [Regular][ILSEQ] +5CA05C72 A00D [Regular][ILSEQ] +5CA05C74 A009 [Regular][ILSEQ] +5CA05C7E A07E [Trivial][ILSEQ] +5CA05C7F A07F [Trivial][ILSEQ] +5CA05C80 A080 [Trivial][ILSEQ] +5CA05C81 A081 [Trivial][ILSEQ] +5CA05C9F A09F [Trivial][ILSEQ] +5CA05CA0 A0A0 [Trivial][ILSEQ] +5CA05CA1 A0A1 [Trivial][ILSEQ] +5CA05CE0 A0E0 [Trivial][ILSEQ] +5CA05CEF A0EF [Trivial][ILSEQ] +5CA05CF9 A0F9 [Trivial][ILSEQ] +5CA05CFA A0FA [Trivial][ILSEQ] +5CA05CFC A0FC [Trivial][ILSEQ] +5CA05CFD A0FD [Trivial][ILSEQ] +5CA05CFE A0FE [Trivial][ILSEQ] +5CA05CFF A0FF [Trivial][ILSEQ] +5CA15C00 A100 [Trivial][ILSEQ] +5CA15C08 A108 [Trivial][ILSEQ] +5CA15C09 A109 [Trivial][ILSEQ] +5CA15C0A A10A [Trivial][ILSEQ] +5CA15C0D A10D [Trivial][ILSEQ] +5CA15C1A A11A [Trivial][ILSEQ] +5CA15C22 A122 [Trivial][ILSEQ] +5CA15C25 A15C25 [Regular][ILSEQ] +5CA15C27 A127 [Trivial][ILSEQ] +5CA15C30 A100 [Regular][ILSEQ] +5CA15C3F A13F [Trivial][ILSEQ] +5CA15C40 A140 [Trivial][ILSEQ] +5CA15C5A A11A [Regular][ILSEQ] +5CA15C5C A15C [Regular][ILSEQ] +5CA15C5F A15C5F [Regular][ILSEQ] +5CA15C61 A161 [Trivial][ILSEQ] +5CA15C62 A108 [Regular][ILSEQ] +5CA15C6E A10A [Regular][ILSEQ] +5CA15C72 A10D [Regular][ILSEQ] +5CA15C74 A109 [Regular][ILSEQ] +5CA15C7E A17E [Trivial][ILSEQ] +5CA15C7F A17F [Trivial][ILSEQ] +5CA15C80 A180 [Trivial][ILSEQ] +5CA15C81 A181 [Trivial][ILSEQ] +5CA15C9F A19F [Trivial][ILSEQ] +5CA15CA0 A1A0 [Trivial][ILSEQ] +5CA15CA1 A1A1 [Trivial][ILSEQ] +5CA15CE0 A1E0 [Trivial][ILSEQ] +5CA15CEF A1EF [Trivial][ILSEQ] +5CA15CF9 A1F9 [Trivial][ILSEQ] +5CA15CFA A1FA [Trivial][ILSEQ] +5CA15CFC A1FC [Trivial][ILSEQ] +5CA15CFD A1FD [Trivial][ILSEQ] +5CA15CFE A1FE [Trivial][ILSEQ] +5CA15CFF A1FF [Trivial][ILSEQ] +5CE05C00 E000 [Trivial][ILSEQ] +5CE05C08 E008 [Trivial][ILSEQ] +5CE05C09 E009 [Trivial][ILSEQ] +5CE05C0A E00A [Trivial][ILSEQ] +5CE05C0D E00D [Trivial][ILSEQ] +5CE05C1A E01A [Trivial][ILSEQ] 
+5CE05C22 E022 [Trivial][ILSEQ] +5CE05C25 E05C25 [Regular][ILSEQ] +5CE05C27 E027 [Trivial][ILSEQ] +5CE05C30 E000 [Regular][ILSEQ] +5CE05C3F E03F [Trivial][ILSEQ] +5CE05C40 E040 [Trivial][ILSEQ] +5CE05C5A E01A [Regular][ILSEQ] +5CE05C5C E05C [Regular][ILSEQ] +5CE05C5F E05C5F [Regular][ILSEQ] +5CE05C61 E061 [Trivial][ILSEQ] +5CE05C62 E008 [Regular][ILSEQ] +5CE05C6E E00A [Regular][ILSEQ] +5CE05C72 E00D [Regular][ILSEQ] +5CE05C74 E009 [Regular][ILSEQ] +5CE05C7E E07E [Trivial][ILSEQ] +5CE05C7F E07F [Trivial][ILSEQ] +5CE05C80 E080 [Trivial][ILSEQ] +5CE05C81 E081 [Trivial][ILSEQ] +5CE05C9F E09F [Trivial][ILSEQ] +5CE05CA0 E0A0 [Trivial][ILSEQ] +5CE05CA1 E0A1 [Trivial][ILSEQ] +5CE05CE0 E0E0 [Trivial][ILSEQ] +5CE05CEF E0EF [Trivial][ILSEQ] +5CE05CF9 E0F9 [Trivial][ILSEQ] +5CE05CFA E0FA [Trivial][ILSEQ] +5CE05CFC E0FC [Trivial][ILSEQ] +5CE05CFD E0FD [Trivial][ILSEQ] +5CE05CFE E0FE [Trivial][ILSEQ] +5CE05CFF E0FF [Trivial][ILSEQ] +5CEF5C00 EF00 [Trivial][ILSEQ] +5CEF5C08 EF08 [Trivial][ILSEQ] +5CEF5C09 EF09 [Trivial][ILSEQ] +5CEF5C0A EF0A [Trivial][ILSEQ] +5CEF5C0D EF0D [Trivial][ILSEQ] +5CEF5C1A EF1A [Trivial][ILSEQ] +5CEF5C22 EF22 [Trivial][ILSEQ] +5CEF5C25 EF5C25 [Regular][ILSEQ] +5CEF5C27 EF27 [Trivial][ILSEQ] +5CEF5C30 EF00 [Regular][ILSEQ] +5CEF5C3F EF3F [Trivial][ILSEQ] +5CEF5C40 EF40 [Trivial][ILSEQ] +5CEF5C5A EF1A [Regular][ILSEQ] +5CEF5C5C EF5C [Regular][ILSEQ] +5CEF5C5F EF5C5F [Regular][ILSEQ] +5CEF5C61 EF61 [Trivial][ILSEQ] +5CEF5C62 EF08 [Regular][ILSEQ] +5CEF5C6E EF0A [Regular][ILSEQ] +5CEF5C72 EF0D [Regular][ILSEQ] +5CEF5C74 EF09 [Regular][ILSEQ] +5CEF5C7E EF7E [Trivial][ILSEQ] +5CEF5C7F EF7F [Trivial][ILSEQ] +5CEF5C80 EF80 [Trivial][ILSEQ] +5CEF5C81 EF81 [Trivial][ILSEQ] +5CEF5C9F EF9F [Trivial][ILSEQ] +5CEF5CA0 EFA0 [Trivial][ILSEQ] +5CEF5CA1 EFA1 [Trivial][ILSEQ] +5CEF5CE0 EFE0 [Trivial][ILSEQ] +5CEF5CEF EFEF [Trivial][ILSEQ] +5CEF5CF9 EFF9 [Trivial][ILSEQ] +5CEF5CFA EFFA [Trivial][ILSEQ] +5CEF5CFC EFFC [Trivial][ILSEQ] +5CEF5CFD EFFD [Trivial][ILSEQ] +5CEF5CFE EFFE [Trivial][ILSEQ] +5CEF5CFF EFFF [Trivial][ILSEQ] +5CF95C00 F900 [Trivial][ILSEQ] +5CF95C08 F908 [Trivial][ILSEQ] +5CF95C09 F909 [Trivial][ILSEQ] +5CF95C0A F90A [Trivial][ILSEQ] +5CF95C0D F90D [Trivial][ILSEQ] +5CF95C1A F91A [Trivial][ILSEQ] +5CF95C22 F922 [Trivial][ILSEQ] +5CF95C25 F95C25 [Regular][ILSEQ] +5CF95C27 F927 [Trivial][ILSEQ] +5CF95C30 F900 [Regular][ILSEQ] +5CF95C3F F93F [Trivial][ILSEQ] +5CF95C40 F940 [Trivial][ILSEQ] +5CF95C5A F91A [Regular][ILSEQ] +5CF95C5C F95C [Regular][ILSEQ] +5CF95C5F F95C5F [Regular][ILSEQ] +5CF95C61 F961 [Trivial][ILSEQ] +5CF95C62 F908 [Regular][ILSEQ] +5CF95C6E F90A [Regular][ILSEQ] +5CF95C72 F90D [Regular][ILSEQ] +5CF95C74 F909 [Regular][ILSEQ] +5CF95C7E F97E [Trivial][ILSEQ] +5CF95C7F F97F [Trivial][ILSEQ] +5CF95C80 F980 [Trivial][ILSEQ] +5CF95C81 F981 [Trivial][ILSEQ] +5CF95C9F F99F [Trivial][ILSEQ] +5CF95CA0 F9A0 [Trivial][ILSEQ] +5CF95CA1 F9A1 [Trivial][ILSEQ] +5CF95CE0 F9E0 [Trivial][ILSEQ] +5CF95CEF F9EF [Trivial][ILSEQ] +5CF95CF9 F9F9 [Trivial][ILSEQ] +5CF95CFA F9FA [Trivial][ILSEQ] +5CF95CFC F9FC [Trivial][ILSEQ] +5CF95CFD F9FD [Trivial][ILSEQ] +5CF95CFE F9FE [Trivial][ILSEQ] +5CF95CFF F9FF [Trivial][ILSEQ] +5CFA5C00 FA00 [Trivial][ILSEQ] +5CFA5C08 FA08 [Trivial][ILSEQ] +5CFA5C09 FA09 [Trivial][ILSEQ] +5CFA5C0A FA0A [Trivial][ILSEQ] +5CFA5C0D FA0D [Trivial][ILSEQ] +5CFA5C1A FA1A [Trivial][ILSEQ] +5CFA5C22 FA22 [Trivial][ILSEQ] +5CFA5C25 FA5C25 [Regular][ILSEQ] +5CFA5C27 FA27 [Trivial][ILSEQ] +5CFA5C30 FA00 [Regular][ILSEQ] +5CFA5C3F FA3F [Trivial][ILSEQ] +5CFA5C40 FA40 
[Trivial][ILSEQ] +5CFA5C5A FA1A [Regular][ILSEQ] +5CFA5C5C FA5C [Regular][ILSEQ] +5CFA5C5F FA5C5F [Regular][ILSEQ] +5CFA5C61 FA61 [Trivial][ILSEQ] +5CFA5C62 FA08 [Regular][ILSEQ] +5CFA5C6E FA0A [Regular][ILSEQ] +5CFA5C72 FA0D [Regular][ILSEQ] +5CFA5C74 FA09 [Regular][ILSEQ] +5CFA5C7E FA7E [Trivial][ILSEQ] +5CFA5C7F FA7F [Trivial][ILSEQ] +5CFA5C80 FA80 [Trivial][ILSEQ] +5CFA5C81 FA81 [Trivial][ILSEQ] +5CFA5C9F FA9F [Trivial][ILSEQ] +5CFA5CA0 FAA0 [Trivial][ILSEQ] +5CFA5CA1 FAA1 [Trivial][ILSEQ] +5CFA5CE0 FAE0 [Trivial][ILSEQ] +5CFA5CEF FAEF [Trivial][ILSEQ] +5CFA5CF9 FAF9 [Trivial][ILSEQ] +5CFA5CFA FAFA [Trivial][ILSEQ] +5CFA5CFC FAFC [Trivial][ILSEQ] +5CFA5CFD FAFD [Trivial][ILSEQ] +5CFA5CFE FAFE [Trivial][ILSEQ] +5CFA5CFF FAFF [Trivial][ILSEQ] +5CFC5C00 FC00 [Trivial][ILSEQ] +5CFC5C08 FC08 [Trivial][ILSEQ] +5CFC5C09 FC09 [Trivial][ILSEQ] +5CFC5C0A FC0A [Trivial][ILSEQ] +5CFC5C0D FC0D [Trivial][ILSEQ] +5CFC5C1A FC1A [Trivial][ILSEQ] +5CFC5C22 FC22 [Trivial][ILSEQ] +5CFC5C25 FC5C25 [Regular][ILSEQ] +5CFC5C27 FC27 [Trivial][ILSEQ] +5CFC5C30 FC00 [Regular][ILSEQ] +5CFC5C3F FC3F [Trivial][ILSEQ] +5CFC5C40 FC40 [Trivial][ILSEQ] +5CFC5C5A FC1A [Regular][ILSEQ] +5CFC5C5C FC5C [Regular][ILSEQ] +5CFC5C5F FC5C5F [Regular][ILSEQ] +5CFC5C61 FC61 [Trivial][ILSEQ] +5CFC5C62 FC08 [Regular][ILSEQ] +5CFC5C6E FC0A [Regular][ILSEQ] +5CFC5C72 FC0D [Regular][ILSEQ] +5CFC5C74 FC09 [Regular][ILSEQ] +5CFC5C7E FC7E [Trivial][ILSEQ] +5CFC5C7F FC7F [Trivial][ILSEQ] +5CFC5C80 FC80 [Trivial][ILSEQ] +5CFC5C81 FC81 [Trivial][ILSEQ] +5CFC5C9F FC9F [Trivial][ILSEQ] +5CFC5CA0 FCA0 [Trivial][ILSEQ] +5CFC5CA1 FCA1 [Trivial][ILSEQ] +5CFC5CE0 FCE0 [Trivial][ILSEQ] +5CFC5CEF FCEF [Trivial][ILSEQ] +5CFC5CF9 FCF9 [Trivial][ILSEQ] +5CFC5CFA FCFA [Trivial][ILSEQ] +5CFC5CFC FCFC [Trivial][ILSEQ] +5CFC5CFD FCFD [Trivial][ILSEQ] +5CFC5CFE FCFE [Trivial][ILSEQ] +5CFC5CFF FCFF [Trivial][ILSEQ] +5CFD5C00 FD00 [Trivial][ILSEQ] +5CFD5C08 FD08 [Trivial][ILSEQ] +5CFD5C09 FD09 [Trivial][ILSEQ] +5CFD5C0A FD0A [Trivial][ILSEQ] +5CFD5C0D FD0D [Trivial][ILSEQ] +5CFD5C1A FD1A [Trivial][ILSEQ] +5CFD5C22 FD22 [Trivial][ILSEQ] +5CFD5C25 FD5C25 [Regular][ILSEQ] +5CFD5C27 FD27 [Trivial][ILSEQ] +5CFD5C30 FD00 [Regular][ILSEQ] +5CFD5C3F FD3F [Trivial][ILSEQ] +5CFD5C40 FD40 [Trivial][ILSEQ] +5CFD5C5A FD1A [Regular][ILSEQ] +5CFD5C5C FD5C [Regular][ILSEQ] +5CFD5C5F FD5C5F [Regular][ILSEQ] +5CFD5C61 FD61 [Trivial][ILSEQ] +5CFD5C62 FD08 [Regular][ILSEQ] +5CFD5C6E FD0A [Regular][ILSEQ] +5CFD5C72 FD0D [Regular][ILSEQ] +5CFD5C74 FD09 [Regular][ILSEQ] +5CFD5C7E FD7E [Trivial][ILSEQ] +5CFD5C7F FD7F [Trivial][ILSEQ] +5CFD5C80 FD80 [Trivial][ILSEQ] +5CFD5C81 FD81 [Trivial][ILSEQ] +5CFD5C9F FD9F [Trivial][ILSEQ] +5CFD5CA0 FDA0 [Trivial][ILSEQ] +5CFD5CA1 FDA1 [Trivial][ILSEQ] +5CFD5CE0 FDE0 [Trivial][ILSEQ] +5CFD5CEF FDEF [Trivial][ILSEQ] +5CFD5CF9 FDF9 [Trivial][ILSEQ] +5CFD5CFA FDFA [Trivial][ILSEQ] +5CFD5CFC FDFC [Trivial][ILSEQ] +5CFD5CFD FDFD [Trivial][ILSEQ] +5CFD5CFE FDFE [Trivial][ILSEQ] +5CFD5CFF FDFF [Trivial][ILSEQ] +5CFE5C00 FE00 [Trivial][ILSEQ] +5CFE5C08 FE08 [Trivial][ILSEQ] +5CFE5C09 FE09 [Trivial][ILSEQ] +5CFE5C0A FE0A [Trivial][ILSEQ] +5CFE5C0D FE0D [Trivial][ILSEQ] +5CFE5C1A FE1A [Trivial][ILSEQ] +5CFE5C22 FE22 [Trivial][ILSEQ] +5CFE5C25 FE5C25 [Regular][ILSEQ] +5CFE5C27 FE27 [Trivial][ILSEQ] +5CFE5C30 FE00 [Regular][ILSEQ] +5CFE5C3F FE3F [Trivial][ILSEQ] +5CFE5C40 FE40 [Trivial][ILSEQ] +5CFE5C5A FE1A [Regular][ILSEQ] +5CFE5C5C FE5C [Regular][ILSEQ] +5CFE5C5F FE5C5F [Regular][ILSEQ] +5CFE5C61 FE61 [Trivial][ILSEQ] +5CFE5C62 FE08 [Regular][ILSEQ] 
+5CFE5C6E FE0A [Regular][ILSEQ] +5CFE5C72 FE0D [Regular][ILSEQ] +5CFE5C74 FE09 [Regular][ILSEQ] +5CFE5C7E FE7E [Trivial][ILSEQ] +5CFE5C7F FE7F [Trivial][ILSEQ] +5CFE5C80 FE80 [Trivial][ILSEQ] +5CFE5C81 FE81 [Trivial][ILSEQ] +5CFE5C9F FE9F [Trivial][ILSEQ] +5CFE5CA0 FEA0 [Trivial][ILSEQ] +5CFE5CA1 FEA1 [Trivial][ILSEQ] +5CFE5CE0 FEE0 [Trivial][ILSEQ] +5CFE5CEF FEEF [Trivial][ILSEQ] +5CFE5CF9 FEF9 [Trivial][ILSEQ] +5CFE5CFA FEFA [Trivial][ILSEQ] +5CFE5CFC FEFC [Trivial][ILSEQ] +5CFE5CFD FEFD [Trivial][ILSEQ] +5CFE5CFE FEFE [Trivial][ILSEQ] +5CFE5CFF FEFF [Trivial][ILSEQ] +5CFF5C00 FF00 [Trivial][ILSEQ] +5CFF5C08 FF08 [Trivial][ILSEQ] +5CFF5C09 FF09 [Trivial][ILSEQ] +5CFF5C0A FF0A [Trivial][ILSEQ] +5CFF5C0D FF0D [Trivial][ILSEQ] +5CFF5C1A FF1A [Trivial][ILSEQ] +5CFF5C22 FF22 [Trivial][ILSEQ] +5CFF5C25 FF5C25 [Regular][ILSEQ] +5CFF5C27 FF27 [Trivial][ILSEQ] +5CFF5C30 FF00 [Regular][ILSEQ] +5CFF5C3F FF3F [Trivial][ILSEQ] +5CFF5C40 FF40 [Trivial][ILSEQ] +5CFF5C5A FF1A [Regular][ILSEQ] +5CFF5C5C FF5C [Regular][ILSEQ] +5CFF5C5F FF5C5F [Regular][ILSEQ] +5CFF5C61 FF61 [Trivial][ILSEQ] +5CFF5C62 FF08 [Regular][ILSEQ] +5CFF5C6E FF0A [Regular][ILSEQ] +5CFF5C72 FF0D [Regular][ILSEQ] +5CFF5C74 FF09 [Regular][ILSEQ] +5CFF5C7E FF7E [Trivial][ILSEQ] +5CFF5C7F FF7F [Trivial][ILSEQ] +5CFF5C80 FF80 [Trivial][ILSEQ] +5CFF5C81 FF81 [Trivial][ILSEQ] +5CFF5C9F FF9F [Trivial][ILSEQ] +5CFF5CA0 FFA0 [Trivial][ILSEQ] +5CFF5CA1 FFA1 [Trivial][ILSEQ] +5CFF5CE0 FFE0 [Trivial][ILSEQ] +5CFF5CEF FFEF [Trivial][ILSEQ] +5CFF5CF9 FFF9 [Trivial][ILSEQ] +5CFF5CFA FFFA [Trivial][ILSEQ] +5CFF5CFC FFFC [Trivial][ILSEQ] +5CFF5CFD FFFD [Trivial][ILSEQ] +5CFF5CFE FFFE [Trivial][ILSEQ] +5CFF5CFF FFFF [Trivial][ILSEQ]
+DROP TABLE t1;
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+DROP FUNCTION unescape;
+DROP FUNCTION unescape_type;
+DROP FUNCTION wellformedness;
+DROP FUNCTION mysql_real_escape_string_generated;
+DROP FUNCTION iswellformed;
+DROP TABLE allbytes;
+# End of ctype_backslash.inc
+#
+# End of 10.0 tests
+#
diff --git a/mysql-test/r/derived_view.result b/mysql-test/r/derived_view.result
index 87267124ff0..e359a8f89c5 100644
--- a/mysql-test/r/derived_view.result
+++ b/mysql-test/r/derived_view.result
@@ -2406,6 +2406,94 @@ deallocate prepare stmt;
drop table t1,t2;
set optimizer_switch=@save_optimizer_switch5740;
#
+# Bug mdev-5721: possible long key access to a materialized derived table
+# (see also the test case for Bug#13261277 that is actually the same bug)
+#
+CREATE TABLE t1 (
+id varchar(255) NOT NULL DEFAULT '',
+familyid int(11) DEFAULT NULL,
+withdrawndate date DEFAULT NULL,
+KEY index_td_familyid_id (familyid,id)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8;
+CREATE TABLE t2 (
+id int(11) NOT NULL AUTO_INCREMENT,
+activefromts datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
+shortdescription text,
+useraccessfamily varchar(512) DEFAULT NULL,
+serialized longtext,
+PRIMARY KEY (id)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8;
+insert into t1 values ('picture/89/1369722032695.pmd',89,NULL);
+insert into t1 values ('picture/90/1369832057370.pmd',90,NULL);
+insert into t2 values (38,'2013-03-04 07:49:22','desc','CODE','string');
+EXPLAIN
+SELECT * FROM t2 x,
+(SELECT t2.useraccessfamily, t2.serialized AS picturesubuser, COUNT(*)
+FROM t2, t1 GROUP BY t2.useraccessfamily, picturesubuser) y
+WHERE x.useraccessfamily = y.useraccessfamily;
+id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY x system NULL NULL NULL NULL 1 +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2 Using where +2 DERIVED t2 system NULL NULL
NULL NULL 1 +2 DERIVED t1 index NULL index_td_familyid_id 772 NULL 2 Using index +SELECT * FROM t2 x, +(SELECT t2.useraccessfamily, t2.serialized AS picturesubuser, COUNT(*) +FROM t2, t1 GROUP BY t2.useraccessfamily, picturesubuser) y +WHERE x.useraccessfamily = y.useraccessfamily; +id activefromts shortdescription useraccessfamily serialized useraccessfamily picturesubuser COUNT(*) +38 2013-03-04 07:49:22 desc CODE string CODE string 2 +DROP TABLE t1,t2; +# +# Bug#13261277: Unchecked key length caused missing records. +# +CREATE TABLE t1 ( +col_varchar varchar(1024) CHARACTER SET utf8 DEFAULT NULL, +stub1 varchar(1024) CHARACTER SET utf8 DEFAULT NULL, +stub2 varchar(1024) CHARACTER SET utf8 DEFAULT NULL, +stub3 varchar(1024) CHARACTER SET utf8 DEFAULT NULL +); +INSERT INTO t1 VALUES +('d','d','l','ther'), +(NULL,'s','NJBIQ','trzetuchv'), +(-715390976,'coul','MYWFB','cfhtrzetu'), +(1696792576,'f','i\'s','c'), + (1,'i','ltpemcfhtr','gsltpemcf'), + (-663027712,'mgsltpemcf','sa','amgsltpem'), + (-1686700032,'JPRVK','i','vamgsltpe'), + (NULL,'STUNB','UNVJV','u'), + (5,'oka','qyihvamgsl','AXSMD'), + (NULL,'tqwmqyihva','h','yntqwmqyi'), + (3,'EGMJN','e','e'); +CREATE TABLE t2 ( +col_varchar varchar(10) DEFAULT NULL, +col_int INT DEFAULT NULL +); +INSERT INTO t2 VALUES ('d',9); +set optimizer_switch='derived_merge=off,derived_with_keys=on'; +SET @save_heap_size= @@max_heap_table_size; +SET @@max_heap_table_size= 16384; +SELECT t2.col_int +FROM t2 +RIGHT JOIN ( SELECT * FROM t1 ) AS dt +ON t2.col_varchar = dt.col_varchar +WHERE t2.col_int IS NOT NULL ; +col_int +9 +# Shouldn't use auto_key0 for derived table +EXPLAIN +SELECT t2.col_int +FROM t2 +RIGHT JOIN ( SELECT * FROM t1 ) AS dt +ON t2.col_varchar = dt.col_varchar +WHERE t2.col_int IS NOT NULL ; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 system NULL NULL NULL NULL 1 +1 PRIMARY <derived2> ALL NULL NULL NULL NULL 11 Using where +2 DERIVED t1 ALL NULL NULL NULL NULL 11 +SET @@max_heap_table_size= @save_heap_size; +SET optimizer_switch=@save_optimizer_switch; +DROP TABLE t1,t2; +# # end of 5.3 tests # set optimizer_switch=@exit_optimizer_switch; diff --git a/mysql-test/r/features.result b/mysql-test/r/features.result index 7b6a352ab0c..66d2c6bf71d 100644 --- a/mysql-test/r/features.result +++ b/mysql-test/r/features.result @@ -1,6 +1,8 @@ drop table if exists t1; +flush status; show status like "feature%"; Variable_name Value +Feature_delay_key_write 0 Feature_dynamic_columns 0 Feature_fulltext 0 Feature_gis 0 @@ -138,3 +140,17 @@ upd1 show status like "feature_xml"; Variable_name Value Feature_xml 2 +# +# Feature delayed_keys +# +create table t1 (a int, key(a)) engine=myisam delay_key_write=1; +insert into t1 values(1); +insert into t1 values(2); +drop table t1; +create table t1 (a int, key(a)) engine=aria delay_key_write=1; +insert into t1 values(1); +insert into t1 values(2); +drop table t1; +show status like "feature_delay_key_write"; +Variable_name Value +Feature_delay_key_write 2 diff --git a/mysql-test/r/flush-innodb.result b/mysql-test/r/flush-innodb.result index 6a97d33225e..d596ffbbd51 100644 --- a/mysql-test/r/flush-innodb.result +++ b/mysql-test/r/flush-innodb.result @@ -1,7 +1,7 @@ FLUSH TABLES WITH READ LOCK AND DISABLE CHECKPOINT; UNLOCK TABLES; CREATE TABLE t1 ( m MEDIUMTEXT ) ENGINE=InnoDB; -INSERT INTO t1 VALUES ( REPEAT('i',1048576) ); +INSERT INTO t1 VALUES ( REPEAT('i',65535) ); DROP TABLE t1; # diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index 
8e2bdeae93c..ac076ec4348 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -1,4 +1,4 @@ -drop table if exists t1,t2; +drop table if exists t1,t2,t3,t4,t5,t6; set @sav_dpi= @@div_precision_increment; set div_precision_increment= 5; show variables like 'div_precision_increment'; @@ -2239,3 +2239,34 @@ explain select MIN(b) from t1 where b >= inet_aton('192.168.119.32'); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away DROP TABLE t1; +# +# MDEV-6743 crash in GROUP_CONCAT(IF () ORDER BY 1) +# +CREATE TABLE t1 (pk INT, t2_id INT, t5_id INT, PRIMARY KEY (pk)); +INSERT INTO t1 VALUES (1,3,12),(2,3,15); +CREATE TABLE t2 (pk INT, PRIMARY KEY (pk)); +INSERT INTO t2 VALUES (4),(5); +CREATE TABLE t3 (t2_id INT, t4_id INT); +INSERT INTO t3 VALUES (6,11),(7,12); +CREATE TABLE t4 (id INT); +INSERT INTO t4 VALUES (13),(14); +CREATE TABLE t5 (pk INT, f VARCHAR(50), t6_id INT, PRIMARY KEY (pk)); +INSERT INTO t5 VALUES (9,'FOO',NULL); +CREATE TABLE t6 (pk INT, f VARCHAR(120), b TINYINT(4), PRIMARY KEY (pk)); +PREPARE stmt FROM " + SELECT t1.t2_id, GROUP_CONCAT(IF (t6.b, t6.f, t5.f) ORDER BY 1) + FROM t1 + JOIN t2 ON t1.t2_id = t2.pk + JOIN t3 ON t2.pk = t3.t2_id + JOIN t4 ON t4.id = t3.t4_id + JOIN t5 ON t1.t5_id = t5.pk + LEFT JOIN t6 ON t6.pk = t5.t6_id + GROUP BY t1.t2_id +"; +EXECUTE stmt; +t2_id GROUP_CONCAT(IF (t6.b, t6.f, t5.f) ORDER BY 1) +EXECUTE stmt; +t2_id GROUP_CONCAT(IF (t6.b, t6.f, t5.f) ORDER BY 1) +EXECUTE stmt; +t2_id GROUP_CONCAT(IF (t6.b, t6.f, t5.f) ORDER BY 1) +DROP TABLE t1,t2,t3,t4,t5,t6; diff --git a/mysql-test/r/func_str.result b/mysql-test/r/func_str.result index e5edabb0130..bc5f6951184 100644 --- a/mysql-test/r/func_str.result +++ b/mysql-test/r/func_str.result @@ -845,7 +845,7 @@ explain extended select concat('*',space(5),'*'); id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select concat('*',repeat(' ',5),'*') AS `concat('*',space(5),'*')` +Note 1003 select concat('*',space(5),'*') AS `concat('*',space(5),'*')` explain extended select reverse('abc'); id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used @@ -2083,7 +2083,7 @@ select space(4294967295); space(4294967295) NULL Warnings: -Warning 1301 Result of repeat() was larger than max_allowed_packet (1048576) - truncated +Warning 1301 Result of space() was larger than max_allowed_packet (1048576) - truncated select space(-4294967296); space(-4294967296) @@ -2091,7 +2091,7 @@ select space(4294967296); space(4294967296) NULL Warnings: -Warning 1301 Result of repeat() was larger than max_allowed_packet (1048576) - truncated +Warning 1301 Result of space() was larger than max_allowed_packet (1048576) - truncated select space(-4294967297); space(-4294967297) @@ -2099,7 +2099,7 @@ select space(4294967297); space(4294967297) NULL Warnings: -Warning 1301 Result of repeat() was larger than max_allowed_packet (1048576) - truncated +Warning 1301 Result of space() was larger than max_allowed_packet (1048576) - truncated select space(-18446744073709551615); space(-18446744073709551615) @@ -2110,7 +2110,7 @@ select space(18446744073709551615); space(18446744073709551615) NULL Warnings: -Warning 1301 Result of repeat() was larger than max_allowed_packet (1048576) - truncated +Warning 1301 Result of space() was larger than 
max_allowed_packet (1048576) - truncated select space(-18446744073709551616); space(-18446744073709551616) @@ -2123,7 +2123,7 @@ NULL Warnings: Warning 1916 Got overflow when converting '18446744073709551616' to INT. Value truncated. Warning 1916 Got overflow when converting '18446744073709551616' to INT. Value truncated. -Warning 1301 Result of repeat() was larger than max_allowed_packet (1048576) - truncated +Warning 1301 Result of space() was larger than max_allowed_packet (1048576) - truncated select space(-18446744073709551617); space(-18446744073709551617) @@ -2136,7 +2136,7 @@ NULL Warnings: Warning 1916 Got overflow when converting '18446744073709551617' to INT. Value truncated. Warning 1916 Got overflow when converting '18446744073709551617' to INT. Value truncated. -Warning 1301 Result of repeat() was larger than max_allowed_packet (1048576) - truncated +Warning 1301 Result of space() was larger than max_allowed_packet (1048576) - truncated select rpad('hello', -1, '1'); rpad('hello', -1, '1') NULL @@ -2961,6 +2961,9 @@ replace(var, '00000000', table_name) (( t2 ++ t2 )) drop procedure foo; drop table t1,t2; +select md5(_filename "a"), sha(_filename "a"); +md5(_filename "a") sha(_filename "a") +0cc175b9c0f1b6a831c399e269772661 86f7e437faa5a7fce15d1ddcb9eaeaea377667b8 # # End of 5.5 tests # @@ -4510,3 +4513,20 @@ SELECT FROM_BASE64(TO_BASE64(dt1)) FROM t1; FROM_BASE64(TO_BASE64(dt1)) 2011-01-01 02:03:04 DROP TABLE t1; +SELECT SPACE(@@global.max_allowed_packet*2); +SPACE(@@global.max_allowed_packet*2) +NULL +Warnings: +Warning 1301 Result of space() was larger than max_allowed_packet (1048576) - truncated +SET NAMES latin1; +PREPARE stmt FROM "SELECT COLLATION(space(2))"; +EXECUTE stmt; +COLLATION(space(2)) +latin1_swedish_ci +SET NAMES latin2; +EXECUTE stmt; +COLLATION(space(2)) +latin2_general_ci +# +# End of 5.6 tests +# diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index db68f08cbba..bf07595bc3a 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -2023,10 +2023,72 @@ SEC_TO_TIME(1.12)+0.1 decimal(14,2) YES NULL SEC_TO_TIME(1.123456)+0.1 decimal(18,6) YES NULL SEC_TO_TIME(1.1234567)+0.1 decimal(18,6) YES NULL DROP TABLE t1; +CREATE TABLE t1 (a DATE) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(concat(a,'10'))*1; +a +2000-02-23 +2005-05-04 +SELECT * FROM t1 GROUP BY (-FROM_UNIXTIME(concat(a,'10')))*1; +a +2005-05-04 +2000-02-23 +SELECT * FROM t1 GROUP BY (-FROM_UNIXTIME(concat(a,'10'))); +a +2005-05-04 +2000-02-23 +SELECT * FROM t1 GROUP BY ABS(FROM_UNIXTIME(concat(a,'10'))); +a +2000-02-23 +2005-05-04 +SELECT * FROM t1 GROUP BY @a:=(FROM_UNIXTIME(concat(a,'10'))*1); +a +2000-02-23 +2005-05-04 +DROP TABLE t1; +SET TIME_ZONE='+02:00'; +# +# MDEV-6302 Wrong result set when using GROUP BY FROM_UNIXTIME(...)+0 +# +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT a, FROM_UNIXTIME(CONCAT(a,'10')) AS f1, FROM_UNIXTIME(CONCAT(a,'10'))+0 AS f2 FROM t1; +a f1 f2 +2005-05-04 1970-01-01 02:33:25 19700101023325.000000 +2000-02-23 1970-01-01 02:33:20 19700101023320.000000 +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(CONCAT(a,'10'))+0; +a +2000-02-23 +2005-05-04 +DROP TABLE t1; +CREATE TABLE t1 (a DATE) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(concat(a,'10'))/1; +a +2000-02-23 +2005-05-04 +DROP TABLE t1; +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES ('2005-05-04'); 
+SELECT CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10'))) AS f2 FROM t1; +f2 +0.000000 +SELECT CHAR_LENGTH(CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10')))) AS f2 FROM t1; +f2 +8 +CREATE TABLE t2 AS SELECT CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10'))) AS f2 FROM t1; +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `f2` varchar(26) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SELECT * FROM t2; +f2 +0.000000 +DROP TABLE t1,t2; # # MDEV-4635 Crash in UNIX_TIMESTAMP(STR_TO_DATE('2020','%Y')) # -SET TIME_ZONE='+02:00'; SELECT UNIX_TIMESTAMP(STR_TO_DATE('2020','%Y')); UNIX_TIMESTAMP(STR_TO_DATE('2020','%Y')) NULL @@ -2600,3 +2662,18 @@ SELECT COALESCE(TIME'10:20:30',DATE'2001-01-01'); COALESCE(TIME'10:20:30',DATE'2001-01-01') 2014-04-15 10:20:30 SET timestamp=DEFAULT; +# +# MDEV-5750 Assertion `ltime->year == 0' fails on a query with EXTRACT DAY_MINUTE and TIME column +# +CREATE TABLE t1 ( d DATE, t TIME ); +INSERT INTO t1 VALUES ('2008-12-05','22:34:09'),('2005-03-27','14:26:02'); +SELECT EXTRACT(DAY_MINUTE FROM GREATEST(t,d)), GREATEST(t,d) FROM t1; +EXTRACT(DAY_MINUTE FROM GREATEST(t,d)) GREATEST(t,d) +342259 838:59:59 +342259 838:59:59 +Warnings: +Warning 1292 Truncated incorrect time value: '9336:00:00' +Warning 1292 Truncated incorrect time value: '9336:00:00' +Warning 1292 Truncated incorrect time value: '2952:00:00' +Warning 1292 Truncated incorrect time value: '2952:00:00' +DROP TABLE t1; diff --git a/mysql-test/r/gis-debug.result b/mysql-test/r/gis-debug.result new file mode 100644 index 00000000000..8593f434c2b --- /dev/null +++ b/mysql-test/r/gis-debug.result @@ -0,0 +1,294 @@ +SET @tmp=ST_GIS_DEBUG(1); +DROP TABLE IF EXISTS p1; +CREATE PROCEDURE p1(dist DOUBLE, geom TEXT) +BEGIN +DECLARE g GEOMETRY; +SET g=GeomFromText(geom); +SELECT geom AS `-----`; +SELECT dist, GeometryType(@buf:=ST_Buffer(g, dist)) AS `buffer`, ROUND(ST_AREA(@buf),2) AS buf_area; +END| +# +# Testing ST_BUFFER with positive distance +# +----- +POINT(0 0)) +dist buffer buf_area +1 POLYGON 3.14 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +1 POLYGON 5.14 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +1 POLYGON 44.63 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +1 POLYGON 63.14 +----- +Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2)) +dist buffer buf_area +1 POLYGON 95.14 +----- +Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0)) +dist buffer buf_area +1 POLYGON 174.93 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +1 MULTIPOLYGON 9.42 +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +1 POLYGON 8.80 +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +1 POLYGON 14.24 +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +1 POLYGON 13.59 +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +1 MULTIPOLYGON 70.06 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9))) +dist buffer buf_area +1 POLYGON 73.18 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9))) +dist buffer buf_area +1 POLYGON 73.18 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +1 POLYGON 3.14 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +1 POLYGON 8.80 +----- +GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +1 POLYGON 63.14 +----- +GeometryCollection(MultiPoint(9 9,8 1,1 5)) +dist buffer buf_area +1 MULTIPOLYGON 9.42 +----- 
+GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +1 MULTIPOLYGON 10.28 +----- +GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6)))) +dist buffer buf_area +1 MULTIPOLYGON 48.28 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +1 POLYGON 75.92 +# +# Testing ST_BUFFER with zero distance +# +----- +POINT(0 0)) +dist buffer buf_area +0 POINT 0.00 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +0 LINESTRING 0.00 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +0 LINESTRING 0.00 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +0 POLYGON 36.00 +----- +Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2)) +dist buffer buf_area +0 POLYGON 48.00 +----- +Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0)) +dist buffer buf_area +0 POLYGON 116.00 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +0 MULTIPOINT NULL +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9))) +dist buffer buf_area +0 MULTIPOLYGON 66.00 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9))) +dist buffer buf_area +0 MULTIPOLYGON 62.00 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +0 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 36.00 +----- +GeometryCollection(MultiPoint(9 9,8 1,1 5)) +dist buffer buf_area +0 GEOMETRYCOLLECTION NULL +----- +GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +0 GEOMETRYCOLLECTION NULL +----- +GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6)))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 18.00 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 36.00 +# +# Testing ST_BUFFER with negative distance +# +----- +POINT(0 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +-1 POLYGON 16.00 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +-1 POLYGON 16.00 +----- +GeometryCollection(MultiPoint(9 9,8 1,1 5)) 
+dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +-1 POLYGON 16.00 +SELECT ST_CONTAINS( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)')); +ST_CONTAINS( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)')) +0 +SELECT AsText(ST_UNION( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)'))); +AsText(ST_UNION( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)'))) +GEOMETRYCOLLECTION(POLYGON((0 0,0 5,5 5,5 0,0 0)),POLYGON((6 6,6 11,11 11,11 6,6 6)),POINT(5 10)) +DROP PROCEDURE p1; +# +# Bug #13833019 ASSERTION `T1->RESULT_RANGE' FAILED IN GCALC_OPERATION_REDUCER::END_COUPLE +# +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((0 0,9 4,3 3,0 0)),((2 2,2 2,8 8,2 3,2 2)))'), 3)); +GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((0 0,9 4,3 3,0 0)),((2 2,2 2,8 8,2 3,2 2)))'), 3)) +POLYGON +# +# Bug #13832749 HANDLE_FATAL_SIGNAL IN GCALC_FUNCTION::COUNT_INTERNAL +# +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)), ((9 9,8 1,1 5,9 9)))'),1)); +GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)), ((9 9,8 1,1 5,9 9)))'),1)) +POLYGON +# +# Bug#13358363 - ASSERTION: N > 0 && N < SINUSES_CALCULATED*2+1 | GET_N_SINCOS/ADD_EDGE_BUFFER +# +DO ST_BUFFER(ST_GEOMCOLLFROMTEXT('linestring(1 1,2 2)'),''); +SELECT ST_WITHIN( +LINESTRINGFROMTEXT(' LINESTRING(3 8,9 2,3 8,3 3,7 6,4 7,4 7,8 1) '), +ST_BUFFER(MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)),((0 5,3 5,3 2,1 2,1 1,3 1,3 0,0 0,0 3,2 3,2 4,0 4,0 5))) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT('POLYGON((3 5,2 4,2 5,3 5)) ')))); +ST_WITHIN( +LINESTRINGFROMTEXT(' LINESTRING(3 8,9 2,3 8,3 3,7 6,4 7,4 7,8 1) '), +ST_BUFFER(MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)),((0 5,3 5,3 2,1 2,1 1,3 1,3 0,0 0,0 3,2 3,2 4,0 4,0 5))) ') +0 +SELECT ST_DIMENSION(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((3 5,2 5,2 4,3 4,3 5)) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT(' POLYGON((0 0,9 3,4 2,0 0))')))); +ST_DIMENSION(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((3 5,2 5,2 4,3 4,3 5)) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT(' POLYGON((0 0,9 3,4 2,0 0))')))) +2 +SELECT ST_NUMINTERIORRINGS( +ST_ENVELOPE(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5))) '), +SRID(MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 2,4 2,1 2,2 4,2 2)) '))))); +ST_NUMINTERIORRINGS( +ST_ENVELOPE(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5))) '), +SRID(MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 2,4 2,1 2,2 4,2 2)) '))))) +0 +SELECT ASTEXT(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((9 9,5 2,4 5,9 9))'), +SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) ')))); +ASTEXT(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((9 9,5 2,4 5,9 9))'), +SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) ')))) +POLYGON((9 9,5 2,4 5,9 9)) diff --git a/mysql-test/r/gis-precise.result 
b/mysql-test/r/gis-precise.result index 71eed65b2ea..c0b8b85d216 100644 --- a/mysql-test/r/gis-precise.result +++ b/mysql-test/r/gis-precise.result @@ -156,19 +156,19 @@ POLYGON((1 0,0.950932325672582 0.001204543794827595,0.9019828596704393 0.0048152 create table t1(geom geometrycollection); insert into t1 values (geomfromtext('POLYGON((0 0, 10 10, 0 8, 0 0))')); insert into t1 values (geomfromtext('POLYGON((1 1, 10 10, 0 8, 1 1))')); -select astext(geom), area(geom),area(ST_buffer(geom,2)) from t1; -astext(geom) area(geom) area(ST_buffer(geom,2)) -POLYGON((0 0,10 10,0 8,0 0)) 40 117.2416763959153 -POLYGON((1 1,10 10,0 8,1 1)) 36 108.55539589266459 -select astext(ST_buffer(geom,2)) from t1; -astext(ST_buffer(geom,2)) -POLYGON((0 -2,-0.09813534865483604 -1.9975909124103448,-0.1960342806591212 -1.9903694533443936,-0.2934609489107236 -1.978353019929562,-0.3901806440322566 -1.9615705608064609,-0.4859603598065278 -1.940062506389088,-0.5805693545089246 -1.9138806714644179,-0.6737797067844402 -1.8830881303660416,-0.7653668647301796 -1.8477590650225735,-0.8551101868605642 -1.8079785862468867,-0.9427934736519952 -1.7638425286967099,-1.0282054883864433 -1.7154572200005442,-1.1111404660392044 -1.6629392246050905,-1.1913986089848667 -1.6064150629612897,-1.268786568327291 -1.546020906725474,-1.3431179096940367 -1.4819022507099182,-1.414213562373095 -1.414213562373095,-1.4819022507099182 -1.3431179096940367,-1.546020906725474 -1.268786568327291,-1.6064150629612897 -1.1913986089848667,-1.6629392246050905 -1.1111404660392044,-1.7154572200005442 -1.0282054883864433,-1.7638425286967099 -0.9427934736519952,-1.8079785862468867 -0.8551101868605642,-1.8477590650225735 -0.7653668647301796,-1.8830881303660416 -0.6737797067844402,-1.9138806714644179 -0.5805693545089246,-1.940062506389088 -0.4859603598065278,-1.9615705608064609 -0.3901806440322566,-1.978353019929562 -0.2934609489107236,-1.9903694533443936 -0.1960342806591212,-1.9975909124103448 -0.09813534865483604,-2 0,-2 8,-1.9976924709932495 8.096045777298562,-1.9905734200023315 8.193952209526529,-1.978658903288988 8.291391393893539,-1.9619776239675701 8.388128590869789,-1.940569768701071 8.483930752074583,-1.9144869108879337 8.578567081710304,-1.8837918864172196 8.67180959256969,-1.848558642291444 8.763433655277009,-1.8088720584817741 8.853218539439872,-1.7648277434447421 8.940947945408109,-1.716531803793098 9.02641052535855,-1.6641005886756872 9.109400392450459,-1.6076604094821603 9.189717616824955,-1.5473472355477698 9.267168707253568,-1.483306366591334 9.341567077275533,-1.415692082675486 9.412733494700944,-1.3446672725324915 9.480496513396783,-1.2704030411510234 9.54469288631567,-1.1930782975692584 9.605167958772302,-1.1128793238673322 9.661776041020147,-1.0299993263974934 9.714380759230782,-0.9446379703330767 9.762855384030411,-0.8570008986576141 9.807083135802014,-0.7672992367528719 9.846957466017683,-0.6757490837793165 9.882382313923348,-0.5825709920743072 9.913272337957553,-0.4879894358221987 9.939553121346753,-0.3922322702763681 9.96116135138184,9.607767729723632 11.96116135138184,9.609819355967744 11.96157056080646,9.706539051089276 11.978353019929562,9.803965719340878 11.990369453344393,9.901864651345164 11.997590912410345,10 12,10.098135348654836 11.997590912410345,10.196034280659122 11.990369453344393,10.293460948910724 11.978353019929562,10.390180644032256 11.96157056080646,10.485960359806528 11.940062506389088,10.580569354508924 11.913880671464417,10.67377970678444 11.88308813036604,10.76536686473018 11.847759065022574,10.855110186860564 
11.807978586246886,10.942793473651996 11.76384252869671,11.028205488386444 11.715457220000545,11.111140466039204 11.66293922460509,11.191398608984866 11.60641506296129,11.268786568327291 11.546020906725474,11.343117909694037 11.481902250709918,11.414213562373096 11.414213562373096,11.481902250709918 11.343117909694037,11.546020906725474 11.268786568327291,11.60641506296129 11.191398608984866,11.66293922460509 11.111140466039204,11.715457220000545 11.028205488386444,11.76384252869671 10.942793473651996,11.807978586246886 10.855110186860564,11.847759065022574 10.76536686473018,11.88308813036604 10.67377970678444,11.913880671464417 10.580569354508924,11.940062506389088 10.485960359806528,11.96157056080646 10.390180644032256,11.978353019929562 10.293460948910724,11.990369453344393 10.196034280659122,11.997590912410345 10.098135348654836,12 10,11.997590912410345 9.901864651345164,11.990369453344393 9.803965719340878,11.978353019929562 9.706539051089276,11.96157056080646 9.609819355967744,11.940062506389088 9.514039640193472,11.913880671464417 9.419430645491076,11.88308813036604 9.32622029321556,11.847759065022574 9.23463313526982,11.807978586246886 9.144889813139436,11.76384252869671 9.057206526348004,11.715457220000545 8.971794511613556,11.66293922460509 8.888859533960796,11.60641506296129 8.808601391015134,11.546020906725474 8.731213431672709,11.481902250709918 8.656882090305963,11.414213562373096 8.585786437626904,1.414213562373095 -1.414213562373095,1.3431179096940367 -1.4819022507099182,1.268786568327291 -1.546020906725474,1.1913986089848667 -1.6064150629612897,1.1111404660392044 -1.6629392246050905,1.0282054883864433 -1.7154572200005442,0.9427934736519952 -1.7638425286967099,0.8551101868605642 -1.8079785862468867,0.7653668647301796 -1.8477590650225735,0.6737797067844402 -1.8830881303660416,0.5805693545089246 -1.9138806714644179,0.4859603598065278 -1.940062506389088,0.3901806440322566 -1.9615705608064609,0.2934609489107236 -1.978353019929562,0.1960342806591212 -1.9903694533443936,0.09813534865483604 -1.9975909124103448,0 -2)) -POLYGON((0.9892698494111194 -0.9999712157599518,0.8911488380683092 -0.9970356593075951,0.7932900587088283 -0.9892890690323013,0.6959292617035704 -0.9767501071485654,0.5993009977403192 -0.959448981113848,0.5036380527705995 -0.9374273708561667,0.40917088720792716 -0.9107383283634973,0.3161270807284893 -0.8794461498768888,0.2247307840117696 -0.843626220995187,0.13520217874192864 -0.8033648350645226,0.04775694717084156 -0.7587589852900836,-0.03739424747933939 -0.7099161310709878,-0.12004626852233802 -0.6569539391211774,-0.19999999999999996 -0.5999999999999999,-0.27706282637007584 -0.5391915207353741,-0.35104909653393324 -0.47467499428004234,-0.42178057108631606 -0.40660584659721555,-0.4890868517096818 -0.3351480622258147,-0.5528057916786753 -0.26047378922735365,-0.6127838864857904 -0.18276292446617926,-0.6688766436471771 -0.10220268022216916,-0.7209489307976877 -0.018987133179951154,-0.7688753012365837 0.06668324311882912,-0.8125402961396226 0.15460206123382925,-0.8518387227094812 0.2445575170314307,-0.8866759075944177 0.33633289993945015,-0.9169679249646674 0.4297071150218881,-0.9426417986971172 0.5244552156159955,-0.9636356781811806 0.6203489452484875,-0.9798989873223332 0.717157287525381,-1.9798989873223332 7.717157287525381,-1.990163308912474 7.8018838627003015,-1.9974871681520578 7.899775187364235,-1.9999989058443504 7.997907962380466,-1.9976924709932495 8.096045777298562,-1.9905734200023315 8.193952209526529,-1.978658903288988 8.291391393893539,-1.9619776239675701 
8.388128590869789,-1.940569768701071 8.483930752074583,-1.9144869108879337 8.578567081710304,-1.8837918864172196 8.67180959256969,-1.848558642291444 8.763433655277009,-1.8088720584817741 8.853218539439872,-1.7648277434447421 8.940947945408109,-1.716531803793098 9.02641052535855,-1.6641005886756872 9.109400392450459,-1.6076604094821603 9.189717616824955,-1.5473472355477698 9.267168707253568,-1.483306366591334 9.341567077275533,-1.415692082675486 9.412733494700944,-1.3446672725324915 9.480496513396783,-1.2704030411510234 9.54469288631567,-1.1930782975692584 9.605167958772302,-1.1128793238673322 9.661776041020147,-1.0299993263974934 9.714380759230782,-0.9446379703330767 9.762855384030411,-0.8570008986576141 9.807083135802014,-0.7672992367528719 9.846957466017683,-0.6757490837793165 9.882382313923348,-0.5825709920743072 9.913272337957553,-0.4879894358221987 9.939553121346753,-0.3922322702763681 9.96116135138184,9.607767729723632 11.96116135138184,9.609819355967744 11.96157056080646,9.706539051089276 11.978353019929562,9.803965719340878 11.990369453344393,9.901864651345164 11.997590912410345,10 12,10.098135348654836 11.997590912410345,10.196034280659122 11.990369453344393,10.293460948910724 11.978353019929562,10.390180644032256 11.96157056080646,10.485960359806528 11.940062506389088,10.580569354508924 11.913880671464417,10.67377970678444 11.88308813036604,10.76536686473018 11.847759065022574,10.855110186860564 11.807978586246886,10.942793473651996 11.76384252869671,11.028205488386444 11.715457220000545,11.111140466039204 11.66293922460509,11.191398608984866 11.60641506296129,11.268786568327291 11.546020906725474,11.343117909694037 11.481902250709918,11.414213562373096 11.414213562373096,11.481902250709918 11.343117909694037,11.546020906725474 11.268786568327291,11.60641506296129 11.191398608984866,11.66293922460509 11.111140466039204,11.715457220000545 11.028205488386444,11.76384252869671 10.942793473651996,11.807978586246886 10.855110186860564,11.847759065022574 10.76536686473018,11.88308813036604 10.67377970678444,11.913880671464417 10.580569354508924,11.940062506389088 10.485960359806528,11.96157056080646 10.390180644032256,11.978353019929562 10.293460948910724,11.990369453344393 10.196034280659122,11.997590912410345 10.098135348654836,12 10,11.997590912410345 9.901864651345164,11.990369453344393 9.803965719340878,11.978353019929562 9.706539051089276,11.96157056080646 9.609819355967744,11.940062506389088 9.514039640193472,11.913880671464417 9.419430645491076,11.88308813036604 9.32622029321556,11.847759065022574 9.23463313526982,11.807978586246886 9.144889813139436,11.76384252869671 9.057206526348004,11.715457220000545 8.971794511613556,11.66293922460509 8.888859533960796,11.60641506296129 8.808601391015134,11.546020906725474 8.731213431672709,11.481902250709918 8.656882090305963,11.414213562373096 8.585786437626904,2.414213562373095 -0.4142135623730949,2.4066058465972153 -0.42178057108631606,2.335148062225815 -0.4890868517096818,2.260473789227354 -0.5528057916786753,2.1827629244661795 -0.6127838864857904,2.1022026802221694 -0.6688766436471771,2.018987133179951 -0.7209489307976877,1.9333167568811709 -0.7688753012365837,1.8453979387661708 -0.8125402961396226,1.7554424829685693 -0.8518387227094812,1.6636671000605499 -0.8866759075944177,1.570292884978112 -0.9169679249646674,1.4755447843840046 -0.9426417986971172,1.3796510547515126 -0.9636356781811806,1.282842712474619 -0.9798989873223332,1.1853529773292786 -0.9913925463843567,1.0874167106265484 -0.9980886663767536,0.9892698494111194 
-0.9999712157599518)) +select astext(geom), area(geom),round(area(ST_buffer(geom,2)), 7) from t1; +astext(geom) area(geom) round(area(ST_buffer(geom,2)), 7) +POLYGON((0 0,10 10,0 8,0 0)) 40 117.2416764 +POLYGON((1 1,10 10,0 8,1 1)) 36 108.5553959 +select ST_NUMPOINTS(ST_EXTERIORRING(ST_buffer(geom,2))) from t1; +ST_NUMPOINTS(ST_EXTERIORRING(ST_buffer(geom,2))) +133 +134 set @geom=geomfromtext('LINESTRING(2 1, 4 2, 2 3, 2 5)'); set @buff=ST_buffer(@geom,1); -select astext(@buff); -astext(@buff) -POLYGON((2.0218594008566466 0.00023894525032219782,1.9727771204112932 0.00037061126290494073,1.9237604222673113 0.002910472030148492,1.8749273919438858 0.0078524088049996,1.8263956724883341 0.015184516028905026,1.7782821810637013 0.024889130013345362,1.7307028272850733 0.03694287149320841,1.683772233983162 0.05131670194948634,1.6376034610678665 0.06797599356561079,1.592307733157046 0.08688061264889702,1.5479941716266756 0.10798501631612445,1.504769531727891 0.13123836221033125,1.46273794540424 0.1565846309845056,1.4220006704287085 0.18396276125709976,1.382655846464876 0.21330679671424568,1.3447982586398712 0.24454604500429356,1.3085191091986976 0.2776052480418776,1.2739057977900368 0.3124047633112361,1.241041710912841 0.34886075573200737,1.2100060210309511 0.38688539962528223,1.1808734958396978 0.4263870902933562,1.1537143181439746 0.46727066470347056,1.1285939167817136 0.5094376307438929,1.1055728090000843 0.5527864045000421,1.0847064546641425 0.5972125549790352,1.0660451226491614 0.6426090556930975,1.0496337697385036 0.6888665424957445,1.0355119323187965 0.7358735770495916,1.0237136311333106 0.7835169152910685,1.0142672893230111 0.8316817802452878,1.0071956639527206 0.8802521385338314,1.0025157911873577 0.9291109799093207,1.0002389452503222 0.9781405991433534,1.000370611262905 1.0272228795887068,1.0029104720301485 1.0762395777326887,1.0078524088049996 1.1250726080561142,1.015184516028905 1.1736043275116659,1.0248891300133454 1.2217178189362987,1.0369428714932085 1.2692971727149267,1.0513167019494865 1.316227766016838,1.0679759935656108 1.3623965389321335,1.086880612648897 1.407692266842954,1.1079850163161244 1.4520058283733244,1.1312383622103312 1.495230468272109,1.1565846309845056 1.53726205459576,1.1839627612570998 1.5779993295712915,1.2133067967142457 1.617344153535124,1.2445460450042936 1.6552017413601288,1.2776052480418776 1.6914808908013024,1.3124047633112361 1.7260942022099632,1.3488607557320074 1.758958289087159,1.3868853996252821 1.7899939789690489,1.4263870902933562 1.8191265041603022,1.4672706647034706 1.8462856818560254,1.5094376307438928 1.8714060832182864,1.5527864045000421 1.8944271909999157,1.7639320225002106 2,1.5527864045000421 2.1055728090000843,1.5286032631740025 2.118078735651645,1.4858972558067784 2.1422713899997277,1.4444297669803978 2.1685303876974547,1.4043006955075668 2.196792468519355,1.3656067158363545 2.226989546637263,1.3284410451529816 2.259048874645041,1.2928932188134525 2.2928932188134525,1.2590488746450408 2.3284410451529816,1.2269895466372631 2.3656067158363543,1.1967924685193552 2.4043006955075668,1.1685303876974547 2.444429766980398,1.1422713899997279 2.4858972558067784,1.118078735651645 2.5286032631740025,1.0960107068765566 2.572444906569718,1.0761204674887133 2.6173165676349104,1.0584559348169793 2.66311014660778,1.043059664267791 2.709715322745538,1.029968746805456 2.757019820096736,1.0192147195967696 2.8049096779838716,1.0108234900352189 2.853269525544638,1.0048152733278033 2.9019828596704396,1.0012045437948276 2.950932325672582,1 3,1 5,1.0048152733278033 
5.098017140329561,1.0108234900352189 5.146730474455362,1.0192147195967696 5.195090322016128,1.029968746805456 5.242980179903264,1.043059664267791 5.290284677254462,1.0584559348169793 5.33688985339222,1.0761204674887133 5.38268343236509,1.0960107068765566 5.427555093430282,1.118078735651645 5.471396736825998,1.1422713899997279 5.514102744193222,1.1685303876974547 5.555570233019602,1.1967924685193552 5.595699304492434,1.2269895466372631 5.634393284163646,1.2590488746450408 5.671558954847018,1.2928932188134525 5.707106781186548,1.3284410451529816 5.740951125354959,1.3656067158363545 5.773010453362737,1.4043006955075668 5.803207531480645,1.4444297669803978 5.831469612302545,1.4858972558067784 5.857728610000272,1.5286032631740025 5.881921264348355,1.572444906569718 5.903989293123443,1.6173165676349102 5.923879532511287,1.6631101466077798 5.941544065183021,1.7097153227455377 5.956940335732209,1.7570198200967362 5.970031253194544,1.8049096779838716 5.98078528040323,1.853269525544638 5.989176509964781,1.9019828596704393 5.995184726672197,1.950932325672582 5.998795456205173,2 6,2.049067674327418 5.998795456205173,2.0980171403295604 5.995184726672197,2.146730474455362 5.989176509964781,2.1950903220161284 5.98078528040323,2.242980179903264 5.970031253194544,2.290284677254462 5.956940335732209,2.33688985339222 5.941544065183021,2.3826834323650896 5.923879532511287,2.427555093430282 5.903989293123443,2.4713967368259975 5.881921264348355,2.5141027441932216 5.857728610000272,2.555570233019602 5.831469612302545,2.5956993044924332 5.803207531480645,2.6343932841636457 5.773010453362737,2.6715589548470184 5.740951125354959,2.7071067811865475 5.707106781186548,2.740951125354959 5.671558954847018,2.773010453362737 5.634393284163646,2.803207531480645 5.595699304492434,2.8314696123025453 5.555570233019602,2.8577286100002723 5.514102744193222,2.881921264348355 5.471396736825998,2.9039892931234434 5.427555093430282,2.923879532511287 5.38268343236509,2.9415440651830207 5.33688985339222,2.956940335732209 5.290284677254462,2.970031253194544 5.242980179903264,2.9807852804032304 5.195090322016128,2.989176509964781 5.146730474455362,2.9951847266721967 5.098017140329561,2.9987954562051726 5.049067674327418,3 5,3 3.618033988749895,4.447213595499958 2.8944271909999157,4.452005828373324 2.8920149836838753,4.4952304682721085 2.8687616377896688,4.53726205459576 2.8434153690154944,4.577999329571291 2.8160372387429002,4.617344153535124 2.786693203285754,4.655201741360129 2.7554539549957067,4.691480890801302 2.7223947519581224,4.726094202209963 2.6875952366887637,4.758958289087159 2.6511392442679926,4.789993978969049 2.613114600374718,4.819126504160303 2.573612909706644,4.846285681856025 2.5327293352965294,4.871406083218286 2.490562369256107,4.894427190999916 2.447213595499958,4.9152935453358575 2.402787445020965,4.933954877350839 2.3573909443069025,4.950366230261497 2.3111334575042557,4.964488067681204 2.2641264229504086,4.976286368866689 2.2164830847089316,4.985732710676989 2.1683182197547124,4.992804336047279 2.1197478614661684,4.997484208812643 2.070889020090679,4.999761054749678 2.0218594008566466,4.999629388737095 1.9727771204112932,4.997089527969852 1.9237604222673113,4.992147591195001 1.8749273919438858,4.984815483971095 1.8263956724883341,4.975110869986654 1.7782821810637013,4.963057128506792 1.7307028272850733,4.948683298050514 1.683772233983162,4.932024006434389 1.6376034610678665,4.913119387351103 1.592307733157046,4.892014983683875 1.5479941716266756,4.868761637789669 1.504769531727891,4.843415369015494 
1.46273794540424,4.816037238742901 1.4220006704287085,4.786693203285754 1.382655846464876,4.755453954995707 1.3447982586398712,4.722394751958122 1.3085191091986976,4.687595236688764 1.2739057977900368,4.651139244267993 1.241041710912841,4.613114600374717 1.2100060210309511,4.573612909706644 1.1808734958396978,4.53272933529653 1.1537143181439746,4.490562369256107 1.1285939167817136,4.447213595499958 1.1055728090000843,2.447213595499958 0.10557280900008414,2.3573909443069025 0.06604512264916129,2.3111334575042557 0.04963376973850353,2.2641264229504086 0.03551193231879646,2.2164830847089316 0.023713631133310598,2.1683182197547124 0.014267289323011023,2.1197478614661684 0.007195663952720532,2.070889020090679 0.0025157911873575634,2.0218594008566466 0.00023894525032219782)) +select ST_NUMPOINTS(ST_EXTERIORRING(@buff)); +ST_NUMPOINTS(ST_EXTERIORRING(@buff)) +202 DROP TABLE t1; select st_touches(geomfromtext('point(0 0)'), geomfromtext('point(1 1)')); st_touches(geomfromtext('point(0 0)'), geomfromtext('point(1 1)')) @@ -200,6 +200,31 @@ result SELECT ST_Equals(PointFromText('POINT (12 13)'),PointFromText('POINT (12 13)')) as result; result 1 +# +# BUG#11755628/47429: INTERSECTION FUNCTION CRASHED MYSQLD +# BUG#11759650/51979: UNION/INTERSECTION OF POLYGONS CRASHES MYSQL +# +SELECT ASTEXT(ST_UNION(GEOMFROMTEXT('POLYGON((525000 183300,525400 +183300,525400 18370, 525000 183700,525000 183300))'), +geomfromtext('POLYGON((525298.67 183511.53,525296.57 +183510.39,525296.42 183510.31,525289.11 183506.62,525283.17 +183503.47,525280.98 183502.26,525278.63 183500.97,525278.39 +183500.84,525276.79 183500,525260.7 183491.55,525263.95 +183484.75,525265.58 183481.95,525278.97 183488.73,525276.5 +183493.45,525275.5 183495.7,525280.35 183498.2,525282.3 +183499.1,525282.2 183499.3,525283.55 183500,525301.75 +183509.35,525304.45 183504.25,525307.85 183504.95,525304.5 +183510.83,525302.81 183513.8,525298.67 183511.53),(525275.06 +183489.89,525272.06 183488.37,525268.94 183494.51,525271.94 +183496.03,525275.06 183489.89),(525263.26 183491.55,525266.15 +183493.04,525269.88 183485.82,525266.99 183484.33,525263.26 +183491.55))'))) st_u; +st_u +MULTIPOLYGON(((525400 18370,525000.9677614468 183300,525400 183300,525400 18370)),((525000 183300,525000 183700,525000.9677614468 183300,525000 183300)),((525265.58 183481.95,525263.95 183484.75,525260.7 183491.55,525276.79 183500,525278.39 183500.84,525278.63 183500.97,525280.98 183502.26,525283.17 183503.47,525289.11 183506.62,525296.42 183510.31,525296.57 183510.39,525298.67 183511.53,525302.81 183513.8,525304.5 183510.83,525307.85 183504.95,525304.45 183504.25,525301.75 183509.35,525283.55 183500,525282.2 183499.3,525282.3 183499.1,525280.35 183498.2,525275.5 183495.7,525276.5 183493.45,525278.97 183488.73,525265.58 183481.95),(525266.99 183484.33,525263.26 183491.55,525266.15 183493.04,525269.88 183485.82,525266.99 183484.33),(525272.06 183488.37,525268.94 183494.51,525271.94 183496.03,525275.06 183489.89,525272.06 183488.37))) +SET @a=0x0000000001030000000200000005000000000000000000000000000000000000000000000000002440000000000000000000000000000024400000000000002440000000000000000000000000000024400000000000000000000000000000000000000000000000000000F03F000000000000F03F0000000000000040000000000000F03F00000000000000400000000000000040000000000000F03F0000000000000040000000000000F03F000000000000F03F; +SELECT ASTEXT(TOUCHES(@a, GEOMFROMTEXT('point(0 0)'))) t; +t +NULL SELECT astext(ST_UNION ( PolyFromText('POLYGON(( 2 2 ,3 2,2 7,2 2),( 0 0,8 2,1 9,0 0))'), ExteriorRing( Envelope( 
MultiLineStringFromText('MULTILINESTRING((3 4,5 3),(3 0,0 5))'))))); @@ -230,15 +255,15 @@ MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)) ((2 2,5 2,4 4,2 8,2 2)))'), MULTIPOLY POLYGON((0 2,1 4,1 3,2 3,2 4,1 4,1.5 5,2 5,2 8,8 8,8 2,0 2),(4 4,4 6,6 6,6 4,4 4)) -SELECT ASTEXT(ST_UNION( +SELECT ROUND(ST_LENGTH(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((6 2,4 0,3 5,3 6,4 3,6 4,3 9,0 7,3 7,8 4,2 9,5 0), (8 2,1 3,9 0,4 4))'), -MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6))'))); -ASTEXT(ST_UNION( +MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6))'))), 7); +ROUND(ST_LENGTH(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((6 2,4 0,3 5,3 6,4 3,6 4,3 9,0 7,3 7,8 4,2 9,5 0), (8 2,1 3,9 0,4 4))'), -MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6))'))) -MULTILINESTRING((3.5945945945945947 2.027027027027027,4 0,4.75 0.75),(5 0,4.75 0.75),(5.363636363636363 1.3636363636363635,9 0,6.173913043478262 2.260869565217391),(4.75 0.75,4.428571428571429 1.7142857142857142),(4.75 0.75,5.363636363636363 1.3636363636363635),(5.363636363636363 1.3636363636363635,4.428571428571429 1.7142857142857142),(5.363636363636363 1.3636363636363635,6 2),(4.428571428571429 1.7142857142857142,3.5945945945945947 2.027027027027027),(4.428571428571429 1.7142857142857142,4.15 2.55),(4.5 2.5,5 2,5.3076923076923075 2.3846153846153846),(8 2,6.173913043478262 2.260869565217391),(3.5945945945945947 2.027027027027027,1 3,3.4705882352941178 2.6470588235294117),(3.5945945945945947 2.027027027027027,3.4705882352941178 2.6470588235294117),(6.173913043478262 2.260869565217391,5.3076923076923075 2.3846153846153846),(6.173913043478262 2.260869565217391,5.585365853658536 2.7317073170731705),(5.3076923076923075 2.3846153846153846,4.5 2.5),(5.3076923076923075 2.3846153846153846,5.585365853658536 2.7317073170731705),(4.5 2.5,4.15 2.55),(4.5 2.5,4 3),(4.15 2.55,3.4705882352941178 2.6470588235294117),(4.15 2.55,4 3),(3.4705882352941178 2.6470588235294117,3.25 3.75),(5.585365853658536 2.7317073170731705,4.769230769230769 3.3846153846153846),(5.585365853658536 2.7317073170731705,7.054054054054054 4.5675675675675675),(4 3,3.25 3.75),(4 3,3.142857142857143 5.571428571428571),(4 3,4.769230769230769 3.3846153846153846),(4.769230769230769 3.3846153846153846,4 4),(4.769230769230769 3.3846153846153846,6 4,4.875 5.875),(3.25 3.75,2 5),(3.25 3.75,3 5,3 5.5),(7.054054054054054 4.5675675675675675,8 4,7.16 4.7),(7.054054054054054 4.5675675675675675,4.875 5.875),(7.054054054054054 4.5675675675675675,7.16 4.7),(7.16 4.7,5 6.5),(7.16 4.7,9 7,6 7,5 6.5),(2 5,1 6,3 6),(2 5,3 5.5),(3 5.5,3 6),(3 5.5,3.142857142857143 5.571428571428571),(3.142857142857143 5.571428571428571,3 6),(3.142857142857143 5.571428571428571,4.363636363636363 6.181818181818182),(4.875 5.875,4.363636363636363 6.181818181818182),(4.875 5.875,4.615384615384615 6.3076923076923075),(3 6,2.6666666666666665 7),(4.363636363636363 6.181818181818182,3 7,2.6666666666666665 7),(4.363636363636363 6.181818181818182,4.615384615384615 6.3076923076923075),(4.615384615384615 6.3076923076923075,4 7.333333333333333),(4.615384615384615 6.3076923076923075,5 6.5),(5 6.5,4 7.333333333333333),(2.1818181818181817 8.454545454545455,0 7,2.6666666666666665 7),(2.6666666666666665 7,2.1818181818181817 8.454545454545455),(4 7.333333333333333,2.444444444444444 8.62962962962963),(4 7.333333333333333,3 9,2.444444444444444 8.62962962962963),(2.1818181818181817 8.454545454545455,2 9,2.444444444444444 8.62962962962963),(2.1818181818181817 
8.454545454545455,2.444444444444444 8.62962962962963)) +MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6) +90.2783626 SELECT ST_NUMGEOMETRIES((ST_UNION(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 0,4 2,0 2,1 5,0 3,7 0,8 5,5 8), (6 2,4 0,3 5,3 6,4 3,6 4,3 9,0 7,3 7,8 4,2 9,5 0), @@ -434,9 +459,6 @@ ST_WITHIN( MULTIPOINTFROMTEXT(' MULTIPOINT( 2 9 , 2 9 , 4 9 , 9 1 ) ') , POLYGON SELECT ST_INTERSECTS( GeomFromText('MULTILINESTRING( ( 4030 3045 , 3149 2461 , 3004 3831 , 3775 2976 ) )') , GeomFromText('LINESTRING(3058.41 3187.91,3081.52 3153.19,3042.99 3127.57,3019.89 3162.29,3039.07 3175.05,3039.07 3175.05,3058.41 3187.91,3081.52 3153.19,3042.99 3127.57,3019.89 3162.29)') ); ST_INTERSECTS( GeomFromText('MULTILINESTRING( ( 4030 3045 , 3149 2461 , 3004 3831 , 3775 2976 ) )') , GeomFromText('LINESTRING(3058.41 3187.91,3081.52 3153.19,3042.99 3127.57,3019.89 3162.29,3039.07 3175.05,3039.07 3175.05,3058.41 3187.91,3081.52 3153.19, 1 -select ASTEXT(ST_BUFFER(ST_GEOMCOLLFROMTEXT(' GEOMETRYCOLLECTION(LINESTRING(100 100, 31 10, 77 80), POLYGON((0 0,4 7,1 1,0 0)), POINT(20 20))'), -3)); -ASTEXT(ST_BUFFER(ST_GEOMCOLLFROMTEXT(' GEOMETRYCOLLECTION(LINESTRING(100 100, 31 10, 77 80), POLYGON((0 0,4 7,1 1,0 0)), POINT(20 20))'), -3)) -POLYGON((3.999999999999999 6.999999999999998,4 7,3.999999999999999 6.999999999999998)) SELECT ST_NUMPOINTS(ST_EXTERIORRING(ST_BUFFER( POLYGONFROMTEXT( 'POLYGON( ( 0.0 -3.0, -2.910427500435995 0.727606875108998, -0.910427500435995 8.727606875108998, @@ -455,3 +477,296 @@ ST_NUMPOINTS(ST_EXTERIORRING(ST_BUFFER( POLYGONFROMTEXT( 'POLYGON( ( 0.0 -3.0, select astext(buffer(st_linestringfromwkb(linestring(point(-1,1), point(-1,-2))),-1)); astext(buffer(st_linestringfromwkb(linestring(point(-1,1), point(-1,-2))),-1)) GEOMETRYCOLLECTION EMPTY +DROP TABLE IF EXISTS p1; +CREATE PROCEDURE p1(dist DOUBLE, geom TEXT) +BEGIN +DECLARE g GEOMETRY; +SET g=GeomFromText(geom); +SELECT geom AS `-----`; +SELECT dist, GeometryType(@buf:=ST_Buffer(g, dist)) AS `buffer`, ROUND(ST_AREA(@buf),2) AS buf_area; +END| +# +# Testing ST_BUFFER with positive distance +# +----- +POINT(0 0)) +dist buffer buf_area +1 POLYGON 3.14 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +1 POLYGON 5.14 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +1 POLYGON 44.63 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +1 POLYGON 63.14 +----- +Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2)) +dist buffer buf_area +1 POLYGON 95.14 +----- +Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0)) +dist buffer buf_area +1 POLYGON 174.93 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +1 MULTIPOLYGON 9.42 +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +1 POLYGON 8.80 +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +1 POLYGON 14.24 +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +1 POLYGON 13.59 +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +1 MULTIPOLYGON 70.06 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9))) +dist buffer buf_area +1 POLYGON 73.18 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9))) +dist buffer buf_area +1 POLYGON 73.18 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +1 POLYGON 3.14 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +1 POLYGON 8.80 +----- +GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +1 POLYGON 63.14 +----- +GeometryCollection(MultiPoint(9 9,8 
1,1 5)) +dist buffer buf_area +1 MULTIPOLYGON 9.42 +----- +GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +1 MULTIPOLYGON 10.28 +----- +GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6)))) +dist buffer buf_area +1 MULTIPOLYGON 48.28 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +1 POLYGON 75.92 +# +# Testing ST_BUFFER with zero distance +# +----- +POINT(0 0)) +dist buffer buf_area +0 POINT 0.00 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +0 LINESTRING 0.00 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +0 LINESTRING 0.00 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +0 POLYGON 36.00 +----- +Polygon((0 0,0 8,8 8,8 0,0 0),(2 2,6 2,6 6,2 6,2 2)) +dist buffer buf_area +0 POLYGON 48.00 +----- +Polygon((0 0, 0 8, 8 8, 8 10, -10 10, -10 0, 0 0)) +dist buffer buf_area +0 POLYGON 116.00 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +0 MULTIPOINT NULL +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +0 MULTILINESTRING NULL +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2)), ((9 9,8 1,1 5,9 9))) +dist buffer buf_area +0 MULTIPOLYGON 66.00 +----- +MultiPolygon(((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)),((9 9,8 1,1 5,9 9))) +dist buffer buf_area +0 MULTIPOLYGON 62.00 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +0 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 36.00 +----- +GeometryCollection(MultiPoint(9 9,8 1,1 5)) +dist buffer buf_area +0 GEOMETRYCOLLECTION NULL +----- +GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +0 GEOMETRYCOLLECTION NULL +----- +GeometryCollection(MultiPolygon(((0 0, 3 0, 3 3, 0 3, 0 0)),((6 6,6 9,9 9,9 6,6 6)))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 18.00 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +0 GEOMETRYCOLLECTION 36.00 +# +# Testing ST_BUFFER with negative distance +# +----- +POINT(0 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +LineString(0 1, 1 1)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +LineString(9 9,8 1,1 5,0 0) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +Polygon((2 2,2 8,8 8,8 2,2 2)) +dist buffer buf_area +-1 POLYGON 16.00 +----- +MultiPoint(9 9,8 1,1 5) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2,0 4)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((0 0,2 2),(0 2,2 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +MultiLineString((2 2,2 8,-2 8),(-6 -6, 6 6),(10 10, 14 14)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Point(0 0)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(LineString(0 0, 2 2))) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Polygon((2 2,2 8,8 8,8 2,2 2)))) +dist buffer buf_area +-1 POLYGON 
16.00 +----- +GeometryCollection(MultiPoint(9 9,8 1,1 5)) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(MultiLineString((0 0,0 1),(3 0,3 1))) +dist buffer buf_area +-1 GEOMETRYCOLLECTION 0.00 +----- +GeometryCollection(Point(9 9),LineString(1 5,0 0),Polygon((2 2,2 8,8 8,8 2,2 2))) +dist buffer buf_area +-1 POLYGON 16.00 +SELECT ST_CONTAINS( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)')); +ST_CONTAINS( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)')) +0 +SELECT AsText(ST_UNION( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)'))); +AsText(ST_UNION( +GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)),((6 6, 6 11, 11 11, 11 6, 6 6)))'), +GeomFromText('POINT(5 10)'))) +GEOMETRYCOLLECTION(POLYGON((0 0,0 5,5 5,5 0,0 0)),POLYGON((6 6,6 11,11 11,11 6,6 6)),POINT(5 10)) +DROP PROCEDURE p1; +# +# Bug #13833019 ASSERTION `T1->RESULT_RANGE' FAILED IN GCALC_OPERATION_REDUCER::END_COUPLE +# +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((0 0,9 4,3 3,0 0)),((2 2,2 2,8 8,2 3,2 2)))'), 3)); +GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((0 0,9 4,3 3,0 0)),((2 2,2 2,8 8,2 3,2 2)))'), 3)) +POLYGON +# +# Bug #13832749 HANDLE_FATAL_SIGNAL IN GCALC_FUNCTION::COUNT_INTERNAL +# +SELECT GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)), ((9 9,8 1,1 5,9 9)))'),1)); +GeometryType(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2), (4 4,4 6,6 6,6 4,4 4)), ((9 9,8 1,1 5,9 9)))'),1)) +POLYGON +# +# Bug#13358363 - ASSERTION: N > 0 && N < SINUSES_CALCULATED*2+1 | GET_N_SINCOS/ADD_EDGE_BUFFER +# +DO ST_BUFFER(ST_GEOMCOLLFROMTEXT('linestring(1 1,2 2)'),''); +SELECT ST_WITHIN( +LINESTRINGFROMTEXT(' LINESTRING(3 8,9 2,3 8,3 3,7 6,4 7,4 7,8 1) '), +ST_BUFFER(MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)),((0 5,3 5,3 2,1 2,1 1,3 1,3 0,0 0,0 3,2 3,2 4,0 4,0 5))) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT('POLYGON((3 5,2 4,2 5,3 5)) ')))); +ST_WITHIN( +LINESTRINGFROMTEXT(' LINESTRING(3 8,9 2,3 8,3 3,7 6,4 7,4 7,8 1) '), +ST_BUFFER(MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5)),((2 2,2 8,8 8,8 2,2 2),(4 4,4 6,6 6,6 4,4 4)),((0 5,3 5,3 2,1 2,1 1,3 1,3 0,0 0,0 3,2 3,2 4,0 4,0 5))) ') +0 +SELECT ST_DIMENSION(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((3 5,2 5,2 4,3 4,3 5)) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT(' POLYGON((0 0,9 3,4 2,0 0))')))); +ST_DIMENSION(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((3 5,2 5,2 4,3 4,3 5)) '), +ST_NUMINTERIORRINGS(POLYGONFROMTEXT(' POLYGON((0 0,9 3,4 2,0 0))')))) +2 +SELECT ST_NUMINTERIORRINGS( +ST_ENVELOPE(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5))) '), +SRID(MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 2,4 2,1 2,2 4,2 2)) '))))); +ST_NUMINTERIORRINGS( +ST_ENVELOPE(ST_BUFFER(MULTIPOLYGONFROMTEXT('MULTIPOLYGON(((3 5,2 5,2 4,3 4,3 5))) '), +SRID(MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 2,4 2,1 2,2 4,2 2)) '))))) +0 +SELECT ASTEXT(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((9 9,5 2,4 5,9 9))'), +SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) ')))); +ASTEXT(ST_BUFFER(POLYGONFROMTEXT(' POLYGON((9 9,5 2,4 5,9 9))'), +SRID(GEOMETRYFROMTEXT(' MULTIPOINT(8 4,5 0,7 8,6 9,3 4,7 3,5 5) ')))) +POLYGON((9 9,5 2,4 5,9 
9)) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 9acdb1a87c2..a1d2ec862b7 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -834,6 +834,17 @@ create table t1 (g geometry not null); insert into t1 values(default); ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field drop table t1; +CREATE TABLE t1 (a GEOMETRY); +CREATE VIEW v1 AS SELECT GeomFromwkb(ASBINARY(a)) FROM t1; +CREATE VIEW v2 AS SELECT a FROM t1; +DESCRIBE v1; +Field Type Null Key Default Extra +GeomFromwkb(ASBINARY(a)) geometry YES NULL +DESCRIBE v2; +Field Type Null Key Default Extra +a geometry YES NULL +DROP VIEW v1,v2; +DROP TABLE t1; create table t1 (name VARCHAR(100), square GEOMETRY); INSERT INTO t1 VALUES("center", GeomFromText('POLYGON (( 0 0, 0 2, 2 2, 2 0, 0 0))')); INSERT INTO t1 VALUES("small", GeomFromText('POLYGON (( 0 0, 0 1, 1 1, 1 0, 0 0))')); diff --git a/mysql-test/r/grant5.result b/mysql-test/r/grant5.result new file mode 100644 index 00000000000..2df394c0432 --- /dev/null +++ b/mysql-test/r/grant5.result @@ -0,0 +1,2 @@ +SHOW GRANTS FOR root@invalid_host; +ERROR 42000: There is no such grant defined for user 'root' on host 'invalid_host' diff --git a/mysql-test/r/group_min_max.result b/mysql-test/r/group_min_max.result index 81cdad8c523..06a8a8a06b8 100644 --- a/mysql-test/r/group_min_max.result +++ b/mysql-test/r/group_min_max.result @@ -3536,7 +3536,7 @@ COUNT(DISTINCT a, b) SUM(DISTINCT a) 0 NULL EXPLAIN SELECT SUM(DISTINCT a), MAX(b) FROM t2 GROUP BY a; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range NULL a 5 NULL 9 Using index for group-by +1 SIMPLE t2 index NULL a 15 NULL 16 Using index SELECT SUM(DISTINCT a), MAX(b) FROM t2 GROUP BY a; SUM(DISTINCT a) MAX(b) 1 8 @@ -3564,7 +3564,7 @@ SELECT 42 * (a + c + COUNT(DISTINCT c, a, b)) FROM t2 GROUP BY a, b, c; 168 EXPLAIN SELECT (SUM(DISTINCT a) + MAX(b)) FROM t2 GROUP BY a; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range NULL a 5 NULL 9 Using index for group-by +1 SIMPLE t2 index NULL a 15 NULL 16 Using index SELECT (SUM(DISTINCT a) + MAX(b)) FROM t2 GROUP BY a; (SUM(DISTINCT a) + MAX(b)) 9 @@ -3593,6 +3593,58 @@ id select_type table type possible_keys key key_len ref rows Extra drop table t1; # End of test#50539. # +# Bug#17217128 - BAD INTERACTION BETWEEN MIN/MAX AND +# "HAVING SUM(DISTINCT)": WRONG RESULTS. 
+# +CREATE TABLE t (a INT, b INT, KEY(a,b)); +INSERT INTO t VALUES (1,1), (2,2), (3,3), (4,4), (1,0), (3,2), (4,5); +ANALYZE TABLE t; +Table Op Msg_type Msg_text +test.t analyze status OK +SELECT a, SUM(DISTINCT a), MIN(b) FROM t GROUP BY a; +a SUM(DISTINCT a) MIN(b) +1 1 0 +2 2 2 +3 3 2 +4 4 4 +EXPLAIN SELECT a, SUM(DISTINCT a), MIN(b) FROM t GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL a 10 NULL 7 Using index +SELECT a, SUM(DISTINCT a), MAX(b) FROM t GROUP BY a; +a SUM(DISTINCT a) MAX(b) +1 1 1 +2 2 2 +3 3 3 +4 4 5 +EXPLAIN SELECT a, SUM(DISTINCT a), MAX(b) FROM t GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL a 10 NULL 7 Using index +SELECT a, MAX(b) FROM t GROUP BY a HAVING SUM(DISTINCT a); +a MAX(b) +1 1 +2 2 +3 3 +4 5 +EXPLAIN SELECT a, MAX(b) FROM t GROUP BY a HAVING SUM(DISTINCT a); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL a 10 NULL 7 Using index +SELECT SUM(DISTINCT a), MIN(b), MAX(b) FROM t; +SUM(DISTINCT a) MIN(b) MAX(b) +10 0 5 +EXPLAIN SELECT SUM(DISTINCT a), MIN(b), MAX(b) FROM t; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL a 10 NULL 7 Using index +SELECT a, SUM(DISTINCT a), MIN(b), MAX(b) FROM t GROUP BY a; +a SUM(DISTINCT a) MIN(b) MAX(b) +1 1 0 1 +2 2 2 2 +3 3 2 3 +4 4 4 5 +EXPLAIN SELECT a, SUM(DISTINCT a), MIN(b), MAX(b) FROM t GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t index NULL a 10 NULL 7 Using index +DROP TABLE t; +# # MDEV-4219 A simple select query returns random data (upstream bug#68473) # drop table if exists faulty; diff --git a/mysql-test/r/group_min_max_innodb.result b/mysql-test/r/group_min_max_innodb.result index 320c4b2b750..f3511b0ad4a 100644 --- a/mysql-test/r/group_min_max_innodb.result +++ b/mysql-test/r/group_min_max_innodb.result @@ -118,3 +118,171 @@ COUNT(DISTINCT a) 1 DROP TABLE t1; End of 5.5 tests +# +# Bug#17909656 - WRONG RESULTS FOR A SIMPLE QUERY WITH GROUP BY +# +CREATE TABLE t0 ( +i1 INTEGER NOT NULL +); +INSERT INTO t0 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10), +(11),(12),(13),(14),(15),(16),(17),(18),(19),(20), +(21),(22),(23),(24),(25),(26),(27),(28),(29),(30); +CREATE TABLE t1 ( +c1 CHAR(1) NOT NULL, +i1 INTEGER NOT NULL, +i2 INTEGER NOT NULL, +UNIQUE KEY k1 (c1,i2) +) ENGINE=InnoDB; +INSERT INTO t1 SELECT 'A',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'B',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'C',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'D',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'E',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'F',i1,i1 FROM t0; +CREATE TABLE t2 ( +c1 CHAR(1) NOT NULL, +i1 INTEGER NOT NULL, +i2 INTEGER NOT NULL, +UNIQUE KEY k2 (c1,i1,i2) +) ENGINE=InnoDB; +INSERT INTO t2 SELECT 'A',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'B',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'C',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'D',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'E',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'F',i1,i1 FROM t0; +ANALYZE TABLE t1; +ANALYZE TABLE t2; +EXPLAIN SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' AND i2 = 17) OR ( c1 = 'F') +GROUP BY c1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range k1 k1 5 NULL 31 Using where; Using index +SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' AND i2 = 17) OR ( c1 = 'F') +GROUP BY c1; +c1 max(i2) +C 17 +F 30 +EXPLAIN SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR ( c1 = 'F' AND i2 = 17)) +GROUP BY c1; +id select_type 
table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range k1 k1 5 NULL 31 Using where; Using index +SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR ( c1 = 'F' AND i2 = 17)) +GROUP BY c1; +c1 max(i2) +C 30 +F 17 +EXPLAIN SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR c1 = 'F' ) AND ( i2 = 17 ) +GROUP BY c1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range k1 k1 5 NULL 2 Using where; Using index +SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR c1 = 'F' ) AND ( i2 = 17 ) +GROUP BY c1; +c1 max(i2) +C 17 +F 17 +EXPLAIN SELECT c1, max(i2) FROM t1 +WHERE ((c1 = 'C' AND (i2 = 40 OR i2 = 30)) OR ( c1 = 'F' AND (i2 = 40 ))) +GROUP BY c1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range k1 k1 5 NULL 3 Using where; Using index +SELECT c1, max(i2) FROM t1 +WHERE ((c1 = 'C' AND (i2 = 40 OR i2 = 30)) OR ( c1 = 'F' AND (i2 = 40 ))) +GROUP BY c1; +c1 max(i2) +C 30 +EXPLAIN SELECT c1, i1, max(i2) FROM t2 +WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ) +GROUP BY c1,i1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range k2 k2 5 NULL 59 Using where; Using index +SELECT c1, i1, max(i2) FROM t2 +WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ) +GROUP BY c1,i1; +c1 i1 max(i2) +C 17 17 +F 17 17 +EXPLAIN SELECT c1, i1, max(i2) FROM t2 +WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )) +GROUP BY c1,i1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range k2 k2 5 NULL 58 Using where; Using index +SELECT c1, i1, max(i2) FROM t2 +WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )) +GROUP BY c1,i1; +c1 i1 max(i2) +C 17 17 +F 17 17 +EXPLAIN SELECT c1, i1, max(i2) FROM t2 +WHERE ((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35) OR ( i2 = 17 )) +GROUP BY c1,i1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index k2 k2 9 NULL 180 Using where; Using index +SELECT c1, i1, max(i2) FROM t2 +WHERE ((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35) OR ( i2 = 17 )) +GROUP BY c1,i1; +c1 i1 max(i2) +A 17 17 +B 17 17 +C 1 1 +C 2 2 +C 3 3 +C 4 4 +C 5 5 +C 6 6 +C 7 7 +C 8 8 +C 9 9 +C 10 10 +C 11 11 +C 12 12 +C 13 13 +C 14 14 +C 15 15 +C 16 16 +C 17 17 +C 18 18 +C 19 19 +C 20 20 +C 21 21 +C 22 22 +C 23 23 +C 24 24 +C 25 25 +C 26 26 +C 27 27 +C 28 28 +C 29 29 +C 30 30 +D 17 17 +E 17 17 +F 1 1 +F 2 2 +F 3 3 +F 4 4 +F 5 5 +F 6 6 +F 7 7 +F 8 8 +F 9 9 +F 10 10 +F 11 11 +F 12 12 +F 13 13 +F 14 14 +F 15 15 +F 16 16 +F 17 17 +F 18 18 +F 19 19 +F 20 20 +F 21 21 +F 22 22 +F 23 23 +F 24 24 +F 25 25 +F 26 26 +F 27 27 +F 28 28 +F 29 29 +F 30 30 +DROP TABLE t0,t1,t2; diff --git a/mysql-test/r/have_mysql_upgrade.result b/mysql-test/r/have_mysql_upgrade.result deleted file mode 100644 index 952bea420f9..00000000000 --- a/mysql-test/r/have_mysql_upgrade.result +++ /dev/null @@ -1,2 +0,0 @@ -have_mysql_upgrade -1 diff --git a/mysql-test/r/huge_frm-6224.result b/mysql-test/r/huge_frm-6224.result new file mode 100644 index 00000000000..3772317c04d --- /dev/null +++ b/mysql-test/r/huge_frm-6224.result @@ -0,0 +1 @@ +ERROR HY000: The definition for table `t1` is too big diff --git a/mysql-test/r/innodb_load_xa.result b/mysql-test/r/innodb_load_xa.result new file mode 100644 index 00000000000..85e6d52c098 --- /dev/null +++ b/mysql-test/r/innodb_load_xa.result @@ -0,0 +1,21 @@ +install plugin innodb soname 'ha_innodb'; +Warnings: +Warning 1105 Cannot enable tc-log at run-time. 
XA features of InnoDB are disabled +select engine,support,transactions,xa from information_schema.engines where engine='innodb'; +engine support transactions xa +InnoDB YES YES NO +create table t1 (a int) engine=innodb; +start transaction; +insert t1 values (1); +insert t1 values (2); +commit; +include/show_binlog_events.inc +Log_name Pos Event_type Server_id End_log_pos Info +mysqld-bin.000001 # Gtid # # GTID #-#-# +mysqld-bin.000001 # Query # # use `test`; create table t1 (a int) engine=innodb +mysqld-bin.000001 # Gtid # # BEGIN GTID #-#-# +mysqld-bin.000001 # Query # # use `test`; insert t1 values (1) +mysqld-bin.000001 # Query # # use `test`; insert t1 values (2) +mysqld-bin.000001 # Query # # COMMIT +drop table t1; +uninstall plugin innodb; diff --git a/mysql-test/r/innodb_mysql_lock2.result b/mysql-test/r/innodb_mysql_lock2.result index 17dd747de6f..54203c140a2 100644 --- a/mysql-test/r/innodb_mysql_lock2.result +++ b/mysql-test/r/innodb_mysql_lock2.result @@ -331,13 +331,14 @@ Success: 'update v2 set j= j-10 where j = 3' takes shared row locks on 't1'. # 4.1 SELECT/SET with a stored function which does not # modify data and uses SELECT in its turn. # -# In theory there is no need to take row locks on the table +# There is no need to take row locks on the table # being selected from in SF as the call to such function -# won't get into the binary log. In practice, however, we -# discover that fact too late in the process to be able to -# affect the decision what locks should be taken. -# Hence, strong locks are taken in this case. -Success: 'select f1()' takes shared row locks on 't1'. +# won't get into the binary log. +# +# However in practice innodb takes strong lock on tables +# being selected from within SF, when SF is called from +# non SELECT statements like 'set' statement below. +Success: 'select f1()' doesn't take row locks on 't1'. Success: 'set @a:= f1()' takes shared row locks on 't1'. # # 4.2 INSERT (or other statement which modifies data) with @@ -364,13 +365,15 @@ Success: 'set @a:= f2()' takes shared row locks on 't1'. # modify data and reads a table through subselect # in a control construct. # -# Again, in theory a call to this function won't get to the -# binary log and thus no locking is needed. But in practice -# we don't detect this fact early enough (get_lock_type_for_table()) -# to avoid taking row locks. -Success: 'select f3()' takes shared row locks on 't1'. +# Call to this function won't get to the +# binary log and thus no locking is needed. +# +# However in practice innodb takes strong lock on tables +# being selected from within SF, when SF is called from +# non SELECT statements like 'set' statement below. +Success: 'select f3()' doesn't take row locks on 't1'. Success: 'set @a:= f3()' takes shared row locks on 't1'. -Success: 'select f4()' takes shared row locks on 't1'. +Success: 'select f4()' doesn't take row locks on 't1'. Success: 'set @a:= f4()' takes shared row locks on 't1'. # # 4.5. INSERT (or other statement which modifies data) with @@ -398,13 +401,15 @@ Success: 'set @a:= f5()' takes shared row locks on 't1'. # doesn't modify data and reads tables through # a view. # -# Once again, in theory, calls to such functions won't -# get into the binary log and thus don't need row -# locks. But in practice this fact is discovered -# too late to have any effect. -Success: 'select f6()' takes shared row locks on 't1'. +# Calls to such functions won't get into +# the binary log and thus don't need row locks. 
+# +# However in practice innodb takes strong lock on tables +# being selected from within SF, when SF is called from +# non SELECT statements like 'set' statement below. +Success: 'select f6()' doesn't take row locks on 't1'. Success: 'set @a:= f6()' takes shared row locks on 't1'. -Success: 'select f7()' takes shared row locks on 't1'. +Success: 'select f7()' doesn't take row locks on 't1'. Success: 'set @a:= f7()' takes shared row locks on 't1'. # # 4.8 INSERT which uses stored function which @@ -431,10 +436,9 @@ Success: 'select f9()' takes shared row locks on 't1'. # data and reads a table indirectly, by calling another # function. # -# In theory, calls to such functions won't get into the binary -# log and thus don't need to acquire row locks. But in practice -# this fact is discovered too late to have any effect. -Success: 'select f10()' takes shared row locks on 't1'. +# Calls to such functions won't get into the binary +# log and thus don't need to acquire row locks. +Success: 'select f10()' doesn't take row locks on 't1'. # # 4.11 INSERT which uses a stored function which doesn't modify # data and reads a table indirectly, by calling another @@ -494,10 +498,9 @@ Success: 'select f14()' takes shared row locks on 't1'. # 5.3 SELECT that calls a function that doesn't modify data and # uses a CALL statement that reads a table via SELECT. # -# In theory, calls to such functions won't get into the binary -# log and thus don't need to acquire row locks. But in practice -# this fact is discovered too late to have any effect. -Success: 'select f15()' takes shared row locks on 't1'. +# Calls to such functions won't get into the binary +# log and thus don't need to acquire row locks. +Success: 'select f15()' doesn't take row locks on 't1'. # # 5.4 INSERT which calls function which doesn't modify data and # uses CALL statement which reads table through SELECT. 
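The innodb_mysql_lock2.result hunks above record the new expectation: a stored function that only reads data takes no InnoDB row locks when it is invoked from a plain SELECT (the call never reaches the binary log), while invoking the same function from a non-SELECT statement such as SET still takes shared row locks. A minimal sketch of that scenario follows; it is illustrative only and not part of the patch, and the names t1 and f1 merely mirror the test objects.

-- Read-only table and function, mirroring the test setup (sketch only).
CREATE TABLE t1 (i INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1), (2), (3);

DELIMITER |
CREATE FUNCTION f1() RETURNS INT
READS SQL DATA
BEGIN
  DECLARE j INT;
  SELECT i FROM t1 WHERE i = 1 INTO j;   -- only reads t1, never modifies it
  RETURN j;
END|
DELIMITER ;

-- Per the updated expectations in the hunks above:
SELECT f1();      -- doesn't take row locks on t1
SET @a := f1();   -- still takes shared row locks on t1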
diff --git a/mysql-test/r/innodb_mysql_sync.result b/mysql-test/r/innodb_mysql_sync.result index 21e9cd04c22..49d69d13e40 100644 --- a/mysql-test/r/innodb_mysql_sync.result +++ b/mysql-test/r/innodb_mysql_sync.result @@ -86,7 +86,10 @@ SET DEBUG_SYNC= 'now SIGNAL killed'; # Reaping: OPTIMIZE TABLE t1 Table Op Msg_type Msg_text test.t1 optimize note Table does not support optimize, doing recreate + analyze instead +test.t1 optimize error Query execution was interrupted test.t1 optimize status Operation failed +Warnings: +Error 1317 Query execution was interrupted # Connection default DROP TABLE t1; SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/r/ipv4_and_ipv6.result b/mysql-test/r/ipv4_and_ipv6.result new file mode 100644 index 00000000000..f15aeba65fa --- /dev/null +++ b/mysql-test/r/ipv4_and_ipv6.result @@ -0,0 +1,58 @@ +=============Test of '::1' ======================================== +mysqld is alive +CREATE USER testuser@'::1' identified by '1234'; +GRANT ALL ON test.* TO testuser@'::1'; +SHOW GRANTS FOR testuser@'::1'; +Grants for testuser@::1 +GRANT USAGE ON *.* TO 'testuser'@'::1' IDENTIFIED BY PASSWORD '*A4B6157319038724E3560894F7F932C8886EBFCF' +GRANT ALL PRIVILEGES ON `test`.* TO 'testuser'@'::1' +SET @nip= inet_aton('::1'); +SELECT @nip; +@nip +NULL +SELECT inet_ntoa(@nip); +inet_ntoa(@nip) +NULL +SELECT USER(); +USER() +root@localhost +SELECT current_user(); +current_user() +root@localhost +SHOW PROCESSLIST; +REVOKE ALL ON test.* FROM testuser@'::1'; +RENAME USER testuser@'::1' to testuser1@'::1'; +SET PASSWORD FOR testuser1@'::1' = PASSWORD ('9876'); +SELECT USER(); +USER() +root@localhost +DROP USER testuser1@'::1'; +=============Test of '127.0.0.1' (IPv4) =========================== +mysqld is alive +CREATE USER testuser@'127.0.0.1' identified by '1234'; +GRANT ALL ON test.* TO testuser@'127.0.0.1'; +SHOW GRANTS FOR testuser@'127.0.0.1'; +Grants for testuser@127.0.0.1 +GRANT USAGE ON *.* TO 'testuser'@'127.0.0.1' IDENTIFIED BY PASSWORD '*A4B6157319038724E3560894F7F932C8886EBFCF' +GRANT ALL PRIVILEGES ON `test`.* TO 'testuser'@'127.0.0.1' +SET @nip= inet_aton('127.0.0.1'); +SELECT @nip; +@nip +2130706433 +SELECT inet_ntoa(@nip); +inet_ntoa(@nip) +127.0.0.1 +SELECT USER(); +USER() +root@localhost +SELECT current_user(); +current_user() +root@localhost +SHOW PROCESSLIST; +REVOKE ALL ON test.* FROM testuser@'127.0.0.1'; +RENAME USER testuser@'127.0.0.1' to testuser1@'127.0.0.1'; +SET PASSWORD FOR testuser1@'127.0.0.1' = PASSWORD ('9876'); +SELECT USER(); +USER() +root@localhost +DROP USER testuser1@'127.0.0.1'; diff --git a/mysql-test/r/join_cache.result b/mysql-test/r/join_cache.result index 53812bfa227..253fb61dc27 100644 --- a/mysql-test/r/join_cache.result +++ b/mysql-test/r/join_cache.result @@ -5591,7 +5591,7 @@ set optimizer_switch=@tmp_optimizer_switch; DROP TABLE t1,t2,t3; # # Bug #1058071: LEFT JOIN using blobs -# (mdev-564) when join buffer size is small +# (MDEV-564) when join buffer size is small # CREATE TABLE t1 ( col269 decimal(31,10) unsigned DEFAULT NULL, @@ -5656,6 +5656,154 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 10 1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join) drop table t0,t1,t2; +# MDEV-6292: huge performance degradation for a sequence +# of LEFT JOIN operations when using join buffer +# +CREATE TABLE t1 ( +id int(11) NOT NULL AUTO_INCREMENT, +col1 varchar(255) NOT NULL DEFAULT '', +PRIMARY KEY (id) +) ENGINE=INNODB; +CREATE TABLE t2 ( +id 
int(11) NOT NULL AUTO_INCREMENT, +parent_id smallint(3) NOT NULL DEFAULT '0', +col2 varchar(25) NOT NULL DEFAULT '', +PRIMARY KEY (id) +) ENGINE=INNODB; +set join_buffer_size=8192; +set join_cache_level=0; +set @init_time:=now(); +SELECT t.* +FROM +t1 t +LEFT JOIN t2 c1 ON c1.parent_id = t.id AND c1.col2 = "val" + LEFT JOIN t2 c2 ON c2.parent_id = t.id AND c2.col2 = "val" + LEFT JOIN t2 c3 ON c3.parent_id = t.id AND c3.col2 = "val" + LEFT JOIN t2 c4 ON c4.parent_id = t.id AND c4.col2 = "val" + LEFT JOIN t2 c5 ON c5.parent_id = t.id AND c5.col2 = "val" + LEFT JOIN t2 c6 ON c6.parent_id = t.id AND c6.col2 = "val" + LEFT JOIN t2 c7 ON c7.parent_id = t.id AND c7.col2 = "val" + LEFT JOIN t2 c8 ON c8.parent_id = t.id AND c8.col2 = "val" + LEFT JOIN t2 c9 ON c9.parent_id = t.id AND c9.col2 = "val" + LEFT JOIN t2 c10 ON c10.parent_id = t.id AND c10.col2 = "val" + LEFT JOIN t2 c11 ON c11.parent_id = t.id AND c11.col2 = "val" + LEFT JOIN t2 c12 ON c12.parent_id = t.id AND c12.col2 = "val" + LEFT JOIN t2 c13 ON c13.parent_id = t.id AND c13.col2 = "val" + LEFT JOIN t2 c14 ON c14.parent_id = t.id AND c14.col2 = "val" + LEFT JOIN t2 c15 ON c15.parent_id = t.id AND c15.col2 = "val" + LEFT JOIN t2 c16 ON c16.parent_id = t.id AND c16.col2 = "val" + LEFT JOIN t2 c17 ON c17.parent_id = t.id AND c17.col2 = "val" + LEFT JOIN t2 c18 ON c18.parent_id = t.id AND c18.col2 = "val" + LEFT JOIN t2 c19 ON c19.parent_id = t.id AND c19.col2 = "val" + LEFT JOIN t2 c20 ON c20.parent_id = t.id AND c20.col2 = "val" + LEFT JOIN t2 c21 ON c21.parent_id = t.id AND c21.col2 = "val" + LEFT JOIN t2 c22 ON c22.parent_id = t.id AND c22.col2 = "val" + LEFT JOIN t2 c23 ON c23.parent_id = t.id AND c23.col2 = "val" + LEFT JOIN t2 c24 ON c24.parent_id = t.id AND c24.col2 = "val" + LEFT JOIN t2 c25 ON c25.parent_id = t.id AND c25.col2 = "val" +ORDER BY +col1; +id col1 +select timestampdiff(second, @init_time, now()) <= 1; +timestampdiff(second, @init_time, now()) <= 1 +1 +set join_cache_level=2; +set @init_time:=now(); +SELECT t.* +FROM +t1 t +LEFT JOIN t2 c1 ON c1.parent_id = t.id AND c1.col2 = "val" + LEFT JOIN t2 c2 ON c2.parent_id = t.id AND c2.col2 = "val" + LEFT JOIN t2 c3 ON c3.parent_id = t.id AND c3.col2 = "val" + LEFT JOIN t2 c4 ON c4.parent_id = t.id AND c4.col2 = "val" + LEFT JOIN t2 c5 ON c5.parent_id = t.id AND c5.col2 = "val" + LEFT JOIN t2 c6 ON c6.parent_id = t.id AND c6.col2 = "val" + LEFT JOIN t2 c7 ON c7.parent_id = t.id AND c7.col2 = "val" + LEFT JOIN t2 c8 ON c8.parent_id = t.id AND c8.col2 = "val" + LEFT JOIN t2 c9 ON c9.parent_id = t.id AND c9.col2 = "val" + LEFT JOIN t2 c10 ON c10.parent_id = t.id AND c10.col2 = "val" + LEFT JOIN t2 c11 ON c11.parent_id = t.id AND c11.col2 = "val" + LEFT JOIN t2 c12 ON c12.parent_id = t.id AND c12.col2 = "val" + LEFT JOIN t2 c13 ON c13.parent_id = t.id AND c13.col2 = "val" + LEFT JOIN t2 c14 ON c14.parent_id = t.id AND c14.col2 = "val" + LEFT JOIN t2 c15 ON c15.parent_id = t.id AND c15.col2 = "val" + LEFT JOIN t2 c16 ON c16.parent_id = t.id AND c16.col2 = "val" + LEFT JOIN t2 c17 ON c17.parent_id = t.id AND c17.col2 = "val" + LEFT JOIN t2 c18 ON c18.parent_id = t.id AND c18.col2 = "val" + LEFT JOIN t2 c19 ON c19.parent_id = t.id AND c19.col2 = "val" + LEFT JOIN t2 c20 ON c20.parent_id = t.id AND c20.col2 = "val" + LEFT JOIN t2 c21 ON c21.parent_id = t.id AND c21.col2 = "val" + LEFT JOIN t2 c22 ON c22.parent_id = t.id AND c22.col2 = "val" + LEFT JOIN t2 c23 ON c23.parent_id = t.id AND c23.col2 = "val" + LEFT JOIN t2 c24 ON c24.parent_id = t.id AND c24.col2 = "val" + LEFT JOIN t2 
c25 ON c25.parent_id = t.id AND c25.col2 = "val" +ORDER BY +col1; +id col1 +select timestampdiff(second, @init_time, now()) <= 1; +timestampdiff(second, @init_time, now()) <= 1 +1 +EXPLAIN +SELECT t.* +FROM +t1 t +LEFT JOIN t2 c1 ON c1.parent_id = t.id AND c1.col2 = "val" + LEFT JOIN t2 c2 ON c2.parent_id = t.id AND c2.col2 = "val" + LEFT JOIN t2 c3 ON c3.parent_id = t.id AND c3.col2 = "val" + LEFT JOIN t2 c4 ON c4.parent_id = t.id AND c4.col2 = "val" + LEFT JOIN t2 c5 ON c5.parent_id = t.id AND c5.col2 = "val" + LEFT JOIN t2 c6 ON c6.parent_id = t.id AND c6.col2 = "val" + LEFT JOIN t2 c7 ON c7.parent_id = t.id AND c7.col2 = "val" + LEFT JOIN t2 c8 ON c8.parent_id = t.id AND c8.col2 = "val" + LEFT JOIN t2 c9 ON c9.parent_id = t.id AND c9.col2 = "val" + LEFT JOIN t2 c10 ON c10.parent_id = t.id AND c10.col2 = "val" + LEFT JOIN t2 c11 ON c11.parent_id = t.id AND c11.col2 = "val" + LEFT JOIN t2 c12 ON c12.parent_id = t.id AND c12.col2 = "val" + LEFT JOIN t2 c13 ON c13.parent_id = t.id AND c13.col2 = "val" + LEFT JOIN t2 c14 ON c14.parent_id = t.id AND c14.col2 = "val" + LEFT JOIN t2 c15 ON c15.parent_id = t.id AND c15.col2 = "val" + LEFT JOIN t2 c16 ON c16.parent_id = t.id AND c16.col2 = "val" + LEFT JOIN t2 c17 ON c17.parent_id = t.id AND c17.col2 = "val" + LEFT JOIN t2 c18 ON c18.parent_id = t.id AND c18.col2 = "val" + LEFT JOIN t2 c19 ON c19.parent_id = t.id AND c19.col2 = "val" + LEFT JOIN t2 c20 ON c20.parent_id = t.id AND c20.col2 = "val" + LEFT JOIN t2 c21 ON c21.parent_id = t.id AND c21.col2 = "val" + LEFT JOIN t2 c22 ON c22.parent_id = t.id AND c22.col2 = "val" + LEFT JOIN t2 c23 ON c23.parent_id = t.id AND c23.col2 = "val" + LEFT JOIN t2 c24 ON c24.parent_id = t.id AND c24.col2 = "val" + LEFT JOIN t2 c25 ON c25.parent_id = t.id AND c25.col2 = "val" +ORDER BY +col1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t ALL NULL NULL NULL NULL 1 Using temporary; Using filesort +1 SIMPLE c1 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (flat, BNL join) +1 SIMPLE c2 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c3 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c4 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c5 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c6 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c7 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c8 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c9 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c10 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c11 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c12 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c13 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c14 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c15 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c16 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c17 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c18 
ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c19 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c20 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c21 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c22 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c23 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c24 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +1 SIMPLE c25 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) +set join_buffer_size=default; +set join_cache_level = default; +DROP TABLE t1,t2; # # MDEV-5123 Remove duplicated conditions pushed both to join_tab->select_cond and join_tab->cache_select->cond for blocked joins. # @@ -5704,7 +5852,7 @@ select @counter; drop table t1,t2,t3; set expensive_subquery_limit=default; # -# mdev-6071: EXPLAIN chooses to use join buffer while execution turns it down +# MDEV-6071: EXPLAIN chooses to use join buffer while execution turns it down # create table t1 (a int); insert into t1 values diff --git a/mysql-test/r/join_nested_jcl6.result b/mysql-test/r/join_nested_jcl6.result index 6b5a50ba978..3b47645ca79 100644 --- a/mysql-test/r/join_nested_jcl6.result +++ b/mysql-test/r/join_nested_jcl6.result @@ -705,18 +705,18 @@ t0.b=t1.b AND (t8.b=t9.b OR t8.c IS NULL) AND (t9.a=1); a b a b a b a b a b a b a b a b a b a b -1 2 3 2 4 2 1 2 3 2 2 2 6 2 2 2 0 2 1 2 -1 2 3 2 4 2 1 2 4 2 2 2 6 2 2 2 0 2 1 2 1 2 3 2 4 2 1 2 3 2 3 1 6 2 1 1 NULL NULL 1 1 1 2 3 2 4 2 1 2 4 2 3 1 6 2 1 1 NULL NULL 1 1 +1 2 3 2 4 2 1 2 3 2 2 2 6 2 2 2 0 2 1 2 +1 2 3 2 4 2 1 2 4 2 2 2 6 2 2 2 0 2 1 2 1 2 3 2 4 2 1 2 3 2 3 1 6 2 1 1 NULL NULL 1 2 1 2 3 2 4 2 1 2 4 2 3 1 6 2 1 1 NULL NULL 1 2 1 2 3 2 4 2 1 2 3 2 3 3 NULL NULL NULL NULL NULL NULL 1 1 1 2 3 2 4 2 1 2 4 2 3 3 NULL NULL NULL NULL NULL NULL 1 1 1 2 3 2 4 2 1 2 3 2 3 3 NULL NULL NULL NULL NULL NULL 1 2 1 2 3 2 4 2 1 2 4 2 3 3 NULL NULL NULL NULL NULL NULL 1 2 -1 2 3 2 5 3 NULL NULL NULL NULL 2 2 6 2 2 2 0 2 1 2 1 2 3 2 5 3 NULL NULL NULL NULL 3 1 6 2 1 1 NULL NULL 1 1 +1 2 3 2 5 3 NULL NULL NULL NULL 2 2 6 2 2 2 0 2 1 2 1 2 3 2 5 3 NULL NULL NULL NULL 3 1 6 2 1 1 NULL NULL 1 2 1 2 3 2 5 3 NULL NULL NULL NULL 3 3 NULL NULL NULL NULL NULL NULL 1 1 1 2 3 2 5 3 NULL NULL NULL NULL 3 3 NULL NULL NULL NULL NULL NULL 1 2 diff --git a/mysql-test/r/join_outer_jcl6.result b/mysql-test/r/join_outer_jcl6.result index 6f3da3efdd7..80a52ad45a2 100644 --- a/mysql-test/r/join_outer_jcl6.result +++ b/mysql-test/r/join_outer_jcl6.result @@ -696,9 +696,9 @@ insert into t2 values (1,3), (2,3); insert into t3 values (2,4), (3,4); select * from t1 left join t2 on b1 = a1 left join t3 on c1 = a1 and b1 is null; a1 a2 b1 b2 c1 c2 +3 2 NULL NULL 3 4 1 2 1 3 NULL NULL 2 2 2 3 NULL NULL -3 2 NULL NULL 3 4 explain select * from t1 left join t2 on b1 = a1 left join t3 on c1 = a1 and b1 is null; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 3 diff --git a/mysql-test/r/key_cache.result b/mysql-test/r/key_cache.result index fad980c810c..8634beb290f 100644 --- a/mysql-test/r/key_cache.result +++ b/mysql-test/r/key_cache.result @@ -2,6 +2,7 @@ drop table if exists t1, t2, t3; SET @save_key_buffer_size=@@key_buffer_size; SET @save_key_cache_block_size=@@key_cache_block_size; SET 
@save_key_cache_segments=@@key_cache_segments; +SET @save_key_cache_file_hash_size=@@key_cache_file_hash_size; SELECT @@key_buffer_size, @@small.key_buffer_size; @@key_buffer_size @@small.key_buffer_size 2097152 131072 @@ -84,15 +85,15 @@ select @@key_buffer_size; select @@key_cache_block_size; @@key_cache_block_size 1024 +select @@key_cache_file_hash_size; +@@key_cache_file_hash_size +512 set global keycache1.key_buffer_size=1024*1024; create table t1 (p int primary key, a char(10)) delay_key_write=1; create table t2 (p int primary key, i int, a char(10), key k1(i), key k2(a)); -show status like 'key_blocks_used'; -Variable_name Value -Key_blocks_used 0 -show status like 'key_blocks_unused'; -Variable_name Value -Key_blocks_unused KEY_BLOCKS_UNUSED +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; +key_blocks_unused key_blocks_used +0 0 insert into t1 values (1, 'qqqq'), (11, 'yyyy'); insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'), (3, 1, 'yyyy'), (4, 3, 'zzzz'); @@ -108,12 +109,9 @@ p i a 4 3 zzzz update t1 set p=2 where p=1; update t2 set i=2 where i=1; -show status like 'key_blocks_used'; -Variable_name Value -Key_blocks_used 4 -show status like 'key_blocks_unused'; -Variable_name Value -Key_blocks_unused KEY_BLOCKS_UNUSED +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; +key_blocks_unused key_blocks_used +4 4 cache index t1 key (`primary`) in keycache1; Table Op Msg_type Msg_text test.t1 assign_to_keycache status OK @@ -270,12 +268,9 @@ Table Op Msg_type Msg_text test.t1 assign_to_keycache status OK test.t2 assign_to_keycache status OK drop table t1,t2,t3; -show status like 'key_blocks_used'; -Variable_name Value -Key_blocks_used 4 -show status like 'key_blocks_unused'; -Variable_name Value -Key_blocks_unused KEY_BLOCKS_UNUSED +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; +key_blocks_unused key_blocks_used +0 4 create table t1 (a int primary key); cache index t1 in keycache2; Table Op Msg_type Msg_text @@ -558,6 +553,7 @@ KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_B default 1 NULL 2097152 1024 4 # 0 0 0 0 0 small NULL NULL 1048576 1024 1 # 0 0 0 0 0 set global key_buffer_size=32*1024; +set global key_cache_file_hash_size=128; select @@key_buffer_size; @@key_buffer_size 32768 @@ -833,3 +829,4 @@ set global keycache1.key_buffer_size=0; set global keycache2.key_buffer_size=0; set global key_buffer_size=@save_key_buffer_size; set global key_cache_segments=@save_key_cache_segments; +set global key_cache_file_hash_size=@save_key_cache_file_hash_size; diff --git a/mysql-test/r/kill_processlist-6619.result b/mysql-test/r/kill_processlist-6619.result new file mode 100644 index 00000000000..588c8e6d139 --- /dev/null +++ b/mysql-test/r/kill_processlist-6619.result @@ -0,0 +1,14 @@ +connect con1,localhost,root,,; +SHOW PROCESSLIST; +Id User Host db Command Time State Info Progress +# root # test Sleep # # NULL 0.000 +# root # test Query # # SHOW PROCESSLIST 0.000 +connection default; +KILL QUERY con_id; +connection con1; +SHOW PROCESSLIST; +ERROR 70100: Query execution was interrupted +SHOW PROCESSLIST; +Id User Host db Command Time State Info Progress +# root # test Sleep # # NULL 0.000 +# root # test Query # # 
SHOW PROCESSLIST 0.000 diff --git a/mysql-test/r/lock_sync.result b/mysql-test/r/lock_sync.result index 8fe94679e70..219cc08342e 100644 --- a/mysql-test/r/lock_sync.result +++ b/mysql-test/r/lock_sync.result @@ -27,6 +27,7 @@ drop table if exists t0, t1, t2, t3, t4, t5; drop view if exists v1, v2; drop procedure if exists p1; drop procedure if exists p2; +drop procedure if exists p3; drop function if exists f1; drop function if exists f2; drop function if exists f3; @@ -42,6 +43,8 @@ drop function if exists f12; drop function if exists f13; drop function if exists f14; drop function if exists f15; +drop function if exists f16; +drop function if exists f17; create table t1 (i int primary key); insert into t1 values (1), (2), (3), (4), (5); create table t2 (j int primary key); @@ -146,6 +149,26 @@ declare k int; call p2(k); return k; end| +create function f16() returns int +begin +create temporary table if not exists temp1 (a int); +insert into temp1 select * from t1; +drop temporary table temp1; +return 1; +end| +create function f17() returns int +begin +declare j int; +select i from t1 where i = 1 into j; +call p3; +return 1; +end| +create procedure p3() +begin +create temporary table if not exists temp1 (a int); +insert into temp1 select * from t1; +drop temporary table temp1; +end| create trigger t4_bi before insert on t4 for each row begin declare k int; @@ -185,6 +208,7 @@ end| # once during its execution. show create procedure p1; show create procedure p2; +show create procedure p3; show create function f1; show create function f2; show create function f3; @@ -200,6 +224,8 @@ show create function f12; show create function f13; show create function f14; show create function f15; +show create function f16; +show create function f17; # Switch back to connection 'default'. # # 1. Statements that read tables and do not use subqueries. @@ -359,14 +385,11 @@ Success: 'update v2 set j= j-10 where j = 3' doesn't allow concurrent inserts in # 4.1 SELECT/SET with a stored function which does not # modify data and uses SELECT in its turn. # -# In theory there is no need to take strong locks on the table +# There is no need to take strong locks on the table # being selected from in SF as the call to such function -# won't get into the binary log. In practice, however, we -# discover that fact too late in the process to be able to -# affect the decision what locks should be taken. -# Hence, strong locks are taken in this case. -Success: 'select f1()' doesn't allow concurrent inserts into 't1'. -Success: 'set @a:= f1()' doesn't allow concurrent inserts into 't1'. +# won't get into the binary log. +Success: 'select f1()' allows concurrent inserts into 't1'. +Success: 'set @a:= f1()' allows concurrent inserts into 't1'. # # 4.2 INSERT (or other statement which modifies data) with # a stored function which does not modify data and uses @@ -392,14 +415,12 @@ Success: 'set @a:= f2()' doesn't allow concurrent inserts into 't1'. # modify data and reads a table through subselect # in a control construct. # -# Again, in theory a call to this function won't get to the -# binary log and thus no strong lock is needed. But in practice -# we don't detect this fact early enough (get_lock_type_for_table()) -# to avoid taking a strong lock. -Success: 'select f3()' doesn't allow concurrent inserts into 't1'. -Success: 'set @a:= f3()' doesn't allow concurrent inserts into 't1'. -Success: 'select f4()' doesn't allow concurrent inserts into 't1'. -Success: 'set @a:= f4()' doesn't allow concurrent inserts into 't1'. 
+# Call to this function won't get to the +# binary log and thus no strong lock is needed. +Success: 'select f3()' allows concurrent inserts into 't1'. +Success: 'set @a:= f3()' allows concurrent inserts into 't1'. +Success: 'select f4()' allows concurrent inserts into 't1'. +Success: 'set @a:= f4()' allows concurrent inserts into 't1'. # # 4.5. INSERT (or other statement which modifies data) with # a stored function which does not modify data and reads @@ -426,14 +447,13 @@ Success: 'set @a:= f5()' doesn't allow concurrent inserts into 't1'. # doesn't modify data and reads tables through # a view. # -# Once again, in theory, calls to such functions won't -# get into the binary log and thus don't need strong -# locks. But in practice this fact is discovered -# too late to have any effect. -Success: 'select f6()' doesn't allow concurrent inserts into 't1'. -Success: 'set @a:= f6()' doesn't allow concurrent inserts into 't1'. -Success: 'select f7()' doesn't allow concurrent inserts into 't1'. -Success: 'set @a:= f7()' doesn't allow concurrent inserts into 't1'. +# Calls to such functions won't get into +# the binary log and thus don't need strong +# locks. +Success: 'select f6()' allows concurrent inserts into 't1'. +Success: 'set @a:= f6()' allows concurrent inserts into 't1'. +Success: 'select f7()' allows concurrent inserts into 't1'. +Success: 'set @a:= f7()' allows concurrent inserts into 't1'. # # 4.8 INSERT which uses stored function which # doesn't modify data and reads a table @@ -459,10 +479,9 @@ Success: 'select f9()' doesn't allow concurrent inserts into 't1'. # data and reads a table indirectly, by calling another # function. # -# In theory, calls to such functions won't get into the binary -# log and thus don't need to acquire strong locks. But in practice -# this fact is discovered too late to have any effect. -Success: 'select f10()' doesn't allow concurrent inserts into 't1'. +# Calls to such functions won't get into the binary +# log and thus don't need to acquire strong locks. +Success: 'select f10()' allows concurrent inserts into 't1'. # # 4.11 INSERT which uses a stored function which doesn't modify # data and reads a table indirectly, by calling another @@ -501,6 +520,26 @@ Success: 'select f12((select i+10 from t1 where i=1))' allows concurrent inserts # uses. Therefore it should take strong locks on the data it reads. Success: 'insert into t2 values (f13((select i+10 from t1 where i=1)))' doesn't allow concurrent inserts into 't1'. # +# 4.15 SELECT/SET with a stored function which +# inserts data into a temporary table using +# SELECT on t1. +# +# Since this statement is written to the binary log it should +# be serialized with concurrent statements affecting the data it +# uses. Therefore it should take strong locks on the data it reads. +Success: 'select f16()' doesn't allow concurrent inserts into 't1'. +Success: 'set @a:= f16()' doesn't allow concurrent inserts into 't1'. +# +# 4.16 SELECT/SET with a stored function which call procedure +# which inserts data into a temporary table using +# SELECT on t1. +# +# Since this statement is written to the binary log it should +# be serialized with concurrent statements affecting the data it +# uses. Therefore it should take strong locks on the data it reads. +Success: 'select f17()' doesn't allow concurrent inserts into 't1'. +Success: 'set @a:= f17()' doesn't allow concurrent inserts into 't1'. +# # 5. Statements that read tables through stored procedures. 
# # @@ -522,10 +561,9 @@ Success: 'select f14()' doesn't allow concurrent inserts into 't1'. # 5.3 SELECT that calls a function that doesn't modify data and # uses a CALL statement that reads a table via SELECT. # -# In theory, calls to such functions won't get into the binary -# log and thus don't need to acquire strong locks. But in practice -# this fact is discovered too late to have any effect. -Success: 'select f15()' doesn't allow concurrent inserts into 't1'. +# Calls to such functions won't get into the binary +# log and thus don't need to acquire strong locks. +Success: 'select f15()' allows concurrent inserts into 't1'. # # 5.4 INSERT which calls function which doesn't modify data and # uses CALL statement which reads table through SELECT. @@ -585,9 +623,12 @@ drop function f12; drop function f13; drop function f14; drop function f15; +drop function f16; +drop function f17; drop view v1, v2; drop procedure p1; drop procedure p2; +drop procedure p3; drop table t1, t2, t3, t4, t5; set @@global.concurrent_insert= @old_concurrent_insert; # diff --git a/mysql-test/r/log_tables_upgrade.result b/mysql-test/r/log_tables_upgrade.result index 6f7bd64eba3..9900f6d6b5a 100644 --- a/mysql-test/r/log_tables_upgrade.result +++ b/mysql-test/r/log_tables_upgrade.result @@ -11,13 +11,8 @@ Table Op Msg_type Msg_text test.bug49823 repair status OK RENAME TABLE general_log TO renamed_general_log; RENAME TABLE test.bug49823 TO general_log; -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -48,9 +43,16 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... 
OK DROP TABLE general_log; RENAME TABLE renamed_general_log TO general_log; diff --git a/mysql-test/r/max_statement_time.result b/mysql-test/r/max_statement_time.result index 2681575daea..38334c59baa 100644 --- a/mysql-test/r/max_statement_time.result +++ b/mysql-test/r/max_statement_time.result @@ -112,7 +112,7 @@ UPDATE t1 SET a = 2; ERROR 70100: Query execution was interrupted (max_statement_time exceeded) SHOW WARNINGS; Level Code Message -Error 1967 Query execution was interrupted (max_statement_time exceeded) +Error 1968 Query execution was interrupted (max_statement_time exceeded) ROLLBACK; DROP TABLE t1; diff --git a/mysql-test/r/myisam_explain_non_select_all.result b/mysql-test/r/myisam_explain_non_select_all.result index a9eeee8548c..688c1ccfec1 100644 --- a/mysql-test/r/myisam_explain_non_select_all.result +++ b/mysql-test/r/myisam_explain_non_select_all.result @@ -755,6 +755,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 11 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: @@ -1231,6 +1232,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 # Status of testing query execution: @@ -1275,6 +1277,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: @@ -1361,6 +1364,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: @@ -1407,6 +1411,7 @@ Variable_name Value Variable_name Value Handler_read_rnd 1 Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: @@ -1540,6 +1545,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 # Status of testing query execution: @@ -1668,6 +1674,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 # Status of testing query execution: @@ -1675,6 +1682,7 @@ Variable_name Value Handler_read_rnd 5 Handler_read_rnd_next 27 Handler_update 5 +Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 @@ -1712,6 +1720,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: @@ -1719,6 +1728,7 @@ Variable_name Value Handler_read_rnd 1 Handler_read_rnd_next 27 Handler_update 1 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 @@ -1799,12 +1809,14 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: Variable_name Value Handler_read_rnd 1 Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 @@ -1844,12 +1856,14 @@ Variable_name Value Variable_name Value Handler_read_rnd 1 Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 # Status of testing query execution: Variable_name Value Handler_read_rnd 1 
Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 1 Sort_scan 1 @@ -1977,6 +1991,7 @@ Variable_name Value # Status of "equivalent" SELECT query execution: Variable_name Value Handler_read_rnd_next 27 +Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 # Status of testing query execution: @@ -1984,6 +1999,7 @@ Variable_name Value Handler_read_rnd 5 Handler_read_rnd_next 27 Handler_update 4 +Sort_priority_queue_sorts 1 Sort_rows 5 Sort_scan 1 @@ -2067,6 +2083,7 @@ Variable_name Value Variable_name Value Handler_read_key 1 Handler_read_next 2 +Sort_priority_queue_sorts 1 Sort_range 1 Sort_rows 2 # Status of testing query execution: @@ -2075,6 +2092,7 @@ Handler_read_key 1 Handler_read_next 2 Handler_read_rnd 2 Handler_update 2 +Sort_priority_queue_sorts 1 Sort_range 1 Sort_rows 2 @@ -2105,6 +2123,7 @@ Variable_name Value Variable_name Value Handler_read_key 1 Handler_read_next 2 +Sort_priority_queue_sorts 1 Sort_range 1 Sort_rows 2 # Status of testing query execution: @@ -2625,6 +2644,7 @@ Variable_name Value Variable_name Value Handler_read_key 3 Handler_read_rnd_next 10 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 # Status of testing query execution: @@ -2632,6 +2652,7 @@ Variable_name Value Handler_read_key 3 Handler_read_rnd_next 8 Handler_update 1 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 @@ -2674,12 +2695,14 @@ Variable_name Value Variable_name Value Handler_read_key 3 Handler_read_rnd_next 10 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 # Status of testing query execution: Variable_name Value Handler_read_key 3 Handler_read_rnd_next 10 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 @@ -2724,12 +2747,14 @@ Variable_name Value Variable_name Value Handler_read_key 3 Handler_read_rnd_next 10 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 # Status of testing query execution: Variable_name Value Handler_read_key 3 Handler_read_rnd_next 10 +Sort_priority_queue_sorts 1 Sort_rows 3 Sort_scan 1 diff --git a/mysql-test/r/mysql_client_test_comp.result b/mysql-test/r/mysql_client_test_comp.result new file mode 100644 index 00000000000..b9cac467b92 --- /dev/null +++ b/mysql-test/r/mysql_client_test_comp.result @@ -0,0 +1,4 @@ +SET @old_slow_query_log= @@global.slow_query_log; +call mtr.add_suppression(" Error reading file './client_test_db/test_frm_bug.frm'"); +ok +SET @@global.slow_query_log= @old_slow_query_log; diff --git a/mysql-test/r/mysql_upgrade.result b/mysql-test/r/mysql_upgrade.result index 08eafcc1d5e..1cc448b3ba2 100644 --- a/mysql-test/r/mysql_upgrade.result +++ b/mysql-test/r/mysql_upgrade.result @@ -1,11 +1,6 @@ Run mysql_upgrade once -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -35,20 +30,22 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... 
OK Run it again - should say already completed This installation of MySQL is already upgraded to VERSION, use --force if you still need to run mysql_upgrade Force should run it regardless of wether it's been run before -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -78,20 +75,22 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK CREATE USER mysqltest1@'%' IDENTIFIED by 'sakila'; GRANT ALL ON *.* TO mysqltest1@'%'; Run mysql_upgrade with password protected account -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -121,9 +120,16 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK DROP USER mysqltest1@'%'; Version check failed. Got the following error when calling the 'mysql' command line client @@ -133,13 +139,8 @@ Run mysql_upgrade with a non existing server socket mysqlcheck: Got error: 2005: Unknown MySQL server host 'not_existing_host' (errno) when trying to connect FATAL ERROR: Upgrade failed set GLOBAL sql_mode='STRICT_ALL_TABLES,ANSI_QUOTES,NO_ZERO_DATE'; -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -169,9 +170,16 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... 
OK set GLOBAL sql_mode=default; # @@ -182,13 +190,8 @@ CREATE PROCEDURE testproc() BEGIN END; UPDATE mysql.proc SET character_set_client = NULL WHERE name LIKE 'testproc'; UPDATE mysql.proc SET collation_connection = NULL WHERE name LIKE 'testproc'; UPDATE mysql.proc SET db_collation = NULL WHERE name LIKE 'testproc'; -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -218,9 +221,16 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK CALL testproc(); DROP PROCEDURE testproc; @@ -234,13 +244,8 @@ WARNING: NULL values of the 'db_collation' column ('mysql.proc' table) have been GRANT USAGE ON *.* TO 'user3'@'%'; GRANT ALL PRIVILEGES ON `roelt`.`test2` TO 'user3'@'%'; Run mysql_upgrade with all privileges on a user -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -270,9 +275,16 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK SHOW GRANTS FOR 'user3'@'%'; Grants for user3@% @@ -280,8 +292,39 @@ GRANT USAGE ON *.* TO 'user3'@'%' GRANT ALL PRIVILEGES ON `roelt`.`test2` TO 'user3'@'%' DROP USER 'user3'@'%'; End of 5.1 tests -The --upgrade-system-tables option was used, databases won't be touched. -Phase 3/3: Running 'mysql_fix_privilege_tables'... +The --upgrade-system-tables option was used, user tables won't be touched. +Phase 1/4: Checking mysql database +Processing databases +mysql +mysql.column_stats OK +mysql.columns_priv OK +mysql.db OK +mysql.event OK +mysql.func OK +mysql.gtid_slave_pos OK +mysql.help_category OK +mysql.help_keyword OK +mysql.help_relation OK +mysql.help_topic OK +mysql.host OK +mysql.index_stats OK +mysql.innodb_index_stats OK +mysql.innodb_table_stats OK +mysql.plugin OK +mysql.proc OK +mysql.procs_priv OK +mysql.proxies_priv OK +mysql.roles_mapping OK +mysql.servers OK +mysql.table_stats OK +mysql.tables_priv OK +mysql.time_zone OK +mysql.time_zone_leap_second OK +mysql.time_zone_name OK +mysql.time_zone_transition OK +mysql.time_zone_transition_type OK +mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... OK # # Bug#11827359 60223: MYSQL_UPGRADE PROBLEM WITH OPTION @@ -289,13 +332,8 @@ OK # # Droping the previously created mysql_upgrade_info file.. # Running mysql_upgrade with --skip-write-binlog.. 
-Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -325,9 +363,16 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK # # MDEV-4332 Increase username length from 16 characters @@ -341,13 +386,8 @@ GRANT INSERT ON mysql.user TO very_long_user_name_number_2; GRANT UPDATE (User) ON mysql.db TO very_long_user_name_number_1; GRANT UPDATE (User) ON mysql.db TO very_long_user_name_number_2; CREATE PROCEDURE test.pr() BEGIN END; -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -377,9 +417,16 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK SELECT definer FROM mysql.proc WHERE db = 'test' AND name = 'pr'; definer diff --git a/mysql-test/r/mysql_upgrade_no_innodb.result b/mysql-test/r/mysql_upgrade_no_innodb.result index ad6f4617657..320dcfe5d3b 100644 --- a/mysql-test/r/mysql_upgrade_no_innodb.result +++ b/mysql-test/r/mysql_upgrade_no_innodb.result @@ -1,3 +1,46 @@ -The --upgrade-system-tables option was used, databases won't be touched. -Phase 3/3: Running 'mysql_fix_privilege_tables'... +The --upgrade-system-tables option was used, user tables won't be touched. +Phase 1/4: Checking mysql database +Processing databases +mysql +mysql.column_stats OK +mysql.columns_priv OK +mysql.db OK +mysql.event OK +mysql.func OK +mysql.gtid_slave_pos OK +mysql.help_category OK +mysql.help_keyword OK +mysql.help_relation OK +mysql.help_topic OK +mysql.host OK +mysql.index_stats OK +mysql.innodb_index_stats +Error : Unknown storage engine 'InnoDB' +error : Corrupt +mysql.innodb_table_stats +Error : Unknown storage engine 'InnoDB' +error : Corrupt +mysql.plugin OK +mysql.proc OK +mysql.procs_priv OK +mysql.proxies_priv OK +mysql.roles_mapping OK +mysql.servers OK +mysql.table_stats OK +mysql.tables_priv OK +mysql.time_zone OK +mysql.time_zone_leap_second OK +mysql.time_zone_name OK +mysql.time_zone_transition OK +mysql.time_zone_transition_type OK +mysql.user OK + +Repairing tables +mysql.innodb_index_stats +Error : Unknown storage engine 'InnoDB' +error : Corrupt +mysql.innodb_table_stats +Error : Unknown storage engine 'InnoDB' +error : Corrupt +Phase 2/4: Running 'mysql_fix_privilege_tables'... 
OK diff --git a/mysql-test/r/mysql_upgrade_ssl.result b/mysql-test/r/mysql_upgrade_ssl.result index 87b7d51e8b7..14671aa6409 100644 --- a/mysql-test/r/mysql_upgrade_ssl.result +++ b/mysql-test/r/mysql_upgrade_ssl.result @@ -1,13 +1,8 @@ # # Bug#55672 mysql_upgrade dies with internal error # -Phase 1/3: Fixing table and database names -Phase 2/3: Checking and upgrading tables +Phase 1/4: Checking mysql database Processing databases -information_schema -mtr -mtr.global_suppressions OK -mtr.test_suppressions OK mysql mysql.column_stats OK mysql.columns_priv OK @@ -37,7 +32,14 @@ mysql.time_zone_name OK mysql.time_zone_transition OK mysql.time_zone_transition_type OK mysql.user OK +Phase 2/4: Running 'mysql_fix_privilege_tables'... +Phase 3/4: Fixing table and database names +Phase 4/4: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK performance_schema test -Phase 3/3: Running 'mysql_fix_privilege_tables'... OK diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index 0d7e2240769..d7ecd0fda5a 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -279,6 +279,11 @@ The following options may be given as the first argument: The default size of key cache blocks --key-cache-division-limit=# The minimum percentage of warm blocks in key cache + --key-cache-file-hash-size=# + Number of hash buckets for open and changed files. If + you have a lot of MyISAM files open you should increase + this for faster flush of changes. A good value is + probably 1/10 of number of possible open MyISAM files. --key-cache-segments=# The number of segments in a key cache -L, --language=name Client error messages in given language. May be given as @@ -1031,8 +1036,8 @@ The following options may be given as the first argument: created to handle remaining clients. --thread-stack=# The stack size for each thread --time-format=name The TIME format (ignored) - --timed-mutexes Specify whether to time mutexes (only InnoDB mutexes are - currently supported) + --timed-mutexes Specify whether to time mutexes. Deprecated, has no + effect. 
--tmp-table-size=# If an internal in-memory temporary table exceeds this size, MySQL will automatically convert it to an on-disk MyISAM or Aria table @@ -1154,6 +1159,7 @@ key-buffer-size 134217728 key-cache-age-threshold 300 key-cache-block-size 1024 key-cache-division-limit 100 +key-cache-file-hash-size 512 key-cache-segments 0 large-pages FALSE lc-messages en_US diff --git a/mysql-test/r/mysqltest.result b/mysql-test/r/mysqltest.result index 7228daa2712..e258b1d156f 100644 --- a/mysql-test/r/mysqltest.result +++ b/mysql-test/r/mysqltest.result @@ -680,6 +680,9 @@ txt b is b and more is more txt a is a and less is more +sflfdt 'ABCDfF bbddff h' bs txt; +txt +ABCDfF bbddff h create table t2 ( a char(10)); garbage; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'garbage' at line 1 diff --git a/mysql-test/r/order_by_innodb.result b/mysql-test/r/order_by_innodb.result new file mode 100644 index 00000000000..3c6c4053741 --- /dev/null +++ b/mysql-test/r/order_by_innodb.result @@ -0,0 +1,13 @@ +drop table if exists t0,t1,t2,t3; +# +# MDEV-6434: Wrong result (extra rows) with ORDER BY, multiple-column index, InnoDB +# +CREATE TABLE t1 (a INT, b INT, c INT, d TEXT, KEY idx(a,b,c)) ENGINE=InnoDB; +INSERT INTO t1 (a,c) VALUES +(8, 9),(8, 10),(13, 15),(16, 17),(16, 18),(16, 19),(20, 21), +(20, 22),(20, 24),(20, 25),(20, 26),(20, 27),(20, 28); +SELECT * FROM t1 WHERE a = 8 AND (b = 1 OR b IS NULL) ORDER BY c; +a b c d +8 NULL 9 NULL +8 NULL 10 NULL +DROP TABLE t1; diff --git a/mysql-test/r/order_by_sortkey.result b/mysql-test/r/order_by_sortkey.result index 717780f0af2..c1d9609eb47 100644 --- a/mysql-test/r/order_by_sortkey.result +++ b/mysql-test/r/order_by_sortkey.result @@ -45,6 +45,7 @@ FLUSH STATUS; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -153,6 +154,7 @@ f0 f1 f2 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 100 Sort_scan 1 diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index b8011656415..233494238a5 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -2562,6 +2562,50 @@ id id2 dob address city hours_worked_per_week weeks_worked_last_year 16 16 1949-11-07 address16 city16 40 52 50 50 1923-09-08 address50 city50 40 52 drop table t1; +# +# MDEV-6322: The PARTITION engine can return wrong query results +# +CREATE TABLE t1 ( +CustomerID varchar(5) DEFAULT NULL, +CompanyName varchar(40) DEFAULT NULL, +ContactName varchar(30) DEFAULT NULL, +ContactTitle varchar(30) DEFAULT NULL, +Address varchar(60) DEFAULT NULL, +City varchar(15) DEFAULT NULL, +Region varchar(15) DEFAULT NULL, +PostalCode varchar(10) DEFAULT NULL, +Country varchar(15) NOT NULL, +Phone varchar(24) DEFAULT NULL, +Fax varchar(24) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS(Country) +(PARTITION p1 VALUES IN ('Germany','Austria','Switzerland','Poland'), +PARTITION p2 VALUES IN ('USA','Canada','Mexico'), +PARTITION p3 VALUES IN ('Spain','Portugal','Italy'), +PARTITION p4 VALUES IN ('UK','Ireland'), +PARTITION p5 VALUES IN ('France','Belgium'), +PARTITION p6 VALUES IN ('Sweden','Finland','Denmark','Norway'), +PARTITION p7 VALUES IN ('Venezuela','Argentina','Brazil') +); +INSERT INTO t1 (CustomerID, City, Country) VALUES +('ANATR','México D.F','Mexico'), 
+('ANTON','México D.F','Mexico'), +('BOTTM','Tsawassen','Canada'), +('CENTC','México D.F','Mexico'), +('GREAL','Eugene','USA'), +('HUNGC','Elgin','USA'), +('LAUGB','Vancouver','Canada'), +('LAZYK','Walla Walla','USA'), +('LETSS','San Francisco','USA'), +('LONEP','Portland','USA'); +SELECT * FROM t1 WHERE Country = 'USA'; +CustomerID CompanyName ContactName ContactTitle Address City Region PostalCode Country Phone Fax +GREAL NULL NULL NULL NULL Eugene NULL NULL USA NULL NULL +HUNGC NULL NULL NULL NULL Elgin NULL NULL USA NULL NULL +LAZYK NULL NULL NULL NULL Walla Walla NULL NULL USA NULL NULL +LETSS NULL NULL NULL NULL San Francisco NULL NULL USA NULL NULL +LONEP NULL NULL NULL NULL Portland NULL NULL USA NULL NULL +DROP TABLE t1; CREATE TABLE t1 ( d DATE NOT NULL) PARTITION BY RANGE( YEAR(d) ) ( PARTITION p0 VALUES LESS THAN (1960), diff --git a/mysql-test/r/partition_innodb.result b/mysql-test/r/partition_innodb.result index 82ccbe9c6a8..92c9c01db2d 100644 --- a/mysql-test/r/partition_innodb.result +++ b/mysql-test/r/partition_innodb.result @@ -694,6 +694,34 @@ count(*) drop table t3; drop table t1,t2; # +# MySQL Bug#71095: Wrong results with PARTITION BY LIST COLUMNS() +# +create table t1(c1 int, c2 int, c3 int, c4 int, +primary key(c1,c2)) engine=InnoDB +partition by list columns(c2) +(partition p1 values in (1,2) engine=InnoDB, +partition p2 values in (3,4) engine=InnoDB); +insert into t1 values (1,1,1,1),(2,3,1,1); +select * from t1 where c1=2 and c2=3; +c1 c2 c3 c4 +2 3 1 1 +drop table t1; +# +# MySQL Bug#72803: Wrong "Impossible where" with LIST partitioning +# also MDEV-6240: Wrong "Impossible where" with LIST partitioning +# +CREATE TABLE t1 ( d DATE) ENGINE = InnoDB +PARTITION BY LIST COLUMNS (d) +( +PARTITION p0 VALUES IN ('1990-01-01','1991-01-01'), +PARTITION p1 VALUES IN ('1981-01-01') +); +INSERT INTO t1 (d) VALUES ('1991-01-01'); +SELECT * FROM t1 WHERE d = '1991-01-01'; +d +1991-01-01 +DROP TABLE t1; +# # MDEV-5963: InnoDB: Assertion failure in file row0sel.cc line 2503, # Failing assertion: 0 with "key ptr now exceeds key end by 762 bytes" # (independent testcase for Oracle Bug#13947868) diff --git a/mysql-test/r/partition_pruning.result b/mysql-test/r/partition_pruning.result index 0a4cf9932c0..e52c2c7d886 100644 --- a/mysql-test/r/partition_pruning.result +++ b/mysql-test/r/partition_pruning.result @@ -3302,6 +3302,120 @@ id select_type table partitions type possible_keys key key_len ref rows Extra 1 SIMPLE t1 p0,p1,p2 ALL NULL NULL NULL NULL 100 Using where drop table t0, t1; # +# Bug#71095: Wrong results with PARTITION BY LIST COLUMNS() +# +CREATE TABLE t1 +(c1 int, +c2 int, +c3 int, +c4 int, +PRIMARY KEY (c1,c2)) +PARTITION BY LIST COLUMNS (c2) +(PARTITION p1 VALUES IN (1,2), +PARTITION p2 VALUES IN (3,4)); +INSERT INTO t1 VALUES (1, 1, 1, 1), (2, 3, 1, 1); +INSERT INTO t1 VALUES (1, 2, 1, 1), (2, 4, 1, 1); +SELECT * FROM t1 WHERE c1 = 1 AND c2 < 1; +c1 c2 c3 c4 +SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 1; +c1 c2 c3 c4 +1 1 1 1 +SELECT * FROM t1 WHERE c1 = 1 AND c2 = 1; +c1 c2 c3 c4 +1 1 1 1 +SELECT * FROM t1 WHERE c1 = 1 AND c2 >= 1; +c1 c2 c3 c4 +1 1 1 1 +1 2 1 1 +SELECT * FROM t1 WHERE c1 = 1 AND c2 > 1; +c1 c2 c3 c4 +1 2 1 1 +SELECT * FROM t1 WHERE c1 = 1 AND c2 < 3; +c1 c2 c3 c4 +1 1 1 1 +1 2 1 1 +SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 3; +c1 c2 c3 c4 +1 1 1 1 +1 2 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 3; +c1 c2 c3 c4 +2 3 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 = 3; +c1 c2 c3 c4 +2 3 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 3; +c1 c2 c3 c4 +2 3 1 1 
+2 4 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 > 3; +c1 c2 c3 c4 +2 4 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 < 4; +c1 c2 c3 c4 +2 3 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 4; +c1 c2 c3 c4 +2 3 1 1 +2 4 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 = 4; +c1 c2 c3 c4 +2 4 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 4; +c1 c2 c3 c4 +2 4 1 1 +SELECT * FROM t1 WHERE c1 = 2 AND c2 > 4; +c1 c2 c3 c4 +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 < 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1 range PRIMARY PRIMARY 8 NULL 1 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 = 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1 const PRIMARY PRIMARY 8 const,const 1 +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 >= 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 > 1; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 < 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1 range PRIMARY PRIMARY 8 NULL 1 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 = 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p2 const PRIMARY PRIMARY 8 const,const 1 +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p2 range PRIMARY PRIMARY 8 NULL 1 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 > 3; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p2 range PRIMARY PRIMARY 8 NULL 1 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 < 4; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 4; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 = 4; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p2 const PRIMARY PRIMARY 8 const,const 1 +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 4; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 p2 range PRIMARY PRIMARY 8 NULL 1 Using where +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 > 4; +id select_type table partitions type 
possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +DROP TABLE t1; +# # MDEV-6239: Partition pruning is not working as expected in an inner query # create table t1 diff --git a/mysql-test/r/plugin_loaderr.result b/mysql-test/r/plugin_loaderr.result index 95e5ec794d2..d1189217355 100644 --- a/mysql-test/r/plugin_loaderr.result +++ b/mysql-test/r/plugin_loaderr.result @@ -8,3 +8,6 @@ PLUGIN_TYPE STORAGE ENGINE PLUGIN_LIBRARY NULL PLUGIN_LIBRARY_VERSION NULL LOAD_OPTION ON +# +# MDEV-6351 --plugin=force has no effect for built-in plugins +# diff --git a/mysql-test/r/rpl_mysqldump_slave.result b/mysql-test/r/rpl_mysqldump_slave.result index 4b29ff99f61..9d2fe860f47 100644 --- a/mysql-test/r/rpl_mysqldump_slave.result +++ b/mysql-test/r/rpl_mysqldump_slave.result @@ -4,18 +4,59 @@ include/master-slave.inc # New --dump-slave, --apply-slave-statements functionality # use test; +-- SET GLOBAL gtid_slave_pos=''; CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; STOP ALL SLAVES; +-- SET GLOBAL gtid_slave_pos=''; CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; START ALL SLAVES; STOP ALL SLAVES; +-- SET GLOBAL gtid_slave_pos=''; CHANGE MASTER '' TO MASTER_HOST='127.0.0.1', MASTER_PORT=MASTER_MYPORT, MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; START ALL SLAVES; start slave; Warnings: Note 1254 Slave is already running +-- SET GLOBAL gtid_slave_pos=''; CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; start slave; Warnings: Note 1254 Slave is already running +*** Test mysqldump --dump-slave GTID functionality. +SET gtid_seq_no = 1000; +CREATE TABLE t1 (a INT PRIMARY KEY); +DROP TABLE t1; +CREATE TABLE t2 (a INT PRIMARY KEY); +DROP TABLE t2; + +1. --dump-slave=1 + +SET GLOBAL gtid_slave_pos='0-1-1001'; +CHANGE MASTER '' TO MASTER_USE_GTID=slave_pos; +-- CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; + +2. --dump-slave=2 + +-- SET GLOBAL gtid_slave_pos='0-1-1001'; +-- CHANGE MASTER '' TO MASTER_USE_GTID=slave_pos; +-- CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START; +*** Test mysqldump --master-data GTID functionality. + +1. --master-data=1 + +-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START; +CHANGE MASTER TO MASTER_USE_GTID=slave_pos; +SET GLOBAL gtid_slave_pos='0-2-1003'; + +2. --master-data=2 + +-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START; +-- CHANGE MASTER TO MASTER_USE_GTID=slave_pos; +-- SET GLOBAL gtid_slave_pos='0-2-1003'; + +3. 
--master-data --single-transaction + +-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START; +CHANGE MASTER TO MASTER_USE_GTID=slave_pos; +SET GLOBAL gtid_slave_pos='0-2-1003'; include/rpl_end.inc diff --git a/mysql-test/r/sighup-6580.result b/mysql-test/r/sighup-6580.result new file mode 100644 index 00000000000..b77a7c6407f --- /dev/null +++ b/mysql-test/r/sighup-6580.result @@ -0,0 +1,3 @@ +select 'ok'; +ok +ok diff --git a/mysql-test/r/single_delete_update.result b/mysql-test/r/single_delete_update.result index d4534a2c2d8..9332effeb56 100644 --- a/mysql-test/r/single_delete_update.result +++ b/mysql-test/r/single_delete_update.result @@ -18,6 +18,7 @@ NULL 15 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -36,6 +37,7 @@ DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -71,6 +73,7 @@ NULL 15 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -89,6 +92,7 @@ DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 8 Sort_scan 1 @@ -121,6 +125,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -139,6 +144,7 @@ DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -175,6 +181,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -193,6 +200,7 @@ DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -223,6 +231,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -241,6 +250,7 @@ DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -269,6 +279,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -287,6 +298,7 @@ DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -318,6 +330,7 @@ NULL 13 13 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 1 Sort_rows 4 Sort_scan 0 @@ -341,6 +354,7 @@ DELETE FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 1 Sort_rows 4 Sort_scan 0 @@ -378,6 +392,7 @@ NULL 14 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -396,6 +411,7 @@ DELETE FROM t2 
WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -431,6 +447,7 @@ a b c SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -449,6 +466,7 @@ DELETE FROM t2 ORDER BY a, b DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 16 Sort_scan 1 @@ -493,6 +511,7 @@ a b c SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -517,6 +536,7 @@ a b c SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -535,6 +555,7 @@ DELETE FROM t2 ORDER BY a DESC, b DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -571,6 +592,7 @@ NULL 15 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -589,6 +611,7 @@ UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -629,6 +652,7 @@ NULL 15 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -647,6 +671,7 @@ UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -684,6 +709,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -702,6 +728,7 @@ UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -738,6 +765,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -756,6 +784,7 @@ UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -786,6 +815,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -804,6 +834,7 @@ UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -833,6 +864,7 @@ a b c d SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -851,6 +883,7 @@ UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 1 Sort_scan 1 @@ -883,6 +916,7 @@ NULL 13 13 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 
+Sort_priority_queue_sorts 0 Sort_range 1 Sort_rows 4 Sort_scan 0 @@ -906,6 +940,7 @@ UPDATE t2 SET i = 123 WHERE key1 < 13 or key2 < 14 ORDER BY key1; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 1 Sort_rows 4 Sort_scan 0 @@ -947,6 +982,7 @@ NULL 14 SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -965,6 +1001,7 @@ UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -1005,6 +1042,7 @@ a b c SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -1023,6 +1061,7 @@ UPDATE t2 SET c = 10 ORDER BY a, b DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 1 Sort_range 0 Sort_rows 5 Sort_scan 1 @@ -1061,6 +1100,7 @@ a b c SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -1085,6 +1125,7 @@ a b c SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 @@ -1103,6 +1144,7 @@ UPDATE t2 SET c = 10 ORDER BY a DESC, b DESC LIMIT 5; SHOW SESSION STATUS LIKE 'Sort%'; Variable_name Value Sort_merge_passes 0 +Sort_priority_queue_sorts 0 Sort_range 0 Sort_rows 0 Sort_scan 0 diff --git a/mysql-test/r/sp-bugs.result b/mysql-test/r/sp-bugs.result index b45944a3795..ccccacd09a5 100644 --- a/mysql-test/r/sp-bugs.result +++ b/mysql-test/r/sp-bugs.result @@ -275,3 +275,9 @@ END $$ CALL test_5531(1); DROP PROCEDURE test_5531; DROP TABLE t1; +create procedure sp() begin +commit; +end| +start transaction; +call sp(); +drop procedure sp; diff --git a/mysql-test/r/sp-bugs2.result b/mysql-test/r/sp-bugs2.result new file mode 100644 index 00000000000..3e48f0fb5d2 --- /dev/null +++ b/mysql-test/r/sp-bugs2.result @@ -0,0 +1,23 @@ +CREATE TABLE t1 (i INT); +SET @a = 2; +CREATE TABLE IF NOT EXISTS t2 (i INT) ENGINE = MyISAM +AS SELECT * FROM t1; +CREATE TABLE IF NOT EXISTS t2 (i INT) ENGINE = MyISAM +AS SELECT * FROM t1; +Warnings: +Note 1050 Table 't2' already exists +DROP TABLE t2; +CREATE PROCEDURE sp() +BEGIN +REPEAT +CREATE TABLE IF NOT EXISTS t2 (i INT) ENGINE = MyISAM +AS SELECT * FROM t1; +SET @a = @a - 1; +UNTIL @a = 0 +END REPEAT ; +END | +CALL sp(); +Warnings: +Note 1050 Table 't2' already exists +DROP PROCEDURE sp; +DROP TABLE t1, t2; diff --git a/mysql-test/r/stat_tables-enospc.result b/mysql-test/r/stat_tables-enospc.result new file mode 100644 index 00000000000..f0d76f04eee --- /dev/null +++ b/mysql-test/r/stat_tables-enospc.result @@ -0,0 +1,10 @@ +call mtr.add_suppression("No space left on device"); +create table t1 (a varchar(255), b varchar(255), c varchar(255)); +set use_stat_tables=PREFERABLY, optimizer_use_condition_selectivity=3; +set debug_dbug='+d,simulate_file_write_error'; +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze Error Error writing file 'tmp-file' (Errcode: 28 "No space left on device") +test.t1 analyze status Operation failed +set debug_dbug=''; +drop table t1; diff --git a/mysql-test/r/subselect_mat.result b/mysql-test/r/subselect_mat.result index 30b020cbcf6..3385612874b 100644 --- a/mysql-test/r/subselect_mat.result +++ 
b/mysql-test/r/subselect_mat.result @@ -2108,6 +2108,43 @@ EXECUTE stmt; a DROP TABLE t1, t2; DROP VIEW v2; +# +# MDEV-6289 : Unexpected results when querying information_schema +# +CREATE TABLE t1 ( +id int(11) unsigned NOT NULL AUTO_INCREMENT, +db varchar(254) NOT NULL DEFAULT '', +PRIMARY KEY (id), +UNIQUE KEY db (db) +) DEFAULT CHARSET=utf8; +INSERT INTO t1 (db) VALUES ('mysqltest1'),('mysqltest2'),('mysqltest3'),('mysqltest4'); +drop database if exists mysqltest1; +drop database if exists mysqltest2; +drop database if exists mysqltest3; +drop database if exists mysqltest4; +create database mysqltest1; +create database mysqltest2; +create database mysqltest3; +create database mysqltest4; +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; +db +mysqltest4 +mysqltest3 +mysqltest2 +mysqltest1 +EXPLAIN EXTENDED +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2 100.00 Using temporary; Using filesort +1 PRIMARY t1 eq_ref db db 764 information_schema.schemata.SCHEMA_NAME 1 100.00 Using where; Using index +2 MATERIALIZED schemata ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 select `test`.`t1`.`db` AS `db` from `test`.`t1` semi join (`information_schema`.`schemata`) where (`test`.`t1`.`db` = `information_schema`.`schemata`.`SCHEMA_NAME`) order by `test`.`t1`.`db` desc +drop table t1; +drop database mysqltest1; +drop database mysqltest2; +drop database mysqltest3; +drop database mysqltest4; # End of 5.5 tests set @subselect_mat_test_optimizer_switch_value=null; set @@optimizer_switch='materialization=on,in_to_exists=off,semijoin=off'; diff --git a/mysql-test/r/subselect_sj_mat.result b/mysql-test/r/subselect_sj_mat.result index 7417ab56ff8..38690a2d6a1 100644 --- a/mysql-test/r/subselect_sj_mat.result +++ b/mysql-test/r/subselect_sj_mat.result @@ -2148,4 +2148,41 @@ EXECUTE stmt; a DROP TABLE t1, t2; DROP VIEW v2; +# +# MDEV-6289 : Unexpected results when querying information_schema +# +CREATE TABLE t1 ( +id int(11) unsigned NOT NULL AUTO_INCREMENT, +db varchar(254) NOT NULL DEFAULT '', +PRIMARY KEY (id), +UNIQUE KEY db (db) +) DEFAULT CHARSET=utf8; +INSERT INTO t1 (db) VALUES ('mysqltest1'),('mysqltest2'),('mysqltest3'),('mysqltest4'); +drop database if exists mysqltest1; +drop database if exists mysqltest2; +drop database if exists mysqltest3; +drop database if exists mysqltest4; +create database mysqltest1; +create database mysqltest2; +create database mysqltest3; +create database mysqltest4; +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; +db +mysqltest4 +mysqltest3 +mysqltest2 +mysqltest1 +EXPLAIN EXTENDED +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2 100.00 Using temporary; Using filesort +1 PRIMARY t1 eq_ref db db 764 information_schema.schemata.SCHEMA_NAME 1 100.00 Using where; Using index +2 MATERIALIZED schemata ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 select `test`.`t1`.`db` AS `db` from `test`.`t1` semi join (`information_schema`.`schemata`) where (`test`.`t1`.`db` = `information_schema`.`schemata`.`SCHEMA_NAME`) order by `test`.`t1`.`db` desc +drop table t1; +drop database 
mysqltest1; +drop database mysqltest2; +drop database mysqltest3; +drop database mysqltest4; # End of 5.5 tests diff --git a/mysql-test/r/table_options-5867.result b/mysql-test/r/table_options-5867.result new file mode 100644 index 00000000000..21041c7c5c3 --- /dev/null +++ b/mysql-test/r/table_options-5867.result @@ -0,0 +1,37 @@ +install soname 'ha_example'; +set sql_mode='ignore_bad_table_options'; +create table t1 ( +a int complex='c,f,f,f' invalid=3 +) engine=example ull=10000 str='dskj' one_or_two='one' yesno=0 +foobar=barfoo; +Warnings: +Warning 1911 Unknown option 'invalid' +Warning 1911 Unknown option 'foobar' +create table t2 (a int, key (a) some_option=2014); +Warnings: +Warning 1911 Unknown option 'some_option' +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL `complex`='c,f,f,f' `invalid`=3 +) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `ull`=10000 `str`='dskj' `one_or_two`='one' `yesno`=0 `foobar`=barfoo `VAROPT`='5' +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) DEFAULT NULL, + KEY `a` (`a`) `some_option`=2014 +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +set sql_mode=''; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL `complex`='c,f,f,f' /* `invalid`=3 */ +) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `ull`=10000 `str`='dskj' `one_or_two`='one' `yesno`=0 /* `foobar`=barfoo */ `VAROPT`='5' +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) DEFAULT NULL, + KEY `a` (`a`) /* `some_option`=2014 */ +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1, t2; +uninstall soname 'ha_example'; diff --git a/mysql-test/r/type_bit.result b/mysql-test/r/type_bit.result index e6f4db26c23..639a97be27b 100644 --- a/mysql-test/r/type_bit.result +++ b/mysql-test/r/type_bit.result @@ -722,7 +722,12 @@ DROP TABLE t1; CREATE TABLE IF NOT EXISTS t1 ( f1 bit(2) NOT NULL default b'' ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci; -ERROR 42000: Invalid default value for 'f1' +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` bit(2) NOT NULL DEFAULT b'0' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci +DROP TABLE t1; create table t1bit7 (a1 bit(7) not null) engine=MyISAM; create table t2bit7 (b1 bit(7)) engine=MyISAM; insert into t1bit7 values (b'1100000'); diff --git a/mysql-test/r/type_bit_innodb.result b/mysql-test/r/type_bit_innodb.result index 9bdd8658690..80fc942e77c 100644 --- a/mysql-test/r/type_bit_innodb.result +++ b/mysql-test/r/type_bit_innodb.result @@ -413,3 +413,12 @@ a ` drop table t1; End of 5.0 tests +create table t1(f1 bit(2) not null default b'10',f2 bit(14) not null default b'11110000111100'); +insert into t1 (f1) values (default); +insert into t1 values (b'',b''),('',''); +select hex(f1), hex(f2) from t1; +hex(f1) hex(f2) +2 3C3C +0 0 +0 0 +drop table t1; diff --git a/mysql-test/r/type_decimal.result b/mysql-test/r/type_decimal.result index fa36e9b5567..f8649f030bb 100644 --- a/mysql-test/r/type_decimal.result +++ b/mysql-test/r/type_decimal.result @@ -810,10 +810,10 @@ c1 drop table t1; SELECT 1 % .123456789123456789123456789123456789123456789123456789123456789123456789123456789 AS '%'; % -0.012345687012345687012345687012345687012345687012345687012345687012345687000000000 +0.012345687012345687012345687012 SELECT MOD(1, .123456789123456789123456789123456789123456789123456789123456789123456789123456789) AS 'MOD()'; MOD() 
-0.012345687012345687012345687012345687012345687012345687012345687012345687000000000 +0.012345687012345687012345687012 create table t1 (f1 decimal(6,6),f2 decimal(6,6) zerofill); insert into t1 values (-0.123456,0.123456); select group_concat(f1),group_concat(f2) from t1; diff --git a/mysql-test/r/type_newdecimal.result b/mysql-test/r/type_newdecimal.result index 5b3594fe503..fb10e65c0ce 100644 --- a/mysql-test/r/type_newdecimal.result +++ b/mysql-test/r/type_newdecimal.result @@ -703,7 +703,7 @@ select .7777777777777777777777777777777777777 * 777777777777777777.777777777777777777700000000000 select .7777777777777777777777777777777777777 - 0.1; .7777777777777777777777777777777777777 - 0.1 -0.6777777777777777777777777777777777777 +0.677777777777777777777777777778 select .343434343434343434 + .343434343434343434; .343434343434343434 + .343434343434343434 0.686868686868686868 @@ -1840,7 +1840,7 @@ Warnings: Note 1265 Data truncated for column 'c1' at row 4 DESC t2; Field Type Null Key Default Extra -c1 decimal(32,30) YES NULL +c1 decimal(33,30) YES NULL DROP TABLE t1,t2; CREATE TABLE t1 (a DECIMAL(30,30)); INSERT INTO t1 VALUES (0.1),(0.2),(0.3); @@ -1851,7 +1851,7 @@ Note 1265 Data truncated for column 'c1' at row 2 Note 1265 Data truncated for column 'c1' at row 3 DESC t2; Field Type Null Key Default Extra -c1 decimal(34,0) YES NULL +c1 decimal(33,30) YES NULL DROP TABLE t1,t2; CREATE TABLE t1 (a DECIMAL(30,30)); INSERT INTO t1 VALUES (0.1),(0.2),(0.3); diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result index 6d99cad30f0..4ecac34d9fa 100644 --- a/mysql-test/r/union.result +++ b/mysql-test/r/union.result @@ -1876,6 +1876,40 @@ SELECT(SELECT 1 AS a FROM dual ORDER BY a DESC LIMIT 1) AS dev; dev 1 # +# Bug #17059925 : UNIONS COMPUTES ROWS_EXAMINED INCORRECTLY +# +SET @old_slow_query_log= @@global.slow_query_log; +SET @old_log_output= @@global.log_output; +SET @old_long_query_time= @@long_query_time; +SET GLOBAL log_output= "TABLE"; +SET GLOBAL slow_query_log= ON; +SET SESSION long_query_time= 0; +CREATE TABLE t17059925 (a INT); +CREATE TABLE t2 (b INT); +CREATE TABLE t3 (c INT); +INSERT INTO t17059925 VALUES (1), (2), (3); +INSERT INTO t2 VALUES (4), (5), (6); +INSERT INTO t3 VALUES (7), (8), (9); +TRUNCATE table mysql.slow_log; +SELECT * FROM t17059925 UNION SELECT * FROM t2 UNION SELECT * FROM t3; +a +1 +2 +3 +4 +5 +6 +7 +8 +9 +SELECT sql_text, rows_examined FROM mysql.slow_log WHERE sql_text LIKE '%SELECT%t17059925%'; +sql_text rows_examined +SELECT * FROM t17059925 UNION SELECT * FROM t2 UNION SELECT * FROM t3 18 +DROP TABLE t17059925, t2, t3; +SET @@long_query_time= @old_long_query_time; +SET @@global.log_output= @old_log_output; +SET @@global.slow_query_log= @old_slow_query_log; +# # lp:1010729: Unexpected syntax error from UNION # (bug #54382) with single-table join nest # diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index 2ba64ed3003..a42c0c5abcb 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -189,6 +189,8 @@ select @@concurrent_insert; @@concurrent_insert AUTO set global timed_mutexes=ON; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. 
show variables like 'timed_mutexes'; Variable_name Value timed_mutexes ON @@ -196,6 +198,8 @@ select * from information_schema.session_variables where variable_name like 'tim VARIABLE_NAME VARIABLE_VALUE TIMED_MUTEXES ON set global timed_mutexes=0; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. show variables like 'timed_mutexes'; Variable_name Value timed_mutexes OFF @@ -1789,4 +1793,13 @@ set session rand_seed1=DEFAULT; ERROR 42000: Variable 'rand_seed1' doesn't have a default value set autocommit = values(v); ERROR 42S22: Unknown column 'v' in 'field list' +set session sql_mode=ansi_quotes; +select * from information_schema.session_variables where variable_name='sql_mode'; +VARIABLE_NAME VARIABLE_VALUE +SQL_MODE ANSI_QUOTES +show global status like 'foobar'; +Variable_name Value +select * from information_schema.session_variables where variable_name='sql_mode'; +VARIABLE_NAME VARIABLE_VALUE +SQL_MODE ANSI_QUOTES End of 5.5 tests diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 64b329e9e01..ca08c53cabe 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -4789,6 +4789,45 @@ DROP DATABASE IF EXISTS nodb; CREATE VIEW nodb.a AS SELECT 1; ERROR 42000: Unknown database 'nodb' # +# BUG#14117018 - MYSQL SERVER CREATES INVALID VIEW DEFINITION +# BUG#18405221 - SHOW CREATE VIEW OUTPUT INCORRECT +# +CREATE VIEW v1 AS (SELECT '' FROM DUAL); +CREATE VIEW v2 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL +(SELECT '' FROM DUAL); +CREATE VIEW v3 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL +(SELECT '' FROM DUAL) UNION ALL +(SELECT '' FROM DUAL); +CREATE VIEW v4 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL +(SELECT '' AS col2 FROM DUAL) UNION ALL +(SELECT '' FROM DUAL); +CREATE VIEW v5 AS (SELECT 'buggy' AS col1, 'fix' as col2 FROM DUAL) UNION ALL +(SELECT 'buggy' as a, 'fix' as a FROM DUAL); +# Name for the column in select1 is set properly with or +# without this fix. +SHOW CREATE VIEW v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS (select '' AS `Name_exp_1`) latin1 latin1_swedish_ci +# Name for the column in select2 is set with this fix. +# Without this fix, name would not have set for the +# columns in select2. +SHOW CREATE VIEW v2; +View Create View character_set_client collation_connection +v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS (select 'BUG#14117018' AS `col1`) union all (select '' AS `Name_exp_1`) latin1 latin1_swedish_ci +# Name for the field item in select2 & select3 is set with this fix. +# Without this fix, name would not have set for the +# columns in select2 & select3. +SHOW CREATE VIEW v3; +View Create View character_set_client collation_connection +v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS (select 'BUG#14117018' AS `col1`) union all (select '' AS `Name_exp_1`) union all (select '' AS `Name_exp_1`) latin1 latin1_swedish_ci +# Name for the field item in select3 is set with this fix. +# Without this fix, name would not have set for the +# columns in select3. 
+SHOW CREATE VIEW v4; +View Create View character_set_client collation_connection +v4 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v4` AS (select 'BUG#14117018' AS `col1`) union all (select '' AS `col2`) union all (select '' AS `Name_exp_1`) latin1 latin1_swedish_ci +DROP VIEW v1, v2, v3, v4, v5; +# # lp:833600 Wrong result with view + outer join + uncorrelated subquery (non-semijoin) # CREATE TABLE t1 ( a int, b int ); @@ -5300,6 +5339,61 @@ NULL 8 drop view v1; drop table t1,t2,t3; SET optimizer_switch=@save_optimizer_switch_MDEV_3874; +CREATE TABLE `t1` ( +`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, +`f0` int(11) unsigned NOT NULL DEFAULT '0', +`f1` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (`id`), +UNIQUE KEY `id` (`id`) +); +CREATE TABLE `t2` ( +`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, +`f02` bigint(20) unsigned NOT NULL DEFAULT '0', +`f03` int(11) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (`id`), +UNIQUE KEY `id` (`id`) +); +CREATE ALGORITHM=UNDEFINED SQL SECURITY DEFINER VIEW `v1` AS +SELECT +`t1`.`f0` AS `f0`, +`t1`.`f1` AS `f1`, +`t2`.`f02` AS `f02`, +`t2`.`f03` AS `f03` +FROM +(`t1` LEFT JOIN `t2` ON((`t1`.`id` = `t2`.`f02`))); +CREATE FUNCTION `f1`( +p0 BIGINT(20) UNSIGNED +) +RETURNS bigint(20) unsigned +DETERMINISTIC +CONTAINS SQL +SQL SECURITY DEFINER +COMMENT '' +BEGIN +DECLARE k0 INTEGER UNSIGNED DEFAULT 0; +DECLARE lResult INTEGER UNSIGNED DEFAULT 0; +SET k0 = 0; +WHILE k0 < 1 DO +SELECT COUNT(*) as `f00` INTO lResult FROM `v1` WHERE `v1`.`f0` = p0; -- BUG +SET k0 = k0 + 1; +END WHILE; +RETURN(k0); +END| +SELECT `f1`(1); +`f1`(1) +1 +SELECT `f1`(1); +`f1`(1) +1 +SELECT `f1`(1); +`f1`(1) +1 +SELECT `f1`(1); +`f1`(1) +1 +DROP FUNCTION f1; +DROP VIEW v1; +DROP TABLE t1, t2; # ----------------------------------------------------------------- # -- End of 5.5 tests. 
# ----------------------------------------------------------------- diff --git a/mysql-test/std_data/mdev6020-mysql-bin.000001 b/mysql-test/std_data/mdev6020-mysql-bin.000001 Binary files differnew file mode 100644 index 00000000000..49853674e9f --- /dev/null +++ b/mysql-test/std_data/mdev6020-mysql-bin.000001 diff --git a/mysql-test/std_data/new-format-relay-log-win.info b/mysql-test/std_data/new-format-relay-log-win.info new file mode 100644 index 00000000000..e00383b5565 --- /dev/null +++ b/mysql-test/std_data/new-format-relay-log-win.info @@ -0,0 +1,6 @@ +5 +.\slave-relay-bin.000001 +4 + +0 +0 diff --git a/mysql-test/std_data/new-format-relay-log.info b/mysql-test/std_data/new-format-relay-log.info new file mode 100644 index 00000000000..883dec1f66b --- /dev/null +++ b/mysql-test/std_data/new-format-relay-log.info @@ -0,0 +1,6 @@ +5 +./slave-relay-bin.000001 +4 + +0 +0 diff --git a/mysql-test/std_data/old-format-relay-log-win.info b/mysql-test/std_data/old-format-relay-log-win.info new file mode 100644 index 00000000000..7673de6b956 --- /dev/null +++ b/mysql-test/std_data/old-format-relay-log-win.info @@ -0,0 +1,4 @@ +.\slave-relay-bin.000001 +4 + +0 diff --git a/mysql-test/std_data/old-format-relay-log.info b/mysql-test/std_data/old-format-relay-log.info new file mode 100644 index 00000000000..6043b4058f6 --- /dev/null +++ b/mysql-test/std_data/old-format-relay-log.info @@ -0,0 +1,4 @@ +./slave-relay-bin.000001 +4 + +0 diff --git a/mysql-test/suite/archive/partition_archive.result b/mysql-test/suite/archive/partition_archive.result index bb3e531a2ed..eb1fca46522 100644 --- a/mysql-test/suite/archive/partition_archive.result +++ b/mysql-test/suite/archive/partition_archive.result @@ -127,3 +127,29 @@ select count(*) from t1; count(*) 100 drop table t1; +# +#BUG 18618561: FAILED ALTER TABLE ENGINE CHANGE WITH PARTITIONS +# CORRUPTS FRM +CREATE TABLE t1 (fld1 INT PRIMARY KEY) ENGINE= MYISAM PARTITION BY HASH(fld1) +PARTITIONS 5; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `fld1` int(11) NOT NULL, + PRIMARY KEY (`fld1`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY HASH (fld1) +PARTITIONS 5 */ +ALTER TABLE t1 ENGINE= ARCHIVE; +ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options") +#After the patch, the ENGINE is correctly displayed as MyISAM +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `fld1` int(11) NOT NULL, + PRIMARY KEY (`fld1`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY HASH (fld1) +PARTITIONS 5 */ +#Cleanup. +DROP TABLE t1; diff --git a/mysql-test/suite/archive/partition_archive.test b/mysql-test/suite/archive/partition_archive.test index be2abeada73..899f266c09c 100644 --- a/mysql-test/suite/archive/partition_archive.test +++ b/mysql-test/suite/archive/partition_archive.test @@ -129,3 +129,21 @@ show create table t1; select count(*) from t1; drop table t1; + +--echo # +--echo #BUG 18618561: FAILED ALTER TABLE ENGINE CHANGE WITH PARTITIONS +--echo # CORRUPTS FRM + +CREATE TABLE t1 (fld1 INT PRIMARY KEY) ENGINE= MYISAM PARTITION BY HASH(fld1) +PARTITIONS 5; +SHOW CREATE TABLE t1; + +--replace_regex /#sql-[0-9a-f_]*/#sql-temporary/ +--error ER_CANT_CREATE_TABLE +ALTER TABLE t1 ENGINE= ARCHIVE; + +--echo #After the patch, the ENGINE is correctly displayed as MyISAM +SHOW CREATE TABLE t1; + +--echo #Cleanup. 
+DROP TABLE t1; diff --git a/mysql-test/suite/binlog/r/binlog_unsafe.result b/mysql-test/suite/binlog/r/binlog_unsafe.result index 3c5b27c6eaf..5b6adf5600e 100644 --- a/mysql-test/suite/binlog/r/binlog_unsafe.result +++ b/mysql-test/suite/binlog/r/binlog_unsafe.result @@ -2706,8 +2706,6 @@ Warnings: Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave. INSERT INTO insert_2_keys VALUES (1, 2) ON DUPLICATE KEY UPDATE a=VALUES(a)+10, b=VALUES(b)+10; -Warnings: -Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. INSERT... ON DUPLICATE KEY UPDATE on a table with more than one UNIQUE KEY is unsafe DROP TABLE filler_table; DROP TABLE insert_table; DROP TABLE update_table; diff --git a/mysql-test/suite/binlog/t/binlog_killed.test b/mysql-test/suite/binlog/t/binlog_killed.test index 0a2ba084d78..73759ee5aa5 100644 --- a/mysql-test/suite/binlog/t/binlog_killed.test +++ b/mysql-test/suite/binlog/t/binlog_killed.test @@ -353,6 +353,10 @@ drop function bug27563; # common cleanup # +connection default; +disconnect con1; +disconnect con2; + drop table t1,t2,t3; --echo end of the tests diff --git a/mysql-test/suite/engines/README b/mysql-test/suite/engines/README index 0f89d5af67f..fde920dbc25 100644 --- a/mysql-test/suite/engines/README +++ b/mysql-test/suite/engines/README @@ -1,17 +1,13 @@ - - - -This directory includes a set of three test suites aimed as testing functionality -in an engine independent way, that is - the tests should work identically against -different engines. - +This directory includes a set of three test suites aimed as testing +functionality in an engine independent way, that is - the tests should +work identically against different engines. The following suites are included: 1) 'funcs' suite ------------- - A collection of functional tests covering basic engine and server functionality that can be run - against iany engine. + A collection of functional tests covering basic engine and server + functionality that can be run against any engine. To run the test suite: cd INSTALL_DIR/mysql-test @@ -19,7 +15,7 @@ The following suites are included: 2) 'iuds' suite ------------ - Similar to the above focused on insert/update/delete operations of different different data types. + Similar to the above focused on insert/update/delete operations of different data types. To run the test suite: cd INSTALL_DIR/mysql-test @@ -44,13 +40,13 @@ The following suites are included: 3) Copy the 'init_innodb.txt' file to 'init_<engine>.txt file and change its content to be "init_<engine>". 4) In the 't' directory copy the "init_innodb.test" file to "init_<engine>.test" and change the value of the '$engine' variable to <engine>. - 5) In the 'r' directory copy "the init_innodb.result" file to "init_<engine>.result" and change refrences + 5) In the 'r' directory copy "the init_innodb.result" file to "init_<engine>.result" and change references to 'InnoDB' to <engine>. 
Known Issues ------------ -1) The folowing tests in the 'iuds' suite: +1) The following tests in the 'iuds' suite: - delete_decimal - insert_decimal - update_decimal @@ -63,9 +59,9 @@ Known Issues - ix_unique_string_length (bug 52056, masked by an 'Out of memory error' on some 32-bit platforms) Add the '--force' option to prevent the test run from aborting. -3) Some of the rpl_xxx tests in the 'funcs' suite require a secific binlog_forat setting and will be - skipped otherwise. - -4) Some of the rpl_xxx tests in the 'funcs' suite will report a 'Statement unsafe for replication' warning - when run againsr a server configured to use statement based replication. +3) Some of the rpl_xxx tests in the 'funcs' suite require a specific + binlog_format setting and will be skipped otherwise. +4) Some of the rpl_xxx tests in the 'funcs' suite will report a + 'Statement unsafe for replication' warning when run against a + server configured to use statement based replication. diff --git a/mysql-test/suite/engines/iuds/r/insert_time.result b/mysql-test/suite/engines/iuds/r/insert_time.result index dceba37ae8e..6680886aad1 100644 --- a/mysql-test/suite/engines/iuds/r/insert_time.result +++ b/mysql-test/suite/engines/iuds/r/insert_time.result @@ -5035,9 +5035,9 @@ CAST(0.2359591234567e6 AS TIME) 23:59:59 SELECT CAST(0.2359591234567e+30 AS TIME); CAST(0.2359591234567e+30 AS TIME) -NULL +838:59:59 Warnings: -Warning 1292 Incorrect datetime value: '2.359591234567e29' +Warning 1292 Truncated incorrect time value: '2.359591234567e29' select cast('100:55:50' as time) < cast('24:00:00' as time); cast('100:55:50' as time) < cast('24:00:00' as time) 0 diff --git a/mysql-test/suite/engines/iuds/suite.opt b/mysql-test/suite/engines/iuds/suite.opt new file mode 100644 index 00000000000..e5648163418 --- /dev/null +++ b/mysql-test/suite/engines/iuds/suite.opt @@ -0,0 +1,2 @@ +--timezone=GMT-3 + diff --git a/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc b/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc new file mode 100644 index 00000000000..47494d60375 --- /dev/null +++ b/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc @@ -0,0 +1,147 @@ +--echo # +--echo # Testing robustness against random compression failures +--echo # + +--source include/not_embedded.inc +--source include/have_innodb.inc + +--disable_query_log +# record the file format in order to restore in the end. +--let $file_format_save = `SELECT @@innodb_file_format` +--let $file_format_max_save = `SELECT @@innodb_file_format_max` +--let $simulate_comp_failures_save = `SELECT @@innodb_simulate_comp_failures` + +--disable_warnings +DROP TABLE IF EXISTS t1; +SET GLOBAL INNODB_FILE_FORMAT='Barracuda'; +--enable_warnings + +# since this test generates lot of errors in log, suppress checking errors +call mtr.add_suppression(".*"); +--enable_query_log + +# create the table with compressed pages of size 8K. 
+CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; + +# percentage of compressions that will be forced to fail +SET GLOBAL innodb_simulate_comp_failures = 25; + +--disable_query_log +--disable_result_log + +let $num_inserts_ind = $num_inserts; +while ($num_inserts_ind) +{ + let $repeat = `select floor(rand() * 10)`; + eval +INSERT INTO t1(id, msg) +VALUES ($num_inserts_ind, REPEAT('abcdefghijklmnopqrstuvwxyz', $repeat)); + dec $num_inserts_ind; +} + +--enable_query_log +--enable_result_log + +SELECT COUNT(*) FROM t1; + +--disable_query_log +--disable_result_log + +# do random ops, making sure that some pages will get fragmented and reorganized. +let $num_ops_ind = $num_ops; + +while($num_ops_ind) +{ + let $idx = `select floor(rand()*$num_inserts)`; + let $insert_or_update = `select floor(rand()*3)`; + + let $repeat = `select floor(rand() * 9) + 1`; + + let $msg = query_get_value(`select repeat('abcdefghijklmnopqrstuvwxyz', $repeat) as x`, x, 1); + + let $single_or_multi = `select floor(rand()*10)`; + + if ($insert_or_update) + { + let $cnt = query_get_value(SELECT COUNT(*) cnt FROM t1 WHERE id=$idx, cnt, 1); + + if ($cnt) + { + let $update = `select floor(rand()*2)`; + + if ($update) + { + if ($single_or_multi) + { + eval UPDATE t1 SET msg=\"$msg\" WHERE id=$idx; + } + + if (!$single_or_multi) + { + eval UPDATE t1 SET msg=\"$msg\" WHERE id >= $idx - 100 AND id <= $idx + 100; + } + + } + + if (!$update) + { + if ($single_or_multi) + { + eval INSERT INTO t1(msg, id) VALUES (\"$msg\", $idx) ON DUPLICATE KEY UPDATE msg=VALUES(msg), id = VALUES(id); + } + + if (!$single_or_multi) + { + let $diff = 200; + + while ($diff) + { + eval INSERT INTO t1(msg, id) VALUES (\"$msg\", $idx + 100 - $diff) ON DUPLICATE KEY UPDATE msg=VALUES(msg), id=VALUES(id); + + dec $diff; + } + } + } + } + + if (!$cnt) + { + let $null_msg = `select floor(rand()*2)`; + + if ($null_msg) + { + eval INSERT INTO t1(id,msg) VALUES ($idx, NULL); + } + + if (!$null_msg) + { + eval INSERT INTO t1(id, msg) VALUES ($idx, \"$msg\"); + } + } + } + + if (!$insert_or_update) + { + if ($single_or_multi) + { + eval DELETE from t1 WHERE id=$idx; + } + + if (!$single_or_multi) + { + eval DELETE from t1 WHERE id >= $idx - 100 AND id <= $idx + 100; + } + } + + dec $num_ops_ind; +} + +# final cleanup +DROP TABLE t1; + +# restore innodb_file_format and innodb_file_format_max +eval SET GLOBAL innodb_file_format = \"$file_format_save\"; +eval SET GLOBAL innodb_file_format_max = \"$file_format_max_save\"; +eval SET GLOBAL innodb_simulate_comp_failures = $simulate_comp_failures_save; + +--enable_query_log diff --git a/mysql-test/suite/innodb/r/blob_unique2pk.result b/mysql-test/suite/innodb/r/blob_unique2pk.result new file mode 100644 index 00000000000..57953dc8624 --- /dev/null +++ b/mysql-test/suite/innodb/r/blob_unique2pk.result @@ -0,0 +1,15 @@ +create table t1 (f1 tinyblob not null) engine=innodb; +alter table t1 add unique index (f1(255)); +drop table t1; +create table t1 (f1 tinyblob not null) engine=innodb; +alter table t1 add unique index (f1(356)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` tinyblob NOT NULL, + UNIQUE KEY `f1` (`f1`(255)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +drop table t1; +create table t1 (f1 point not null) engine=innodb; +alter table t1 add unique index (f1); +drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb-alter-table-disk-full.result 
b/mysql-test/suite/innodb/r/innodb-alter-table-disk-full.result new file mode 100644 index 00000000000..ffeacae7951 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb-alter-table-disk-full.result @@ -0,0 +1,50 @@ +create table t1(a int not null primary key, b int) engine=innodb; +create procedure innodb_insert_proc (repeat_count int) +begin +declare current_num int; +set current_num = 0; +while current_num < repeat_count do +insert into t1 values(current_num, current_num); +set current_num = current_num + 1; +end while; +end// +commit; +set autocommit=0; +call innodb_insert_proc(10000); +commit; +set autocommit=1; +set DEBUG_DBUG='+d,ib_os_aio_func_io_failure_28'; +alter table t1 add testcol int; +ERROR HY000: The table 't1' is full +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +set DEBUG_DBUG='+d,ib_os_aio_func_io_failure_28_2'; +alter table t1 add testcol int; +ERROR HY000: The table 't1' is full +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +set DEBUG_DBUG=NULL; +alter table t1 add testcol2 int; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + `testcol2` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +select count(1) from t1; +count(1) +10000 +drop procedure innodb_insert_proc; +drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb-fk.result b/mysql-test/suite/innodb/r/innodb-fk.result new file mode 100644 index 00000000000..cf883d83874 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb-fk.result @@ -0,0 +1,35 @@ +# +# Bug #18806829 OPENING INNODB TABLES WITH MANY FOREIGN KEY +# REFERENCES IS SLOW/CRASHES SEMAPHORE +# +create table t1 (f1 int primary key) engine=innodb; +insert into t1 values (5); +insert into t1 values (2882); +insert into t1 values (10); +update t1 set f1 = 28 where f1 = 2882; +select * from fk_120; +f1 +5 +10 +28 +select * from fk_1; +f1 +5 +10 +28 +select * from fk_50; +f1 +5 +10 +28 +drop table t1; +# +# Check if restrict is working fine. 
+# +create table t1 (f1 int primary key) engine=innodb; +delete from t1 where f1 = 29; +ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`fk_29`, CONSTRAINT `pc29` FOREIGN KEY (`f1`) REFERENCES `t1` (`f1`)) +select * from fk_29; +f1 +29 +drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb-stats-initialize-failure.result b/mysql-test/suite/innodb/r/innodb-stats-initialize-failure.result new file mode 100644 index 00000000000..ef2d3182b92 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb-stats-initialize-failure.result @@ -0,0 +1,32 @@ +call mtr.add_suppression("InnoDB: Warning: Index.*"); +set DEBUG_DBUG='+d,ib_ha_innodb_stat_not_initialized'; +create table t1(a int not null primary key, b int, c int, key(b), key(c)) engine=innodb; +create procedure innodb_insert_proc (repeat_count int) +begin +declare current_num int; +set current_num = 0; +while current_num < repeat_count do +insert into t1 values(current_num, current_num, current_num); +set current_num = current_num + 1; +end while; +end// +commit; +set autocommit=0; +call innodb_insert_proc(10000); +commit; +set autocommit=1; +select count(1) from t1; +count(1) +10000 +select count(1) from t1 where a between 5 and 100; +count(1) +96 +select count(1) from t1 where b between 5 and 256; +count(1) +252 +select count(1) from t1 where c between 7 and 787; +count(1) +781 +set DEBUG_DBUG=NULL; +drop procedure innodb_insert_proc; +drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb_bug34300.result b/mysql-test/suite/innodb/r/innodb_bug34300.result index ae9fee81ad7..bf07febca73 100644 --- a/mysql-test/suite/innodb/r/innodb_bug34300.result +++ b/mysql-test/suite/innodb/r/innodb_bug34300.result @@ -1,4 +1,3 @@ +ERROR 42000: Row size too large (> ####). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline. 
f4 f8 -xxx zzz f4 f8 -xxx zzz diff --git a/mysql-test/suite/innodb/r/innodb_simulate_comp_failures.result b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures.result new file mode 100644 index 00000000000..cb7a3b9c282 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures.result @@ -0,0 +1,8 @@ +# +# Testing robustness against random compression failures +# +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SET GLOBAL innodb_simulate_comp_failures = 25; +SELECT COUNT(*) FROM t1; +COUNT(*) +100000 diff --git a/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result new file mode 100644 index 00000000000..92ae1637ad5 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result @@ -0,0 +1,8 @@ +# +# Testing robustness against random compression failures +# +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SET GLOBAL innodb_simulate_comp_failures = 25; +SELECT COUNT(*) FROM t1; +COUNT(*) +1000 diff --git a/mysql-test/suite/innodb/t/blob_unique2pk.test b/mysql-test/suite/innodb/t/blob_unique2pk.test new file mode 100644 index 00000000000..ff6720690dd --- /dev/null +++ b/mysql-test/suite/innodb/t/blob_unique2pk.test @@ -0,0 +1,20 @@ +--source include/have_innodb.inc + + +# +# Bug#16368875 INNODB: FAILING ASSERTION: PRIMARY_KEY_NO == -1 || PRIMARY_KEY_NO == 0 +# +create table t1 (f1 tinyblob not null) engine=innodb; +alter table t1 add unique index (f1(255)); +drop table t1; + +create table t1 (f1 tinyblob not null) engine=innodb; +alter table t1 add unique index (f1(356)); +show create table t1; +drop table t1; + +create table t1 (f1 point not null) engine=innodb; +alter table t1 add unique index (f1); +drop table t1; + + diff --git a/mysql-test/suite/innodb/t/innodb-alter-table-disk-full-master.opt b/mysql-test/suite/innodb/t/innodb-alter-table-disk-full-master.opt new file mode 100644 index 00000000000..9c2ee7846b6 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb-alter-table-disk-full-master.opt @@ -0,0 +1 @@ +--innodb-use-native-aio=0 diff --git a/mysql-test/suite/innodb/t/innodb-alter-table-disk-full.test b/mysql-test/suite/innodb/t/innodb-alter-table-disk-full.test new file mode 100644 index 00000000000..4e3a7bfdae6 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb-alter-table-disk-full.test @@ -0,0 +1,47 @@ +# MDEV-6288: Innodb causes server crash after disk full, then can't ALTER TABLE any more +--source include/have_innodb.inc + +# DEBUG_SYNC must be compiled in. 
+--source include/have_debug_sync.inc + +create table t1(a int not null primary key, b int) engine=innodb; + +delimiter //; +create procedure innodb_insert_proc (repeat_count int) +begin + declare current_num int; + set current_num = 0; + while current_num < repeat_count do + insert into t1 values(current_num, current_num); + set current_num = current_num + 1; + end while; +end// +delimiter ;// +commit; + +set autocommit=0; +call innodb_insert_proc(10000); +commit; +set autocommit=1; + +# This caused crash earlier +set DEBUG_DBUG='+d,ib_os_aio_func_io_failure_28'; +--error 1114 +alter table t1 add testcol int; +show create table t1; + +# This caused crash earlier +set DEBUG_DBUG='+d,ib_os_aio_func_io_failure_28_2'; +--error 1114 +alter table t1 add testcol int; +show create table t1; + +set DEBUG_DBUG=NULL; +alter table t1 add testcol2 int; +show create table t1; + +select count(1) from t1; + +drop procedure innodb_insert_proc; +drop table t1; + diff --git a/mysql-test/suite/innodb/t/innodb-fk.test b/mysql-test/suite/innodb/t/innodb-fk.test new file mode 100644 index 00000000000..9839cd2d084 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb-fk.test @@ -0,0 +1,86 @@ +--source include/have_innodb.inc +--source include/not_embedded.inc + +--echo # +--echo # Bug #18806829 OPENING INNODB TABLES WITH MANY FOREIGN KEY +--echo # REFERENCES IS SLOW/CRASHES SEMAPHORE +--echo # + +create table t1 (f1 int primary key) engine=innodb; +insert into t1 values (5); +insert into t1 values (2882); +insert into t1 values (10); + +let $fk_tables = 120; + +--disable_query_log +let $i = $fk_tables; +while ($i) +{ + eval create table fk_$i (f1 int primary key, + constraint pc$i foreign key (f1) references t1(f1) + on delete cascade on update cascade) engine=innodb; + eval insert into fk_$i values (5); + eval insert into fk_$i values (2882); + eval insert into fk_$i values (10); + dec $i; +} +--enable_query_log + +--source include/restart_mysqld.inc + +update t1 set f1 = 28 where f1 = 2882; + +select * from fk_120; +select * from fk_1; +select * from fk_50; + +--disable_query_log +let $i = $fk_tables; +while ($i) +{ + eval drop table fk_$i; + dec $i; +} +--enable_query_log + +drop table t1; + +--echo # +--echo # Check if restrict is working fine. +--echo # + +create table t1 (f1 int primary key) engine=innodb; + +let $fk_tables = 30; + +--disable_query_log +let $i = $fk_tables; +while ($i) +{ + eval create table fk_$i (f1 int primary key, + constraint pc$i foreign key (f1) references t1(f1) + on delete restrict on update restrict) engine=innodb; + eval insert into t1 values ($i); + eval insert into fk_$i values ($i); + dec $i; +} +--enable_query_log + +--source include/restart_mysqld.inc + +--error ER_ROW_IS_REFERENCED_2 +delete from t1 where f1 = 29; +select * from fk_29; + +--disable_query_log +let $i = $fk_tables; +while ($i) +{ + eval drop table fk_$i; + dec $i; +} +--enable_query_log + +drop table t1; + diff --git a/mysql-test/suite/innodb/t/innodb-stats-initialize-failure.test b/mysql-test/suite/innodb/t/innodb-stats-initialize-failure.test new file mode 100644 index 00000000000..e480f0caf07 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb-stats-initialize-failure.test @@ -0,0 +1,39 @@ +# MDEV-6424: Mariadb server crashes with assertion failure in file ha_innodb.cc +--source include/have_innodb.inc + +# DEBUG_SYNC must be compiled in. 
+--source include/have_debug_sync.inc + +call mtr.add_suppression("InnoDB: Warning: Index.*"); +# This caused crash earlier +set DEBUG_DBUG='+d,ib_ha_innodb_stat_not_initialized'; +create table t1(a int not null primary key, b int, c int, key(b), key(c)) engine=innodb; + +delimiter //; +create procedure innodb_insert_proc (repeat_count int) +begin + declare current_num int; + set current_num = 0; + while current_num < repeat_count do + insert into t1 values(current_num, current_num, current_num); + set current_num = current_num + 1; + end while; +end// +delimiter ;// +commit; + +set autocommit=0; +call innodb_insert_proc(10000); +commit; +set autocommit=1; + +select count(1) from t1; +select count(1) from t1 where a between 5 and 100; +select count(1) from t1 where b between 5 and 256; +select count(1) from t1 where c between 7 and 787; + +set DEBUG_DBUG=NULL; + +drop procedure innodb_insert_proc; +drop table t1; + diff --git a/mysql-test/suite/innodb/t/innodb_bug34300.test b/mysql-test/suite/innodb/t/innodb_bug34300.test index 11682ad7828..800f326707d 100644 --- a/mysql-test/suite/innodb/t/innodb_bug34300.test +++ b/mysql-test/suite/innodb/t/innodb_bug34300.test @@ -1,31 +1,32 @@ ---source include/have_innodb.inc # # Bug#34300 Tinyblob & tinytext fields currupted after export/import and alter in 5.1 # http://bugs.mysql.com/34300 # +-- source include/have_innodb.inc + -- disable_query_log -- disable_result_log -call mtr.add_suppression("InnoDB: Warning: a long semaphore wait:"); -call mtr.add_suppression("the age of the last checkpoint is"); +call mtr.add_suppression("InnoDB: The total blob data length"); # set packet size and reconnect let $max_packet=`select @@global.max_allowed_packet`; SET @@global.max_allowed_packet=16777216; --connect (newconn, localhost, root,,) -DROP TABLE IF EXISTS bug34300; +--enable_result_log + CREATE TABLE bug34300 ( f4 TINYTEXT, f6 MEDIUMTEXT, f8 TINYBLOB ) ENGINE=InnoDB; +--replace_regex /\(> [0-9]*\)/(> ####)/ +--error ER_TOO_BIG_ROWSIZE INSERT INTO bug34300 VALUES ('xxx', repeat('a', 8459264), 'zzz'); --- enable_result_log - SELECT f4, f8 FROM bug34300; ALTER TABLE bug34300 ADD COLUMN (f10 INT); diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures-master.opt b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures-master.opt new file mode 100644 index 00000000000..fae32059249 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures-master.opt @@ -0,0 +1,2 @@ +--innodb-file-per-table + diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures.test b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures.test new file mode 100644 index 00000000000..a940a926f85 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures.test @@ -0,0 +1,8 @@ +--source include/big_test.inc +# test takes too long with valgrind +--source include/not_valgrind.inc +--let $num_inserts = 100000 +--let $num_ops = 30000 +--source suite/innodb/include/innodb_simulate_comp_failures.inc +# clean exit +--exit diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small-master.opt b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small-master.opt new file mode 100644 index 00000000000..fae32059249 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small-master.opt @@ -0,0 +1,2 @@ +--innodb-file-per-table + diff --git a/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small.test b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small.test new file mode 100644 index 
00000000000..1677a092e0c --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_simulate_comp_failures_small.test @@ -0,0 +1,5 @@ +--let $num_inserts = 1000 +--let $num_ops = 30 +--source suite/innodb/include/innodb_simulate_comp_failures.inc +# clean exit +--exit diff --git a/mysql-test/suite/maria/maria3.result b/mysql-test/suite/maria/maria3.result index 021cc8fc357..74eed530bd9 100644 --- a/mysql-test/suite/maria/maria3.result +++ b/mysql-test/suite/maria/maria3.result @@ -314,6 +314,7 @@ aria_max_sort_file_size 9223372036853727232 aria_pagecache_age_threshold 300 aria_pagecache_buffer_size 8388608 aria_pagecache_division_limit 100 +aria_pagecache_file_hash_size 512 aria_page_checksum OFF aria_recover NORMAL aria_repair_threads 1 diff --git a/mysql-test/suite/maria/maria_partition.result b/mysql-test/suite/maria/maria_partition.result index 372230c0b71..1c4f0fbaf05 100644 --- a/mysql-test/suite/maria/maria_partition.result +++ b/mysql-test/suite/maria/maria_partition.result @@ -33,3 +33,18 @@ insert into t1 values (2); select * from t2 left join t1 on (t2.a=t1.a) where t2.a='bbb'; a a drop table t1,t2; +CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=Aria PARTITION BY KEY() PARTITIONS 2; +CREATE VIEW v1 AS SELECT * FROM t1; +LOCK TABLE v1 WRITE; +CREATE TABLE v1 (i INT); +ERROR HY000: Table 'v1' was not locked with LOCK TABLES +INSERT INTO v1 VALUES (1); +UNLOCK TABLES; +check table t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT * FROM t1; +pk +1 +drop table t1; +drop view v1; diff --git a/mysql-test/suite/maria/maria_partition.test b/mysql-test/suite/maria/maria_partition.test index 47571c7a4be..ca2651bcdc3 100644 --- a/mysql-test/suite/maria/maria_partition.test +++ b/mysql-test/suite/maria/maria_partition.test @@ -49,6 +49,28 @@ insert into t1 values (2); select * from t2 left join t1 on (t2.a=t1.a) where t2.a='bbb'; drop table t1,t2; +# +# MDEV-6493 +# Assertion `table->file->stats.records > 0 || error' +# failure, or 'Invalid write' valgrind warnings, or crash on scenario +# with Aria table, view, LOCK TABLES # +# + +CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=Aria PARTITION BY KEY() PARTITIONS 2; +CREATE VIEW v1 AS SELECT * FROM t1; + +LOCK TABLE v1 WRITE; +--error 1100 +CREATE TABLE v1 (i INT); +INSERT INTO v1 VALUES (1); +UNLOCK TABLES; +check table t1; + +SELECT * FROM t1; + +drop table t1; +drop view v1; + # Set defaults back --disable_result_log --disable_query_log diff --git a/mysql-test/suite/multi_source/gtid.result b/mysql-test/suite/multi_source/gtid.result index 8e6028afa17..ce926ddc995 100644 --- a/mysql-test/suite/multi_source/gtid.result +++ b/mysql-test/suite/multi_source/gtid.result @@ -141,8 +141,8 @@ include/reset_master_slave.inc SET GLOBAL gtid_domain_id=0; STOP ALL SLAVES; Warnings: -Note 1938 SLAVE 'slave2' stopped Note 1938 SLAVE 'slave1' stopped +Note 1938 SLAVE 'slave2' stopped include/reset_master_slave.inc SET GLOBAL gtid_domain_id=0; include/reset_master_slave.inc diff --git a/mysql-test/suite/multi_source/gtid.test b/mysql-test/suite/multi_source/gtid.test index 0ab486b1f41..7a085823693 100644 --- a/mysql-test/suite/multi_source/gtid.test +++ b/mysql-test/suite/multi_source/gtid.test @@ -141,12 +141,14 @@ DROP TABLE t3; SET GLOBAL gtid_domain_id=0; --let $wait_condition= SELECT COUNT(*)=0 FROM information_schema.tables WHERE table_name IN ("t1", "t2", "t3") AND table_schema = "test" --source include/wait_condition.inc +--sorted_result STOP ALL SLAVES; --source reset_master_slave.inc --disconnect slave1 --connection slave2 SET GLOBAL 
gtid_domain_id=0; +--sorted_result STOP ALL SLAVES; --source reset_master_slave.inc --disconnect slave2 diff --git a/mysql-test/suite/multi_source/gtid_ignore_duplicates.result b/mysql-test/suite/multi_source/gtid_ignore_duplicates.result index 9bd09330ae7..5426091b635 100644 --- a/mysql-test/suite/multi_source/gtid_ignore_duplicates.result +++ b/mysql-test/suite/multi_source/gtid_ignore_duplicates.result @@ -245,8 +245,8 @@ a SET GLOBAL gtid_domain_id=0; STOP ALL SLAVES; Warnings: -Note 1938 SLAVE 'c2a' stopped Note 1938 SLAVE 'b2a' stopped +Note 1938 SLAVE 'c2a' stopped SET GLOBAL slave_parallel_threads= @old_parallel; SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates; SET GLOBAL gtid_domain_id=0; diff --git a/mysql-test/suite/multi_source/gtid_ignore_duplicates.test b/mysql-test/suite/multi_source/gtid_ignore_duplicates.test index 3e98d7c76cb..cf1c750fc19 100644 --- a/mysql-test/suite/multi_source/gtid_ignore_duplicates.test +++ b/mysql-test/suite/multi_source/gtid_ignore_duplicates.test @@ -261,24 +261,28 @@ SELECT * FROM t1 WHERE a >= 20 ORDER BY a; # Clean up. --connection server_1 SET GLOBAL gtid_domain_id=0; +--sorted_result STOP ALL SLAVES; SET GLOBAL slave_parallel_threads= @old_parallel; SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates; --connection server_2 SET GLOBAL gtid_domain_id=0; +--sorted_result STOP ALL SLAVES; SET GLOBAL slave_parallel_threads= @old_parallel; SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates; --connection server_3 SET GLOBAL gtid_domain_id=0; +--sorted_result STOP ALL SLAVES; SET GLOBAL slave_parallel_threads= @old_parallel; SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates; --connection server_4 SET GLOBAL gtid_domain_id=0; +--sorted_result STOP ALL SLAVES; SET GLOBAL slave_parallel_threads= @old_parallel; SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates; diff --git a/mysql-test/suite/multi_source/load_data.result b/mysql-test/suite/multi_source/load_data.result index ef55abc5321..3a04156ce6a 100644 --- a/mysql-test/suite/multi_source/load_data.result +++ b/mysql-test/suite/multi_source/load_data.result @@ -23,8 +23,8 @@ drop table t1; drop table t2; stop all slaves; Warnings: -Note 1938 SLAVE 'master2' stopped Note 1938 SLAVE '' stopped +Note 1938 SLAVE 'master2' stopped include/reset_master_slave.inc include/reset_master_slave.inc include/reset_master_slave.inc diff --git a/mysql-test/suite/multi_source/load_data.test b/mysql-test/suite/multi_source/load_data.test index e6e1399cbf3..ca2391a9c8d 100644 --- a/mysql-test/suite/multi_source/load_data.test +++ b/mysql-test/suite/multi_source/load_data.test @@ -58,6 +58,7 @@ drop table t2; --sync_with_master 0,'master2' --connection slave +--sorted_result stop all slaves; --source reset_master_slave.inc diff --git a/mysql-test/suite/perfschema/r/ortho_iter.result b/mysql-test/suite/perfschema/r/ortho_iter.result index b6abc8b9489..ac8d538dbfa 100644 --- a/mysql-test/suite/perfschema/r/ortho_iter.result +++ b/mysql-test/suite/perfschema/r/ortho_iter.result @@ -1,4 +1,6 @@ drop procedure if exists check_instrument; +truncate table performance_schema.events_statements_summary_by_digest; +flush status; create procedure check_instrument(in instr_name varchar(128)) begin declare count_expected integer; diff --git a/mysql-test/suite/perfschema/t/ortho_iter.test b/mysql-test/suite/perfschema/t/ortho_iter.test index 0856e266386..a9990e17409 100644 --- a/mysql-test/suite/perfschema/t/ortho_iter.test +++ b/mysql-test/suite/perfschema/t/ortho_iter.test @@ -9,6 +9,10 @@ drop 
procedure if exists check_instrument; --enable_warnings +# reset counters +truncate table performance_schema.events_statements_summary_by_digest; +flush status; + delimiter $; create procedure check_instrument(in instr_name varchar(128)) begin diff --git a/mysql-test/suite/plugins/r/false_dupes-6543.result b/mysql-test/suite/plugins/r/false_dupes-6543.result new file mode 100644 index 00000000000..22accaaae8a --- /dev/null +++ b/mysql-test/suite/plugins/r/false_dupes-6543.result @@ -0,0 +1,5 @@ +install soname 'ha_federated'; +install soname 'ha_federated'; +install soname 'ha_federatedx'; +ERROR HY000: Function 'FEDERATED' already exists +uninstall soname 'ha_federated'; diff --git a/mysql-test/suite/plugins/r/feedback_plugin_load.result b/mysql-test/suite/plugins/r/feedback_plugin_load.result index 443b91bf0cc..e5d1296ae53 100644 --- a/mysql-test/suite/plugins/r/feedback_plugin_load.result +++ b/mysql-test/suite/plugins/r/feedback_plugin_load.result @@ -10,3 +10,12 @@ FEEDBACK_SEND_RETRY_WAIT 60 FEEDBACK_SEND_TIMEOUT 60 FEEDBACK_URL http://mariadb.org/feedback_plugin/post FEEDBACK_USER_INFO mysql-test +SELECT VARIABLE_VALUE>0, VARIABLE_NAME FROM INFORMATION_SCHEMA.FEEDBACK +WHERE VARIABLE_NAME LIKE 'Collation used %' +ORDER BY VARIABLE_NAME; +VARIABLE_VALUE>0 VARIABLE_NAME +1 Collation used binary +1 Collation used latin1_bin +1 Collation used latin1_swedish_ci +1 Collation used utf8_bin +1 Collation used utf8_general_ci diff --git a/mysql-test/suite/plugins/r/feedback_plugin_send.result b/mysql-test/suite/plugins/r/feedback_plugin_send.result index 2852240ca5b..304294b417e 100644 --- a/mysql-test/suite/plugins/r/feedback_plugin_send.result +++ b/mysql-test/suite/plugins/r/feedback_plugin_send.result @@ -4,12 +4,21 @@ ACTIVE select * from information_schema.feedback where variable_name like 'feed%' and variable_name not like '%_uid'; VARIABLE_NAME VARIABLE_VALUE -FEEDBACK used 2 +FEEDBACK used 3 FEEDBACK version 1.1 FEEDBACK_SEND_RETRY_WAIT 60 FEEDBACK_SEND_TIMEOUT 60 FEEDBACK_URL http://mariadb.org/feedback_plugin/post FEEDBACK_USER_INFO mysql-test +SELECT VARIABLE_VALUE>0, VARIABLE_NAME FROM INFORMATION_SCHEMA.FEEDBACK +WHERE VARIABLE_NAME LIKE 'Collation used %' +ORDER BY VARIABLE_NAME; +VARIABLE_VALUE>0 VARIABLE_NAME +1 Collation used binary +1 Collation used latin1_bin +1 Collation used latin1_swedish_ci +1 Collation used utf8_bin +1 Collation used utf8_general_ci feedback plugin: report to 'http://mariadb.org/feedback_plugin/post' was sent feedback plugin: server replied 'ok' feedback plugin: report to 'http://mariadb.org/feedback_plugin/post' was sent diff --git a/mysql-test/suite/plugins/t/false_dupes-6543.test b/mysql-test/suite/plugins/t/false_dupes-6543.test new file mode 100644 index 00000000000..ebdbe00e47c --- /dev/null +++ b/mysql-test/suite/plugins/t/false_dupes-6543.test @@ -0,0 +1,18 @@ +# +# MDEV-6543 Crash if enable 'federatedx' when 'federated' plugin already enabled, and vice-versa +# +if(!$HA_FEDERATED_SO) { + skip Needs ha_federated.so; +} +if(!$HA_FEDERATEDX_SO) { + skip Needs ha_federatedx.so; +} + +install soname 'ha_federated'; +# note: no error below! install soname ignores already loaded plugins +install soname 'ha_federated'; +# note: an error here, even though plugin name is the same! 
+--error ER_UDF_EXISTS +install soname 'ha_federatedx'; +uninstall soname 'ha_federated'; + diff --git a/mysql-test/suite/plugins/t/feedback_plugin_load.test b/mysql-test/suite/plugins/t/feedback_plugin_load.test index 5ad301667b4..b1bcb2a6bbd 100644 --- a/mysql-test/suite/plugins/t/feedback_plugin_load.test +++ b/mysql-test/suite/plugins/t/feedback_plugin_load.test @@ -8,3 +8,16 @@ select plugin_status from information_schema.plugins where plugin_name='feedback --sorted_result select * from information_schema.feedback where variable_name like 'feed%' and variable_name not like '%_uid'; + + +# Embedded server does not use the table mysqld.user and thus +# does not automatically use latin1_bin on startup. Use it manually. +--disable_query_log +if (`SELECT VERSION() LIKE '%embedded%'`) +{ + DO _latin1'test' COLLATE latin1_bin; +} +--enable_query_log +SELECT VARIABLE_VALUE>0, VARIABLE_NAME FROM INFORMATION_SCHEMA.FEEDBACK +WHERE VARIABLE_NAME LIKE 'Collation used %' +ORDER BY VARIABLE_NAME; diff --git a/mysql-test/suite/roles/ip-6401.result b/mysql-test/suite/roles/ip-6401.result new file mode 100644 index 00000000000..a9876eb8273 --- /dev/null +++ b/mysql-test/suite/roles/ip-6401.result @@ -0,0 +1,13 @@ +create role r1; +create user foo@'127.0.0.1'; +grant r1 to foo@'127.0.0.1'; +show grants; +Grants for foo@127.0.0.1 +GRANT r1 TO 'foo'@'127.0.0.1' +GRANT USAGE ON *.* TO 'foo'@'127.0.0.1' +set role r1; +select * from information_schema.enabled_roles; +ROLE_NAME +r1 +drop user foo@'127.0.0.1'; +drop role r1; diff --git a/mysql-test/suite/roles/ip-6401.test b/mysql-test/suite/roles/ip-6401.test new file mode 100644 index 00000000000..34d8553afa3 --- /dev/null +++ b/mysql-test/suite/roles/ip-6401.test @@ -0,0 +1,13 @@ +--source include/not_embedded.inc +create role r1; +create user foo@'127.0.0.1'; +grant r1 to foo@'127.0.0.1'; + +--connect (con1,127.0.0.1,foo,,) +show grants; +set role r1; +select * from information_schema.enabled_roles; + +connection default; +drop user foo@'127.0.0.1'; +drop role r1; diff --git a/mysql-test/suite/rpl/r/create_or_replace2.result b/mysql-test/suite/rpl/r/create_or_replace2.result new file mode 100644 index 00000000000..b96a0f8ae13 --- /dev/null +++ b/mysql-test/suite/rpl/r/create_or_replace2.result @@ -0,0 +1,31 @@ +include/master-slave.inc +[connection master] +# +# MDEV-6525 ; Problems with CREATE OR REPLACE under lock +# +CREATE TABLE t1 (a INT) ENGINE=InnoDB; +CREATE FUNCTION f1() RETURNS INT RETURN ( SELECT MAX(a) FROM t1 ); +connect con1,localhost,root,,test; +CREATE TEMPORARY TABLE tmp (b INT) ENGINE=InnoDB; +LOCK TABLE t1 WRITE; +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; +CREATE OR REPLACE TABLE t1 LIKE tmp; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `b` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +connection default; +set session lock_wait_timeout=1; +SELECT f1(); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +set session lock_wait_timeout=@@global.lock_wait_timeout; +SELECT f1(); +connection con1; +unlock tables; +connection default; +ERROR 42S22: Unknown column 'a' in 'field list' +disconnect con1; +drop function f1; +drop table t1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/failed_create_view-6409.result b/mysql-test/suite/rpl/r/failed_create_view-6409.result new file mode 100644 index 00000000000..6b04f2960d1 --- /dev/null +++ b/mysql-test/suite/rpl/r/failed_create_view-6409.result @@ -0,0 +1,15 @@ +create table v1 (a int); +include/master-slave.inc 
+[connection master] +create table t1 (a int); +create view v1 as select * from t1; +ERROR 42S01: Table 'v1' already exists +show tables; +Tables_in_test +t1 +v1 +show tables; +Tables_in_test +t1 +drop table if exists t1, v1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/kill_hard-6290.result b/mysql-test/suite/rpl/r/kill_hard-6290.result new file mode 100644 index 00000000000..27b62416368 --- /dev/null +++ b/mysql-test/suite/rpl/r/kill_hard-6290.result @@ -0,0 +1,4 @@ +include/master-slave.inc +[connection master] +kill user test2@nohost; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_checksum.result b/mysql-test/suite/rpl/r/rpl_checksum.result index 2229ab63ee4..d88258f3b65 100644 --- a/mysql-test/suite/rpl/r/rpl_checksum.result +++ b/mysql-test/suite/rpl/r/rpl_checksum.result @@ -128,7 +128,33 @@ insert into t3 value (1, @@global.binlog_checksum); drop table t1, t2, t3; set @@global.binlog_checksum = @master_save_binlog_checksum; set @@global.master_verify_checksum = @save_master_verify_checksum; +*** Bug#59123 / MDEV-5799: INCIDENT_EVENT checksum written to error log as garbage characters *** +CREATE TABLE t4 (a INT PRIMARY KEY); +INSERT INTO t4 VALUES (1); +SET sql_log_bin=0; +CALL mtr.add_suppression("\\[ERROR\\] Can't generate a unique log-filename"); +SET sql_log_bin=1; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET debug_dbug= '+d,binlog_inject_new_name_error'; +FLUSH LOGS; +ERROR HY000: Can't generate a unique log-filename master-bin.(1-999) + +SET debug_dbug= @old_dbug; +INSERT INTO t4 VALUES (2); +include/wait_for_slave_sql_error.inc [errno=1590] +Last_SQL_Error = 'The incident LOST_EVENTS occured on the master. Message: error writing to the binary log' +SELECT * FROM t4 ORDER BY a; +a +1 +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter= 1; +include/start_slave.inc +SELECT * FROM t4 ORDER BY a; +a +1 +2 set @@global.binlog_checksum = @slave_save_binlog_checksum; set @@global.slave_sql_verify_checksum = @save_slave_sql_verify_checksum; End of tests +DROP TABLE t4; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_gtid_basic.result b/mysql-test/suite/rpl/r/rpl_gtid_basic.result index fd33221814d..e8e5bf36f84 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_basic.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_basic.result @@ -61,13 +61,7 @@ include/stop_slave.inc INSERT INTO t1 VALUES (5, "m1a"); INSERT INTO t2 VALUES (5, "i1a"); CHANGE MASTER TO master_host = '127.0.0.1', master_port = MASTER_PORT, -MASTER_USE_GTID=SLAVE_POS; -SET GLOBAL sql_slave_skip_counter=1; -ERROR HY000: When using GTID, @@sql_slave_skip_counter can not be used. Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position. -CHANGE MASTER TO master_host = '127.0.0.1', master_port = MASTER_PORT, MASTER_USE_GTID=CURRENT_POS; -SET GLOBAL sql_slave_skip_counter=10; -ERROR HY000: When using GTID, @@sql_slave_skip_counter can not be used. Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position. 
include/start_slave.inc SELECT * FROM t1 ORDER BY a; a b @@ -191,14 +185,8 @@ master-bin.000001 # include/show_binlog_events.inc Log_name Pos Event_type Server_id End_log_pos Info master-bin.000001 # Format_desc # # SERVER_VERSION, BINLOG_VERSION -master-bin.000001 # Gtid_list # # [1-2-20,0-1-10,0-3-30] +master-bin.000001 # Gtid_list # # [#-#-#] master-bin.000001 # Binlog_checkpoint # # master-bin.000001 -SELECT @@GLOBAL.gtid_binlog_pos; -@@GLOBAL.gtid_binlog_pos -1-2-20,0-3-30 -SELECT @@GLOBAL.gtid_binlog_state; -@@GLOBAL.gtid_binlog_state -1-2-20,0-1-10,0-3-30 SET GLOBAL gtid_binlog_state = @old_state; ERROR HY000: This operation is not allowed if any GTID has been logged to the binary log. Run RESET MASTER first to erase the log RESET MASTER; @@ -258,7 +246,7 @@ a include/stop_slave.inc SET gtid_domain_id= 1; INSERT INTO t1 VALUES (3); -SET @pos= '1-1-1,0-1-110'; +SET @pos= 'POS'; SELECT master_gtid_wait(@pos, 0); master_gtid_wait(@pos, 0) -1 @@ -322,5 +310,108 @@ master_gtid_wait('2-1-10') 0 master_gtid_wait('2-1-10') 0 +*** Test sql_gtid_slave_pos when used with GTID *** +include/stop_slave.inc +SET gtid_domain_id=2; +SET gtid_seq_no=1000; +INSERT INTO t1 VALUES (10); +INSERT INTO t1 VALUES (11); +SET sql_slave_skip_counter= 1; +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a +11 +SELECT IF(LOCATE("2-1-1001", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1001 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +include/stop_slave.inc +SET gtid_domain_id=2; +SET gtid_seq_no=1010; +INSERT INTO t1 VALUES (12); +INSERT INTO t1 VALUES (13); +SET sql_slave_skip_counter= 2; +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a +11 +13 +SELECT IF(LOCATE("2-1-1011", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1011 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +include/stop_slave.inc +SET gtid_domain_id=2; +SET gtid_seq_no=1020; +INSERT INTO t1 VALUES (14); +INSERT INTO t1 VALUES (15); +INSERT INTO t1 VALUES (16); +SET sql_slave_skip_counter= 3; +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a +11 +13 +15 +16 +SELECT IF(LOCATE("2-1-1022", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1022 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +include/stop_slave.inc +SET gtid_domain_id=2; +SET gtid_seq_no=1030; +INSERT INTO t1 VALUES (17); +INSERT INTO t1 VALUES (18); +INSERT INTO t1 VALUES (19); +SET sql_slave_skip_counter= 5; +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a +11 +13 +15 +16 +19 +SELECT IF(LOCATE("2-1-1032", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1032 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +include/stop_slave.inc +SET gtid_domain_id=3; +SET gtid_seq_no=100; +CREATE TABLE t2 (a INT PRIMARY KEY); +DROP TABLE t2; +SET gtid_domain_id=2; +SET gtid_seq_no=1040; +INSERT INTO t1 VALUES (20); +SET @saved_mode= @@GLOBAL.slave_ddl_exec_mode; +SET GLOBAL slave_ddl_exec_mode=STRICT; +SET sql_slave_skip_counter=1; +START SLAVE UNTIL master_gtid_pos="3-1-100"; +include/sync_with_master_gtid.inc +include/wait_for_slave_sql_to_stop.inc +SELECT * FROM t2; +ERROR 42S02: Table 'test.t2' doesn't exist +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! 
expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +SET sql_log_bin=0; +CALL mtr.add_suppression("Slave: Unknown table 'test\\.t2' Error_code: 1051"); +SET sql_log_bin=1; +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1051] +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter=2; +include/start_slave.inc +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +a +20 +SELECT IF(LOCATE("3-1-101", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-101 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +SELECT IF(LOCATE("2-1-1040", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1040 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +status +Ok +SET GLOBAL slave_ddl_exec_mode= @saved_mode; DROP TABLE t1; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_gtid_crash.result b/mysql-test/suite/rpl/r/rpl_gtid_crash.result index fdbd1cc6898..debd107221f 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_crash.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_crash.result @@ -46,7 +46,7 @@ master-bin.000002 # master-bin.000003 # SHOW BINLOG EVENTS IN 'master-bin.000003' LIMIT 1,1; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000003 # Gtid_list # # [1-1-2,2-1-1,0-1-1] +master-bin.000003 # Gtid_list # # # SET SESSION debug_dbug="+d,crash_dispatch_command_before"; SELECT 1; Got one of the listed errors @@ -58,7 +58,7 @@ master-bin.000003 # master-bin.000004 # SHOW BINLOG EVENTS IN 'master-bin.000004' LIMIT 1,1; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000004 # Gtid_list # # [1-1-2,0-1-1,2-1-1] +master-bin.000004 # Gtid_list # # # SELECT * FROM t1 ORDER BY a; a 1 @@ -112,5 +112,98 @@ SHOW VARIABLES like 'gtid_strict_mode'; Variable_name Value gtid_strict_mode ON include/start_slave.inc +*** MDEV-6462: Incorrect recovery on a slave reconnecting to crashed master *** +set sql_log_bin= 0; +call mtr.add_suppression("Error writing file 'master-bin'"); +set sql_log_bin= 1; +set sql_log_bin= 0; +call mtr.add_suppression("The server_id of master server changed in the middle of GTID"); +call mtr.add_suppression("Unexpected change of master binlog file name in the middle of GTID"); +set sql_log_bin= 1; +SET GLOBAL debug_dbug="+d,inject_error_writing_xid"; +BEGIN; +INSERT INTO t1 VALUES (11); +COMMIT; +ERROR HY000: Error writing file 'master-bin' (errno: 11 "Resource temporarily unavailable") +SET GLOBAL debug_dbug="+d,crash_dispatch_command_before"; +COMMIT; +Got one of the listed errors +SELECT @@GLOBAL.server_id; +@@GLOBAL.server_id +3 +SELECT * from t1 WHERE a > 10 ORDER BY a; +a +# Wait 30 seconds for SQL thread to catch up with IO thread +SELECT * from t1 WHERE a > 10 ORDER BY a; +a +# Repeat this with additional transactions on the master +SET GLOBAL debug_dbug="+d,inject_error_writing_xid"; +BEGIN; +INSERT INTO t1 VALUES (12); +COMMIT; +ERROR HY000: Error writing file 'master-bin' (errno: 11 "Resource temporarily unavailable") +SET GLOBAL debug_dbug="+d,crash_dispatch_command_before"; +COMMIT; +Got one of the listed errors +SELECT @@GLOBAL.server_id; +@@GLOBAL.server_id +1 +INSERT INTO t1 VALUES (13); +INSERT INTO t1 VALUES (14); +SELECT * from t1 WHERE a > 10 ORDER BY a; +a +13 +14 +include/save_master_gtid.inc +include/sync_with_master_gtid.inc +SELECT * from 
t1 WHERE a > 10 ORDER BY a; +a +13 +14 +SET GLOBAL debug_dbug="+d,inject_error_writing_xid"; +BEGIN; +INSERT INTO t1 VALUES (21); +COMMIT; +ERROR HY000: Error writing file 'master-bin' (errno: 11 "Resource temporarily unavailable") +SET GLOBAL debug_dbug="+d,crash_dispatch_command_before"; +COMMIT; +Got one of the listed errors +SELECT @@GLOBAL.server_id; +@@GLOBAL.server_id +1 +SELECT * from t1 WHERE a > 10 ORDER BY a; +a +13 +14 +# Wait 30 seconds for SQL thread to catch up with IO thread +SELECT * from t1 WHERE a > 10 ORDER BY a; +a +13 +14 +# Repeat this with additional transactions on the master +SET GLOBAL debug_dbug="+d,inject_error_writing_xid"; +BEGIN; +INSERT INTO t1 VALUES (22); +COMMIT; +ERROR HY000: Error writing file 'master-bin' (errno: 11 "Resource temporarily unavailable") +SET GLOBAL debug_dbug="+d,crash_dispatch_command_before"; +COMMIT; +Got one of the listed errors +INSERT INTO t1 VALUES (23); +INSERT INTO t1 VALUES (24); +SELECT * from t1 WHERE a > 10 ORDER BY a; +a +13 +14 +23 +24 +include/save_master_gtid.inc +include/sync_with_master_gtid.inc +SELECT * from t1 WHERE a > 10 ORDER BY a; +a +13 +14 +23 +24 DROP TABLE t1; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result b/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result new file mode 100644 index 00000000000..204615201d9 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_gtid_errorlog.result @@ -0,0 +1,42 @@ +include/master-slave.inc +[connection master] +*** Test MDEV-6120, output of current GTID when a replication error is logged to the errorlog *** +CREATE TABLE t1(a INT PRIMARY KEY); +include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=slave_pos; +INSERT INTO t1 VALUES (1); +SET gtid_seq_no=100; +INSERT INTO t1 VALUES (2); +INSERT INTO t1 VALUES (3); +INSERT INTO t1 VALUES (4); +SET sql_log_bin=0; +INSERT INTO t1 VALUES (2); +SET sql_log_bin=1; +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1062] +include/stop_slave.inc +SET GLOBAL gtid_slave_pos= "0-1-100"; +include/start_slave.inc +SELECT * FROM t1 ORDER BY a; +a +1 +2 +3 +4 +SET @dbug_save= @@debug_dbug; +SET debug_dbug= '+d,incident_database_resync_on_replace'; +REPLACE INTO t1 VALUES (5); +SET debug_dbug= @dbug_save; +include/wait_for_slave_sql_error.inc [errno=1590] +include/stop_slave.inc +SET sql_slave_skip_counter=1; +include/start_slave.inc +SELECT * FROM t1 ORDER BY a; +a +1 +2 +3 +4 +5 +DROP TABLE t1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_gtid_master_promote.result b/mysql-test/suite/rpl/r/rpl_gtid_master_promote.result index 132c01f5f55..599c35164dd 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_master_promote.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_master_promote.result @@ -119,7 +119,7 @@ a b 2 3 *** Now replicate all extra changes from 3,4,5 to 2, in preparation for making 2 the new master. 
*** CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_3; -START SLAVE UNTIL master_gtid_pos = "1-1-1,0-1-3,3-1-7,2-1-4"; +START SLAVE UNTIL master_gtid_pos = "SERVER3_POS"; include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; a @@ -142,7 +142,7 @@ a b 3 1 3 3 CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_4; -START SLAVE UNTIL master_gtid_pos = "1-1-7,0-1-3,3-1-4,2-1-1"; +START SLAVE UNTIL master_gtid_pos = "SERVER4_POS"; include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; a @@ -168,7 +168,7 @@ a b 3 1 3 3 CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_5; -START SLAVE UNTIL master_gtid_pos = "1-1-4,0-1-3,3-1-1,2-1-7"; +START SLAVE UNTIL master_gtid_pos = "SERVER5_POS"; include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; a diff --git a/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result b/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result index ddcbaf8dffd..60c8e4666b9 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result @@ -34,10 +34,10 @@ master-bin.000003 # Gtid_list # # [0-1-3] FLUSH LOGS; SHOW BINLOG EVENTS IN 'master-bin.000004' LIMIT 1,1; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000004 # Gtid_list # # [1-1-1,0-1-4] +master-bin.000004 # Gtid_list # # # SHOW BINLOG EVENTS IN 'master-bin.000005' LIMIT 1,1; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000005 # Gtid_list # # [1-1-1,0-1-4] +master-bin.000005 # Gtid_list # # # show binary logs; Log_name File_size master-bin.000002 # diff --git a/mysql-test/suite/rpl/r/rpl_gtid_until.result b/mysql-test/suite/rpl/r/rpl_gtid_until.result index 53100af4c71..d86b5cac78c 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_until.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_until.result @@ -168,7 +168,7 @@ a include/stop_slave.inc CREATE TABLE t3 (a INT); DROP TABLE t3; -START SLAVE UNTIL master_gtid_pos='1-1-5,2-1-5,0-1-6'; +START SLAVE UNTIL master_gtid_pos='UNTIL_CONDITION'; include/wait_for_slave_to_stop.inc SHOW CREATE TABLE t3; Table Create Table diff --git a/mysql-test/suite/rpl/r/rpl_heartbeat_debug.result b/mysql-test/suite/rpl/r/rpl_heartbeat_debug.result new file mode 100644 index 00000000000..b9dec686e4a --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_heartbeat_debug.result @@ -0,0 +1,25 @@ +include/master-slave.inc +[connection master] +include/stop_slave.inc +set @restore_slave_net_timeout= @@global.slave_net_timeout; +set @@global.slave_net_timeout= 10; +show status like 'Slave_heartbeat_period';; +Variable_name Slave_heartbeat_period +Value 60.000 +SET @save_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,simulate_slave_heartbeat_network_error"; +CALL mtr.add_suppression('SET @master_heartbeat_period to master failed with error'); +CALL mtr.add_suppression('Master command COM_REGISTER_SLAVE failed: failed registering on master, reconnecting to try again'); +include/start_slave.inc +drop table if exists t1; +CREATE TABLE t1 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES (1); +SELECT * FROM t1; +a +1 +drop table t1; +include/stop_slave.inc +SET GLOBAL debug_dbug=@save_dbug; +set @@global.slave_net_timeout= @restore_slave_net_timeout; +include/start_slave.inc +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_known_bugs_detection.result b/mysql-test/suite/rpl/r/rpl_known_bugs_detection.result index 26c6d96e786..ea738b710fd 100644 --- a/mysql-test/suite/rpl/r/rpl_known_bugs_detection.result +++ 
b/mysql-test/suite/rpl/r/rpl_known_bugs_detection.result @@ -5,8 +5,6 @@ call mtr.add_suppression("Unsafe statement written to the binary log using state CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b INT, UNIQUE(b)); INSERT INTO t1(b) VALUES(1),(1),(2) ON DUPLICATE KEY UPDATE t1.b=10; -Warnings: -Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. INSERT... ON DUPLICATE KEY UPDATE on a table with more than one UNIQUE KEY is unsafe SELECT * FROM t1; a b 1 10 diff --git a/mysql-test/suite/rpl/r/rpl_loaddata.result b/mysql-test/suite/rpl/r/rpl_loaddata.result index 310c88cc419..eaf3fb6a947 100644 --- a/mysql-test/suite/rpl/r/rpl_loaddata.result +++ b/mysql-test/suite/rpl/r/rpl_loaddata.result @@ -4,12 +4,12 @@ select last_insert_id(); last_insert_id() 0 create table t1(a int not null auto_increment, b int, primary key(a) ); -load data infile '../../std_data/rpl_loaddata.dat' into table t1; +load data infile '../../std_data/rpl_loaddata.dat' into table t1; select last_insert_id(); last_insert_id() 1 create temporary table t2 (day date,id int(9),category enum('a','b','c'),name varchar(60)); -load data infile '../../std_data/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines; +load data infile '../../std_data/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines; create table t3 (day date,id int(9),category enum('a','b','c'),name varchar(60)); insert into t3 select * from t2; select * from t1; @@ -26,7 +26,7 @@ drop table t2; drop table t3; create table t1(a int, b int, unique(b)); insert into t1 values(1,10); -load data infile '../../std_data/rpl_loaddata.dat' into table t1; +load data infile '../../std_data/rpl_loaddata.dat' into table t1; call mtr.add_suppression("Slave SQL.*Error .Duplicate entry .10. for key .b.. 
on query.* error.* 1062"); call mtr.add_suppression("Slave SQL.*Query caused different errors on master and slave.*Error on master:.*error code=1062.*Error on slave:.*error.* 0"); include/wait_for_slave_sql_error_and_skip.inc [errno=1062] @@ -34,7 +34,7 @@ include/check_slave_no_error.inc set sql_log_bin=0; delete from t1; set sql_log_bin=1; -load data infile '../../std_data/rpl_loaddata.dat' into table t1; +load data infile '../../std_data/rpl_loaddata.dat' into table t1; include/wait_for_slave_sql_error.inc [errno=1062] include/stop_slave_io.inc change master to master_user='test'; @@ -45,7 +45,7 @@ start slave; set sql_log_bin=0; delete from t1; set sql_log_bin=1; -load data infile '../../std_data/rpl_loaddata.dat' into table t1; +load data infile '../../std_data/rpl_loaddata.dat' into table t1; include/wait_for_slave_sql_error.inc [errno=1062] stop slave; reset slave; @@ -53,7 +53,7 @@ include/check_slave_no_error.inc reset master; create table t2 (day date,id int(9),category enum('a','b','c'),name varchar(60), unique(day)) engine=MyISAM; -load data infile '../../std_data/rpl_loaddata2.dat' into table t2 fields +load data infile '../../std_data/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines; ERROR 23000: Duplicate entry '2003-03-22' for key 'day' @@ -68,7 +68,7 @@ day id category name 2003-03-22 2161 c asdf alter table t2 drop key day; delete from t2; -load data infile '../../std_data/rpl_loaddata2.dat' into table t2 fields +load data infile '../../std_data/rpl_loaddata2.dat' into table t2 fields terminated by ',' optionally enclosed by '%' escaped by '@' lines terminated by '\n##\n' starting by '>' ignore 1 lines; ERROR 23000: Duplicate entry '2003-03-22' for key 'day' @@ -76,7 +76,7 @@ include/wait_for_slave_sql_error.inc [errno=0] drop table t1, t2; drop table t1, t2; CREATE TABLE t1 (word CHAR(20) NOT NULL PRIMARY KEY) ENGINE=INNODB; -LOAD DATA INFILE "../../std_data/words.dat" INTO TABLE t1; +LOAD DATA INFILE "../../std_data/words.dat" INTO TABLE t1; ERROR 23000: Duplicate entry 'Aarhus' for key 'PRIMARY' DROP TABLE t1; include/rpl_reset.inc @@ -88,16 +88,16 @@ use b48297_db1; CREATE TABLE t1 (c1 VARCHAR(256)) engine=MyISAM;; use b42897_db2; ### assertion: works with cross-referenced database -LOAD DATA LOCAL INFILE 'MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE b48297_db1.t1; +LOAD DATA LOCAL INFILE 'MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE b48297_db1.t1; use b48297_db1; ### assertion: works with fully qualified name on current database -LOAD DATA LOCAL INFILE 'MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE b48297_db1.t1; +LOAD DATA LOCAL INFILE 'MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE b48297_db1.t1; ### assertion: works without fully qualified name on current database -LOAD DATA LOCAL INFILE 'MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE t1; +LOAD DATA LOCAL INFILE 'MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE t1; ### create connection without default database ### connect (conn2,localhost,root,,*NO-ONE*); ### assertion: works without stating the default database -LOAD DATA LOCAL INFILE 'MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE b48297_db1.t1; +LOAD DATA LOCAL INFILE 'MYSQLTEST_VARDIR/std_data/loaddata5.dat' INTO TABLE b48297_db1.t1; ### disconnect and switch back to master connection use b48297_db1; include/diff_tables.inc [master:b48297_db1.t1, slave:b48297_db1.t1] diff --git 
a/mysql-test/suite/rpl/r/rpl_mdev6020.result b/mysql-test/suite/rpl/r/rpl_mdev6020.result new file mode 100644 index 00000000000..0855f578cfc --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_mdev6020.result @@ -0,0 +1,49 @@ +include/master-slave.inc +[connection master] +include/stop_slave.inc +include/rpl_stop_server.inc [server_number=1] +include/rpl_start_server.inc [server_number=1] +SET SQL_LOG_BIN=0; +ALTER TABLE mysql.gtid_slave_pos ENGINE = InnoDB; +SET SQL_LOG_BIN=1; +SET @old_engine= @@GLOBAL.default_storage_engine; +SET GLOBAL default_storage_engine=InnoDB; +SET @old_parallel= @@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=12; +CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1, master_user='root', master_log_file='master-bin.000001', master_log_pos=4; +include/start_slave.inc +SET SQL_LOG_BIN=0; +ALTER TABLE mysql.gtid_slave_pos ENGINE = InnoDB; +SET SQL_LOG_BIN=1; +SELECT @@gtid_slave_pos; +@@gtid_slave_pos +0-1-1381 +CHECKSUM TABLE table0_int_autoinc, table0_key_pk_parts_2_int_autoinc, table100_int_autoinc, table100_key_pk_parts_2_int_autoinc, table10_int_autoinc, table10_key_pk_parts_2_int_autoinc, table1_int_autoinc, table1_key_pk_parts_2_int_autoinc, table2_int_autoinc, table2_key_pk_parts_2_int_autoinc; +Table Checksum +test.table0_int_autoinc 3623174395 +test.table0_key_pk_parts_2_int_autoinc 2888328157 +test.table100_int_autoinc 3624823809 +test.table100_key_pk_parts_2_int_autoinc 3316583308 +test.table10_int_autoinc 1615053718 +test.table10_key_pk_parts_2_int_autoinc 4147461080 +test.table1_int_autoinc 478809705 +test.table1_key_pk_parts_2_int_autoinc 3032208641 +test.table2_int_autoinc 854763867 +test.table2_key_pk_parts_2_int_autoinc 4231615291 +include/stop_slave.inc +SET GLOBAL default_storage_engine= @old_engine; +SET GLOBAL slave_parallel_threads=@old_parallel; +SET sql_log_bin=0; +DROP TABLE table0_int_autoinc; +DROP TABLE table0_key_pk_parts_2_int_autoinc; +DROP TABLE table100_int_autoinc; +DROP TABLE table100_key_pk_parts_2_int_autoinc; +DROP TABLE table10_int_autoinc; +DROP TABLE table10_key_pk_parts_2_int_autoinc; +DROP TABLE table1_int_autoinc; +DROP TABLE table1_key_pk_parts_2_int_autoinc; +DROP TABLE table2_int_autoinc; +DROP TABLE table2_key_pk_parts_2_int_autoinc; +SET sql_log_bin=1; +include/start_slave.inc +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_mdev6386.result b/mysql-test/suite/rpl/r/rpl_mdev6386.result new file mode 100644 index 00000000000..352b9d07fef --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_mdev6386.result @@ -0,0 +1,56 @@ +include/master-slave.inc +[connection master] +ALTER TABLE mysql.gtid_slave_pos ENGINE = InnoDB; +FLUSH LOGS; +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) Engine=InnoDB; +include/stop_slave.inc +SET sql_log_bin= 0; +INSERT INTO t1 VALUES (1, 2); +SET sql_log_bin= 1; +CHANGE MASTER TO master_use_gtid= current_pos; +Contents on slave before: +SELECT * FROM t1 ORDER BY a; +a b +1 2 +SET @old_parallel= @@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=8; +CREATE TEMPORARY TABLE t2 LIKE t1; +INSERT INTO t2 VALUE (1, 1); +INSERT INTO t2 VALUE (2, 1); +INSERT INTO t2 VALUE (3, 1); +INSERT INTO t2 VALUE (4, 1); +INSERT INTO t2 VALUE (5, 1); +INSERT INTO t1 SELECT * FROM t2; +DROP TEMPORARY TABLE t2; +Contents on master: +SELECT * FROM t1 ORDER BY a; +a b +1 1 +2 1 +3 1 +4 1 +5 1 +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1062] +STOP SLAVE IO_THREAD; +Contents on slave on slave error: +SELECT * FROM t1 ORDER BY a; +a b +1 2 +SET 
sql_log_bin= 0; +DELETE FROM t1 WHERE a=1; +SET sql_log_bin= 1; +include/start_slave.inc +Contents on slave after: +SELECT * FROM t1 ORDER BY a; +a b +1 1 +2 1 +3 1 +4 1 +5 1 +DROP TABLE t1; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads= @old_parallel; +include/start_slave.inc +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel.result b/mysql-test/suite/rpl/r/rpl_parallel.result index 20b75cbbdab..fb86d46b01e 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel.result +++ b/mysql-test/suite/rpl/r/rpl_parallel.result @@ -314,7 +314,7 @@ SET debug_sync='now WAIT_FOR t1_ready'; KILL THD_ID; SET debug_sync='now WAIT_FOR t2_killed'; SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1964] +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] STOP SLAVE IO_THREAD; SELECT * FROM t3 WHERE a >= 30 ORDER BY a; a b @@ -398,7 +398,7 @@ SET debug_sync='now WAIT_FOR t1_ready'; KILL THD_ID; SET debug_sync='now WAIT_FOR t2_killed'; SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1964] +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] SET debug_sync='RESET'; SET GLOBAL slave_parallel_threads=0; SET GLOBAL slave_parallel_threads=10; @@ -481,7 +481,7 @@ SET debug_sync='now WAIT_FOR t1_ready'; KILL THD_ID; SET debug_sync='now WAIT_FOR t2_killed'; SET debug_sync='now SIGNAL t1_cont'; -include/wait_for_slave_sql_error.inc [errno=1317,1964] +include/wait_for_slave_sql_error.inc [errno=1317,1927,1964] SELECT * FROM t3 WHERE a >= 50 ORDER BY a; a b 51 51 @@ -793,6 +793,7 @@ SET debug_sync='now WAIT_FOR master_queued2'; SET debug_sync='now SIGNAL master_cont1'; SET debug_sync='RESET'; include/start_slave.inc +include/stop_slave.inc SELECT * FROM t4 ORDER BY a; a b 1 NULL @@ -801,6 +802,42 @@ a b 5 NULL 6 6 7 NULL +DELETE FROM t4; +INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +UPDATE t4 SET b=NULL WHERE a=6; +SET debug_sync='now WAIT_FOR master_queued1'; +SET @old_format= @@SESSION.binlog_format; +SET binlog_format='statement'; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +DELETE FROM t4 WHERE b <= 1; +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; +SET @old_format=@@GLOBAL.binlog_format; +SET debug_sync='RESET'; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,disable_thd_need_ordering_with"; +include/start_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SELECT * FROM t4 ORDER BY a; +a b +1 NULL +2 2 +3 NULL +4 4 +5 NULL +6 NULL +SET @last_gtid= 'GTID'; +SELECT IF(@@gtid_slave_pos LIKE CONCAT('%',@last_gtid,'%'), "GTID found ok", +CONCAT("GTID ", @last_gtid, " not found in gtid_slave_pos=", @@gtid_slave_pos)) +AS result; +result +GTID found ok +SELECT "ROW FOUND" AS `Is the row found?` + FROM mysql.gtid_slave_pos +WHERE CONCAT(domain_id, "-", server_id, "-", seq_no) = @last_gtid; +Is the row found? 
+ROW FOUND *** MDEV-5938: Exec_master_log_pos not updated at log rotate in parallel replication *** include/stop_slave.inc SET GLOBAL slave_parallel_threads=1; @@ -819,11 +856,78 @@ test_check OK test_check OK +*** MDEV_6435: Incorrect error handling when query binlogged partially on master with "killed" error *** +CREATE TABLE t6 (a INT) ENGINE=MyISAM; +CREATE TRIGGER tr AFTER INSERT ON t6 FOR EACH ROW SET @a = 1; +SET @old_format= @@binlog_format; +SET binlog_format= statement; +SET debug_sync='sp_head_execute_before_loop SIGNAL ready WAIT_FOR cont'; +INSERT INTO t6 VALUES (1), (2), (3); +SET debug_sync='now WAIT_FOR ready'; +KILL QUERY CONID; +SET debug_sync='now SIGNAL cont'; +ERROR 70100: Query execution was interrupted +SET binlog_format= @old_format; +SET debug_sync='RESET'; +SET debug_sync='RESET'; +include/wait_for_slave_sql_error.inc [errno=1317] +STOP SLAVE IO_THREAD; +SET GLOBAL gtid_slave_pos= 'AFTER_ERROR_GTID_POS'; +include/start_slave.inc +INSERT INTO t6 VALUES (4); +SELECT * FROM t6 ORDER BY a; +a +1 +4 +SELECT * FROM t6 ORDER BY a; +a +4 +*** MDEV-6551: Some replication errors are ignored if slave_parallel_threads > 0 *** +INSERT INTO t2 VALUES (31); +include/save_master_gtid.inc +include/sync_with_master_gtid.inc +include/stop_slave.inc +SET GLOBAL slave_parallel_threads= 0; +include/start_slave.inc +SET sql_log_bin= 0; +INSERT INTO t2 VALUES (32); +SET sql_log_bin= 1; +INSERT INTO t2 VALUES (32); +FLUSH LOGS; +INSERT INTO t2 VALUES (33); +INSERT INTO t2 VALUES (34); +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; +a +31 +32 +33 +34 +include/save_master_gtid.inc +include/wait_for_slave_sql_error.inc [errno=1062] +include/stop_slave_io.inc +SET GLOBAL slave_parallel_threads=10; +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1062] +START SLAVE SQL_THREAD; +include/wait_for_slave_sql_error.inc [errno=1062] +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; +a +31 +32 +SET sql_slave_skip_counter= 1; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; +a +31 +32 +33 +34 include/stop_slave.inc SET GLOBAL slave_parallel_threads=@old_parallel_threads; include/start_slave.inc SET DEBUG_SYNC= 'RESET'; DROP function foo; -DROP TABLE t1,t2,t3,t4,t5; +DROP TABLE t1,t2,t3,t4,t5,t6; SET DEBUG_SYNC= 'RESET'; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_retry.result b/mysql-test/suite/rpl/r/rpl_parallel_retry.result new file mode 100644 index 00000000000..cd12d92430b --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_retry.result @@ -0,0 +1,196 @@ +include/rpl_init.inc [topology=1->2] +*** Test retry of transactions that fail to replicate due to deadlock or similar temporary error. 
*** +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1,1); +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +RETURN x; +END +|| +SET sql_log_bin=1; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=5; +include/start_slave.inc +SET sql_log_bin=0; +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) +RETURNS INT DETERMINISTIC +BEGIN +IF d1 != '' THEN +SET debug_sync = d1; +END IF; +IF d2 != '' THEN +SET debug_sync = d2; +END IF; +RETURN x; +END +|| +SET sql_log_bin=1; +include/stop_slave.inc +SET gtid_seq_no = 100; +BEGIN; +INSERT INTO t1 VALUES (2,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (3,1); +COMMIT; +SELECT * FROM t1 ORDER BY a; +a b +1 2 +2 1 +3 1 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100"; +include/start_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +retries +1 +SELECT * FROM t1 ORDER BY a; +a b +1 2 +2 1 +3 1 +*** Test that double retry works when the first retry also fails with temp error *** +include/stop_slave.inc +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 10; +BEGIN; +INSERT INTO t1 VALUES (4,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (5,1); +INSERT INTO t1 VALUES (6,1); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 ORDER BY a; +a b +1 3 +2 1 +3 1 +4 1 +5 1 +6 1 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100,rpl_parallel_simulate_double_temp_err_gtid_0_x_100"; +include/start_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +retries +2 +SELECT * FROM t1 ORDER BY a; +a b +1 3 +2 1 +3 1 +4 1 +5 1 +6 1 +*** Test too many retries, eventually causing failure. *** +include/stop_slave.inc +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 11; +BEGIN; +INSERT INTO t1 VALUES (7,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (8,1); +INSERT INTO t1 VALUES (9,1); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 ORDER BY a; +a b +1 4 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +SET sql_log_bin=0; +CALL mtr.add_suppression("Slave worker thread retried transaction 10 time\\(s\\) in vain, giving up"); +CALL mtr.add_suppression("Slave: Deadlock found when trying to get lock; try restarting transaction"); +SET sql_log_bin=1; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100,rpl_parallel_simulate_infinite_temp_err_gtid_0_x_100"; +START SLAVE; +include/wait_for_slave_sql_error.inc [errno=1213] +SET GLOBAL debug_dbug=@old_dbug; +retries +10 +SELECT * FROM t1 ORDER BY a; +a b +1 3 +2 1 +3 1 +4 1 +5 1 +6 1 +STOP SLAVE IO_THREAD; +include/start_slave.inc +SELECT * FROM t1 ORDER BY a; +a b +1 4 +2 1 +3 1 +4 1 +5 1 +6 1 +7 1 +8 1 +9 1 +*** Test retry of event group that spans multiple relay log files. 
*** +CREATE TABLE t2 (a int PRIMARY KEY, b BLOB) ENGINE=InnoDB; +INSERT INTO t2 VALUES (1,"Hulubullu"); +include/stop_slave.inc +SET @old_max= @@GLOBAL.max_relay_log_size; +SET GLOBAL max_relay_log_size=4096; +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 12; +BEGIN; +INSERT INTO t1 VALUES (10, 4); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a b +10 4 +SELECT a, LENGTH(b) FROM t2 ORDER BY a; +a LENGTH(b) +1 9 +2 5006 +3 5012 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100"; +include/start_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +retries +1 +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a b +10 4 +SELECT a, LENGTH(b) FROM t2 ORDER BY a; +a LENGTH(b) +1 9 +2 5006 +3 5012 +INSERT INTO t1 VALUES (11,11); +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +a b +10 4 +11 11 +SELECT a, LENGTH(b) FROM t2 ORDER BY a; +a LENGTH(b) +1 9 +2 5006 +3 5012 +4 5000 +SET GLOBAL max_relay_log_size=@old_max; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +DROP TABLE t1, t2; +DROP function foo; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_temptable.result b/mysql-test/suite/rpl/r/rpl_parallel_temptable.result new file mode 100644 index 00000000000..61eba2cab2f --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_temptable.result @@ -0,0 +1,123 @@ +include/rpl_init.inc [topology=1->2] +*** MDEV-6321: close_temporary_tables() in format description event not serialised correctly *** +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=5; +CHANGE MASTER TO master_use_gtid= current_pos; +include/start_slave.inc +CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(100) CHARACTER SET utf8); +include/stop_slave.inc +SET gtid_domain_id= 1; +INSERT INTO t1 VALUES (1, 0); +CREATE TEMPORARY TABLE t2 (a int); +SET gtid_domain_id= 2; +CREATE TEMPORARY TABLE t3 (a INT PRIMARY KEY); +CREATE TEMPORARY TABLE t4 (a int); +INSERT INTO t3 VALUES (100); +INSERT INTO t4 SELECT a+1 FROM t3; +INSERT INTO t2 VALUES (2), (4), (6), (8), (10), (12), (14), (16), (18), (20); +INSERT INTO t2 VALUES (3), (6), (9), (12), (15), (18); +INSERT INTO t2 VALUES (4), (8), (12), (16), (20); +INSERT INTO t3 SELECT a+2 FROM t4; +INSERT INTO t4 SELECT a+4 FROM t3; +INSERT INTO t2 VALUES (5), (10), (15), (20); +INSERT INTO t2 VALUES (6), (12), (18); +INSERT INTO t2 VALUES (7), (14); +INSERT INTO t2 VALUES (8), (16); +INSERT INTO t2 VALUES (9), (18); +INSERT INTO t2 VALUES (10), (20); +INSERT INTO t3 SELECT a+8 FROM t4; +INSERT INTO t4 SELECT a+16 FROM t3; +INSERT INTO t2 VALUES (11); +INSERT INTO t2 VALUES (12); +INSERT INTO t2 VALUES (13); +INSERT INTO t3 SELECT a+32 FROM t4; +INSERT INTO t2 VALUES (14); +INSERT INTO t2 VALUES (15); +INSERT INTO t2 VALUES (16); +INSERT INTO t4 SELECT a+64 FROM t3; +INSERT INTO t2 VALUES (17); +INSERT INTO t2 VALUES (18); +INSERT INTO t2 VALUES (19); +INSERT INTO t3 SELECT a+128 FROM t4; +INSERT INTO t2 VALUES (20); +INSERT INTO t1 SELECT a, a MOD 7 FROM t3; +INSERT INTO t1 SELECT a, a MOD 7 FROM t4; +INSERT INTO t1 SELECT a, COUNT(*) FROM t2 GROUP BY a; +FLUSH TABLES; +SET SESSION debug_dbug="+d,crash_dispatch_command_before"; +SELECT 1; +Got one of the listed errors +INSERT INTO t1 VALUES (0, 1); +include/start_slave.inc +SELECT * FROM t1 WHERE a <= 20 ORDER BY a; +a b +0 1 +1 0 +2 1 +3 1 +4 2 +5 1 +6 3 +7 1 +8 3 +9 2 +10 3 +11 1 +12 5 +13 1 +14 3 
+15 3 +16 4 +17 1 +18 5 +19 1 +20 5 +SELECT COUNT(*) FROM t1 WHERE a BETWEEN 100+0 AND 100+256; +COUNT(*) +55 +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 0 +*** Test that if master logged partial event group before crash, we finish that group correctly before executing format description event *** +include/stop_slave.inc +CALL mtr.add_suppression("Statement accesses nontransactional table as well as transactional or temporary table, and writes to any of them"); +SET gtid_domain_id= 1; +DELETE FROM t1; +ALTER TABLE t1 ENGINE=InnoDB; +CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY); +INSERT INTO t2 VALUES (1); +INSERT INTO t2 VALUES (2); +SET gtid_domain_id= 2; +CREATE TEMPORARY TABLE t3 (a INT PRIMARY KEY); +INSERT INTO t3 VALUES (10); +INSERT INTO t3 VALUES (20); +INSERT INTO t1 SELECT a, 'server_1' FROM t2; +INSERT INTO t1 SELECT a, 'default' FROM t3; +INSERT INTO t1 SELECT a+2, '+server_1' FROM t2; +FLUSH TABLES; +SET SESSION debug_dbug="+d,crash_before_writing_xid"; +INSERT INTO t1 SELECT a+4, '++server_1' FROM t2; +Got one of the listed errors +INSERT INTO t1 VALUES (0, 1); +include/save_master_gtid.inc +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t1 ORDER BY a; +a b +0 1 +1 server_1 +2 server_1 +3 +server_1 +4 +server_1 +10 default +20 default +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 0 +FLUSH LOGS; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc +DROP TABLE t1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_read_new_relay_log_info.result b/mysql-test/suite/rpl/r/rpl_read_new_relay_log_info.result new file mode 100644 index 00000000000..e659c3ee283 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_read_new_relay_log_info.result @@ -0,0 +1,14 @@ +include/master-slave.inc +[connection master] +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DROP TABLE t1; +==== Check that we can understand the new format of relay-log.info ==== +include/stop_slave.inc +RESET SLAVE; +# Read relay-log.info +START SLAVE IO_THREAD; +include/wait_for_slave_io_to_start.inc +# Check that relay log coordinates are equal to those saved in new-format_relay-log.info += , 0, slave-relay-bin.000001, 4 +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_read_old_relay_log_info.result b/mysql-test/suite/rpl/r/rpl_read_old_relay_log_info.result new file mode 100644 index 00000000000..7a9d3b795d8 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_read_old_relay_log_info.result @@ -0,0 +1,14 @@ +include/master-slave.inc +[connection master] +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DROP TABLE t1; +==== Check that we still understand the old format of relay-log.info ==== +include/stop_slave.inc +RESET SLAVE; +# Read relay-log.info +START SLAVE IO_THREAD; +include/wait_for_slave_io_to_start.inc +# Check that relay log coordinates are equal to those we saved in old-format_relay-log.info += , 0, slave-relay-bin.000001, 4 +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_uninstall_plugin.result b/mysql-test/suite/rpl/r/rpl_semi_sync_uninstall_plugin.result new file mode 100644 index 00000000000..0809af5f943 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_uninstall_plugin.result @@ -0,0 +1,63 @@ +include/master-slave.inc +[connection master] +call mtr.add_suppression("Read semi-sync reply network error"); +call mtr.add_suppression("Timeout waiting for reply of binlog"); +INSTALL PLUGIN 
rpl_semi_sync_master SONAME 'semisync_master'; +[connection slave] +INSTALL PLUGIN rpl_semi_sync_slave SONAME 'semisync_slave'; +UNINSTALL PLUGIN rpl_semi_sync_slave; +[connection master] +UNINSTALL PLUGIN rpl_semi_sync_master; +CREATE TABLE t1(i int); +INSERT INTO t1 values (1); +DROP TABLE t1; +[connection slave] +include/install_semisync.inc +[connection slave] +UNINSTALL PLUGIN rpl_semi_sync_slave; +Warnings: +Warning 1620 Plugin is busy and will be uninstalled on shutdown +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; +plugin_name plugin_status +rpl_semi_sync_slave DELETED +[connection master] +UNINSTALL PLUGIN rpl_semi_sync_master; +Warnings: +Warning 1620 Plugin is busy and will be uninstalled on shutdown +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; +plugin_name plugin_status +rpl_semi_sync_master DELETED +CREATE TABLE t1(i int); +INSERT INTO t1 values (2); +DROP TABLE t1; +[connection slave] +show status like "Rpl_semi_sync_slave_status"; +Variable_name Value +Rpl_semi_sync_slave_status ON +[connection master] +show status like "Rpl_semi_sync_master_status"; +Variable_name Value +Rpl_semi_sync_master_status ON +show status like "Rpl_semi_sync_master_clients"; +Variable_name Value +Rpl_semi_sync_master_clients 1 +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; +plugin_name plugin_status +rpl_semi_sync_master DELETED +[connection slave] +include/stop_slave.inc +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; +plugin_name plugin_status +[connection master] +create table t2 (a int); +drop table t2; +[connection slave] +include/start_slave.inc +select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%'; +plugin_name plugin_status +[connection master] +CREATE TABLE t1(i int); +INSERT INTO t1 values (3); +DROP TABLE t1; +[connection slave] +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_skip_incident.result b/mysql-test/suite/rpl/r/rpl_skip_incident.result new file mode 100644 index 00000000000..1dc0508af32 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_skip_incident.result @@ -0,0 +1,25 @@ +include/master-slave.inc +[connection master] +**** On Master **** +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1),(2),(3); +SELECT * FROM t1; +a +1 +2 +3 +REPLACE INTO t1 VALUES (4); +SELECT * FROM t1; +a +1 +2 +3 +4 +include/check_slave_is_running.inc +Should have two binary logs here +show binary logs; +Log_name File_size +master-bin.000001 # +master-bin.000002 # +DROP TABLE t1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_stop_slave.result b/mysql-test/suite/rpl/r/rpl_stop_slave.result index 5959ee09993..b93ecce3597 100644 --- a/mysql-test/suite/rpl/r/rpl_stop_slave.result +++ b/mysql-test/suite/rpl/r/rpl_stop_slave.result @@ -94,10 +94,12 @@ DROP TABLE t1, t2; CREATE TABLE t1 (c1 INT KEY, c2 INT) ENGINE=InnoDB; CREATE TABLE t2 (c1 INT) ENGINE=MyISAM; INSERT INTO t1 VALUES(1, 1); +include/stop_slave.inc [connection master] +include/stop_dump_threads.inc SET GLOBAL debug_dbug= '+d,dump_thread_wait_before_send_xid,*'; [connection slave] -include/restart_slave.inc +include/start_slave.inc BEGIN; UPDATE t1 SET c2 = 2 WHERE c1 = 1; [connection master] @@ -116,6 +118,9 @@ SET DEBUG_SYNC= 'now WAIT_FOR signal.continued'; [connection slave] include/wait_for_slave_to_stop.inc [connection slave1] +[connection master] 
+include/stop_dump_threads.inc +[connection slave1] include/start_slave.inc [connection master] DROP TABLE t1, t2; diff --git a/mysql-test/suite/rpl/r/rpl_table_options.result b/mysql-test/suite/rpl/r/rpl_table_options.result index d69570a1709..a94d6e9bc2f 100644 --- a/mysql-test/suite/rpl/r/rpl_table_options.result +++ b/mysql-test/suite/rpl/r/rpl_table_options.result @@ -12,6 +12,12 @@ show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 /* `ull`=12340 */ +set sql_mode=ignore_bad_table_options; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 `ull`=12340 drop table t1; set storage_engine=default; diff --git a/mysql-test/suite/rpl/t/create_or_replace2.test b/mysql-test/suite/rpl/t/create_or_replace2.test new file mode 100644 index 00000000000..79c92a7ec5b --- /dev/null +++ b/mysql-test/suite/rpl/t/create_or_replace2.test @@ -0,0 +1,44 @@ +--source include/have_innodb.inc +--source include/have_binlog_format_row_or_statement.inc +--source include/have_metadata_lock_info.inc +--source include/master-slave.inc +--enable_connect_log + +--echo # +--echo # MDEV-6525 ; Problems with CREATE OR REPLACE under lock +--echo # + +CREATE TABLE t1 (a INT) ENGINE=InnoDB; +CREATE FUNCTION f1() RETURNS INT RETURN ( SELECT MAX(a) FROM t1 ); + +--connect (con1,localhost,root,,test) + +CREATE TEMPORARY TABLE tmp (b INT) ENGINE=InnoDB; +LOCK TABLE t1 WRITE; + +SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; + +CREATE OR REPLACE TABLE t1 LIKE tmp; +SHOW CREATE TABLE t1; + +--connection default +set session lock_wait_timeout=1; +--error 1205 +SELECT f1(); + +set session lock_wait_timeout=@@global.lock_wait_timeout; +--send SELECT f1() +--connection con1 +# This is here just in case, any timeout should be ok +--sleep 1 +unlock tables; +--connection default +--error 1054 +--reap +--disconnect con1 + +# Cleanup +drop function f1; +drop table t1; +--disable_connect_log +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/failed_create_view-6409.test b/mysql-test/suite/rpl/t/failed_create_view-6409.test new file mode 100644 index 00000000000..5d96e6f8a93 --- /dev/null +++ b/mysql-test/suite/rpl/t/failed_create_view-6409.test @@ -0,0 +1,24 @@ +# +# MDEV-6409 CREATE VIEW replication problem if error occurs in mysql_register_view +# + +# +# +# verify that failed CREATE VIEW is not replicated + +create table v1 (a int); + +source include/master-slave.inc; + +connection master; +create table t1 (a int); +--error ER_TABLE_EXISTS_ERROR +create view v1 as select * from t1; +show tables; +sync_slave_with_master; +show tables; + +connection master; +drop table if exists t1, v1; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/kill_hard-6290.test b/mysql-test/suite/rpl/t/kill_hard-6290.test new file mode 100644 index 00000000000..7624235666a --- /dev/null +++ b/mysql-test/suite/rpl/t/kill_hard-6290.test @@ -0,0 +1,11 @@ +# +# MDEV-6290 Crash in KILL HARD QUERY USER x@y when slave threads are running +# + +# this test doesn't depend on the binlog format, no need to run it three times +--source include/have_binlog_format_mixed.inc + +--source include/master-slave.inc +--connection server_2 +kill user test2@nohost; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_checksum.test b/mysql-test/suite/rpl/t/rpl_checksum.test index 0f0b84aa632..bd0ab7ecc9c 100644 --- a/mysql-test/suite/rpl/t/rpl_checksum.test +++ 
b/mysql-test/suite/rpl/t/rpl_checksum.test @@ -260,9 +260,67 @@ AAAAAAAAAAAAAAAAAAAx+apMEzgNAAgAEgAEBAQEEgAAVAAEGggAAAAICAgCAA== #connection slave; sync_slave_with_master; + + +--echo *** Bug#59123 / MDEV-5799: INCIDENT_EVENT checksum written to error log as garbage characters *** + +--connection master + +--source include/wait_for_binlog_checkpoint.inc +CREATE TABLE t4 (a INT PRIMARY KEY); +INSERT INTO t4 VALUES (1); + +SET sql_log_bin=0; +CALL mtr.add_suppression("\\[ERROR\\] Can't generate a unique log-filename"); +SET sql_log_bin=1; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET debug_dbug= '+d,binlog_inject_new_name_error'; +--error ER_NO_UNIQUE_LOGFILE +FLUSH LOGS; +SET debug_dbug= @old_dbug; + +INSERT INTO t4 VALUES (2); + +--connection slave +--let $slave_sql_errno= 1590 +--source include/wait_for_slave_sql_error.inc + +# Search the error log for the error message. +# The bug was that 4 garbage bytes were output in the middle of the error +# message; by searching for a pattern that spans that location, we can +# catch the error. +let $log_error_= `SELECT @@GLOBAL.log_error`; +if(!$log_error_) +{ + # MySQL Server on windows is started with --console and thus + # does not know the location of its .err log, use default location + let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; +} +--let SEARCH_FILE= $log_error_ +--let SEARCH_RANGE=-50000 +--let SEARCH_PATTERN= Slave SQL: The incident LOST_EVENTS occured on the master\. Message: error writing to the binary log, Internal MariaDB error code: 1590 +--source include/search_pattern_in_file.inc + +SELECT * FROM t4 ORDER BY a; +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter= 1; +--source include/start_slave.inc + +--connection master +--save_master_pos + +--connection slave +--sync_with_master +SELECT * FROM t4 ORDER BY a; + + +--connection slave set @@global.binlog_checksum = @slave_save_binlog_checksum; set @@global.slave_sql_verify_checksum = @save_slave_sql_verify_checksum; --echo End of tests +--connection master +DROP TABLE t4; + --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_gtid_basic.test b/mysql-test/suite/rpl/t/rpl_gtid_basic.test index 3f2d5e1e321..5ecff519aef 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_basic.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_basic.test @@ -69,15 +69,7 @@ save_master_pos; connection server_4; --replace_result $MASTER_MYPORT MASTER_PORT eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT, - MASTER_USE_GTID=SLAVE_POS; -# Test that sql_slave_skip_counter is prevented in GTID mode. 
---error ER_SLAVE_SKIP_NOT_IN_GTID -SET GLOBAL sql_slave_skip_counter=1; ---replace_result $MASTER_MYPORT MASTER_PORT -eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $MASTER_MYPORT, MASTER_USE_GTID=CURRENT_POS; ---error ER_SLAVE_SKIP_NOT_IN_GTID -SET GLOBAL sql_slave_skip_counter=10; --source include/start_slave.inc sync_with_master; SELECT * FROM t1 ORDER BY a; @@ -184,8 +176,8 @@ SET GLOBAL gtid_binlog_state = '0-1-10,1-2-20,0-3-30'; --let $binlog_file= master-bin.000001 --let $binlog_start= 4 --source include/show_binlog_events.inc -SELECT @@GLOBAL.gtid_binlog_pos; -SELECT @@GLOBAL.gtid_binlog_state; +#SELECT @@GLOBAL.gtid_binlog_pos; +#SELECT @@GLOBAL.gtid_binlog_state; --error ER_BINLOG_MUST_BE_EMPTY SET GLOBAL gtid_binlog_state = @old_state; RESET MASTER; @@ -262,6 +254,7 @@ INSERT INTO t1 VALUES (3); --let $pos= `SELECT @@gtid_binlog_pos` --connection s1 +--replace_result $pos POS eval SET @pos= '$pos'; SELECT master_gtid_wait(@pos, 0); SELECT * FROM t1 WHERE a >= 3; @@ -374,6 +367,120 @@ reap; reap; +--echo *** Test sql_gtid_slave_pos when used with GTID *** + +--connection server_2 +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1000; +INSERT INTO t1 VALUES (10); +INSERT INTO t1 VALUES (11); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 1; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1001", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1001 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1010; +INSERT INTO t1 VALUES (12); +INSERT INTO t1 VALUES (13); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 2; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1011", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1011 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1020; +INSERT INTO t1 VALUES (14); +INSERT INTO t1 VALUES (15); +INSERT INTO t1 VALUES (16); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 3; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1022", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1022 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=2; +SET gtid_seq_no=1030; +INSERT INTO t1 VALUES (17); +INSERT INTO t1 VALUES (18); +INSERT INTO t1 VALUES (19); +--save_master_pos + +--connection server_2 +SET sql_slave_skip_counter= 5; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT IF(LOCATE("2-1-1032", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! 
expected GTID 2-1-1032 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + + +--source include/stop_slave.inc + +--connection server_1 +SET gtid_domain_id=3; +SET gtid_seq_no=100; +CREATE TABLE t2 (a INT PRIMARY KEY); +DROP TABLE t2; +SET gtid_domain_id=2; +SET gtid_seq_no=1040; +INSERT INTO t1 VALUES (20); +--save_master_pos + +--connection server_2 +SET @saved_mode= @@GLOBAL.slave_ddl_exec_mode; +SET GLOBAL slave_ddl_exec_mode=STRICT; +SET sql_slave_skip_counter=1; +START SLAVE UNTIL master_gtid_pos="3-1-100"; +--let $master_pos=3-1-100 +--source include/sync_with_master_gtid.inc +--source include/wait_for_slave_sql_to_stop.inc +--error ER_NO_SUCH_TABLE +SELECT * FROM t2; +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +# Start the slave again, it should fail on the DROP TABLE as the table is not there. +SET sql_log_bin=0; +CALL mtr.add_suppression("Slave: Unknown table 'test\\.t2' Error_code: 1051"); +SET sql_log_bin=1; +START SLAVE; +--let $slave_sql_errno=1051 +--source include/wait_for_slave_sql_error.inc +SELECT IF(LOCATE("3-1-100", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-100 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +STOP SLAVE IO_THREAD; +SET sql_slave_skip_counter=2; +--source include/start_slave.inc +--sync_with_master + +SELECT * FROM t1 WHERE a >= 20 ORDER BY a; +SELECT IF(LOCATE("3-1-101", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 3-1-101 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; +SELECT IF(LOCATE("2-1-1040", @@GLOBAL.gtid_slave_pos)>0, "Ok", CONCAT("ERROR! expected GTID 2-1-1040 not found in gtid_slave_pos: ", @@GLOBAL.gtid_slave_pos)) AS status; + +SET GLOBAL slave_ddl_exec_mode= @saved_mode; + --connection server_1 DROP TABLE t1; diff --git a/mysql-test/suite/rpl/t/rpl_gtid_crash.test b/mysql-test/suite/rpl/t/rpl_gtid_crash.test index e02816e1d30..0caad2a12fe 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_crash.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_crash.test @@ -100,7 +100,7 @@ SET gtid_domain_id= 2; INSERT INTO t1 VALUES (3); FLUSH LOGS; --source include/show_binary_logs.inc ---replace_column 2 # 4 # 5 # +--replace_column 2 # 4 # 5 # 6 # SHOW BINLOG EVENTS IN 'master-bin.000003' LIMIT 1,1; --write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect @@ -120,7 +120,7 @@ EOF --source include/wait_until_connected_again.inc --source include/show_binary_logs.inc ---replace_column 2 # 4 # 5 # +--replace_column 2 # 4 # 5 # 6 # SHOW BINLOG EVENTS IN 'master-bin.000004' LIMIT 1,1; --save_master_pos @@ -294,6 +294,192 @@ SHOW VARIABLES like 'gtid_strict_mode'; eval SET GLOBAL gtid_strict_mode= $old_gtid_strict; --enable_query_log + +--echo *** MDEV-6462: Incorrect recovery on a slave reconnecting to crashed master *** + +--connection server_1 +set sql_log_bin= 0; +call mtr.add_suppression("Error writing file 'master-bin'"); +set sql_log_bin= 1; +--connection server_2 +set sql_log_bin= 0; +call mtr.add_suppression("The server_id of master server changed in the middle of GTID"); +call mtr.add_suppression("Unexpected change of master binlog file name in the middle of GTID"); +set sql_log_bin= 1; + +--connection server_1 +--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +wait +EOF + +SET GLOBAL debug_dbug="+d,inject_error_writing_xid"; +BEGIN; +INSERT INTO t1 VALUES (11); +--error ER_ERROR_ON_WRITE +COMMIT; +SET GLOBAL 
debug_dbug="+d,crash_dispatch_command_before"; +--error 2006,2013 +COMMIT; + +--source include/wait_until_disconnected.inc + +# Simulate that we reconnect to a different server (new server_id). +--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +restart: --server-id=3 +EOF + +--enable_reconnect +--source include/wait_until_connected_again.inc + +SELECT @@GLOBAL.server_id; +SELECT * from t1 WHERE a > 10 ORDER BY a; + +--echo # Wait 30 seconds for SQL thread to catch up with IO thread +--connection server_2 +--let $wait_timeout= 300 +while ($wait_timeout != 0) +{ + --let $read_log_pos= query_get_value('SHOW SLAVE STATUS', Read_Master_Log_Pos, 1) + --let $exec_log_pos= query_get_value('SHOW SLAVE STATUS', Exec_Master_Log_Pos, 1) + if ($read_log_pos == $exec_log_pos) + { + --let $wait_timeout= 0 + } + if ($read_log_pos != $exec_log_pos) + { + --sleep 0.1 + --dec $wait_timeout + } +} +if ($read_log_pos != $exec_log_pos) +{ + --die Timeout wait for SQL thread to catch up with IO thread +} + +SELECT * from t1 WHERE a > 10 ORDER BY a; + +--echo # Repeat this with additional transactions on the master + +--connection server_1 +--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +wait +EOF + +SET GLOBAL debug_dbug="+d,inject_error_writing_xid"; +BEGIN; +INSERT INTO t1 VALUES (12); +--error ER_ERROR_ON_WRITE +COMMIT; +SET GLOBAL debug_dbug="+d,crash_dispatch_command_before"; +--error 2006,2013 +COMMIT; + +--source include/wait_until_disconnected.inc + +# Simulate that we reconnect to a different server (new server_id). +--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +restart: --server-id=1 +EOF + +--enable_reconnect +--source include/wait_until_connected_again.inc + +SELECT @@GLOBAL.server_id; +INSERT INTO t1 VALUES (13); +INSERT INTO t1 VALUES (14); +SELECT * from t1 WHERE a > 10 ORDER BY a; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +SELECT * from t1 WHERE a > 10 ORDER BY a; + +--connection server_1 +--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +wait +EOF + +SET GLOBAL debug_dbug="+d,inject_error_writing_xid"; +BEGIN; +INSERT INTO t1 VALUES (21); +--error ER_ERROR_ON_WRITE +COMMIT; +SET GLOBAL debug_dbug="+d,crash_dispatch_command_before"; +--error 2006,2013 +COMMIT; + +--source include/wait_until_disconnected.inc + +# Simulate that we reconnect to the same server (same server_id). 
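+# This time the expect file requests a plain restart with no --server-id
+# override, so the slave reconnects to a master that reports the same
+# server_id (contrast with the two crashes above, which restarted the
+# master with --server-id=3 and --server-id=1).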
+--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +restart +EOF + +--enable_reconnect +--source include/wait_until_connected_again.inc + +SELECT @@GLOBAL.server_id; +SELECT * from t1 WHERE a > 10 ORDER BY a; + +--echo # Wait 30 seconds for SQL thread to catch up with IO thread +--connection server_2 +--let $wait_timeout= 300 +while ($wait_timeout != 0) +{ + --let $read_log_pos= query_get_value('SHOW SLAVE STATUS', Read_Master_Log_Pos, 1) + --let $exec_log_pos= query_get_value('SHOW SLAVE STATUS', Exec_Master_Log_Pos, 1) + if ($read_log_pos == $exec_log_pos) + { + --let $wait_timeout= 0 + } + if ($read_log_pos != $exec_log_pos) + { + --sleep 0.1 + --dec $wait_timeout + } +} +if ($read_log_pos != $exec_log_pos) +{ + --die Timeout wait for SQL thread to catch up with IO thread +} + +SELECT * from t1 WHERE a > 10 ORDER BY a; + +--echo # Repeat this with additional transactions on the master + +--connection server_1 +--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +wait +EOF + +SET GLOBAL debug_dbug="+d,inject_error_writing_xid"; +BEGIN; +INSERT INTO t1 VALUES (22); +--error ER_ERROR_ON_WRITE +COMMIT; +SET GLOBAL debug_dbug="+d,crash_dispatch_command_before"; +--error 2006,2013 +COMMIT; + +--source include/wait_until_disconnected.inc + +--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +restart +EOF + +--enable_reconnect +--source include/wait_until_connected_again.inc + +INSERT INTO t1 VALUES (23); +INSERT INTO t1 VALUES (24); +SELECT * from t1 WHERE a > 10 ORDER BY a; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +SELECT * from t1 WHERE a > 10 ORDER BY a; + + --connection server_1 DROP TABLE t1; diff --git a/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test b/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test new file mode 100644 index 00000000000..24298e9893a --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_gtid_errorlog.test @@ -0,0 +1,76 @@ +--source include/have_debug.inc +--source include/master-slave.inc + +--echo *** Test MDEV-6120, output of current GTID when a replication error is logged to the errorlog *** +--connection master +CREATE TABLE t1(a INT PRIMARY KEY); +--sync_slave_with_master + +--connection slave +--source include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=slave_pos; + +--connection master +INSERT INTO t1 VALUES (1); +SET gtid_seq_no=100; +INSERT INTO t1 VALUES (2); +INSERT INTO t1 VALUES (3); +INSERT INTO t1 VALUES (4); +--save_master_pos + +--connection slave +SET sql_log_bin=0; +INSERT INTO t1 VALUES (2); +SET sql_log_bin=1; + +START SLAVE; +--let $slave_sql_errno=1062 +--source include/wait_for_slave_sql_error.inc + +--source include/stop_slave.inc +# Skip the problem event from the master. +SET GLOBAL gtid_slave_pos= "0-1-100"; +--source include/start_slave.inc +--sync_with_master + +SELECT * FROM t1 ORDER BY a; + +--connection master + +SET @dbug_save= @@debug_dbug; +SET debug_dbug= '+d,incident_database_resync_on_replace'; +REPLACE INTO t1 VALUES (5); +SET debug_dbug= @dbug_save; +--save_master_pos + +--connection slave +--let $slave_sql_errno=1590 +--source include/wait_for_slave_sql_error.inc +--source include/stop_slave.inc +SET sql_slave_skip_counter=1; +--source include/start_slave.inc +--sync_with_master + +SELECT * FROM t1 ORDER BY a; + + +# Check error log for correct messages. 
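+# Two patterns are checked below: the duplicate-key failure must be logged
+# together with its GTID (0-1-100), and the injected LOST_EVENTS incident
+# must be logged with error code 1590.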
+let $log_error_= `SELECT @@GLOBAL.log_error`; +if(!$log_error_) +{ + # MySQL Server on windows is started with --console and thus + # does not know the location of its .err log, use default location + let $log_error_ = $MYSQLTEST_VARDIR/log/mysqld.2.err; +} +--let SEARCH_FILE=$log_error_ +--let SEARCH_RANGE=-50000 +--let SEARCH_PATTERN=Slave SQL: Error 'Duplicate entry .* on query\. .*Query: '.*', Gtid 0-1-100, Internal MariaDB error code:|Slave SQL: Could not execute Write_rows.*table test.t1; Duplicate entry.*, Gtid 0-1-100, Internal MariaDB error +--source include/search_pattern_in_file.inc +--let SEARCH_PATTERN=Slave SQL: The incident LOST_EVENTS occured on the master\. Message: <none>, Internal MariaDB error code: 1590 +--source include/search_pattern_in_file.inc + + +--connection master +DROP TABLE t1; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_gtid_master_promote.test b/mysql-test/suite/rpl/t/rpl_gtid_master_promote.test index f3cc4aca135..bd5343d7558 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_master_promote.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_master_promote.test @@ -139,6 +139,7 @@ SELECT * FROM t4 ORDER BY a,b; --connection server_2 --replace_result $SERVER_MYPORT_3 SERVER_MYPORT_3 eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_3; +--replace_result $server3_pos SERVER3_POS eval START SLAVE UNTIL master_gtid_pos = "$server3_pos"; --source include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; @@ -151,6 +152,7 @@ SELECT * FROM t4 ORDER BY a,b; --connection server_2 --replace_result $SERVER_MYPORT_4 SERVER_MYPORT_4 eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_4; +--replace_result $server4_pos SERVER4_POS eval START SLAVE UNTIL master_gtid_pos = "$server4_pos"; --source include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; @@ -163,6 +165,7 @@ SELECT * FROM t4 ORDER BY a,b; --connection server_2 --replace_result $SERVER_MYPORT_5 SERVER_MYPORT_5 eval CHANGE MASTER TO master_host = '127.0.0.1', master_port = $SERVER_MYPORT_5; +--replace_result $server5_pos SERVER5_POS eval START SLAVE UNTIL master_gtid_pos = "$server5_pos"; --source include/wait_for_slave_to_stop.inc SELECT * FROM t1 ORDER BY a; diff --git a/mysql-test/suite/rpl/t/rpl_gtid_reconnect.test b/mysql-test/suite/rpl/t/rpl_gtid_reconnect.test index 226c50dbc97..22cf10afba3 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_reconnect.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_reconnect.test @@ -144,6 +144,7 @@ INSERT INTO t1 VALUES (20); --connection server_2 --source include/start_slave.inc +--sync_with_master SELECT * FROM t1 ORDER BY a; diff --git a/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test b/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test index 1f0532f9922..b57714aaa57 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test @@ -63,7 +63,7 @@ INSERT INTO t1 VALUES (4); --replace_column 2 # 4 # 5 # SHOW BINLOG EVENTS IN 'master-bin.000003' LIMIT 1,1; FLUSH LOGS; ---replace_column 2 # 4 # 5 # +--replace_column 2 # 4 # 5 # 6 # SHOW BINLOG EVENTS IN 'master-bin.000004' LIMIT 1,1; --write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect @@ -83,7 +83,7 @@ EOF --enable_reconnect --source include/wait_until_connected_again.inc ---replace_column 2 # 4 # 5 # +--replace_column 2 # 4 # 5 # 6 # SHOW BINLOG EVENTS IN 'master-bin.000005' LIMIT 1,1; --source include/show_binary_logs.inc diff --git a/mysql-test/suite/rpl/t/rpl_gtid_until.test 
b/mysql-test/suite/rpl/t/rpl_gtid_until.test index 68ebd464fd1..4dde7da38a5 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_until.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_until.test @@ -175,6 +175,7 @@ DROP TABLE t3; --save_master_pos --connection server_2 +--replace_result $until_condition UNTIL_CONDITION eval START SLAVE UNTIL master_gtid_pos='$until_condition'; --source include/wait_for_slave_to_stop.inc SHOW CREATE TABLE t3; diff --git a/mysql-test/suite/rpl/t/rpl_heartbeat_debug.test b/mysql-test/suite/rpl/t/rpl_heartbeat_debug.test new file mode 100644 index 00000000000..7cdf67d6532 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_heartbeat_debug.test @@ -0,0 +1,52 @@ +# Testing master to slave heartbeat protocol, test cases that need debug build. + +--source include/master-slave.inc +--source include/have_debug.inc + +connection slave; +--source include/stop_slave.inc +set @restore_slave_net_timeout= @@global.slave_net_timeout; +--disable_warnings +set @@global.slave_net_timeout= 10; +--enable_warnings + +### +### Checking the range +### + +# +# default period slave_net_timeout/2 +# +--query_vertical show status like 'Slave_heartbeat_period'; +SET @save_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,simulate_slave_heartbeat_network_error"; +CALL mtr.add_suppression('SET @master_heartbeat_period to master failed with error'); +CALL mtr.add_suppression('Master command COM_REGISTER_SLAVE failed: failed registering on master, reconnecting to try again'); +--source include/start_slave.inc + + +connection master; +--disable_warnings +drop table if exists t1; +--enable_warnings + +CREATE TABLE t1 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES (1); + +sync_slave_with_master; + +--connection slave +SELECT * FROM t1; + +connection master; +drop table t1; + +connection slave; +--source include/stop_slave.inc +--disable_warnings +SET GLOBAL debug_dbug=@save_dbug; +set @@global.slave_net_timeout= @restore_slave_net_timeout; +--enable_warnings +--source include/start_slave.inc + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_mdev6020.test b/mysql-test/suite/rpl/t/rpl_mdev6020.test new file mode 100644 index 00000000000..2fd342f5eda --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_mdev6020.test @@ -0,0 +1,70 @@ +--source include/have_innodb.inc +--source include/have_partition.inc +--source include/have_binlog_format_mixed_or_row.inc +--source include/master-slave.inc + +--connection slave +--source include/stop_slave.inc + +--connection master +--let $datadir= `SELECT @@datadir` + +--let $rpl_server_number= 1 +--source include/rpl_stop_server.inc + +--remove_file $datadir/master-bin.000001 +--remove_file $datadir/master-bin.state +--copy_file $MYSQL_TEST_DIR/std_data/mdev6020-mysql-bin.000001 $datadir/master-bin.000001 + +--let $rpl_server_number= 1 +--source include/rpl_start_server.inc + +--source include/wait_until_connected_again.inc + +--connection slave +SET SQL_LOG_BIN=0; +ALTER TABLE mysql.gtid_slave_pos ENGINE = InnoDB; +SET SQL_LOG_BIN=1; +SET @old_engine= @@GLOBAL.default_storage_engine; +SET GLOBAL default_storage_engine=InnoDB; +SET @old_parallel= @@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=12; +--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1 +eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1, master_user='root', master_log_file='master-bin.000001', master_log_pos=4; +--source include/start_slave.inc + +--connection master +SET SQL_LOG_BIN=0; +ALTER TABLE mysql.gtid_slave_pos ENGINE = InnoDB; +SET 
SQL_LOG_BIN=1; +--save_master_pos + +--connection slave +--sync_with_master + +SELECT @@gtid_slave_pos; +CHECKSUM TABLE table0_int_autoinc, table0_key_pk_parts_2_int_autoinc, table100_int_autoinc, table100_key_pk_parts_2_int_autoinc, table10_int_autoinc, table10_key_pk_parts_2_int_autoinc, table1_int_autoinc, table1_key_pk_parts_2_int_autoinc, table2_int_autoinc, table2_key_pk_parts_2_int_autoinc; + +--source include/stop_slave.inc + + +SET GLOBAL default_storage_engine= @old_engine; +SET GLOBAL slave_parallel_threads=@old_parallel; +SET sql_log_bin=0; +DROP TABLE table0_int_autoinc; +DROP TABLE table0_key_pk_parts_2_int_autoinc; +DROP TABLE table100_int_autoinc; +DROP TABLE table100_key_pk_parts_2_int_autoinc; +DROP TABLE table10_int_autoinc; +DROP TABLE table10_key_pk_parts_2_int_autoinc; +DROP TABLE table1_int_autoinc; +DROP TABLE table1_key_pk_parts_2_int_autoinc; +DROP TABLE table2_int_autoinc; +DROP TABLE table2_key_pk_parts_2_int_autoinc; +SET sql_log_bin=1; + +--source include/start_slave.inc + +--connection master + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_mdev6386-slave.opt b/mysql-test/suite/rpl/t/rpl_mdev6386-slave.opt new file mode 100644 index 00000000000..a5d959ae7fe --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_mdev6386-slave.opt @@ -0,0 +1 @@ +--disable-log-slave-updates diff --git a/mysql-test/suite/rpl/t/rpl_mdev6386.test b/mysql-test/suite/rpl/t/rpl_mdev6386.test new file mode 100644 index 00000000000..5513d15a77d --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_mdev6386.test @@ -0,0 +1,71 @@ +--source include/have_innodb.inc +--source include/master-slave.inc + +--connection master +# ToDo: Remove this FLUSH LOGS when MDEV-6403 is fixed. +ALTER TABLE mysql.gtid_slave_pos ENGINE = InnoDB; +FLUSH LOGS; +CREATE TABLE t1 (a INT PRIMARY KEY, b INT) Engine=InnoDB; +--sync_slave_with_master + +--connection slave +--source include/stop_slave.inc +# Provoke a duplicate key error on replication. +SET sql_log_bin= 0; +INSERT INTO t1 VALUES (1, 2); +SET sql_log_bin= 1; +CHANGE MASTER TO master_use_gtid= current_pos; +--echo Contents on slave before: +SELECT * FROM t1 ORDER BY a; + +SET @old_parallel= @@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=8; + +--connection master + +CREATE TEMPORARY TABLE t2 LIKE t1; +INSERT INTO t2 VALUE (1, 1); +INSERT INTO t2 VALUE (2, 1); +INSERT INTO t2 VALUE (3, 1); +INSERT INTO t2 VALUE (4, 1); +INSERT INTO t2 VALUE (5, 1); +INSERT INTO t1 SELECT * FROM t2; +DROP TEMPORARY TABLE t2; +--save_master_pos +--echo Contents on master: +SELECT * FROM t1 ORDER BY a; + +--connection slave +START SLAVE; +# The slave will stop with a duplicate key error. +# The bug was 1) that the next DROP TEMPORARY TABLE would be allowed to run +# anyway, and 2) that then record_gtid() would get an error during commit +# (since the prior commit failed), and this error was not correctly handled, +# which caused an assertion about closing tables while a statement was still +# active. +--let $slave_sql_errno=1062 +--source include/wait_for_slave_sql_error.inc + +STOP SLAVE IO_THREAD; +--echo Contents on slave on slave error: +SELECT * FROM t1 ORDER BY a; + +# Resolve the duplicate key error so replication can be resumed. 
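+# The conflicting row (1,2) was inserted directly on the slave above, so
+# remove it with the binlog disabled; the master's transaction can then be
+# re-applied without hitting the duplicate key again.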
+SET sql_log_bin= 0; +DELETE FROM t1 WHERE a=1; +SET sql_log_bin= 1; + +--source include/start_slave.inc +--sync_with_master +--echo Contents on slave after: +SELECT * FROM t1 ORDER BY a; + +--connection master +DROP TABLE t1; + +--connection slave +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads= @old_parallel; +--source include/start_slave.inc + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test b/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test index 398768e87bf..e99a233ac34 100644 --- a/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test +++ b/mysql-test/suite/rpl/t/rpl_mysql_upgrade.test @@ -5,11 +5,9 @@ # options are added into mysql_upgrade. These options control whether sql # statements are binlogged or not. ############################################################################# ---source include/master-slave.inc --source include/have_innodb.inc - -# Only run test if "mysql_upgrade" is found ---source include/have_mysql_upgrade.inc +--source include/mysql_upgrade_preparation.inc +--source include/master-slave.inc call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); call mtr.add_suppression("table or database name 'mysqltest-1'"); diff --git a/mysql-test/suite/rpl/t/rpl_parallel.test b/mysql-test/suite/rpl/t/rpl_parallel.test index 9b68d6648e0..4f01ef7765b 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel.test +++ b/mysql-test/suite/rpl/t/rpl_parallel.test @@ -438,7 +438,7 @@ SET debug_sync='now WAIT_FOR t2_killed'; # Now we can allow T1 to proceed. SET debug_sync='now SIGNAL t1_cont'; ---let $slave_sql_errno= 1317,1964 +--let $slave_sql_errno= 1317,1927,1964 --source include/wait_for_slave_sql_error.inc STOP SLAVE IO_THREAD; SELECT * FROM t3 WHERE a >= 30 ORDER BY a; @@ -573,7 +573,7 @@ SET debug_sync='now WAIT_FOR t2_killed'; # Now we can allow T1 to proceed. SET debug_sync='now SIGNAL t1_cont'; ---let $slave_sql_errno= 1317,1964 +--let $slave_sql_errno= 1317,1927,1964 --source include/wait_for_slave_sql_error.inc # Now we have to disable the debug_sync statements, so they do not trigger @@ -712,7 +712,7 @@ SET debug_sync='now WAIT_FOR t2_killed'; # Now we can allow T1 to proceed. SET debug_sync='now SIGNAL t1_cont'; ---let $slave_sql_errno= 1317,1964 +--let $slave_sql_errno= 1317,1927,1964 --source include/wait_for_slave_sql_error.inc SELECT * FROM t3 WHERE a >= 50 ORDER BY a; @@ -1246,8 +1246,76 @@ SET debug_sync='RESET'; --connection server_2 --source include/start_slave.inc --sync_with_master +--source include/stop_slave.inc + +SELECT * FROM t4 ORDER BY a; + + +# MDEV-6549, failing to update gtid_slave_pos for a transaction that was retried. +# The problem was that when a transaction updates the mysql.gtid_slave_pos +# table, it clears the flag that marks that there is a GTID position that +# needs to be updated. Then, if the transaction got killed after that due +# to a deadlock, the subsequent retry would fail to notice that the GTID needs +# to be recorded in gtid_slave_pos. +# +# (In the original bug report, the symptom was an assertion; this was however +# just a side effect of the missing update of gtid_slave_pos, which also +# happened to cause a missing clear of OPTION_GTID_BEGIN). +--connection server_1 +DELETE FROM t4; +INSERT INTO t4 VALUES (1,NULL), (2,2), (3,NULL), (4,4), (5, NULL), (6, 6); + +# Create two transactions that can run in parallel on the slave but cause +# a deadlock if the second runs before the first. 
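+# con1 updates the row with a=6 while con2 deletes the rows with b <= 1; the
+# debug_sync points below make sure both transactions are queued in the same
+# binlog group commit, so the parallel slave is allowed to apply them
+# concurrently.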
+--connection con1 +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued1 WAIT_FOR master_cont1'; +send UPDATE t4 SET b=NULL WHERE a=6; +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued1'; + +--connection con2 +# Must use statement-based binlogging. Otherwise the transaction will not be +# binlogged at all, as it modifies no rows. +SET @old_format= @@SESSION.binlog_format; +SET binlog_format='statement'; +SET debug_sync='commit_after_release_LOCK_prepare_ordered SIGNAL master_queued2'; +send DELETE FROM t4 WHERE b <= 1; + +--connection server_1 +SET debug_sync='now WAIT_FOR master_queued2'; +SET debug_sync='now SIGNAL master_cont1'; + +--connection con1 +REAP; +--connection con2 +REAP; +SET @old_format=@@GLOBAL.binlog_format; +SET debug_sync='RESET'; +--save_master_pos +--let $last_gtid= `SELECT @@last_gtid` + +--connection server_2 +# Disable the usual skip of gap locks for transactions that are run in +# parallel, using DBUG. This allows the deadlock to occur, and this in turn +# triggers a retry of the second transaction, and the code that was buggy and +# caused the gtid_slave_pos update to be skipped in the retry. +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,disable_thd_need_ordering_with"; +--source include/start_slave.inc +--sync_with_master +SET GLOBAL debug_dbug=@old_dbug; SELECT * FROM t4 ORDER BY a; +# Check that the GTID of the second transaction was correctly recorded in +# gtid_slave_pos, in the variable as well as in the table. +--replace_result $last_gtid GTID +eval SET @last_gtid= '$last_gtid'; +SELECT IF(@@gtid_slave_pos LIKE CONCAT('%',@last_gtid,'%'), "GTID found ok", + CONCAT("GTID ", @last_gtid, " not found in gtid_slave_pos=", @@gtid_slave_pos)) + AS result; +SELECT "ROW FOUND" AS `Is the row found?` + FROM mysql.gtid_slave_pos + WHERE CONCAT(domain_id, "-", server_id, "-", seq_no) = @last_gtid; --echo *** MDEV-5938: Exec_master_log_pos not updated at log rotate in parallel replication *** @@ -1277,6 +1345,7 @@ eval SELECT IF('$io_pos' = '$sql_pos', "OK", "Not ok, $io_pos <> $sql_pos") AS t --connection server_1 FLUSH LOGS; +--source include/wait_for_binlog_checkpoint.inc --save_master_pos --connection server_2 @@ -1291,6 +1360,112 @@ eval SELECT IF('$io_pos' = '$sql_pos', "OK", "Not ok, $io_pos <> $sql_pos") AS t --enable_query_log +--echo *** MDEV_6435: Incorrect error handling when query binlogged partially on master with "killed" error *** + +--connection server_1 +CREATE TABLE t6 (a INT) ENGINE=MyISAM; +CREATE TRIGGER tr AFTER INSERT ON t6 FOR EACH ROW SET @a = 1; + +--connection con1 +SET @old_format= @@binlog_format; +SET binlog_format= statement; +--let $conid = `SELECT CONNECTION_ID()` +SET debug_sync='sp_head_execute_before_loop SIGNAL ready WAIT_FOR cont'; +send INSERT INTO t6 VALUES (1), (2), (3); + +--connection server_1 +SET debug_sync='now WAIT_FOR ready'; +--replace_result $conid CONID +eval KILL QUERY $conid; +SET debug_sync='now SIGNAL cont'; + +--connection con1 +--error ER_QUERY_INTERRUPTED +--reap +SET binlog_format= @old_format; +SET debug_sync='RESET'; +--let $after_error_gtid_pos= `SELECT @@gtid_binlog_pos` + +--connection server_1 +SET debug_sync='RESET'; + + +--connection server_2 +--let $slave_sql_errno= 1317 +--source include/wait_for_slave_sql_error.inc +STOP SLAVE IO_THREAD; +--replace_result $after_error_gtid_pos AFTER_ERROR_GTID_POS +eval SET GLOBAL gtid_slave_pos= '$after_error_gtid_pos'; +--source include/start_slave.inc + +--connection server_1 +INSERT INTO t6 VALUES (4); 
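+# With gtid_slave_pos moved past the partially-binlogged INSERT, replication
+# resumes normally and both servers should show the same contents of t6.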
+SELECT * FROM t6 ORDER BY a; +--save_master_pos + +--connection server_2 +--sync_with_master +SELECT * FROM t6 ORDER BY a; + + +--echo *** MDEV-6551: Some replication errors are ignored if slave_parallel_threads > 0 *** + +--connection server_1 +INSERT INTO t2 VALUES (31); +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads= 0; +--source include/start_slave.inc + +# Force a duplicate key error on the slave. +SET sql_log_bin= 0; +INSERT INTO t2 VALUES (32); +SET sql_log_bin= 1; + +--connection server_1 +INSERT INTO t2 VALUES (32); +# Rotate the binlog; the bug is triggered when the master binlog file changes +# after the event group that causes the duplicate key error. +FLUSH LOGS; +INSERT INTO t2 VALUES (33); +INSERT INTO t2 VALUES (34); +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; +--source include/save_master_gtid.inc + +--connection server_2 +--let $slave_sql_errno= 1062 +--source include/wait_for_slave_sql_error.inc + +--connection server_2 +--source include/stop_slave_io.inc +SET GLOBAL slave_parallel_threads=10; +START SLAVE; + +--let $slave_sql_errno= 1062 +--source include/wait_for_slave_sql_error.inc + +# Note: IO thread is still running at this point. +# The bug seems to have been that restarting the SQL thread after an error with +# the IO thread still running, somehow picks up a later relay log position and +# thus ends up skipping the failing event, rather than re-executing. + +START SLAVE SQL_THREAD; +--let $slave_sql_errno= 1062 +--source include/wait_for_slave_sql_error.inc + +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; + +# Skip the duplicate error, so we can proceed. +SET sql_slave_skip_counter= 1; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc + +SELECT * FROM t2 WHERE a >= 30 ORDER BY a; + + --connection server_2 --source include/stop_slave.inc SET GLOBAL slave_parallel_threads=@old_parallel_threads; @@ -1299,7 +1474,7 @@ SET DEBUG_SYNC= 'RESET'; --connection server_1 DROP function foo; -DROP TABLE t1,t2,t3,t4,t5; +DROP TABLE t1,t2,t3,t4,t5,t6; SET DEBUG_SYNC= 'RESET'; --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_retry.test b/mysql-test/suite/rpl/t/rpl_parallel_retry.test new file mode 100644 index 00000000000..d3be6262cb0 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_retry.test @@ -0,0 +1,220 @@ +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--let $rpl_topology=1->2 +--source include/rpl_init.inc + +--echo *** Test retry of transactions that fail to replicate due to deadlock or similar temporary error. *** + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t1 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1,1); +--save_master_pos + +# Use a stored function to inject a debug_sync into the appropriate THD. +# The function does nothing on the master, and on the slave it injects the +# desired debug_sync action(s). 
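+# Both servers create foo() with sql_log_bin=0, so the definitions are not
+# replicated: the master's version below simply returns x, while the slave's
+# version (created further down) additionally executes SET debug_sync= d1/d2.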
+SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=5; +--source include/start_slave.inc +--sync_with_master +SET sql_log_bin=0; +--delimiter || +CREATE FUNCTION foo(x INT, d1 VARCHAR(500), d2 VARCHAR(500)) + RETURNS INT DETERMINISTIC + BEGIN + IF d1 != '' THEN + SET debug_sync = d1; + END IF; + IF d2 != '' THEN + SET debug_sync = d2; + END IF; + RETURN x; + END +|| +--delimiter ; +SET sql_log_bin=1; +--source include/stop_slave.inc + +--connection server_1 +SET gtid_seq_no = 100; +BEGIN; +INSERT INTO t1 VALUES (2,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (3,1); +COMMIT; +SELECT * FROM t1 ORDER BY a; +--save_master_pos + +--connection server_2 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100"; +let $old_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--source include/start_slave.inc +--sync_with_master +SET GLOBAL debug_dbug=@old_dbug; +let $new_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--disable_query_log +eval SELECT $new_retry - $old_retry AS retries; +--enable_query_log + +SELECT * FROM t1 ORDER BY a; + + +--echo *** Test that double retry works when the first retry also fails with temp error *** +--source include/stop_slave.inc + +--connection server_1 +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 10; +BEGIN; +INSERT INTO t1 VALUES (4,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (5,1); +INSERT INTO t1 VALUES (6,1); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 ORDER BY a; +--save_master_pos + +--connection server_2 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100,rpl_parallel_simulate_double_temp_err_gtid_0_x_100"; +let $old_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--source include/start_slave.inc +--sync_with_master +SET GLOBAL debug_dbug=@old_dbug; +let $new_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--disable_query_log +eval SELECT $new_retry - $old_retry AS retries; +--enable_query_log + +SELECT * FROM t1 ORDER BY a; + + +--echo *** Test too many retries, eventually causing failure. 
*** +--source include/stop_slave.inc + +--connection server_1 +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 11; +BEGIN; +INSERT INTO t1 VALUES (7,1); +UPDATE t1 SET b=b+1 WHERE a=1; +INSERT INTO t1 VALUES (8,1); +INSERT INTO t1 VALUES (9,1); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 ORDER BY a; +--save_master_pos + +--connection server_2 +SET sql_log_bin=0; +CALL mtr.add_suppression("Slave worker thread retried transaction 10 time\\(s\\) in vain, giving up"); +CALL mtr.add_suppression("Slave: Deadlock found when trying to get lock; try restarting transaction"); +SET sql_log_bin=1; + +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100,rpl_parallel_simulate_infinite_temp_err_gtid_0_x_100"; +let $old_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +START SLAVE; +--let $slave_sql_errno= 1213 +--let $slave_timeout= 10 +--source include/wait_for_slave_sql_error.inc +SET GLOBAL debug_dbug=@old_dbug; +let $new_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--disable_query_log +eval SELECT $new_retry - $old_retry AS retries; +--enable_query_log + +SELECT * FROM t1 ORDER BY a; +STOP SLAVE IO_THREAD; +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 ORDER BY a; + +--echo *** Test retry of event group that spans multiple relay log files. *** + +--connection server_1 +CREATE TABLE t2 (a int PRIMARY KEY, b BLOB) ENGINE=InnoDB; +INSERT INTO t2 VALUES (1,"Hulubullu"); +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc +SET @old_max= @@GLOBAL.max_relay_log_size; +SET GLOBAL max_relay_log_size=4096; + +--connection server_1 +--let $big= `SELECT REPEAT("*", 5000)` +SET gtid_seq_no = 100; +SET @old_server_id = @@server_id; +SET server_id = 12; +BEGIN; +--disable_query_log +eval INSERT INTO t2 VALUES (2, CONCAT("Hello ", "$big")); +eval INSERT INTO t2 VALUES (3, CONCAT("Long data: ", "$big")); +--enable_query_log +INSERT INTO t1 VALUES (10, 4); +COMMIT; +SET server_id = @old_server_id; +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT a, LENGTH(b) FROM t2 ORDER BY a; +--save_master_pos + +--connection server_2 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100"; +let $old_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--source include/start_slave.inc +--sync_with_master +SET GLOBAL debug_dbug=@old_dbug; +let $new_retry= query_get_value(SHOW STATUS LIKE 'Slave_retried_transactions', Value, 1); +--disable_query_log +eval SELECT $new_retry - $old_retry AS retries; +--enable_query_log + +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT a, LENGTH(b) FROM t2 ORDER BY a; + +--connection server_1 +INSERT INTO t1 VALUES (11,11); +--disable_query_log +eval INSERT INTO t2 VALUES (4, "$big"); +--enable_query_log +--save_master_pos + +--connection server_2 +--sync_with_master +SELECT * FROM t1 WHERE a >= 10 ORDER BY a; +SELECT a, LENGTH(b) FROM t2 ORDER BY a; +SET GLOBAL max_relay_log_size=@old_max; + + +--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1, t2; +DROP function foo; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_temptable-master.opt b/mysql-test/suite/rpl/t/rpl_parallel_temptable-master.opt new file mode 100644 
index 00000000000..425fda95086 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_temptable-master.opt @@ -0,0 +1 @@ +--skip-stack-trace --skip-core-file diff --git a/mysql-test/suite/rpl/t/rpl_parallel_temptable.test b/mysql-test/suite/rpl/t/rpl_parallel_temptable.test new file mode 100644 index 00000000000..8eb397c3460 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_temptable.test @@ -0,0 +1,222 @@ +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_binlog_format_statement.inc +--let $rpl_topology=1->2 +--source include/rpl_init.inc + +--echo *** MDEV-6321: close_temporary_tables() in format description event not serialised correctly *** + +--connection server_2 +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=5; +CHANGE MASTER TO master_use_gtid= current_pos; +--source include/start_slave.inc + +--connection server_1 +CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(100) CHARACTER SET utf8); +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc + + +--connection server_1 +SET gtid_domain_id= 1; +INSERT INTO t1 VALUES (1, 0); + +CREATE TEMPORARY TABLE t2 (a int); + +--connection default +SET gtid_domain_id= 2; +CREATE TEMPORARY TABLE t3 (a INT PRIMARY KEY); +CREATE TEMPORARY TABLE t4 (a int); +INSERT INTO t3 VALUES (100); +INSERT INTO t4 SELECT a+1 FROM t3; + +--connection server_1 +INSERT INTO t2 VALUES (2), (4), (6), (8), (10), (12), (14), (16), (18), (20); +INSERT INTO t2 VALUES (3), (6), (9), (12), (15), (18); +INSERT INTO t2 VALUES (4), (8), (12), (16), (20); + +--connection default +INSERT INTO t3 SELECT a+2 FROM t4; +INSERT INTO t4 SELECT a+4 FROM t3; + +--connection server_1 +INSERT INTO t2 VALUES (5), (10), (15), (20); +INSERT INTO t2 VALUES (6), (12), (18); +INSERT INTO t2 VALUES (7), (14); +INSERT INTO t2 VALUES (8), (16); +INSERT INTO t2 VALUES (9), (18); +INSERT INTO t2 VALUES (10), (20); + +--connection default +INSERT INTO t3 SELECT a+8 FROM t4; +INSERT INTO t4 SELECT a+16 FROM t3; + +--connection server_1 +INSERT INTO t2 VALUES (11); +INSERT INTO t2 VALUES (12); +INSERT INTO t2 VALUES (13); + +--connection default +INSERT INTO t3 SELECT a+32 FROM t4; + +--connection server_1 +INSERT INTO t2 VALUES (14); +INSERT INTO t2 VALUES (15); +INSERT INTO t2 VALUES (16); + +--connection default +INSERT INTO t4 SELECT a+64 FROM t3; + +--connection server_1 +INSERT INTO t2 VALUES (17); +INSERT INTO t2 VALUES (18); +INSERT INTO t2 VALUES (19); + +--connection default +INSERT INTO t3 SELECT a+128 FROM t4; + +--connection server_1 +INSERT INTO t2 VALUES (20); + +--connection default +INSERT INTO t1 SELECT a, a MOD 7 FROM t3; +INSERT INTO t1 SELECT a, a MOD 7 FROM t4; + +--connection server_1 +INSERT INTO t1 SELECT a, COUNT(*) FROM t2 GROUP BY a; + +# Crash the master server, so that temporary tables are implicitly dropped. 
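+# The expect file below makes mysqltest wait for the intentional crash
+# (triggered via the crash_dispatch_command_before debug point) and restart
+# the master afterwards; the -master.opt file above disables stack traces
+# and core files for this test.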
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +wait +EOF + +FLUSH TABLES; +SET SESSION debug_dbug="+d,crash_dispatch_command_before"; +--error 2006,2013 +SELECT 1; + +--source include/wait_until_disconnected.inc +--connection default +--source include/wait_until_disconnected.inc + +--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +restart +EOF + +--connection default +--enable_reconnect +--source include/wait_until_connected_again.inc + +--connection server_1 +--enable_reconnect +--source include/wait_until_connected_again.inc + +INSERT INTO t1 VALUES (0, 1); +--save_master_pos + +--connection server_2 +# Start the slave replicating the events. +# The bug was that the format description event written after the crash could +# be fetched ahead of the execution of the temporary table events and executed +# out-of-band. This would cause drop of all temporary tables and thus failure +# for execution of remaining events. + +--source include/start_slave.inc +--sync_with_master + +SELECT * FROM t1 WHERE a <= 20 ORDER BY a; +SELECT COUNT(*) FROM t1 WHERE a BETWEEN 100+0 AND 100+256; +SHOW STATUS LIKE 'Slave_open_temp_tables'; + + +--echo *** Test that if master logged partial event group before crash, we finish that group correctly before executing format description event *** + +--source include/stop_slave.inc + +--connection server_1 +CALL mtr.add_suppression("Statement accesses nontransactional table as well as transactional or temporary table, and writes to any of them"); +SET gtid_domain_id= 1; +DELETE FROM t1; +ALTER TABLE t1 ENGINE=InnoDB; +CREATE TEMPORARY TABLE t2 (a INT PRIMARY KEY); +INSERT INTO t2 VALUES (1); +INSERT INTO t2 VALUES (2); + +--connection default +SET gtid_domain_id= 2; +CREATE TEMPORARY TABLE t3 (a INT PRIMARY KEY); +INSERT INTO t3 VALUES (10); +INSERT INTO t3 VALUES (20); + +--connection server_1 +INSERT INTO t1 SELECT a, 'server_1' FROM t2; + +--connection default +INSERT INTO t1 SELECT a, 'default' FROM t3; + +--connection server_1 +INSERT INTO t1 SELECT a+2, '+server_1' FROM t2; + +# Crash the master server in the middle of writing an event group. +--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +wait +EOF + +FLUSH TABLES; +SET SESSION debug_dbug="+d,crash_before_writing_xid"; +--error 2006,2013 +INSERT INTO t1 SELECT a+4, '++server_1' FROM t2; + +--source include/wait_until_disconnected.inc +--connection default +--source include/wait_until_disconnected.inc + +--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +restart +EOF + +--connection default +--enable_reconnect +--source include/wait_until_connected_again.inc + +--connection server_1 +--enable_reconnect +--source include/wait_until_connected_again.inc + +INSERT INTO t1 VALUES (0, 1); +#--save_master_pos +--source include/save_master_gtid.inc + +--connection server_2 +# Start the slave replicating the events. +# The main thing to test here is that the slave will know that it +# needs to abort the partially received event group, so that the +# execution of format_description event will not wait infinitely +# for a commit of the incomplete group that never happens. + +--source include/start_slave.inc +#--sync_with_master +--source include/sync_with_master_gtid.inc + +SELECT * FROM t1 ORDER BY a; +SHOW STATUS LIKE 'Slave_open_temp_tables'; + +--connection server_1 +# This FLUSH can be removed once MDEV-6608 is fixed. 
+FLUSH LOGS; + + +--connection server_2 +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_read_new_relay_log_info.test b/mysql-test/suite/rpl/t/rpl_read_new_relay_log_info.test new file mode 100644 index 00000000000..1e2c8ce2d9f --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_read_new_relay_log_info.test @@ -0,0 +1,43 @@ +# ==== Purpose ==== +# +# - Verify that the post-WL#344 format of relay_log.info can be parsed. + +--source include/master-slave.inc + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DROP TABLE t1; +--sync_slave_with_master + +--echo ==== Check that we can understand the new format of relay-log.info ==== +--source include/stop_slave.inc + +RESET SLAVE; +--let $MYSQLD_DATADIR= `select @@datadir` + +# the new version of relay_log.info comes in two versions: with path +# separator '/' (most systems) and with path separator '\' (windows) +if ($SYSTEM_PATH_SEPARATOR != /) { + --let $file_suffix= -win +} +--copy_file $MYSQL_TEST_DIR/std_data/new-format-relay-log$file_suffix.info $MYSQLD_DATADIR/relay-log.info + +--echo # Read relay-log.info +START SLAVE IO_THREAD; +--source include/wait_for_slave_io_to_start.inc +--echo # Check that relay log coordinates are equal to those saved in new-format_relay-log.info +--let $master_file= query_get_value(SHOW SLAVE STATUS, Relay_Master_Log_File, 1) +--let $master_pos= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1) +--let $relay_log_file= query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1) +--let $relay_log_pos= query_get_value(SHOW SLAVE STATUS, Relay_Log_Pos, 1) +--echo $master_file= $master_file, $master_pos, $relay_log_file, $relay_log_pos +if (`SELECT "$master_file" != "" OR + "$master_pos" != "0" OR + "$relay_log_file" != "slave-relay-bin.000001" OR + "$relay_log_pos" != "4"`) { + --echo ERROR: log coordinates changed + --die log coordinates changed +} + +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_read_old_relay_log_info.test b/mysql-test/suite/rpl/t/rpl_read_old_relay_log_info.test new file mode 100644 index 00000000000..ce345445c08 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_read_old_relay_log_info.test @@ -0,0 +1,44 @@ +# ==== Purpose ==== +# +# - Verify that the pre-WL#344 format of relay_log.info can still be +# parsed. 
+ +--source include/master-slave.inc + +CREATE TABLE t1 (a INT); +INSERT INTO t1 VALUES (1); +DROP TABLE t1; +--sync_slave_with_master + +--echo ==== Check that we still understand the old format of relay-log.info ==== +--source include/stop_slave.inc + +RESET SLAVE; +--let $MYSQLD_DATADIR= `select @@datadir` + +# the old version of relay_log.info comes in two versions: with path +# separator '/' (most systems) and with path separator '\' (windows) +if ($SYSTEM_PATH_SEPARATOR != /) { + --let $file_suffix= -win +} +--copy_file $MYSQL_TEST_DIR/std_data/old-format-relay-log$file_suffix.info $MYSQLD_DATADIR/relay-log.info + +--echo # Read relay-log.info +START SLAVE IO_THREAD; +--source include/wait_for_slave_io_to_start.inc +--echo # Check that relay log coordinates are equal to those we saved in old-format_relay-log.info +--let $master_file= query_get_value(SHOW SLAVE STATUS, Relay_Master_Log_File, 1) +--let $master_pos= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1) +--let $relay_log_file= query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1) +--let $relay_log_pos= query_get_value(SHOW SLAVE STATUS, Relay_Log_Pos, 1) +--echo $master_file= $master_file, $master_pos, $relay_log_file, $relay_log_pos +if (`SELECT "$master_file" != "" OR + "$master_pos" != "0" OR + "$relay_log_file" != "slave-relay-bin.000001" OR + "$relay_log_pos" != "4"`) { + --echo ERROR: log coordinates changed + --die log coordinates changed +} + +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_uninstall_plugin.test b/mysql-test/suite/rpl/t/rpl_semi_sync_uninstall_plugin.test new file mode 100644 index 00000000000..4ee345e54ba --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_uninstall_plugin.test @@ -0,0 +1,138 @@ +############################################################################### +# Bug#17638477 UNINSTALL AND INSTALL SEMI-SYNC PLUGIN CAUSES SLAVES TO BREAK +# Problem: Uninstallation of Semi sync plugin should be blocked when it is +# in use. +# Test case: Uninstallation of semi sync should be allowed +# On Master: +# 1) When there is no dump thread +# 2) When there are no semi sync slaves (i.e., async replication). +# On Slave: +# 1) When there is no I/O thread +# 2) When there are no semi sync enabled I/O thread (i.e.,async replication). +############################################################################### + +--source include/have_semisync_plugin.inc +--source include/not_embedded.inc +--source include/have_binlog_format_statement.inc +--source include/master-slave.inc + +call mtr.add_suppression("Read semi-sync reply network error"); +call mtr.add_suppression("Timeout waiting for reply of binlog"); + +############################################################################### +# Case 1: Uninstallation of semi sync plugins should be allowed when it is +# not in use i.e., when asynchronous replication is active. +############################################################################### +# Step 1.1: Install semi sync master plugin on master +INSTALL PLUGIN rpl_semi_sync_master SONAME 'semisync_master'; + +# Step 1.2: Install semi sync slave plugin on slave +--connection slave +--echo [connection slave] +INSTALL PLUGIN rpl_semi_sync_slave SONAME 'semisync_slave'; + +# Step 1.3: Uninstallation of semisync plugin on master and slave should be +# allowed at this state as there is no semi sync replication enabled between +# master and slave. 
+UNINSTALL PLUGIN rpl_semi_sync_slave;
+--connection master
+--echo [connection master]
+UNINSTALL PLUGIN rpl_semi_sync_master;
+
+# Step 1.4: Check that replication is working fine at the end of the test case.
+CREATE TABLE t1(i int);
+INSERT INTO t1 values (1);
+DROP TABLE t1;
+--sync_slave_with_master
+--echo [connection slave]
+
+###############################################################################
+# Case 2: Uninstallation of semi sync plugins should be disallowed
+# when it is in use, i.e., when semi sync replication is active
+###############################################################################
+# Step 2.1: Install and enable semi sync replication between master and slave
+--source include/install_semisync.inc
+
+# Step 2.2: Check that rpl_semi_sync_slave uninstallation on Slave is not
+# possible at this state
+--connection slave
+--echo [connection slave]
+UNINSTALL PLUGIN rpl_semi_sync_slave;
+select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%';
+
+# Step 2.3: Check that rpl_semi_sync_master uninstallation on Master is not
+# possible at this state
+--connection master
+--echo [connection master]
+UNINSTALL PLUGIN rpl_semi_sync_master;
+select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%';
+
+# Step 2.4: Check that replication is working fine at the end of the test case.
+CREATE TABLE t1(i int);
+INSERT INTO t1 values (2);
+DROP TABLE t1;
+--sync_slave_with_master
+--echo [connection slave]
+
+# Step 2.5: Make sure rpl_semi_sync_master_status on Master and
+# rpl_semi_sync_slave_status on Slave are ON
+show status like "Rpl_semi_sync_slave_status";
+
+###############################################################################
+# Case 3: Uninstallation of semi sync plugin should be disallowed when there
+# are semi sync slaves even though rpl_semi_sync_master_enabled = OFF.
+###############################################################################
+# Step 3.1: Disable semi sync on master
+--connection master
+--echo [connection master]
+show status like "Rpl_semi_sync_master_status";
+
+# Step 3.2: Check that Rpl_semi_sync_master_clients is still 1
+show status like "Rpl_semi_sync_master_clients";
+
+# Step 3.3: Since Rpl_semi_sync_master_clients is 1, uninstallation of
+# rpl_semi_sync_master should be disallowed.
+select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%';
+
+###############################################################################
+# Case 4: Uninstallation of semi sync plugin should be allowed when it is not
+# in use. Same as Case 1, but here we check the behaviour after semi sync
+# replication has been enabled and then disabled.
+###############################################################################
+
+# Step 4.1: Stop IO thread on slave.
+--connection slave
+--echo [connection slave]
+--source include/stop_slave.inc
+
+# Step 4.2: Disable semi sync on slave.
+select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%';
+
+--connection master
+--echo [connection master]
+# Send something to the slave so that the master would notice that nobody's listening.
+create table t2 (a int); drop table t2;
+# and wait for the plugin to be unloaded automatically
+let $wait_condition=select count(*) = 0 from information_schema.plugins where plugin_name like 'rpl_%';
+--source include/wait_condition.inc
+
+--connection slave
+--echo [connection slave]
+
+# Step 4.3: Start IO thread on slave.
+--source include/start_slave.inc
+
+# Step 4.4: Uninstall semi sync plugin; it should be successful now.
+select plugin_name,plugin_status from information_schema.plugins where plugin_name like 'rpl_%';
+
+# Step 4.5: Check that replication is working fine at the end of the test case
+--connection master
+--echo [connection master]
+CREATE TABLE t1(i int);
+INSERT INTO t1 values (3);
+DROP TABLE t1;
+--sync_slave_with_master
+--echo [connection slave]
+
+# Cleanup
+source include/rpl_end.inc;
diff --git a/mysql-test/suite/rpl/t/rpl_skip_incident-master.opt b/mysql-test/suite/rpl/t/rpl_skip_incident-master.opt
new file mode 100644
index 00000000000..912801debc4
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_skip_incident-master.opt
@@ -0,0 +1 @@
+--loose-debug=+d,incident_database_resync_on_replace
diff --git a/mysql-test/suite/rpl/t/rpl_skip_incident-slave.opt b/mysql-test/suite/rpl/t/rpl_skip_incident-slave.opt
new file mode 100644
index 00000000000..bc90d5ea6c1
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_skip_incident-slave.opt
@@ -0,0 +1 @@
+--slave-skip-error=1590
diff --git a/mysql-test/suite/rpl/t/rpl_skip_incident.test b/mysql-test/suite/rpl/t/rpl_skip_incident.test
new file mode 100644
index 00000000000..959fde9374e
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_skip_incident.test
@@ -0,0 +1,28 @@
+--source include/master-slave.inc
+--source include/have_debug.inc
+
+--echo **** On Master ****
+CREATE TABLE t1 (a INT);
+
+INSERT INTO t1 VALUES (1),(2),(3);
+SELECT * FROM t1;
+
+# This will generate an incident log event and store it in the binary
+# log before the replace statement.
+REPLACE INTO t1 VALUES (4);
+
+--sync_slave_with_master
+
+# Now, we should have inserted the row into the table and the slave
+# should be running. We should also have rotated to a new binary log.
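The reason the slave stays running is the -slave.opt shown above: the injected incident event would otherwise stop the SQL thread with error 1590 (the error number listed in --slave-skip-error=1590), so the option makes the slave skip it and continue. Without that option the usual manual recovery would look roughly like the sketch below (illustrative only, not part of this test):

    # on the slave, after the SQL thread has stopped with errno 1590
    SET GLOBAL sql_slave_skip_counter = 1;
    START SLAVE SQL_THREAD;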
+ +SELECT * FROM t1; +source include/check_slave_is_running.inc; + +connection master; + +--echo Should have two binary logs here +--source include/show_binary_logs.inc +DROP TABLE t1; +--sync_slave_with_master +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_sp.test b/mysql-test/suite/rpl/t/rpl_sp.test index 00815ab9d7e..c978a145a92 100644 --- a/mysql-test/suite/rpl/t/rpl_sp.test +++ b/mysql-test/suite/rpl/t/rpl_sp.test @@ -614,7 +614,7 @@ show function status like '%mysqltestbug36570%'; connection master; flush logs; let $MYSQLD_DATADIR= `select @@datadir`; ---replace_regex s/$MYSQL_TEST_DIR/MYSQL_TEST_DIR/ s/TIMESTAMP=[0-9]*/TIMESTAMP=t/ +--replace_regex /$MYSQL_TEST_DIR/MYSQL_TEST_DIR/ /TIMESTAMP=[0-9]*/TIMESTAMP=t/ --exec $MYSQL_BINLOG --short-form $MYSQLD_DATADIR/master-bin.000001 use test; drop procedure mysqltestbug36570_p1; diff --git a/mysql-test/suite/rpl/t/rpl_stop_slave.test b/mysql-test/suite/rpl/t/rpl_stop_slave.test index d9d7f39c321..340738f8cb2 100644 --- a/mysql-test/suite/rpl/t/rpl_stop_slave.test +++ b/mysql-test/suite/rpl/t/rpl_stop_slave.test @@ -74,14 +74,17 @@ CREATE TABLE t2 (c1 INT) ENGINE=MyISAM; INSERT INTO t1 VALUES(1, 1); sync_slave_with_master; +--source include/stop_slave.inc --source include/rpl_connection_master.inc +# make sure that there are no zombie threads +--source include/stop_dump_threads.inc let $debug_save= `SELECT @@GLOBAL.debug`; SET GLOBAL debug_dbug= '+d,dump_thread_wait_before_send_xid,*'; --source include/rpl_connection_slave.inc -source include/restart_slave_sql.inc; +--source include/start_slave.inc BEGIN; UPDATE t1 SET c2 = 2 WHERE c1 = 1; @@ -93,6 +96,10 @@ INSERT INTO t2 VALUES(1); UPDATE t1 SET c2 = 3 WHERE c1 = 1; COMMIT; +# wait for the dump thread reach the sync point +--let $wait_condition= select count(*)=1 from information_schema.processlist where state LIKE '%debug sync point%' and command='Binlog Dump' +--source include/wait_condition.inc + --source include/rpl_connection_slave1.inc let $show_statement= SHOW PROCESSLIST; let $field= Info; @@ -105,6 +112,7 @@ send STOP SLAVE; ROLLBACK; --source include/rpl_connection_master.inc + SET DEBUG_SYNC= 'now SIGNAL signal.continue'; SET DEBUG_SYNC= 'now WAIT_FOR signal.continued'; @@ -113,12 +121,25 @@ source include/wait_for_slave_to_stop.inc; --source include/rpl_connection_slave1.inc reap; -source include/start_slave.inc; +# Slave has stopped, thence lets make sure that +# we kill the zombie dump threads. Also, make +# sure that we disable the DBUG_EXECUTE_IF +# that would set the dump thread to wait --source include/rpl_connection_master.inc -DROP TABLE t1, t2; --disable_query_log eval SET GLOBAL debug_dbug= '$debug_save'; --enable_query_log +# make sure that there are no zombie threads +--source include/stop_dump_threads.inc + +--source include/rpl_connection_slave1.inc +# now the dump thread on the master will start +# from a clean slate, i.e. 
without the +# DBUG_EXECUTE_IF set +source include/start_slave.inc; + +--source include/rpl_connection_master.inc +DROP TABLE t1, t2; --source include/rpl_end.inc SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/suite/rpl/t/rpl_table_options.test b/mysql-test/suite/rpl/t/rpl_table_options.test index 12ff1ca457b..3f52444a3c7 100644 --- a/mysql-test/suite/rpl/t/rpl_table_options.test +++ b/mysql-test/suite/rpl/t/rpl_table_options.test @@ -23,6 +23,8 @@ show create table t1; sync_slave_with_master; connection slave; show create table t1; +set sql_mode=ignore_bad_table_options; +show create table t1; connection master; drop table t1; diff --git a/mysql-test/suite/storage_engine/insert_with_keys.result b/mysql-test/suite/storage_engine/insert_with_keys.result index 39e42b306d2..694c91397b0 100644 --- a/mysql-test/suite/storage_engine/insert_with_keys.result +++ b/mysql-test/suite/storage_engine/insert_with_keys.result @@ -152,3 +152,22 @@ a b 5 e 6 f DROP TABLE t1; +CREATE TABLE t1 (a <INT_COLUMN> UNIQUE KEY, b <INT_COLUMN> UNIQUE KEY, c <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +INSERT INTO t1 VALUES(1,1,0); +INSERT INTO t1 VALUES(2,3,0); +INSERT INTO t1 VALUES(3,2,0); +INSERT INTO t1 VALUES(1,1,0) ON DUPLICATE KEY UPDATE c=c+1; +INSERT INTO t1 VALUES(2,3,0) ON DUPLICATE KEY UPDATE c=c+1; +INSERT INTO t1 VALUES(3,2,0) ON DUPLICATE KEY UPDATE c=c+1; +INSERT INTO t1 VALUES(2,5,0) ON DUPLICATE KEY UPDATE c=c+1; +INSERT INTO t1 VALUES(3,5,0) ON DUPLICATE KEY UPDATE c=c+1; +INSERT INTO t1 VALUES(5,3,0) ON DUPLICATE KEY UPDATE c=c+1; +INSERT INTO t1 VALUES(6,2,0) ON DUPLICATE KEY UPDATE c=c+1; +INSERT INTO t1 VALUES(1,3,0) ON DUPLICATE KEY UPDATE c=c+1; +INSERT INTO t1 VALUES(2,2,0) ON DUPLICATE KEY UPDATE c=c+1; +SELECT * FROM t1; +a b c +1 1 2 +2 3 4 +3 2 3 +DROP TABLE t1; diff --git a/mysql-test/suite/storage_engine/insert_with_keys.test b/mysql-test/suite/storage_engine/insert_with_keys.test index c44b6c712e0..f62246407f2 100644 --- a/mysql-test/suite/storage_engine/insert_with_keys.test +++ b/mysql-test/suite/storage_engine/insert_with_keys.test @@ -139,5 +139,35 @@ if (!$mysql_errname) DROP TABLE t1; } +--let $create_definition = a $int_indexed_col UNIQUE KEY, b $int_indexed_col UNIQUE KEY, c $int_col +--source create_table.inc +if ($mysql_errname) +{ + --let $my_last_stmt = $create_statement + --let $functionality = Multiple unique keys + --source unexpected_result.inc +} +if (!$mysql_errname) +{ + INSERT INTO t1 VALUES(1,1,0); + INSERT INTO t1 VALUES(2,3,0); + INSERT INTO t1 VALUES(3,2,0); + + INSERT INTO t1 VALUES(1,1,0) ON DUPLICATE KEY UPDATE c=c+1; + INSERT INTO t1 VALUES(2,3,0) ON DUPLICATE KEY UPDATE c=c+1; + INSERT INTO t1 VALUES(3,2,0) ON DUPLICATE KEY UPDATE c=c+1; + + INSERT INTO t1 VALUES(2,5,0) ON DUPLICATE KEY UPDATE c=c+1; + INSERT INTO t1 VALUES(3,5,0) ON DUPLICATE KEY UPDATE c=c+1; + INSERT INTO t1 VALUES(5,3,0) ON DUPLICATE KEY UPDATE c=c+1; + INSERT INTO t1 VALUES(6,2,0) ON DUPLICATE KEY UPDATE c=c+1; + INSERT INTO t1 VALUES(1,3,0) ON DUPLICATE KEY UPDATE c=c+1; + INSERT INTO t1 VALUES(2,2,0) ON DUPLICATE KEY UPDATE c=c+1; + + --sorted_result + SELECT * FROM t1; + DROP TABLE t1; +} + --source cleanup_engine.inc diff --git a/mysql-test/suite/sys_vars/r/aria_pagecache_file_hash_size_basic.result b/mysql-test/suite/sys_vars/r/aria_pagecache_file_hash_size_basic.result new file mode 100644 index 00000000000..0bdd56c298f --- /dev/null +++ b/mysql-test/suite/sys_vars/r/aria_pagecache_file_hash_size_basic.result @@ -0,0 +1,21 @@ +select 
@@global.aria_pagecache_file_hash_size; +@@global.aria_pagecache_file_hash_size +512 +select @@session.aria_pagecache_file_hash_size; +ERROR HY000: Variable 'aria_pagecache_file_hash_size' is a GLOBAL variable +show global variables like 'aria_pagecache_file_hash_size'; +Variable_name Value +aria_pagecache_file_hash_size 512 +show session variables like 'aria_pagecache_file_hash_size'; +Variable_name Value +aria_pagecache_file_hash_size 512 +select * from information_schema.global_variables where variable_name='aria_pagecache_file_hash_size'; +VARIABLE_NAME VARIABLE_VALUE +ARIA_PAGECACHE_FILE_HASH_SIZE 512 +select * from information_schema.session_variables where variable_name='aria_pagecache_file_hash_size'; +VARIABLE_NAME VARIABLE_VALUE +ARIA_PAGECACHE_FILE_HASH_SIZE 512 +set global aria_pagecache_file_hash_size=200; +ERROR HY000: Variable 'aria_pagecache_file_hash_size' is a read only variable +set session aria_pagecache_file_hash_size=200; +ERROR HY000: Variable 'aria_pagecache_file_hash_size' is a read only variable diff --git a/mysql-test/suite/sys_vars/r/character_set_client_func.result b/mysql-test/suite/sys_vars/r/character_set_client_func.result index 82c1548d438..3e39b24c8db 100644 --- a/mysql-test/suite/sys_vars/r/character_set_client_func.result +++ b/mysql-test/suite/sys_vars/r/character_set_client_func.result @@ -30,7 +30,7 @@ SET @@session.character_set_client = utf8; INSERT INTO t1 values('è'); SELECT hex(a),CHAR_LENGTH(a) FROM t1; hex(a) CHAR_LENGTH(a) -03 1 +033F 2 DELETE FROM t1; DROP TABLE IF EXISTS t1; SET @@global.character_set_client = @global_character_set_client; diff --git a/mysql-test/suite/sys_vars/r/innodb_simulate_comp_failures_basic.result b/mysql-test/suite/sys_vars/r/innodb_simulate_comp_failures_basic.result new file mode 100644 index 00000000000..7a6c9ca2db6 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_simulate_comp_failures_basic.result @@ -0,0 +1,77 @@ +SET @start_global_value = @@global.innodb_simulate_comp_failures; +SELECT @start_global_value; +@start_global_value +0 +Valid values are between 0 and 99 +select @@global.innodb_simulate_comp_failures between 0 and 99; +@@global.innodb_simulate_comp_failures between 0 and 99 +1 +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +0 +select @@session.innodb_simulate_comp_failures; +ERROR HY000: Variable 'innodb_simulate_comp_failures' is a GLOBAL variable +show global variables like 'innodb_simulate_comp_failures'; +Variable_name Value +innodb_simulate_comp_failures 0 +show session variables like 'innodb_simulate_comp_failures'; +Variable_name Value +innodb_simulate_comp_failures 0 +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 0 +select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 0 +set global innodb_simulate_comp_failures=10; +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +10 +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 10 +select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 10 +set session innodb_simulate_comp_failures=1; +ERROR HY000: Variable 
'innodb_simulate_comp_failures' is a GLOBAL variable and should be set with SET GLOBAL +set global innodb_simulate_comp_failures=1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures' +set global innodb_simulate_comp_failures=1e1; +ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures' +set global innodb_simulate_comp_failures="foo"; +ERROR 42000: Incorrect argument type to variable 'innodb_simulate_comp_failures' +set global innodb_simulate_comp_failures=-7; +Warnings: +Warning 1292 Truncated incorrect innodb_simulate_comp_failures value: '-7' +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +0 +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 0 +set global innodb_simulate_comp_failures=106; +Warnings: +Warning 1292 Truncated incorrect innodb_simulate_comp_failures value: '106' +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +99 +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_SIMULATE_COMP_FAILURES 99 +set global innodb_simulate_comp_failures=0; +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +0 +set global innodb_simulate_comp_failures=99; +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +99 +set global innodb_simulate_comp_failures=DEFAULT; +select @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +0 +SET @@global.innodb_simulate_comp_failures = @start_global_value; +SELECT @@global.innodb_simulate_comp_failures; +@@global.innodb_simulate_comp_failures +0 diff --git a/mysql-test/suite/sys_vars/r/key_cache_file_hash_size_basic.result b/mysql-test/suite/sys_vars/r/key_cache_file_hash_size_basic.result new file mode 100644 index 00000000000..52ebfc98cdc --- /dev/null +++ b/mysql-test/suite/sys_vars/r/key_cache_file_hash_size_basic.result @@ -0,0 +1,114 @@ +SET @start_value = @@global.key_cache_file_hash_size; +SELECT @start_value; +@start_value +512 +'#--------------------FN_DYNVARS_056_01------------------------#' +SET @@global.key_cache_file_hash_size = DEFAULT; +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +512 +'#---------------------FN_DYNVARS_056_02-------------------------#' +SET @@global.key_cache_file_hash_size = @start_value; +SELECT @@global.key_cache_file_hash_size = 300; +@@global.key_cache_file_hash_size = 300 +0 +'#--------------------FN_DYNVARS_056_03------------------------#' +SET @@global.key_cache_file_hash_size = 128; +SET @@global.key_cache_file_hash_size = 16384; +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +16384 +'#--------------------FN_DYNVARS_056_04-------------------------#' +SET @@global.key_cache_file_hash_size = -1; +Warnings: +Warning 1292 Truncated incorrect key_cache_file_hash_size value: '-1' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +SET @@global.key_cache_file_hash_size = 42949672951; +Warnings: +Warning 1292 Truncated incorrect key_cache_file_hash_size value: '42949672951' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +16384 +SET @@global.key_cache_file_hash_size = 10000.01; +ERROR 42000: Incorrect argument type to variable 'key_cache_file_hash_size' +SELECT 
@@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +16384 +SET @@global.key_cache_file_hash_size = -1024; +Warnings: +Warning 1292 Truncated incorrect key_cache_file_hash_size value: '-1024' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +SET @@global.key_cache_file_hash_size = 99; +Warnings: +Warning 1292 Truncated incorrect key_cache_file_hash_size value: '99' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +SET @@global.key_cache_file_hash_size = ON; +ERROR 42000: Incorrect argument type to variable 'key_cache_file_hash_size' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +SET @@global.key_cache_file_hash_size = 'test'; +ERROR 42000: Incorrect argument type to variable 'key_cache_file_hash_size' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +'#-------------------FN_DYNVARS_056_05----------------------------#' +SET @@session.key_cache_file_hash_size = 0; +ERROR HY000: Variable 'key_cache_file_hash_size' is a GLOBAL variable and should be set with SET GLOBAL +SELECT @@session.key_cache_file_hash_size; +ERROR HY000: Variable 'key_cache_file_hash_size' is a GLOBAL variable +'#----------------------FN_DYNVARS_056_06------------------------#' +SELECT @@global.key_cache_file_hash_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='key_cache_file_hash_size'; +@@global.key_cache_file_hash_size = VARIABLE_VALUE +1 +SELECT @@key_cache_file_hash_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.SESSION_VARIABLES +WHERE VARIABLE_NAME='key_cache_file_hash_size'; +@@key_cache_file_hash_size = VARIABLE_VALUE +1 +'#---------------------FN_DYNVARS_056_07----------------------#' +SET @@global.key_cache_file_hash_size = TRUE; +Warnings: +Warning 1292 Truncated incorrect key_cache_file_hash_size value: '1' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +SET @@global.key_cache_file_hash_size = FALSE; +Warnings: +Warning 1292 Truncated incorrect key_cache_file_hash_size value: '0' +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +128 +'#---------------------FN_DYNVARS_056_08----------------------#' +SET @@global.key_cache_file_hash_size = 150; +SELECT @@key_cache_file_hash_size = @@global.key_cache_file_hash_size; +@@key_cache_file_hash_size = @@global.key_cache_file_hash_size +1 +'#---------------------FN_DYNVARS_056_09----------------------#' +SET key_cache_file_hash_size = 8000; +ERROR HY000: Variable 'key_cache_file_hash_size' is a GLOBAL variable and should be set with SET GLOBAL +SELECT @@key_cache_file_hash_size; +@@key_cache_file_hash_size +150 +SET local.key_cache_file_hash_size = 10; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'key_cache_file_hash_size = 10' at line 1 +SELECT local.key_cache_file_hash_size; +ERROR 42S02: Unknown table 'local' in field list +SET global.key_cache_file_hash_size = 10; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'key_cache_file_hash_size = 10' at line 1 +SELECT global.key_cache_file_hash_size; +ERROR 42S02: Unknown table 'global' in field list +SELECT key_cache_file_hash_size = @@session.key_cache_file_hash_size; +ERROR 42S22: Unknown column 'key_cache_file_hash_size' in 'field list' +SET 
@@global.key_cache_file_hash_size = @start_value; +SELECT @@global.key_cache_file_hash_size; +@@global.key_cache_file_hash_size +512 diff --git a/mysql-test/suite/sys_vars/r/timed_mutexes_basic.result b/mysql-test/suite/sys_vars/r/timed_mutexes_basic.result index 50a5285b0d7..8c295fe8063 100644 --- a/mysql-test/suite/sys_vars/r/timed_mutexes_basic.result +++ b/mysql-test/suite/sys_vars/r/timed_mutexes_basic.result @@ -4,7 +4,11 @@ SELECT @global_start_value; 0 '#--------------------FN_DYNVARS_177_01------------------------#' SET @@global.timed_mutexes = 1; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SET @@global.timed_mutexes = DEFAULT; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 0 @@ -17,15 +21,21 @@ SELECT @@timed_mutexes; SELECT global.timed_mutexes; ERROR 42S02: Unknown table 'global' in field list SET global timed_mutexes = 1; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 1 '#--------------------FN_DYNVARS_177_03------------------------#' SET @@global.timed_mutexes = 0; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 0 SET @@global.timed_mutexes = 1; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 1 @@ -82,23 +92,33 @@ VARIABLE_VALUE ON '#---------------------FN_DYNVARS_177_08-------------------------#' SET @@global.timed_mutexes = OFF; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 0 SET @@global.timed_mutexes = ON; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 1 '#---------------------FN_DYNVARS_177_09----------------------#' SET @@global.timed_mutexes = TRUE; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 1 SET @@global.timed_mutexes = FALSE; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. SELECT @@global.timed_mutexes; @@global.timed_mutexes 0 SET @@global.timed_mutexes = @global_start_value; +Warnings: +Warning 1287 '@@timed_mutexes' is deprecated and will be removed in a future release. 
SELECT @@global.timed_mutexes; @@global.timed_mutexes 0 diff --git a/mysql-test/suite/sys_vars/t/aria_pagecache_file_hash_size_basic.test b/mysql-test/suite/sys_vars/t/aria_pagecache_file_hash_size_basic.test new file mode 100644 index 00000000000..8bedb498e2c --- /dev/null +++ b/mysql-test/suite/sys_vars/t/aria_pagecache_file_hash_size_basic.test @@ -0,0 +1,22 @@ +# ulong readonly + +--source include/have_maria.inc +# +# show the global and session values; +# +select @@global.aria_pagecache_file_hash_size; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.aria_pagecache_file_hash_size; +show global variables like 'aria_pagecache_file_hash_size'; +show session variables like 'aria_pagecache_file_hash_size'; +select * from information_schema.global_variables where variable_name='aria_pagecache_file_hash_size'; +select * from information_schema.session_variables where variable_name='aria_pagecache_file_hash_size'; + +# +# show that it's read-only +# +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +set global aria_pagecache_file_hash_size=200; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +set session aria_pagecache_file_hash_size=200; + diff --git a/mysql-test/suite/sys_vars/t/innodb_simulate_comp_failures_basic.test b/mysql-test/suite/sys_vars/t/innodb_simulate_comp_failures_basic.test new file mode 100644 index 00000000000..97e69e3f324 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_simulate_comp_failures_basic.test @@ -0,0 +1,64 @@ +--source include/have_innodb.inc + +SET @start_global_value = @@global.innodb_simulate_comp_failures; +SELECT @start_global_value; + +# +# exists as global only +# + +--echo Valid values are between 0 and 99 +select @@global.innodb_simulate_comp_failures between 0 and 99; +select @@global.innodb_simulate_comp_failures; + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.innodb_simulate_comp_failures; + +show global variables like 'innodb_simulate_comp_failures'; +show session variables like 'innodb_simulate_comp_failures'; +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures'; + +# +# show that it's writable +# + +set global innodb_simulate_comp_failures=10; +select @@global.innodb_simulate_comp_failures; +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +select * from information_schema.session_variables where variable_name='innodb_simulate_comp_failures'; + +--error ER_GLOBAL_VARIABLE +set session innodb_simulate_comp_failures=1; + +# +# incorrect types +# + +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_simulate_comp_failures=1.1; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_simulate_comp_failures=1e1; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_simulate_comp_failures="foo"; + +set global innodb_simulate_comp_failures=-7; +select @@global.innodb_simulate_comp_failures; +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; +set global innodb_simulate_comp_failures=106; +select @@global.innodb_simulate_comp_failures; +select * from information_schema.global_variables where variable_name='innodb_simulate_comp_failures'; + +# +# min/max/DEFAULT values +# + +set global innodb_simulate_comp_failures=0; +select @@global.innodb_simulate_comp_failures; +set global innodb_simulate_comp_failures=99; +select @@global.innodb_simulate_comp_failures; +set global innodb_simulate_comp_failures=DEFAULT; 
+select @@global.innodb_simulate_comp_failures; + +SET @@global.innodb_simulate_comp_failures = @start_global_value; +SELECT @@global.innodb_simulate_comp_failures; diff --git a/mysql-test/suite/sys_vars/t/key_cache_file_hash_size_basic.test b/mysql-test/suite/sys_vars/t/key_cache_file_hash_size_basic.test new file mode 100644 index 00000000000..deebe708d3d --- /dev/null +++ b/mysql-test/suite/sys_vars/t/key_cache_file_hash_size_basic.test @@ -0,0 +1,168 @@ +################# mysql-test\t\key_cache_file_hash_size.test ################## +# # +# Variable Name: key_cache_file_hash_size # +# Scope: GLOBAL # +# Access Type: Dynamic # +# Data Type: numeric # +# Default Value: 300 # +# Range: 100-4294967295 # +# # +# # +# Creation Date: 2008-02-07 # +# Author: Salman # +# # +# Description: Test Cases of Dynamic System Variable key_cache_file_hash_size # +# that checks the behavior of this variable in the following ways# +# * Default Value # +# * Valid & Invalid values # +# * Scope & Access method # +# * Data Integrity # +# # +# Reference: http://dev.mysql.com/doc/refman/5.1/en/ # +# server-system-variables.html # +# # +############################################################################### + +--source include/load_sysvars.inc + +######################################################################## +# START OF key_cache_file_hash_size TESTS # +######################################################################## + + +############################################################################# +# Saving initial value of key_cache_file_hash_size in a temporary variable # +############################################################################# + +SET @start_value = @@global.key_cache_file_hash_size; +SELECT @start_value; + + +--echo '#--------------------FN_DYNVARS_056_01------------------------#' +################################################################################ +# Display the DEFAULT value of key_cache_file_hash_size # +################################################################################ + +SET @@global.key_cache_file_hash_size = DEFAULT; +SELECT @@global.key_cache_file_hash_size; + + +--echo '#---------------------FN_DYNVARS_056_02-------------------------#' +############################################### +# Verify default value of variable # +############################################### + +SET @@global.key_cache_file_hash_size = @start_value; +SELECT @@global.key_cache_file_hash_size = 300; + + +--echo '#--------------------FN_DYNVARS_056_03------------------------#' +############################################################################### +# Change the value of key_cache_file_hash_size to a valid value # +############################################################################### + +SET @@global.key_cache_file_hash_size = 128; +SET @@global.key_cache_file_hash_size = 16384; +SELECT @@global.key_cache_file_hash_size; + +--echo '#--------------------FN_DYNVARS_056_04-------------------------#' +########################################################################### +# Change the value of key_cache_file_hash_size to invalid value # +########################################################################### + +SET @@global.key_cache_file_hash_size = -1; +SELECT @@global.key_cache_file_hash_size; +SET @@global.key_cache_file_hash_size = 42949672951; +SELECT @@global.key_cache_file_hash_size; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.key_cache_file_hash_size = 10000.01; +SELECT @@global.key_cache_file_hash_size; +SET 
@@global.key_cache_file_hash_size = -1024; +SELECT @@global.key_cache_file_hash_size; +SET @@global.key_cache_file_hash_size = 99; +SELECT @@global.key_cache_file_hash_size; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.key_cache_file_hash_size = ON; +SELECT @@global.key_cache_file_hash_size; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.key_cache_file_hash_size = 'test'; +SELECT @@global.key_cache_file_hash_size; + + +--echo '#-------------------FN_DYNVARS_056_05----------------------------#' +########################################################################### +# Test if accessing session key_cache_file_hash_size gives error # +########################################################################### + +--Error ER_GLOBAL_VARIABLE +SET @@session.key_cache_file_hash_size = 0; +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT @@session.key_cache_file_hash_size; + + +--echo '#----------------------FN_DYNVARS_056_06------------------------#' +############################################################################## +# Check if the value in GLOBAL & SESSION Tables matches values in variable # +############################################################################## + +SELECT @@global.key_cache_file_hash_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='key_cache_file_hash_size'; + +SELECT @@key_cache_file_hash_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.SESSION_VARIABLES +WHERE VARIABLE_NAME='key_cache_file_hash_size'; + + +--echo '#---------------------FN_DYNVARS_056_07----------------------#' +################################################################### +# Check if TRUE and FALSE values can be used on variable # +################################################################### + +SET @@global.key_cache_file_hash_size = TRUE; +SELECT @@global.key_cache_file_hash_size; +SET @@global.key_cache_file_hash_size = FALSE; +SELECT @@global.key_cache_file_hash_size; + + +--echo '#---------------------FN_DYNVARS_056_08----------------------#' +######################################################################################################## +# Check if accessing variable with SESSION,LOCAL and without SCOPE points to same session variable # +######################################################################################################## + +SET @@global.key_cache_file_hash_size = 150; +SELECT @@key_cache_file_hash_size = @@global.key_cache_file_hash_size; + + +--echo '#---------------------FN_DYNVARS_056_09----------------------#' +########################################################################## ####### +# Check if key_cache_file_hash_size can be accessed with and without @@ sign # +################################################################################## + +--Error ER_GLOBAL_VARIABLE +SET key_cache_file_hash_size = 8000; +SELECT @@key_cache_file_hash_size; +--Error ER_PARSE_ERROR +SET local.key_cache_file_hash_size = 10; +--Error ER_UNKNOWN_TABLE +SELECT local.key_cache_file_hash_size; +--Error ER_PARSE_ERROR +SET global.key_cache_file_hash_size = 10; +--Error ER_UNKNOWN_TABLE +SELECT global.key_cache_file_hash_size; +--Error ER_BAD_FIELD_ERROR +SELECT key_cache_file_hash_size = @@session.key_cache_file_hash_size; + + +############################## +# Restore initial value # +############################## + +SET @@global.key_cache_file_hash_size = @start_value; +SELECT @@global.key_cache_file_hash_size; + + +######################################################################## +# END OF 
key_cache_file_hash_size TESTS # +######################################################################## diff --git a/mysql-test/suite/sys_vars/t/report_port_basic.test b/mysql-test/suite/sys_vars/t/report_port_basic.test index 903289230bd..dfe64dd4e8d 100644 --- a/mysql-test/suite/sys_vars/t/report_port_basic.test +++ b/mysql-test/suite/sys_vars/t/report_port_basic.test @@ -2,7 +2,7 @@ # # only global # ---replace_regex s/[0-9]+/DEFAULT_MASTER_PORT/ +--replace_regex /[0-9]+/DEFAULT_MASTER_PORT/ select @@global.report_port; --error ER_INCORRECT_GLOBAL_LOCAL_VAR select @@session.report_port; diff --git a/mysql-test/t/create_or_replace.test b/mysql-test/t/create_or_replace.test index 384768a87d5..2bdd23c21f6 100644 --- a/mysql-test/t/create_or_replace.test +++ b/mysql-test/t/create_or_replace.test @@ -332,6 +332,40 @@ select * from information_schema.metadata_lock_info; drop table t1; unlock tables; +--echo # +--echo # MDEV-6560 +--echo # Assertion `! is_set() ' failed in Diagnostics_area::set_ok_status +--echo # + +CREATE TABLE t1 (col_int_nokey INT) ENGINE=InnoDB; + +CREATE OR REPLACE TEMPORARY TABLE tmp LIKE t1; +LOCK TABLE t1 WRITE; + +--connect (con1,localhost,root,,test) +--let $con_id = `SELECT CONNECTION_ID()` +--send CREATE OR REPLACE TABLE t1 LIKE tmp +--connection default +--replace_result $con_id con_id +--eval KILL QUERY $con_id + +--connection con1 +--error 0,ER_QUERY_INTERRUPTED +--reap +--send CREATE OR REPLACE TABLE t1 (a int) + +--connection default +--replace_result $con_id con_id +--eval KILL QUERY $con_id + +--connection con1 +--error 0,ER_QUERY_INTERRUPTED +--reap +--disconnect con1 +--connection default + +drop table t1; + # # Cleanup # diff --git a/mysql-test/t/ctype_big5.test b/mysql-test/t/ctype_big5.test index bfce30bc7e0..5c0bdff4633 100644 --- a/mysql-test/t/ctype_big5.test +++ b/mysql-test/t/ctype_big5.test @@ -169,3 +169,81 @@ set collation_connection=big5_bin; --echo # --echo # End of 5.6 tests --echo # + +--echo # +--echo # Start of 10.0 tests +--echo # + +let $ctype_unescape_combinations=selected; +--source include/ctype_unescape.inc + +--character_set big5 +SET NAMES big5; +--source include/ctype_E05C.inc + + +# +# Checking unassigned character 0xC840 in an ENUM +# + +SET NAMES big5; +CREATE TABLE t1 (a ENUM('È@') CHARACTER SET big5); +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES ('È@'); +INSERT INTO t1 VALUES (_big5 0xC840); +INSERT INTO t1 VALUES (0xC840); +SELECT HEX(a),a FROM t1; +DROP TABLE t1; + +SET NAMES binary; +CREATE TABLE t1 (a ENUM('È@') CHARACTER SET big5); +SHOW CREATE TABLE t1; +INSERT INTO t1 VALUES ('È@'); +INSERT INTO t1 VALUES (_big5 0xC840); +INSERT INTO t1 VALUES (0xC840); +SELECT HEX(a),a FROM t1; +DROP TABLE t1; + + +# +# Checking unassigned character in CHAR, VARCHAR, TEXT +# + +SET NAMES big5; +CREATE TABLE t1 ( + c1 CHAR(10) CHARACTER SET big5, + c2 VARCHAR(10) CHARACTER SET big5, + c3 TEXT CHARACTER SET big5 +); +INSERT INTO t1 VALUES ('È@','È@','È@'); +INSERT INTO t1 VALUES (_big5 0xC840,_big5 0xC840,_big5 0xC840); +INSERT INTO t1 VALUES (0xC840,0xC840,0xC840); +SELECT HEX(c1),HEX(c2),HEX(c3) FROM t1; +DROP TABLE t1; + +SET NAMES binary; +CREATE TABLE t1 ( + c1 CHAR(10) CHARACTER SET big5, + c2 VARCHAR(10) CHARACTER SET big5, + c3 TEXT CHARACTER SET big5 +); +INSERT INTO t1 VALUES ('È@','È@','È@'); +INSERT INTO t1 VALUES (_big5 0xC840,_big5 0xC840,_big5 0xC840); +INSERT INTO t1 VALUES (0xC840,0xC840,0xC840); +SELECT HEX(c1),HEX(c2),HEX(c3) FROM t1; +DROP TABLE t1; + + +# +# Checking binary->big5 conversion of an unassigned 
character 0xC840 in optimizer +# +SET NAMES binary; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET big5, KEY(a)); +INSERT INTO t1 VALUES (0xC840),(0xC841),(0xC842); +SELECT HEX(a) FROM t1 WHERE a='È@'; +SELECT HEX(a) FROM t1 IGNORE KEY(a) WHERE a='È@'; +DROP TABLE t1; + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/mysql-test/t/ctype_cp932_binlog_stm.test b/mysql-test/t/ctype_cp932_binlog_stm.test index ca8853b9ea1..304c9f5d05c 100644 --- a/mysql-test/t/ctype_cp932_binlog_stm.test +++ b/mysql-test/t/ctype_cp932_binlog_stm.test @@ -185,3 +185,14 @@ set collation_connection=cp932_bin; --echo # --echo # End of 5.6 tests --echo # + + +--echo # +--echo # Start of 10.0 tests +--echo # +SET NAMES cp932; +--source include/ctype_E05C.inc + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/mysql-test/t/ctype_eucjpms.test b/mysql-test/t/ctype_eucjpms.test index 264d9156a0c..49ca81850ed 100644 --- a/mysql-test/t/ctype_eucjpms.test +++ b/mysql-test/t/ctype_eucjpms.test @@ -520,3 +520,23 @@ set collation_connection=eucjpms_bin; --echo # --echo # End of 5.6 tests --echo # + + +--echo # +--echo # Start of 10.0 tests +--echo # + +--echo # +--echo # MDEV-6776 ujis and eucjmps erroneously accept 0x8EA0 as a valid byte sequence +--echo # +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET eucjpms); +INSERT INTO t1 VALUES (0x8EA0); +SELECT HEX(a), CHAR_LENGTH(a) FROM t1; +DROP TABLE t1; +--error ER_INVALID_CHARACTER_STRING +SELECT _eucjpms 0x8EA0; + + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/mysql-test/t/ctype_gb2312.test b/mysql-test/t/ctype_gb2312.test index 9fccdffc70b..e3dd448f54c 100644 --- a/mysql-test/t/ctype_gb2312.test +++ b/mysql-test/t/ctype_gb2312.test @@ -149,3 +149,14 @@ set collation_connection=gb2312_bin; --echo # --echo # End of 5.6 tests --echo # + +--echo # +--echo # Start of 10.0 tests +--echo # + +let $ctype_unescape_combinations=selected; +--source include/ctype_unescape.inc + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/mysql-test/t/ctype_gbk.test b/mysql-test/t/ctype_gbk.test index 09e9e92e59f..d44009b6109 100644 --- a/mysql-test/t/ctype_gbk.test +++ b/mysql-test/t/ctype_gbk.test @@ -187,3 +187,19 @@ set collation_connection=gbk_bin; --echo # --echo # End of 5.6 tests --echo # + +--echo # +--echo # Start of 10.0 tests +--echo # + +let $ctype_unescape_combinations=selected; +--source include/ctype_unescape.inc + +--character_set gbk +SET NAMES gbk; +--source include/ctype_E05C.inc + + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/mysql-test/t/ctype_latin1.test b/mysql-test/t/ctype_latin1.test index 99ac2d19b70..5da1534029b 100644 --- a/mysql-test/t/ctype_latin1.test +++ b/mysql-test/t/ctype_latin1.test @@ -202,3 +202,37 @@ set names latin1; --echo # --echo # End of 5.6 tests --echo # + +--echo # +--echo # Start of 10.0 tests +--echo # + +let $ctype_unescape_combinations=selected; +--source include/ctype_unescape.inc + +--echo # +--echo # MDEV-6752 Trailing incomplete characters are not replaced to question marks on conversion +--echo # +SET NAMES utf8, character_set_connection=latin1; +SELECT 'Â'; +SELECT HEX('Â'); +SELECT HEX(CAST('Â' AS CHAR CHARACTER SET utf8)); +SELECT HEX(CAST('Â' AS CHAR CHARACTER SET latin1)); +SELECT HEX(CONVERT('Â' USING utf8)); +SELECT HEX(CONVERT('Â' USING latin1)); +SELECT 'Âx'; +SELECT HEX('Âx'); +SELECT HEX(CAST('Âx' AS CHAR CHARACTER SET utf8)); +SELECT HEX(CAST('Âx' AS CHAR CHARACTER SET latin1)); +SELECT HEX(CONVERT('Âx' USING utf8)); +SELECT HEX(CONVERT('Âx' USING latin1)); +SET NAMES 
utf8; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET latin1); +INSERT INTO t1 VALUES ('Â'),('Â#'); +SHOW WARNINGS; +SELECT HEX(a),a FROM t1; +DROP TABLE t1; + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/mysql-test/t/ctype_partitions.test b/mysql-test/t/ctype_partitions.test new file mode 100644 index 00000000000..f80a2c98a1b --- /dev/null +++ b/mysql-test/t/ctype_partitions.test @@ -0,0 +1,29 @@ +--source include/have_partition.inc + +--echo # +--echo # MDEV-6255 DUPLICATE KEY Errors on SELECT .. GROUP BY that uses temporary and filesort +--echo # + +# cp1251_ukrainian_ci: 0x20 SPACE is equal to 0x60 GRAVE ACCENT +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET cp1251 COLLATE cp1251_ukrainian_ci); +INSERT INTO t1 VALUES (0x20),(0x60),(0x6060),(0x606060); +SELECT HEX(a) FROM t1 WHERE a=0x60; +ALTER TABLE t1 PARTITION BY KEY(a) PARTITIONS 3; +SELECT HEX(a) FROM t1 WHERE a=0x60; +DROP TABLE t1; + +# koi8u_general_ci: 0x20 SPACE is equal to 0x60 GRAVE ACCENT +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET koi8u COLLATE koi8u_general_ci); +INSERT INTO t1 VALUES (0x20),(0x60),(0x6060),(0x606060); +SELECT HEX(a) FROM t1 WHERE a=0x60; +ALTER TABLE t1 PARTITION BY KEY(a) PARTITIONS 3; +SELECT HEX(a) FROM t1 WHERE a=0x60; +DROP TABLE t1; + +# cp1250_general_ci: 0x20 SPACE is equal to 0xA0 NO-BREAK SPACE +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET cp1250 COLLATE cp1250_general_ci); +INSERT INTO t1 VALUES (0x20),(0xA0),(0xA0A0),(0xA0A0A0); +SELECT HEX(a) FROM t1 WHERE a=0xA0; +ALTER TABLE t1 PARTITION BY KEY(a) PARTITIONS 3; +SELECT HEX(a) FROM t1 WHERE a=0xA0; +DROP TABLE t1; diff --git a/mysql-test/t/ctype_sjis.test b/mysql-test/t/ctype_sjis.test index 4b0535fa418..ae110b20cb2 100644 --- a/mysql-test/t/ctype_sjis.test +++ b/mysql-test/t/ctype_sjis.test @@ -215,3 +215,19 @@ set collation_connection=sjis_bin; --echo # --echo # End of 5.6 tests --echo # + +--echo # +--echo # Start of 10.0 tests +--echo # + +let $ctype_unescape_combinations=selected; +--source include/ctype_unescape.inc + +--character_set sjis +SET NAMES sjis; +--source include/ctype_E05C.inc + + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/mysql-test/t/ctype_swe7.test b/mysql-test/t/ctype_swe7.test new file mode 100644 index 00000000000..7d1ef89b374 --- /dev/null +++ b/mysql-test/t/ctype_swe7.test @@ -0,0 +1,19 @@ +--echo # +--echo # Start of 10.0 tests +--echo # + +SET NAMES swe7; + +# +# Test escape sequences. +# This also covers: +# MDEV-6737 Stored routines do now work with swe7: "The table mysql.proc is missing, corrupt, or contains bad data" +# as uses stored functions actively. 
+# + +let $ctype_unescape_combinations=selected; +--source include/ctype_unescape.inc + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/mysql-test/t/ctype_ucs.test b/mysql-test/t/ctype_ucs.test index 09294b60a04..d269fb35dfe 100644 --- a/mysql-test/t/ctype_ucs.test +++ b/mysql-test/t/ctype_ucs.test @@ -796,6 +796,23 @@ DROP TABLE t1; --echo # +--echo # MDEV-5745 analyze MySQL fix for bug#12368495 +--echo # +SELECT CHAR_LENGTH(TRIM(LEADING 0x000000 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(LEADING 0x00 FROM _ucs2 0x0061)); + +SELECT CHAR_LENGTH(TRIM(TRAILING 0x000000 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _ucs2 0x0061)); + +SELECT CHAR_LENGTH(TRIM(BOTH 0x000000 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(BOTH 0x61 FROM _ucs2 0x0061)); +SELECT CHAR_LENGTH(TRIM(BOTH 0x00 FROM _ucs2 0x0061)); + + +--echo # --echo # End of 5.5 tests --echo # @@ -874,3 +891,24 @@ DROP TABLE t1; --echo # --echo # End of 5.6 tests --echo # + + +--echo # +--echo # Start of 10.0 tests +--echo # + +--echo # +--echo # MDEV-6661 PI() does not work well in UCS2/UTF16/UTF32 context +--echo # +SELECT CONCAT(CONVERT('pi=' USING ucs2),PI()) AS PI; + +--echo # +--echo # MDEV-6695 Bad column name for UCS2 string literals +--echo # +SET NAMES utf8, character_set_connection=ucs2; +SELECT 'a','aa'; + + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/mysql-test/t/ctype_ujis.test b/mysql-test/t/ctype_ujis.test index 29262966bca..48dc0e63058 100644 --- a/mysql-test/t/ctype_ujis.test +++ b/mysql-test/t/ctype_ujis.test @@ -1349,3 +1349,23 @@ set collation_connection=ujis_bin; --echo # --echo # End of 5.6 tests --echo # + + +--echo # +--echo # Start of 10.0 tests +--echo # + +--echo # +--echo # MDEV-6776 ujis and eucjmps erroneously accept 0x8EA0 as a valid byte sequence +--echo # +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET ujis); +INSERT INTO t1 VALUES (0x8EA0); +SELECT HEX(a), CHAR_LENGTH(a) FROM t1; +DROP TABLE t1; +--error ER_INVALID_CHARACTER_STRING +SELECT _ujis 0x8EA0; + + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/mysql-test/t/ctype_utf16.test b/mysql-test/t/ctype_utf16.test index a05f119ca93..c92889da2dc 100644 --- a/mysql-test/t/ctype_utf16.test +++ b/mysql-test/t/ctype_utf16.test @@ -816,3 +816,39 @@ set collation_connection=utf16_bin; --echo # End of 5.6 tests --echo # +--echo # +--echo # Start of 10.0 tests +--echo # + +--echo # +--echo # MDEV-6661 PI() does not work well in UCS2/UTF16/UTF32 context +--echo # +SELECT CONCAT(CONVERT('pi=' USING utf16),PI()) AS PI; + +--echo # +--echo # MDEV-6666 Malformed result for CONCAT(utf8_column, binary_string) +--echo # + +SET NAMES utf8mb4; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET utf16); +INSERT INTO t1 VALUES ('a'); +--error ER_INVALID_CHARACTER_STRING +SELECT CONCAT(a,0xD800) FROM t1; +SELECT CONCAT(a,0xD800DC00) FROM t1; +SELECT CONCAT(a,0x00FF) FROM t1; +DROP TABLE t1; +--error ER_INVALID_CHARACTER_STRING +SELECT CONCAT(_utf16'a' COLLATE utf16_unicode_ci, _binary 0xD800); +PREPARE stmt FROM "SELECT CONCAT(_utf16'a' COLLATE utf16_unicode_ci, ?)"; +SET @arg00=_binary 0xD800; +--error ER_INVALID_CHARACTER_STRING +EXECUTE stmt USING @arg00; +SET @arg00=_binary 0xD800DC00; +EXECUTE stmt USING @arg00; +SET @arg00=_binary 0x00FF; +EXECUTE stmt USING @arg00; +DEALLOCATE PREPARE stmt; + +--echo # +--echo # End of 10.0 tests 
+--echo # diff --git a/mysql-test/t/ctype_utf32.test b/mysql-test/t/ctype_utf32.test index 2fbe452a716..e6583f990ca 100644 --- a/mysql-test/t/ctype_utf32.test +++ b/mysql-test/t/ctype_utf32.test @@ -1,4 +1,5 @@ -- source include/have_utf32.inc +-- source include/have_utf8mb4.inc SET TIME_ZONE = '+03:00'; @@ -873,6 +874,22 @@ ORDER BY l DESC; SELECT '2010-10-10 10:10:10' + INTERVAL GeometryType(GeomFromText('POINT(1 1)')) hour_second; --echo # +--echo # MDEV-5745 analyze MySQL fix for bug#12368495 +--echo # +SELECT CHAR_LENGTH(TRIM(LEADING 0x0000000000 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(LEADING 0x0001 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(LEADING 0x00 FROM _utf32 0x00000061)); + +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0000000000 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(TRAILING 0x0001 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(TRAILING 0x61 FROM _utf32 0x00000061)); + +SELECT CHAR_LENGTH(TRIM(BOTH 0x0000000000 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(BOTH 0x0001 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(BOTH 0x61 FROM _utf32 0x00000061)); +SELECT CHAR_LENGTH(TRIM(BOTH 0x00 FROM _utf32 0x00000061)); + +--echo # --echo # End of 5.5 tests --echo # @@ -902,3 +919,40 @@ set collation_connection=utf32_bin; --echo # +--echo # +--echo # Start of 10.0 tests +--echo # + +--echo # +--echo # MDEV-6661 PI() does not work well in UCS2/UTF16/UTF32 context +--echo # +SELECT CONCAT(CONVERT('pi=' USING utf32),PI()) AS PI; + +--echo # +--echo # MDEV-6666 Malformed result for CONCAT(utf8_column, binary_string) +--echo # + +SET NAMES utf8mb4; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET utf32); +INSERT INTO t1 VALUES ('a'); +--error ER_INVALID_CHARACTER_STRING +SELECT CONCAT(a,0x20FFFF) FROM t1; +SELECT CONCAT(a,0x010000) FROM t1; +SELECT CONCAT(a,0x00FF) FROM t1; +DROP TABLE t1; +--error ER_INVALID_CHARACTER_STRING +SELECT CONCAT(_utf32'a' COLLATE utf32_unicode_ci, _binary 0x20FFFF); +PREPARE stmt FROM "SELECT CONCAT(_utf32'a' COLLATE utf32_unicode_ci, ?)"; +SET @arg00=_binary 0x20FFFF; +--error ER_INVALID_CHARACTER_STRING +EXECUTE stmt USING @arg00; +SET @arg00=_binary 0x010000; +EXECUTE stmt USING @arg00; +SET @arg00=_binary 0x00FF; +EXECUTE stmt USING @arg00; +DEALLOCATE PREPARE stmt; + +--echo # +--echo # End of 10.0 tests +--echo # + diff --git a/mysql-test/t/ctype_utf8.test b/mysql-test/t/ctype_utf8.test index 468804130f4..e02d5a915b7 100644 --- a/mysql-test/t/ctype_utf8.test +++ b/mysql-test/t/ctype_utf8.test @@ -1654,3 +1654,93 @@ set max_sort_length=default; --echo # End of 5.6 tests --echo # +--echo # +--echo # Start of 10.0 tests +--echo # + +--echo # +--echo # MDEV-6666 Malformed result for CONCAT(utf8_column, binary_string) +--echo # + +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET utf8); +INSERT INTO t1 VALUES ('a'); +--error ER_INVALID_CHARACTER_STRING +SELECT CONCAT(a,0xFF) FROM t1; +SELECT CONCAT(a,0xC3BF) FROM t1; +DROP TABLE t1; +--error ER_INVALID_CHARACTER_STRING +SELECT CONCAT('a' COLLATE utf8_unicode_ci, _binary 0xFF); +PREPARE stmt FROM "SELECT CONCAT('a' COLLATE utf8_unicode_ci, ?)"; +SET @arg00=_binary 0xFF; +--error ER_INVALID_CHARACTER_STRING +EXECUTE stmt USING @arg00; +DEALLOCATE PREPARE stmt; +SET NAMES latin1; +PREPARE stmt FROM "SELECT CONCAT(_utf8'a' COLLATE utf8_unicode_ci, ?)"; +EXECUTE stmt USING @no_such_var; +DEALLOCATE PREPARE stmt; +SET NAMES utf8; + +--echo # +--echo # MDEV-6679 Different optimizer plan for "a BETWEEN 'string' AND ?" and "a BETWEEN ? 
AND 'string'" +--echo # +SET NAMES utf8, collation_connection=utf8_swedish_ci; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET utf8, b INT NOT NULL DEFAULT 0, key(a)); +INSERT INTO t1 (a) VALUES ('a'),('b'),('c'),('d'),('¢'); +SET @arg='¢'; +PREPARE stmt FROM "EXPLAIN SELECT * FROM t1 WHERE a BETWEEN _utf8'¢' and ?"; +EXECUTE stmt USING @arg; +PREPARE stmt FROM "EXPLAIN SELECT * FROM t1 WHERE a between ? and _utf8'¢'"; +EXECUTE stmt USING @arg; +DEALLOCATE PREPARE stmt; +DROP TABLE t1; + +-- echo # +-- echo # MDEV-6683 A parameter and a string literal with the same values are not recognized as equal by the optimizer +-- echo # + +SET NAMES utf8, collation_connection=utf8_swedish_ci; +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET latin1, b INT NOT NULL DEFAULT 0, key(a)); +INSERT INTO t1 (a) VALUES ('a'),('b'),('c'),('d'),('¢'); +SET @arg='¢'; +PREPARE stmt FROM "EXPLAIN SELECT * FROM t1 WHERE a BETWEEN _utf8'¢' and ?"; +EXECUTE stmt USING @arg; +PREPARE stmt FROM "EXPLAIN SELECT * FROM t1 WHERE a between ? and _utf8'¢'"; +EXECUTE stmt USING @arg; +DEALLOCATE PREPARE stmt; +DROP TABLE t1; + +--echo # +--echo # MDEV-6688 Illegal mix of collation with bit string B'01100001' +--echo # +CREATE TABLE t1 (a VARCHAR(10) CHARACTER SET latin1, b INT); +INSERT INTO t1 VALUES ('a',1); +SELECT CONCAT(a, IF(b>10, _utf8 X'61', _utf8 X'61')) FROM t1; +SELECT CONCAT(a, IF(b>10, _utf8 X'61', _utf8 B'01100001')) FROM t1; +DROP TABLE t1; + +--echo # +--echo # MDEV-6694 Illegal mix of collation with a PS parameter +--echo # +SET NAMES utf8; +CREATE TABLE t1 (a INT, b VARCHAR(10) CHARACTER SET latin1); +INSERT INTO t1 VALUES (1,'a'); +SELECT CONCAT(b,IF(a,'b','b')) FROM t1; +PREPARE stmt FROM "SELECT CONCAT(b,IF(a,?,?)) FROM t1"; +SET @b='b'; +EXECUTE stmt USING @b,@b; +SET @b=''; +EXECUTE stmt USING @b,@b; +SET @b='Ñ'; +--error ER_CANT_AGGREGATE_2COLLATIONS +EXECUTE stmt USING @b,@b; +DEALLOCATE PREPARE stmt; +DROP TABLE t1; + + +let $ctype_unescape_combinations=selected; +--source include/ctype_unescape.inc + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/mysql-test/t/derived_view.test b/mysql-test/t/derived_view.test index de08b1c4d0d..67899837bb2 100644 --- a/mysql-test/t/derived_view.test +++ b/mysql-test/t/derived_view.test @@ -1731,6 +1731,99 @@ drop table t1,t2; set optimizer_switch=@save_optimizer_switch5740; --echo # +--echo # Bug mdev-5721: possible long key access to a materialized derived table +--echo # (see also the test case for Bug#13261277 that is actually the same bug) +--echo # + +CREATE TABLE t1 ( + id varchar(255) NOT NULL DEFAULT '', + familyid int(11) DEFAULT NULL, + withdrawndate date DEFAULT NULL, + KEY index_td_familyid_id (familyid,id) +) ENGINE=MyISAM DEFAULT CHARSET=utf8; + +CREATE TABLE t2 ( + id int(11) NOT NULL AUTO_INCREMENT, + activefromts datetime NOT NULL DEFAULT '0000-00-00 00:00:00', + shortdescription text, + useraccessfamily varchar(512) DEFAULT NULL, + serialized longtext, + PRIMARY KEY (id) +) ENGINE=MyISAM DEFAULT CHARSET=utf8; + +insert into t1 values ('picture/89/1369722032695.pmd',89,NULL); +insert into t1 values ('picture/90/1369832057370.pmd',90,NULL); +insert into t2 values (38,'2013-03-04 07:49:22','desc','CODE','string'); + +EXPLAIN +SELECT * FROM t2 x, +(SELECT t2.useraccessfamily, t2.serialized AS picturesubuser, COUNT(*) + FROM t2, t1 GROUP BY t2.useraccessfamily, picturesubuser) y +WHERE x.useraccessfamily = y.useraccessfamily; + +SELECT * FROM t2 x, +(SELECT t2.useraccessfamily, t2.serialized AS picturesubuser, COUNT(*) + FROM t2, t1 GROUP BY 
t2.useraccessfamily, picturesubuser) y +WHERE x.useraccessfamily = y.useraccessfamily; + +DROP TABLE t1,t2; + +--echo # +--echo # Bug#13261277: Unchecked key length caused missing records. +--echo # + +CREATE TABLE t1 ( + col_varchar varchar(1024) CHARACTER SET utf8 DEFAULT NULL, + stub1 varchar(1024) CHARACTER SET utf8 DEFAULT NULL, + stub2 varchar(1024) CHARACTER SET utf8 DEFAULT NULL, + stub3 varchar(1024) CHARACTER SET utf8 DEFAULT NULL +); + +INSERT INTO t1 VALUES + ('d','d','l','ther'), + (NULL,'s','NJBIQ','trzetuchv'), + (-715390976,'coul','MYWFB','cfhtrzetu'), + (1696792576,'f','i\'s','c'), + (1,'i','ltpemcfhtr','gsltpemcf'), + (-663027712,'mgsltpemcf','sa','amgsltpem'), + (-1686700032,'JPRVK','i','vamgsltpe'), + (NULL,'STUNB','UNVJV','u'), + (5,'oka','qyihvamgsl','AXSMD'), + (NULL,'tqwmqyihva','h','yntqwmqyi'), + (3,'EGMJN','e','e'); + +CREATE TABLE t2 ( + col_varchar varchar(10) DEFAULT NULL, + col_int INT DEFAULT NULL +); + +INSERT INTO t2 VALUES ('d',9); + +set optimizer_switch='derived_merge=off,derived_with_keys=on'; + +SET @save_heap_size= @@max_heap_table_size; +SET @@max_heap_table_size= 16384; + +SELECT t2.col_int +FROM t2 + RIGHT JOIN ( SELECT * FROM t1 ) AS dt + ON t2.col_varchar = dt.col_varchar +WHERE t2.col_int IS NOT NULL ; + +--echo # Shouldn't use auto_key0 for derived table +EXPLAIN +SELECT t2.col_int +FROM t2 + RIGHT JOIN ( SELECT * FROM t1 ) AS dt + ON t2.col_varchar = dt.col_varchar +WHERE t2.col_int IS NOT NULL ; + +SET @@max_heap_table_size= @save_heap_size; +SET optimizer_switch=@save_optimizer_switch; + +DROP TABLE t1,t2; + +--echo # --echo # end of 5.3 tests --echo # diff --git a/mysql-test/t/features.test b/mysql-test/t/features.test index cdfc9413da5..f2ac5a5bba6 100644 --- a/mysql-test/t/features.test +++ b/mysql-test/t/features.test @@ -6,6 +6,8 @@ drop table if exists t1; --enable_warnings +flush status; + show status like "feature%"; --echo # @@ -109,3 +111,20 @@ select updatexml('<div><div><span>1</span><span>2</span></div></div>', '/','<tr><td>1</td><td>2</td></tr>') as upd1; --replace_result 4 2 show status like "feature_xml"; + + +--echo # +--echo # Feature delayed_keys +--echo # + +create table t1 (a int, key(a)) engine=myisam delay_key_write=1; +insert into t1 values(1); +insert into t1 values(2); +drop table t1; + +create table t1 (a int, key(a)) engine=aria delay_key_write=1; +insert into t1 values(1); +insert into t1 values(2); +drop table t1; + +show status like "feature_delay_key_write"; diff --git a/mysql-test/t/flush-innodb.test b/mysql-test/t/flush-innodb.test index 7a877b977ce..acf9493693a 100644 --- a/mysql-test/t/flush-innodb.test +++ b/mysql-test/t/flush-innodb.test @@ -4,7 +4,7 @@ FLUSH TABLES WITH READ LOCK AND DISABLE CHECKPOINT; UNLOCK TABLES; CREATE TABLE t1 ( m MEDIUMTEXT ) ENGINE=InnoDB; -INSERT INTO t1 VALUES ( REPEAT('i',1048576) ); +INSERT INTO t1 VALUES ( REPEAT('i',65535) ); DROP TABLE t1; diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 363f089e8d7..bd3ed4ad32d 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -3,7 +3,7 @@ # --disable_warnings -drop table if exists t1,t2; +drop table if exists t1,t2,t3,t4,t5,t6; --enable_warnings set @sav_dpi= @@div_precision_increment; @@ -1528,3 +1528,40 @@ insert into t1 (b) values (INET_ATON('192.168.200.200')); explain select MIN(b) from t1 where b >= inet_aton('192.168.119.32'); DROP TABLE t1; +--echo # +--echo # MDEV-6743 crash in GROUP_CONCAT(IF () ORDER BY 1) +--echo # + +CREATE TABLE t1 (pk INT, t2_id INT, t5_id INT, 
PRIMARY KEY (pk)); +INSERT INTO t1 VALUES (1,3,12),(2,3,15); + +CREATE TABLE t2 (pk INT, PRIMARY KEY (pk)); +INSERT INTO t2 VALUES (4),(5); + +CREATE TABLE t3 (t2_id INT, t4_id INT); +INSERT INTO t3 VALUES (6,11),(7,12); + +CREATE TABLE t4 (id INT); +INSERT INTO t4 VALUES (13),(14); + +CREATE TABLE t5 (pk INT, f VARCHAR(50), t6_id INT, PRIMARY KEY (pk)); +INSERT INTO t5 VALUES (9,'FOO',NULL); + +CREATE TABLE t6 (pk INT, f VARCHAR(120), b TINYINT(4), PRIMARY KEY (pk)); + +PREPARE stmt FROM " + SELECT t1.t2_id, GROUP_CONCAT(IF (t6.b, t6.f, t5.f) ORDER BY 1) + FROM t1 + JOIN t2 ON t1.t2_id = t2.pk + JOIN t3 ON t2.pk = t3.t2_id + JOIN t4 ON t4.id = t3.t4_id + JOIN t5 ON t1.t5_id = t5.pk + LEFT JOIN t6 ON t6.pk = t5.t6_id + GROUP BY t1.t2_id +"; + +EXECUTE stmt; +EXECUTE stmt; +EXECUTE stmt; + +DROP TABLE t1,t2,t3,t4,t5,t6; diff --git a/mysql-test/t/func_str.test b/mysql-test/t/func_str.test index 61a237cb447..6369609bea3 100644 --- a/mysql-test/t/func_str.test +++ b/mysql-test/t/func_str.test @@ -1595,6 +1595,11 @@ call foo('(( 00000000 ++ 00000000 ))'); drop procedure foo; drop table t1,t2; +# +# Bug#18786138 SHA/MD5 HASHING FUNCTIONS DIE WITH "FILENAME" CHARACTER SET +# +select md5(_filename "a"), sha(_filename "a"); + --echo # --echo # End of 5.5 tests --echo # @@ -1714,3 +1719,26 @@ SELECT FROM_BASE64(TO_BASE64(t1)) FROM t1; SELECT FROM_BASE64(TO_BASE64(d1)) FROM t1; SELECT FROM_BASE64(TO_BASE64(dt1)) FROM t1; DROP TABLE t1; + + +# +# BUG #12735829: SPACE() FUNCTION WARNING REFERS TO REPEAT() IN ER_WARN_ALLOWED_PACKET_OVERFLOWED +# + +SELECT SPACE(@@global.max_allowed_packet*2); + + +# +# BUG #11746123-23637: CHARSET AND COLLATION OF THE FUNCTION SPACE() +# + +SET NAMES latin1; +PREPARE stmt FROM "SELECT COLLATION(space(2))"; +EXECUTE stmt; +SET NAMES latin2; +EXECUTE stmt; + + +--echo # +--echo # End of 5.6 tests +--echo # diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 6bea1aab392..a3f488a8d1e 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -1243,13 +1243,47 @@ CREATE TABLE t1 AS SELECT SHOW COLUMNS FROM t1; DROP TABLE t1; +CREATE TABLE t1 (a DATE) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(concat(a,'10'))*1; +SELECT * FROM t1 GROUP BY (-FROM_UNIXTIME(concat(a,'10')))*1; +SELECT * FROM t1 GROUP BY (-FROM_UNIXTIME(concat(a,'10'))); +SELECT * FROM t1 GROUP BY ABS(FROM_UNIXTIME(concat(a,'10'))); +SELECT * FROM t1 GROUP BY @a:=(FROM_UNIXTIME(concat(a,'10'))*1); + +DROP TABLE t1; + +SET TIME_ZONE='+02:00'; + +--echo # +--echo # MDEV-6302 Wrong result set when using GROUP BY FROM_UNIXTIME(...)+0 +--echo # +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT a, FROM_UNIXTIME(CONCAT(a,'10')) AS f1, FROM_UNIXTIME(CONCAT(a,'10'))+0 AS f2 FROM t1; +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(CONCAT(a,'10'))+0; +DROP TABLE t1; + +CREATE TABLE t1 (a DATE) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('2005-05-04'),('2000-02-23'); +SELECT * FROM t1 GROUP BY FROM_UNIXTIME(concat(a,'10'))/1; +DROP TABLE t1; + +CREATE TABLE t1 (a DATE); +INSERT INTO t1 VALUES ('2005-05-04'); +SELECT CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10'))) AS f2 FROM t1; +SELECT CHAR_LENGTH(CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10')))) AS f2 FROM t1; +CREATE TABLE t2 AS SELECT CONCAT(FROM_UNIXTIME(CONCAT(a,'10')) MOD FROM_UNIXTIME(CONCAT(a,'10'))) AS f2 FROM t1; +SHOW CREATE TABLE t2; +SELECT * FROM t2; +DROP TABLE t1,t2; + --echo # 
--echo # MDEV-4635 Crash in UNIX_TIMESTAMP(STR_TO_DATE('2020','%Y')) --echo # -SET TIME_ZONE='+02:00'; SELECT UNIX_TIMESTAMP(STR_TO_DATE('2020','%Y')); -SET TIME_ZONE=DEFAULT; +SET TIME_ZONE=DEFAULT; --echo # --echo # MDEV-4863 COALESCE(time_or_datetime) returns wrong results in numeric context @@ -1589,3 +1623,11 @@ SELECT IFNULL(TIME'10:20:30',DATE'2001-01-01'); SELECT CASE WHEN 1 THEN TIME'10:20:30' ELSE DATE'2001-01-01' END; SELECT COALESCE(TIME'10:20:30',DATE'2001-01-01'); SET timestamp=DEFAULT; + +--echo # +--echo # MDEV-5750 Assertion `ltime->year == 0' fails on a query with EXTRACT DAY_MINUTE and TIME column +--echo # +CREATE TABLE t1 ( d DATE, t TIME ); +INSERT INTO t1 VALUES ('2008-12-05','22:34:09'),('2005-03-27','14:26:02'); +SELECT EXTRACT(DAY_MINUTE FROM GREATEST(t,d)), GREATEST(t,d) FROM t1; +DROP TABLE t1; diff --git a/mysql-test/t/gis-debug.test b/mysql-test/t/gis-debug.test new file mode 100644 index 00000000000..a0647a2c9f4 --- /dev/null +++ b/mysql-test/t/gis-debug.test @@ -0,0 +1,6 @@ +--source include/have_geometry.inc +--source include/have_debug.inc + +SET @tmp=ST_GIS_DEBUG(1); + +--source include/gis_debug.inc diff --git a/mysql-test/t/gis-precise.test b/mysql-test/t/gis-precise.test index 0c6410b5a75..c6cf42e86e4 100644 --- a/mysql-test/t/gis-precise.test +++ b/mysql-test/t/gis-precise.test @@ -69,12 +69,15 @@ select astext(ST_Intersection(GeomFromText('LINESTRING(0 0, 50 45, 40 50, 0 0)') select astext(ST_Intersection(GeomFromText('LINESTRING(0 0, 50 45, 40 50)'), GeomFromText('LINESTRING(50 5, 55 10, 0 45)'))); select astext(ST_Intersection(GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), GeomFromText('POINT(20 20)'))); select astext(ST_Intersection(GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), GeomFromText('LINESTRING(-10 -10, 200 200)'))); +--replace_result 7.999999999999999 8 select astext(ST_Intersection(GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))); +--replace_result 7.999999999999999 8 select astext(ST_UNION(GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))); select astext(ST_intersection(geomfromtext('polygon((0 0, 1 0, 0 1, 0 0))'), geomfromtext('polygon((0 0, 1 1, 0 2, 0 0))'))); select astext(ST_symdifference(geomfromtext('polygon((0 0, 1 0, 0 1, 0 0))'), geomfromtext('polygon((0 0, 1 1, 0 2, 0 0))'))); +--replace_result 7.999999999999999 8 select astext(ST_UNION(GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))); # Buffer() tests @@ -83,13 +86,13 @@ select astext(ST_buffer(geometryfromtext('point(1 1)'), 1)); create table t1(geom geometrycollection); insert into t1 values (geomfromtext('POLYGON((0 0, 10 10, 0 8, 0 0))')); insert into t1 values (geomfromtext('POLYGON((1 1, 10 10, 0 8, 1 1))')); -select astext(geom), area(geom),area(ST_buffer(geom,2)) from t1; -select astext(ST_buffer(geom,2)) from t1; +select astext(geom), area(geom),round(area(ST_buffer(geom,2)), 7) from t1; +select ST_NUMPOINTS(ST_EXTERIORRING(ST_buffer(geom,2))) from t1; set @geom=geomfromtext('LINESTRING(2 1, 4 2, 2 3, 2 5)'); set @buff=ST_buffer(@geom,1); --replace_result 40278744502097 40278744502096 -select astext(@buff); +select ST_NUMPOINTS(ST_EXTERIORRING(@buff)); # cleanup DROP TABLE t1; @@ -108,6 +111,32 @@ SELECT ST_Equals(PolyFromText('POLYGON((67 13, 67 18, 67 18, 59 18, 59 13, 67 13 SELECT ST_Equals(PolyFromText('POLYGON((67 13, 67 18, 67 18, 59 18, 59 13, 67 13) 
)'),PolyFromText('POLYGON((67 13, 67 18, 59 18, 59 13, 59 13, 67 13) )')) as result; SELECT ST_Equals(PointFromText('POINT (12 13)'),PointFromText('POINT (12 13)')) as result; + +--echo # +--echo # BUG#11755628/47429: INTERSECTION FUNCTION CRASHED MYSQLD +--echo # BUG#11759650/51979: UNION/INTERSECTION OF POLYGONS CRASHES MYSQL +--echo # + +SELECT ASTEXT(ST_UNION(GEOMFROMTEXT('POLYGON((525000 183300,525400 +183300,525400 18370, 525000 183700,525000 183300))'), +geomfromtext('POLYGON((525298.67 183511.53,525296.57 +183510.39,525296.42 183510.31,525289.11 183506.62,525283.17 +183503.47,525280.98 183502.26,525278.63 183500.97,525278.39 +183500.84,525276.79 183500,525260.7 183491.55,525263.95 +183484.75,525265.58 183481.95,525278.97 183488.73,525276.5 +183493.45,525275.5 183495.7,525280.35 183498.2,525282.3 +183499.1,525282.2 183499.3,525283.55 183500,525301.75 +183509.35,525304.45 183504.25,525307.85 183504.95,525304.5 +183510.83,525302.81 183513.8,525298.67 183511.53),(525275.06 +183489.89,525272.06 183488.37,525268.94 183494.51,525271.94 +183496.03,525275.06 183489.89),(525263.26 183491.55,525266.15 +183493.04,525269.88 183485.82,525266.99 183484.33,525263.26 +183491.55))'))) st_u; + +SET @a=0x0000000001030000000200000005000000000000000000000000000000000000000000000000002440000000000000000000000000000024400000000000002440000000000000000000000000000024400000000000000000000000000000000000000000000000000000F03F000000000000F03F0000000000000040000000000000F03F00000000000000400000000000000040000000000000F03F0000000000000040000000000000F03F000000000000F03F; +SELECT ASTEXT(TOUCHES(@a, GEOMFROMTEXT('point(0 0)'))) t; + + # bug #801243 Assertion `(0)' failed in Gis_geometry_collection::init_from_opresult on ST_UNION SELECT astext(ST_UNION ( @@ -135,11 +164,10 @@ SELECT ASTEXT(ST_INTERSECTION( #bug 804324 Assertion 0 in Gcalc_scan_iterator::pop_suitable_intersection ---replace_result 61538461538462 61538461538461 -SELECT ASTEXT(ST_UNION( +SELECT ROUND(ST_LENGTH(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((6 2,4 0,3 5,3 6,4 3,6 4,3 9,0 7,3 7,8 4,2 9,5 0), (8 2,1 3,9 0,4 4))'), - MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6))'))); + MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 5,6 7,9 7,5 2,1 6,3 6))'))), 7); SELECT ST_NUMGEOMETRIES((ST_UNION(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((2 0,4 2,0 2,1 5,0 3,7 0,8 5,5 8), @@ -219,6 +247,7 @@ SELECT AsText(ST_UNION(POLYGONFROMTEXT('POLYGON((12 9, 3 6, 3 0, 12 9))'), POLYG #bug 841622 Assertion `t->rp->type == Gcalc_function::shape_line' failed in Gcalc_operation_reducer::end_line in maria-5.3-gis +--replace_result 276 278 SELECT ST_NUMPOINTS(ST_EXTERIORRING(ST_BUFFER(ST_UNION( MULTILINESTRINGFROMTEXT('MULTILINESTRING((3 4, 2 5, 7 6, 1 8),(0 0 ,1 6 ,0 1, 8 9, 2 4, 6 1, 3 5, 4 8), (9 3, 5 4, 1 8, 4 2, 5 8, 3 0))' ) , MULTILINESTRINGFROMTEXT('MULTILINESTRING((3 4, 3 1, 2 7, 4 2, 6 2, 1 5))') @@ -313,8 +342,8 @@ SELECT ST_WITHIN( MULTIPOINTFROMTEXT(' MULTIPOINT( 2 9 , 2 9 , 4 9 , 9 1 ) ') , SELECT ST_INTERSECTS( GeomFromText('MULTILINESTRING( ( 4030 3045 , 3149 2461 , 3004 3831 , 3775 2976 ) )') , GeomFromText('LINESTRING(3058.41 3187.91,3081.52 3153.19,3042.99 3127.57,3019.89 3162.29,3039.07 3175.05,3039.07 3175.05,3058.41 3187.91,3081.52 3153.19,3042.99 3127.57,3019.89 3162.29)') ); -#bug 977201 ST_BUFFER fails with the negative D -select ASTEXT(ST_BUFFER(ST_GEOMCOLLFROMTEXT(' GEOMETRYCOLLECTION(LINESTRING(100 100, 31 10, 77 80), POLYGON((0 0,4 7,1 1,0 0)), POINT(20 20))'), -3)); +#bug 977201 ST_BUFFER fails with the 
negative D. TODO - check the result deeper. +# select ASTEXT(ST_BUFFER(ST_GEOMCOLLFROMTEXT(' GEOMETRYCOLLECTION(LINESTRING(100 100, 31 10, 77 80), POLYGON((0 0,4 7,1 1,0 0)), POINT(20 20))'), -3)); #bug 986977 Assertion `!cur_p->event' failed in Gcalc_scan_iterator::arrange_event(int, int) SELECT ST_NUMPOINTS(ST_EXTERIORRING(ST_BUFFER( POLYGONFROMTEXT( 'POLYGON( ( 0.0 -3.0, @@ -328,3 +357,5 @@ SELECT ST_NUMPOINTS(ST_EXTERIORRING(ST_BUFFER( POLYGONFROMTEXT( 'POLYGON( ( 0.0 # MDEV-5615 crash in Gcalc_function::add_operation select astext(buffer(st_linestringfromwkb(linestring(point(-1,1), point(-1,-2))),-1)); +--source include/gis_debug.inc + diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index c38706959e4..d20e4c1711e 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -541,6 +541,18 @@ insert into t1 values(default); drop table t1; # +# Bug #27300: create view with geometry functions lost columns types +# +CREATE TABLE t1 (a GEOMETRY); +CREATE VIEW v1 AS SELECT GeomFromwkb(ASBINARY(a)) FROM t1; +CREATE VIEW v2 AS SELECT a FROM t1; +DESCRIBE v1; +DESCRIBE v2; + +DROP VIEW v1,v2; +DROP TABLE t1; + +# # Bug#24563: MBROverlaps does not seem to function propertly # Bug#54888: MBROverlaps missing in 5.1? # diff --git a/mysql-test/t/grant5.test b/mysql-test/t/grant5.test new file mode 100644 index 00000000000..db953d97fb3 --- /dev/null +++ b/mysql-test/t/grant5.test @@ -0,0 +1,7 @@ +-- source include/not_embedded.inc + +# +# MDEV-6625 SHOW GRANTS for current_user_name@wrong_host_name +# +--error ER_NONEXISTING_GRANT +SHOW GRANTS FOR root@invalid_host; diff --git a/mysql-test/t/group_min_max.test b/mysql-test/t/group_min_max.test index f1a287054ca..c809401bbf8 100644 --- a/mysql-test/t/group_min_max.test +++ b/mysql-test/t/group_min_max.test @@ -1418,6 +1418,31 @@ drop table t1; --echo # End of test#50539. --echo # +--echo # Bug#17217128 - BAD INTERACTION BETWEEN MIN/MAX AND +--echo # "HAVING SUM(DISTINCT)": WRONG RESULTS. 
+--echo # + +CREATE TABLE t (a INT, b INT, KEY(a,b)); +INSERT INTO t VALUES (1,1), (2,2), (3,3), (4,4), (1,0), (3,2), (4,5); +ANALYZE TABLE t; + +SELECT a, SUM(DISTINCT a), MIN(b) FROM t GROUP BY a; +EXPLAIN SELECT a, SUM(DISTINCT a), MIN(b) FROM t GROUP BY a; + +SELECT a, SUM(DISTINCT a), MAX(b) FROM t GROUP BY a; +EXPLAIN SELECT a, SUM(DISTINCT a), MAX(b) FROM t GROUP BY a; + +SELECT a, MAX(b) FROM t GROUP BY a HAVING SUM(DISTINCT a); +EXPLAIN SELECT a, MAX(b) FROM t GROUP BY a HAVING SUM(DISTINCT a); + +SELECT SUM(DISTINCT a), MIN(b), MAX(b) FROM t; +EXPLAIN SELECT SUM(DISTINCT a), MIN(b), MAX(b) FROM t; + +SELECT a, SUM(DISTINCT a), MIN(b), MAX(b) FROM t GROUP BY a; +EXPLAIN SELECT a, SUM(DISTINCT a), MIN(b), MAX(b) FROM t GROUP BY a; +DROP TABLE t; + +--echo # --echo # MDEV-4219 A simple select query returns random data (upstream bug#68473) --echo # diff --git a/mysql-test/t/group_min_max_innodb.test b/mysql-test/t/group_min_max_innodb.test index 7038eb2ff47..6967f847147 100644 --- a/mysql-test/t/group_min_max_innodb.test +++ b/mysql-test/t/group_min_max_innodb.test @@ -137,3 +137,96 @@ SELECT COUNT(DISTINCT a) FROM t1 WHERE b = 'b'; DROP TABLE t1; --echo End of 5.5 tests + +--echo # +--echo # Bug#17909656 - WRONG RESULTS FOR A SIMPLE QUERY WITH GROUP BY +--echo # + +CREATE TABLE t0 ( + i1 INTEGER NOT NULL +); + +INSERT INTO t0 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10), + (11),(12),(13),(14),(15),(16),(17),(18),(19),(20), + (21),(22),(23),(24),(25),(26),(27),(28),(29),(30); + +CREATE TABLE t1 ( + c1 CHAR(1) NOT NULL, + i1 INTEGER NOT NULL, + i2 INTEGER NOT NULL, + UNIQUE KEY k1 (c1,i2) +) ENGINE=InnoDB; + +INSERT INTO t1 SELECT 'A',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'B',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'C',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'D',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'E',i1,i1 FROM t0; +INSERT INTO t1 SELECT 'F',i1,i1 FROM t0; + +CREATE TABLE t2 ( + c1 CHAR(1) NOT NULL, + i1 INTEGER NOT NULL, + i2 INTEGER NOT NULL, + UNIQUE KEY k2 (c1,i1,i2) +) ENGINE=InnoDB; + +INSERT INTO t2 SELECT 'A',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'B',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'C',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'D',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'E',i1,i1 FROM t0; +INSERT INTO t2 SELECT 'F',i1,i1 FROM t0; + +-- disable_result_log +ANALYZE TABLE t1; +ANALYZE TABLE t2; +-- enable_result_log + +let query= +SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' AND i2 = 17) OR ( c1 = 'F') +GROUP BY c1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR ( c1 = 'F' AND i2 = 17)) +GROUP BY c1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, max(i2) FROM t1 WHERE (c1 = 'C' OR c1 = 'F' ) AND ( i2 = 17 ) +GROUP BY c1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, max(i2) FROM t1 +WHERE ((c1 = 'C' AND (i2 = 40 OR i2 = 30)) OR ( c1 = 'F' AND (i2 = 40 ))) +GROUP BY c1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, i1, max(i2) FROM t2 +WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ) +GROUP BY c1,i1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, i1, max(i2) FROM t2 +WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )) +GROUP BY c1,i1; +eval EXPLAIN $query; +eval $query; + +let query= +SELECT c1, i1, max(i2) FROM t2 +WHERE ((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35) OR ( i2 = 17 )) +GROUP BY c1,i1; +eval EXPLAIN $query; +eval $query; + +DROP TABLE t0,t1,t2; diff --git a/mysql-test/t/huge_frm-6224.test 
b/mysql-test/t/huge_frm-6224.test new file mode 100644 index 00000000000..418722a7b51 --- /dev/null +++ b/mysql-test/t/huge_frm-6224.test @@ -0,0 +1,20 @@ +# +# MDEV-6224 Incorrect information in file when *.frm is > 256K +# +# verify that huge frms are rejected during creation, not on opening +# +--source include/have_partition.inc + +let $n=5646; +let $a=create table t1 (a int) engine=myisam partition by hash(a) partitions $n (; +dec $n; +while ($n) +{ + let $a=$a partition p01234567890123456789012345678901234567890123456789012345678$n,; + dec $n; +} + +--disable_query_log +--error ER_TABLE_DEFINITION_TOO_BIG +eval $a partition foo); + diff --git a/mysql-test/t/innodb_load_xa.opt b/mysql-test/t/innodb_load_xa.opt new file mode 100644 index 00000000000..4ff27e659ce --- /dev/null +++ b/mysql-test/t/innodb_load_xa.opt @@ -0,0 +1 @@ +--ignore-builtin-innodb --loose-innodb --log-bin diff --git a/mysql-test/t/innodb_load_xa.test b/mysql-test/t/innodb_load_xa.test new file mode 100644 index 00000000000..52862151b22 --- /dev/null +++ b/mysql-test/t/innodb_load_xa.test @@ -0,0 +1,18 @@ +# +# MDEV-6082 Assertion `0' fails in TC_LOG_DUMMY::log_and_order on DML after installing TokuDB at runtime on server with disabled InnoDB +# +--source include/not_embedded.inc + +if (!$HA_INNODB_SO) { + --skip Need InnoDB plugin +} +install plugin innodb soname 'ha_innodb'; +select engine,support,transactions,xa from information_schema.engines where engine='innodb'; +create table t1 (a int) engine=innodb; +start transaction; +insert t1 values (1); +insert t1 values (2); +commit; +--source include/show_binlog_events.inc +drop table t1; +uninstall plugin innodb; diff --git a/mysql-test/t/innodb_mysql_lock2.test b/mysql-test/t/innodb_mysql_lock2.test index b7259e771ae..640f9652462 100644 --- a/mysql-test/t/innodb_mysql_lock2.test +++ b/mysql-test/t/innodb_mysql_lock2.test @@ -440,15 +440,16 @@ let $wait_statement= $statement; --echo # 4.1 SELECT/SET with a stored function which does not --echo # modify data and uses SELECT in its turn. --echo # ---echo # In theory there is no need to take row locks on the table +--echo # There is no need to take row locks on the table --echo # being selected from in SF as the call to such function ---echo # won't get into the binary log. In practice, however, we ---echo # discover that fact too late in the process to be able to ---echo # affect the decision what locks should be taken. ---echo # Hence, strong locks are taken in this case. +--echo # won't get into the binary log. +--echo # +--echo # However in practice innodb takes strong lock on tables +--echo # being selected from within SF, when SF is called from +--echo # non SELECT statements like 'set' statement below. let $statement= select f1(); let $wait_statement= select i from t1 where i = 1 into j; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc let $statement= set @a:= f1(); let $wait_statement= select i from t1 where i = 1 into j; --source include/check_shared_row_lock.inc @@ -486,19 +487,21 @@ let $wait_statement= select i from t1 where i = 1 into k; --echo # modify data and reads a table through subselect --echo # in a control construct. --echo # ---echo # Again, in theory a call to this function won't get to the ---echo # binary log and thus no locking is needed. But in practice ---echo # we don't detect this fact early enough (get_lock_type_for_table()) ---echo # to avoid taking row locks. 
+--echo # Call to this function won't get to the +--echo # binary log and thus no locking is needed. +--echo # +--echo # However in practice innodb takes strong lock on tables +--echo # being selected from within SF, when SF is called from +--echo # non SELECT statements like 'set' statement below. let $statement= select f3(); let $wait_statement= $statement; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc let $statement= set @a:= f3(); let $wait_statement= $statement; --source include/check_shared_row_lock.inc let $statement= select f4(); let $wait_statement= $statement; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc let $statement= set @a:= f4(); let $wait_statement= $statement; --source include/check_shared_row_lock.inc @@ -539,19 +542,21 @@ let $wait_statement= insert into t2 values ((select i from t1 where i = 1) + 5); --echo # doesn't modify data and reads tables through --echo # a view. --echo # ---echo # Once again, in theory, calls to such functions won't ---echo # get into the binary log and thus don't need row ---echo # locks. But in practice this fact is discovered ---echo # too late to have any effect. +--echo # Calls to such functions won't get into +--echo # the binary log and thus don't need row locks. +--echo # +--echo # However in practice innodb takes strong lock on tables +--echo # being selected from within SF, when SF is called from +--echo # non SELECT statements like 'set' statement below. let $statement= select f6(); let $wait_statement= select i from v1 where i = 1 into k; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc let $statement= set @a:= f6(); let $wait_statement= select i from v1 where i = 1 into k; --source include/check_shared_row_lock.inc let $statement= select f7(); let $wait_statement= select j from v2 where j = 1 into k; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc let $statement= set @a:= f7(); let $wait_statement= select j from v2 where j = 1 into k; --source include/check_shared_row_lock.inc @@ -592,12 +597,11 @@ let $wait_statement= update v2 set j=j+10 where j=1; --echo # data and reads a table indirectly, by calling another --echo # function. --echo # ---echo # In theory, calls to such functions won't get into the binary ---echo # log and thus don't need to acquire row locks. But in practice ---echo # this fact is discovered too late to have any effect. +--echo # Calls to such functions won't get into the binary +--echo # log and thus don't need to acquire row locks. let $statement= select f10(); let $wait_statement= select i from t1 where i = 1 into j; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc --echo # --echo # 4.11 INSERT which uses a stored function which doesn't modify @@ -676,12 +680,11 @@ let $wait_statement= select i from t1 where i = 1 into p; --echo # 5.3 SELECT that calls a function that doesn't modify data and --echo # uses a CALL statement that reads a table via SELECT. --echo # ---echo # In theory, calls to such functions won't get into the binary ---echo # log and thus don't need to acquire row locks. But in practice ---echo # this fact is discovered too late to have any effect. +--echo # Calls to such functions won't get into the binary +--echo # log and thus don't need to acquire row locks. 
let $statement= select f15(); let $wait_statement= select i from t1 where i = 1 into p; ---source include/check_shared_row_lock.inc +--source include/check_no_row_lock.inc --echo # --echo # 5.4 INSERT which calls function which doesn't modify data and diff --git a/mysql-test/t/ipv4_and_ipv6.opt b/mysql-test/t/ipv4_and_ipv6.opt new file mode 100644 index 00000000000..a22a7b100c8 --- /dev/null +++ b/mysql-test/t/ipv4_and_ipv6.opt @@ -0,0 +1 @@ +--skip-name-resolve --bind-address=* diff --git a/mysql-test/t/ipv4_and_ipv6.test b/mysql-test/t/ipv4_and_ipv6.test new file mode 100644 index 00000000000..19ab4a253cc --- /dev/null +++ b/mysql-test/t/ipv4_and_ipv6.test @@ -0,0 +1,13 @@ +--source include/check_ipv6.inc +--source include/not_embedded.inc + +echo =============Test of '::1' ========================================; +let $IPv6= ::1; +--source include/ipv6_clients.inc +--source include/ipv6.inc + +echo =============Test of '127.0.0.1' (IPv4) ===========================; +let $IPv6= 127.0.0.1; +--source include/ipv6_clients.inc +--source include/ipv6.inc + diff --git a/mysql-test/t/join_cache.test b/mysql-test/t/join_cache.test index c60a06f0b0b..0e4610b9f54 100644 --- a/mysql-test/t/join_cache.test +++ b/mysql-test/t/join_cache.test @@ -3589,7 +3589,7 @@ DROP TABLE t1,t2,t3; --echo # --echo # Bug #1058071: LEFT JOIN using blobs ---echo # (mdev-564) when join buffer size is small +--echo # (MDEV-564) when join buffer size is small --echo # CREATE TABLE t1 ( @@ -3656,6 +3656,134 @@ explain select * from t0,t1 left join t2 on t1.b=t2.b order by t0.a, t1.a; drop table t0,t1,t2; +--echo # MDEV-6292: huge performance degradation for a sequence +--echo # of LEFT JOIN operations when using join buffer +--echo # + +--source include/have_innodb.inc + +CREATE TABLE t1 ( + id int(11) NOT NULL AUTO_INCREMENT, + col1 varchar(255) NOT NULL DEFAULT '', + PRIMARY KEY (id) +) ENGINE=INNODB; + +CREATE TABLE t2 ( + id int(11) NOT NULL AUTO_INCREMENT, + parent_id smallint(3) NOT NULL DEFAULT '0', + col2 varchar(25) NOT NULL DEFAULT '', + PRIMARY KEY (id) +) ENGINE=INNODB; + +set join_buffer_size=8192; + +set join_cache_level=0; + +set @init_time:=now(); +SELECT t.* +FROM + t1 t + LEFT JOIN t2 c1 ON c1.parent_id = t.id AND c1.col2 = "val" + LEFT JOIN t2 c2 ON c2.parent_id = t.id AND c2.col2 = "val" + LEFT JOIN t2 c3 ON c3.parent_id = t.id AND c3.col2 = "val" + LEFT JOIN t2 c4 ON c4.parent_id = t.id AND c4.col2 = "val" + LEFT JOIN t2 c5 ON c5.parent_id = t.id AND c5.col2 = "val" + LEFT JOIN t2 c6 ON c6.parent_id = t.id AND c6.col2 = "val" + LEFT JOIN t2 c7 ON c7.parent_id = t.id AND c7.col2 = "val" + LEFT JOIN t2 c8 ON c8.parent_id = t.id AND c8.col2 = "val" + LEFT JOIN t2 c9 ON c9.parent_id = t.id AND c9.col2 = "val" + LEFT JOIN t2 c10 ON c10.parent_id = t.id AND c10.col2 = "val" + LEFT JOIN t2 c11 ON c11.parent_id = t.id AND c11.col2 = "val" + LEFT JOIN t2 c12 ON c12.parent_id = t.id AND c12.col2 = "val" + LEFT JOIN t2 c13 ON c13.parent_id = t.id AND c13.col2 = "val" + LEFT JOIN t2 c14 ON c14.parent_id = t.id AND c14.col2 = "val" + LEFT JOIN t2 c15 ON c15.parent_id = t.id AND c15.col2 = "val" + LEFT JOIN t2 c16 ON c16.parent_id = t.id AND c16.col2 = "val" + LEFT JOIN t2 c17 ON c17.parent_id = t.id AND c17.col2 = "val" + LEFT JOIN t2 c18 ON c18.parent_id = t.id AND c18.col2 = "val" + LEFT JOIN t2 c19 ON c19.parent_id = t.id AND c19.col2 = "val" + LEFT JOIN t2 c20 ON c20.parent_id = t.id AND c20.col2 = "val" + LEFT JOIN t2 c21 ON c21.parent_id = t.id AND c21.col2 = "val" + LEFT JOIN t2 c22 ON c22.parent_id = 
t.id AND c22.col2 = "val" + LEFT JOIN t2 c23 ON c23.parent_id = t.id AND c23.col2 = "val" + LEFT JOIN t2 c24 ON c24.parent_id = t.id AND c24.col2 = "val" + LEFT JOIN t2 c25 ON c25.parent_id = t.id AND c25.col2 = "val" +ORDER BY + col1; +select timestampdiff(second, @init_time, now()) <= 1; + +set join_cache_level=2; + +set @init_time:=now(); +SELECT t.* +FROM + t1 t + LEFT JOIN t2 c1 ON c1.parent_id = t.id AND c1.col2 = "val" + LEFT JOIN t2 c2 ON c2.parent_id = t.id AND c2.col2 = "val" + LEFT JOIN t2 c3 ON c3.parent_id = t.id AND c3.col2 = "val" + LEFT JOIN t2 c4 ON c4.parent_id = t.id AND c4.col2 = "val" + LEFT JOIN t2 c5 ON c5.parent_id = t.id AND c5.col2 = "val" + LEFT JOIN t2 c6 ON c6.parent_id = t.id AND c6.col2 = "val" + LEFT JOIN t2 c7 ON c7.parent_id = t.id AND c7.col2 = "val" + LEFT JOIN t2 c8 ON c8.parent_id = t.id AND c8.col2 = "val" + LEFT JOIN t2 c9 ON c9.parent_id = t.id AND c9.col2 = "val" + LEFT JOIN t2 c10 ON c10.parent_id = t.id AND c10.col2 = "val" + LEFT JOIN t2 c11 ON c11.parent_id = t.id AND c11.col2 = "val" + LEFT JOIN t2 c12 ON c12.parent_id = t.id AND c12.col2 = "val" + LEFT JOIN t2 c13 ON c13.parent_id = t.id AND c13.col2 = "val" + LEFT JOIN t2 c14 ON c14.parent_id = t.id AND c14.col2 = "val" + LEFT JOIN t2 c15 ON c15.parent_id = t.id AND c15.col2 = "val" + LEFT JOIN t2 c16 ON c16.parent_id = t.id AND c16.col2 = "val" + LEFT JOIN t2 c17 ON c17.parent_id = t.id AND c17.col2 = "val" + LEFT JOIN t2 c18 ON c18.parent_id = t.id AND c18.col2 = "val" + LEFT JOIN t2 c19 ON c19.parent_id = t.id AND c19.col2 = "val" + LEFT JOIN t2 c20 ON c20.parent_id = t.id AND c20.col2 = "val" + LEFT JOIN t2 c21 ON c21.parent_id = t.id AND c21.col2 = "val" + LEFT JOIN t2 c22 ON c22.parent_id = t.id AND c22.col2 = "val" + LEFT JOIN t2 c23 ON c23.parent_id = t.id AND c23.col2 = "val" + LEFT JOIN t2 c24 ON c24.parent_id = t.id AND c24.col2 = "val" + LEFT JOIN t2 c25 ON c25.parent_id = t.id AND c25.col2 = "val" +ORDER BY + col1; +select timestampdiff(second, @init_time, now()) <= 1; + +EXPLAIN +SELECT t.* +FROM + t1 t + LEFT JOIN t2 c1 ON c1.parent_id = t.id AND c1.col2 = "val" + LEFT JOIN t2 c2 ON c2.parent_id = t.id AND c2.col2 = "val" + LEFT JOIN t2 c3 ON c3.parent_id = t.id AND c3.col2 = "val" + LEFT JOIN t2 c4 ON c4.parent_id = t.id AND c4.col2 = "val" + LEFT JOIN t2 c5 ON c5.parent_id = t.id AND c5.col2 = "val" + LEFT JOIN t2 c6 ON c6.parent_id = t.id AND c6.col2 = "val" + LEFT JOIN t2 c7 ON c7.parent_id = t.id AND c7.col2 = "val" + LEFT JOIN t2 c8 ON c8.parent_id = t.id AND c8.col2 = "val" + LEFT JOIN t2 c9 ON c9.parent_id = t.id AND c9.col2 = "val" + LEFT JOIN t2 c10 ON c10.parent_id = t.id AND c10.col2 = "val" + LEFT JOIN t2 c11 ON c11.parent_id = t.id AND c11.col2 = "val" + LEFT JOIN t2 c12 ON c12.parent_id = t.id AND c12.col2 = "val" + LEFT JOIN t2 c13 ON c13.parent_id = t.id AND c13.col2 = "val" + LEFT JOIN t2 c14 ON c14.parent_id = t.id AND c14.col2 = "val" + LEFT JOIN t2 c15 ON c15.parent_id = t.id AND c15.col2 = "val" + LEFT JOIN t2 c16 ON c16.parent_id = t.id AND c16.col2 = "val" + LEFT JOIN t2 c17 ON c17.parent_id = t.id AND c17.col2 = "val" + LEFT JOIN t2 c18 ON c18.parent_id = t.id AND c18.col2 = "val" + LEFT JOIN t2 c19 ON c19.parent_id = t.id AND c19.col2 = "val" + LEFT JOIN t2 c20 ON c20.parent_id = t.id AND c20.col2 = "val" + LEFT JOIN t2 c21 ON c21.parent_id = t.id AND c21.col2 = "val" + LEFT JOIN t2 c22 ON c22.parent_id = t.id AND c22.col2 = "val" + LEFT JOIN t2 c23 ON c23.parent_id = t.id AND c23.col2 = "val" + LEFT JOIN t2 c24 ON c24.parent_id = t.id AND c24.col2 = 
"val" + LEFT JOIN t2 c25 ON c25.parent_id = t.id AND c25.col2 = "val" +ORDER BY + col1; + +set join_buffer_size=default; +set join_cache_level = default; + +DROP TABLE t1,t2; + --echo # --echo # MDEV-5123 Remove duplicated conditions pushed both to join_tab->select_cond and join_tab->cache_select->cond for blocked joins. --echo # @@ -3701,7 +3829,7 @@ drop table t1,t2,t3; set expensive_subquery_limit=default; --echo # ---echo # mdev-6071: EXPLAIN chooses to use join buffer while execution turns it down +--echo # MDEV-6071: EXPLAIN chooses to use join buffer while execution turns it down --echo # create table t1 (a int); diff --git a/mysql-test/t/key_cache.test b/mysql-test/t/key_cache.test index 9098ca466b7..86e56a8301b 100644 --- a/mysql-test/t/key_cache.test +++ b/mysql-test/t/key_cache.test @@ -8,6 +8,7 @@ drop table if exists t1, t2, t3; SET @save_key_buffer_size=@@key_buffer_size; SET @save_key_cache_block_size=@@key_cache_block_size; SET @save_key_cache_segments=@@key_cache_segments; +SET @save_key_cache_file_hash_size=@@key_cache_file_hash_size; SELECT @@key_buffer_size, @@small.key_buffer_size; @@ -62,19 +63,19 @@ select @@keycache1.key_buffer_size; select @@keycache1.key_cache_block_size; select @@key_buffer_size; select @@key_cache_block_size; +select @@key_cache_file_hash_size; set global keycache1.key_buffer_size=1024*1024; +let org_key_blocks_unused=`select unused_blocks as unused from information_schema.key_caches where key_cache_name="default"`; +--disable_query_log +eval set @org_key_blocks_unused=$org_key_blocks_unused; +--enable_query_log + create table t1 (p int primary key, a char(10)) delay_key_write=1; create table t2 (p int primary key, i int, a char(10), key k1(i), key k2(a)); -show status like 'key_blocks_used'; - -# Following results differs on 64 and 32 bit systems because of different -# pointer sizes, which takes up different amount of space in key cache - ---replace_result 1812 KEY_BLOCKS_UNUSED 1793 KEY_BLOCKS_UNUSED 1674 KEY_BLOCKS_UNUSED 1818 KEY_BLOCKS_UNUSED 1824 KEY_BLOCKS_UNUSED -show status like 'key_blocks_unused'; +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; insert into t1 values (1, 'qqqq'), (11, 'yyyy'); insert into t2 values (1, 1, 'qqqq'), (2, 1, 'pppp'), @@ -85,9 +86,7 @@ select * from t2; update t1 set p=2 where p=1; update t2 set i=2 where i=1; -show status like 'key_blocks_used'; ---replace_result 1808 KEY_BLOCKS_UNUSED 1789 KEY_BLOCKS_UNUSED 1670 KEY_BLOCKS_UNUSED 1814 KEY_BLOCKS_UNUSED 1820 KEY_BLOCKS_UNUSED -show status like 'key_blocks_unused'; +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; cache index t1 key (`primary`) in keycache1; @@ -147,9 +146,7 @@ cache index t3 in keycache2; cache index t1,t2 in default; drop table t1,t2,t3; -show status like 'key_blocks_used'; ---replace_result 1812 KEY_BLOCKS_UNUSED 1793 KEY_BLOCKS_UNUSED 1674 KEY_BLOCKS_UNUSED 1818 KEY_BLOCKS_UNUSED 1824 KEY_BLOCKS_UNUSED -show status like 'key_blocks_unused'; +select @org_key_blocks_unused-unused_blocks as key_blocks_unused, used_blocks as key_blocks_used from information_schema.key_caches where key_cache_name="default"; create table t1 (a int primary key); cache index t1 in keycache2; @@ -304,7 +301,7 @@ select * from t2; update t1 set p=3 where p=1; update t2 set i=2 where i=1; ---replace_result 1808 KEY_BLOCKS_UNUSED 1670 
KEY_BLOCKS_UNUSED 1789 KEY_BLOCKS_UNUSED +--replace_result 1804 KEY_BLOCKS_UNUSED 1801 KEY_BLOCKS_UNUSED 1663 KEY_BLOCKS_UNUSED 1782 KEY_BLOCKS_UNUSED show status like 'key_%'; --replace_column 7 # select * from information_schema.key_caches where segment_number is null; @@ -336,7 +333,8 @@ select * from t2; update t1 set p=3 where p=1; update t2 set i=2 where i=1; ---replace_result 1808 KEY_BLOCKS_UNUSED 1670 KEY_BLOCKS_UNUSED 1788 KEY_BLOCKS_UNUSED + +--replace_result 1800 KEY_BLOCKS_UNUSED 1794 KEY_BLOCKS_UNUSED 1656 KEY_BLOCKS_UNUSED 1775 KEY_BLOCKS_UNUSED show status like 'key_%'; --replace_column 7 # select * from information_schema.key_caches where segment_number is null; @@ -361,7 +359,7 @@ select * from t2; update t1 set p=3 where p=1; update t2 set i=2 where i=1; ---replace_result 1808 KEY_BLOCKS_UNUSED 1670 KEY_BLOCKS_UNUSED 1789 KEY_BLOCKS_UNUSED +--replace_result 1804 KEY_BLOCKS_UNUSED 1801 KEY_BLOCKS_UNUSED 1663 KEY_BLOCKS_UNUSED 1782 KEY_BLOCKS_UNUSED show status like 'key_%'; --replace_column 7 # select * from information_schema.key_caches where segment_number is null; @@ -378,6 +376,7 @@ select * from information_schema.key_caches where segment_number is null; # Switch back to 2 segments set global key_buffer_size=32*1024; +set global key_cache_file_hash_size=128; select @@key_buffer_size; set global key_cache_segments=2; select @@key_cache_segments; @@ -536,5 +535,6 @@ set global keycache2.key_buffer_size=0; set global key_buffer_size=@save_key_buffer_size; set global key_cache_segments=@save_key_cache_segments; +set global key_cache_file_hash_size=@save_key_cache_file_hash_size; # End of 5.2 tests diff --git a/mysql-test/t/kill_processlist-6619.test b/mysql-test/t/kill_processlist-6619.test new file mode 100644 index 00000000000..2333f02eac6 --- /dev/null +++ b/mysql-test/t/kill_processlist-6619.test @@ -0,0 +1,17 @@ +# +# MDEV-6619 SHOW PROCESSLIST returns empty result set after KILL QUERY +# +--source include/not_embedded.inc +--enable_connect_log +--connect (con1,localhost,root,,) +--let $con_id = `SELECT CONNECTION_ID()` +--replace_column 1 # 3 # 6 # 7 # +SHOW PROCESSLIST; +--connection default +--replace_result $con_id con_id +eval KILL QUERY $con_id; +--connection con1 +--error ER_QUERY_INTERRUPTED +SHOW PROCESSLIST; +--replace_column 1 # 3 # 6 # 7 # +SHOW PROCESSLIST; diff --git a/mysql-test/t/lock_sync.test b/mysql-test/t/lock_sync.test index d5ad7becd7d..f00080d917b 100644 --- a/mysql-test/t/lock_sync.test +++ b/mysql-test/t/lock_sync.test @@ -49,6 +49,7 @@ drop table if exists t0, t1, t2, t3, t4, t5; drop view if exists v1, v2; drop procedure if exists p1; drop procedure if exists p2; +drop procedure if exists p3; drop function if exists f1; drop function if exists f2; drop function if exists f3; @@ -64,6 +65,8 @@ drop function if exists f12; drop function if exists f13; drop function if exists f14; drop function if exists f15; +drop function if exists f16; +drop function if exists f17; --enable_warnings create table t1 (i int primary key); insert into t1 values (1), (2), (3), (4), (5); @@ -170,6 +173,26 @@ begin call p2(k); return k; end| +create function f16() returns int +begin + create temporary table if not exists temp1 (a int); + insert into temp1 select * from t1; + drop temporary table temp1; + return 1; +end| +create function f17() returns int +begin + declare j int; + select i from t1 where i = 1 into j; + call p3; + return 1; +end| +create procedure p3() +begin + create temporary table if not exists temp1 (a int); + insert into temp1 select * from 
t1; + drop temporary table temp1; +end| create trigger t4_bi before insert on t4 for each row begin declare k int; @@ -217,6 +240,7 @@ connection con1; --disable_result_log show create procedure p1; show create procedure p2; +show create procedure p3; show create function f1; show create function f2; show create function f3; @@ -232,6 +256,8 @@ show create function f12; show create function f13; show create function f14; show create function f15; +show create function f16; +show create function f17; --enable_result_log --echo # Switch back to connection 'default'. connection default; @@ -492,18 +518,15 @@ let $restore_table= t2; --echo # 4.1 SELECT/SET with a stored function which does not --echo # modify data and uses SELECT in its turn. --echo # ---echo # In theory there is no need to take strong locks on the table +--echo # There is no need to take strong locks on the table --echo # being selected from in SF as the call to such function ---echo # won't get into the binary log. In practice, however, we ---echo # discover that fact too late in the process to be able to ---echo # affect the decision what locks should be taken. ---echo # Hence, strong locks are taken in this case. +--echo # won't get into the binary log. let $statement= select f1(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= set @a:= f1(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc --echo # --echo # 4.2 INSERT (or other statement which modifies data) with @@ -538,22 +561,20 @@ let $restore_table= t2; --echo # modify data and reads a table through subselect --echo # in a control construct. --echo # ---echo # Again, in theory a call to this function won't get to the ---echo # binary log and thus no strong lock is needed. But in practice ---echo # we don't detect this fact early enough (get_lock_type_for_table()) ---echo # to avoid taking a strong lock. +--echo # Call to this function won't get to the +--echo # binary log and thus no strong lock is needed. let $statement= select f3(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= set @a:= f3(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= select f4(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= set @a:= f4(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc --echo # --echo # 4.5. INSERT (or other statement which modifies data) with @@ -591,22 +612,21 @@ let $restore_table= t2; --echo # doesn't modify data and reads tables through --echo # a view. --echo # ---echo # Once again, in theory, calls to such functions won't ---echo # get into the binary log and thus don't need strong ---echo # locks. But in practice this fact is discovered ---echo # too late to have any effect. +--echo # Calls to such functions won't get into +--echo # the binary log and thus don't need strong +--echo # locks. 
let $statement= select f6(); let $restore_table= t2; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= set @a:= f6(); let $restore_table= t2; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= select f7(); let $restore_table= t2; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc let $statement= set @a:= f7(); let $restore_table= t2; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc --echo # --echo # 4.8 INSERT which uses stored function which @@ -644,12 +664,11 @@ let $restore_table= t2; --echo # data and reads a table indirectly, by calling another --echo # function. --echo # ---echo # In theory, calls to such functions won't get into the binary ---echo # log and thus don't need to acquire strong locks. But in practice ---echo # this fact is discovered too late to have any effect. +--echo # Calls to such functions won't get into the binary +--echo # log and thus don't need to acquire strong locks. let $statement= select f10(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc --echo # --echo # 4.11 INSERT which uses a stored function which doesn't modify @@ -700,6 +719,36 @@ let $statement= insert into t2 values (f13((select i+10 from t1 where i=1))); let $restore_table= t2; --source include/check_no_concurrent_insert.inc +--echo # +--echo # 4.15 SELECT/SET with a stored function which +--echo # inserts data into a temporary table using +--echo # SELECT on t1. +--echo # +--echo # Since this statement is written to the binary log it should +--echo # be serialized with concurrent statements affecting the data it +--echo # uses. Therefore it should take strong locks on the data it reads. +let $statement= select f16(); +let $restore_table= ; +--source include/check_no_concurrent_insert.inc +let $statement= set @a:= f16(); +let $restore_table= ; +--source include/check_no_concurrent_insert.inc + +--echo # +--echo # 4.16 SELECT/SET with a stored function which call procedure +--echo # which inserts data into a temporary table using +--echo # SELECT on t1. +--echo # +--echo # Since this statement is written to the binary log it should +--echo # be serialized with concurrent statements affecting the data it +--echo # uses. Therefore it should take strong locks on the data it reads. +let $statement= select f17(); +let $restore_table= ; +--source include/check_no_concurrent_insert.inc +let $statement= set @a:= f17(); +let $restore_table= ; +--source include/check_no_concurrent_insert.inc + --echo # --echo # 5. Statements that read tables through stored procedures. @@ -730,12 +779,11 @@ let $restore_table= t2; --echo # 5.3 SELECT that calls a function that doesn't modify data and --echo # uses a CALL statement that reads a table via SELECT. --echo # ---echo # In theory, calls to such functions won't get into the binary ---echo # log and thus don't need to acquire strong locks. But in practice ---echo # this fact is discovered too late to have any effect. +--echo # Calls to such functions won't get into the binary +--echo # log and thus don't need to acquire strong locks. 
let $statement= select f15(); let $restore_table= ; ---source include/check_no_concurrent_insert.inc +--source include/check_concurrent_insert.inc --echo # --echo # 5.4 INSERT which calls function which doesn't modify data and @@ -800,7 +848,6 @@ let $statement= update t5 set l= 2 where l = 1; let $restore_table= t5; --source include/check_no_concurrent_insert.inc - --echo # Clean-up. drop function f1; drop function f2; @@ -817,9 +864,12 @@ drop function f12; drop function f13; drop function f14; drop function f15; +drop function f16; +drop function f17; drop view v1, v2; drop procedure p1; drop procedure p2; +drop procedure p3; drop table t1, t2, t3, t4, t5; disconnect con1; diff --git a/mysql-test/t/log_tables_upgrade.test b/mysql-test/t/log_tables_upgrade.test index feb2d8c4aa9..d08d74174db 100644 --- a/mysql-test/t/log_tables_upgrade.test +++ b/mysql-test/t/log_tables_upgrade.test @@ -1,12 +1,6 @@ ---source include/not_embedded.inc --source include/have_csv.inc --source include/have_innodb.inc - -# Only run test if "mysql_upgrade" is found ---require r/have_mysql_upgrade.result ---disable_query_log -select LENGTH("$MYSQL_UPGRADE")>0 as have_mysql_upgrade; ---enable_query_log +--source include/mysql_upgrade_preparation.inc --echo # --echo # Bug#49823: mysql_upgrade fatal error due to general_log / slow_low CSV NULL diff --git a/mysql-test/t/mysql_client_test-master.opt b/mysql-test/t/mysql_client_test-master.opt index 5b347aa0416..fcaf2b69fbc 100644 --- a/mysql-test/t/mysql_client_test-master.opt +++ b/mysql-test/t/mysql_client_test-master.opt @@ -1,3 +1,4 @@ --general-log --general-log-file=$MYSQLTEST_VARDIR/log/master.log --log-output=FILE,TABLE +--max-allowed-packet=32000000 diff --git a/mysql-test/t/mysql_client_test_comp-master.opt b/mysql-test/t/mysql_client_test_comp-master.opt new file mode 100644 index 00000000000..783093c900b --- /dev/null +++ b/mysql-test/t/mysql_client_test_comp-master.opt @@ -0,0 +1,2 @@ +--loose-enable-performance-schema +--max-allowed-packet=32000000 diff --git a/mysql-test/t/mysql_client_test_comp.test b/mysql-test/t/mysql_client_test_comp.test new file mode 100644 index 00000000000..0a6b0ba1130 --- /dev/null +++ b/mysql-test/t/mysql_client_test_comp.test @@ -0,0 +1,20 @@ +# run mysql_client_test with performance schema + +# No need to run this with embedded server +-- source include/not_embedded.inc + +# need to have the dynamic loading turned on for the client plugin tests +--source include/have_plugin_auth.inc + +SET @old_slow_query_log= @@global.slow_query_log; + +call mtr.add_suppression(" Error reading file './client_test_db/test_frm_bug.frm'"); + +--exec echo "$MYSQL_CLIENT_TEST" > $MYSQLTEST_VARDIR/log/mysql_client_test_comp.out.log 2>&1 +--exec $MYSQL_CLIENT_TEST --getopt-ll-test=25600M >> $MYSQLTEST_VARDIR/log/mysql_client_test_comp.out.log 2>&1 + +# End of test +echo ok; + +# Restore state changed by mysql_test_run +SET @@global.slow_query_log= @old_slow_query_log; diff --git a/mysql-test/t/mysql_client_test_nonblock-master.opt b/mysql-test/t/mysql_client_test_nonblock-master.opt index 034d5340a23..5775e707c5f 100644 --- a/mysql-test/t/mysql_client_test_nonblock-master.opt +++ b/mysql-test/t/mysql_client_test_nonblock-master.opt @@ -1 +1,2 @@ --general-log --general-log-file=$MYSQLTEST_VARDIR/log/master.log --log-output=FILE,TABLE +--max-allowed-packet=32000000 diff --git a/mysql-test/t/mysqltest.test b/mysql-test/t/mysqltest.test index ffbec36873e..ae59c713c3d 100644 --- a/mysql-test/t/mysqltest.test +++ b/mysql-test/t/mysqltest.test @@ 
-2053,7 +2053,7 @@ select "at" as col1, "AT" as col2, "c" as col3; --replace_regex /a/b/ /ct/d/ select "a" as col1, "ct" as col2; ---replace_regex /(strawberry)/raspberry and \1/ /blueberry/blackberry/ /potato/tomato/; +--replace_regex /(strawberry)/raspberry and \1/ /blueberry/blackberry/ /potato/tomato/ select "strawberry","blueberry","potato"; --error 1 @@ -2098,6 +2098,12 @@ select "a is a and less is more" as txt; select "a is a and less is more" as txt; --enable_query_log +# +# different delimiters +# +--replace_regex (a)[b] /c/d/ <e>{f}i {g\/\}}/h/ +select 'ABCDEF abcdef g/}' as txt; + #------------------------------------------------------------------------- # BUG #11754855 : Passing variable to --error #------------------------------------------------------------------------- diff --git a/mysql-test/t/order_by_innodb.test b/mysql-test/t/order_by_innodb.test new file mode 100644 index 00000000000..c20eaceb053 --- /dev/null +++ b/mysql-test/t/order_by_innodb.test @@ -0,0 +1,23 @@ +# +# ORDER BY handling (e.g. filesort) tests that require innodb +# +-- source include/have_innodb.inc + +--disable_warnings +drop table if exists t0,t1,t2,t3; +--enable_warnings + +--echo # +--echo # MDEV-6434: Wrong result (extra rows) with ORDER BY, multiple-column index, InnoDB +--echo # + +CREATE TABLE t1 (a INT, b INT, c INT, d TEXT, KEY idx(a,b,c)) ENGINE=InnoDB; + +INSERT INTO t1 (a,c) VALUES +(8, 9),(8, 10),(13, 15),(16, 17),(16, 18),(16, 19),(20, 21), +(20, 22),(20, 24),(20, 25),(20, 26),(20, 27),(20, 28); + +SELECT * FROM t1 WHERE a = 8 AND (b = 1 OR b IS NULL) ORDER BY c; + +DROP TABLE t1; + diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test index 1e1150157c7..754677e9b37 100644 --- a/mysql-test/t/partition.test +++ b/mysql-test/t/partition.test @@ -2816,6 +2816,47 @@ select * from t1 IGNORE INDEX(dob, weeks_worked_last_year, hours_worked_per_week drop table t1; +--echo # +--echo # MDEV-6322: The PARTITION engine can return wrong query results +--echo # +CREATE TABLE t1 ( + CustomerID varchar(5) DEFAULT NULL, + CompanyName varchar(40) DEFAULT NULL, + ContactName varchar(30) DEFAULT NULL, + ContactTitle varchar(30) DEFAULT NULL, + Address varchar(60) DEFAULT NULL, + City varchar(15) DEFAULT NULL, + Region varchar(15) DEFAULT NULL, + PostalCode varchar(10) DEFAULT NULL, + Country varchar(15) NOT NULL, + Phone varchar(24) DEFAULT NULL, + Fax varchar(24) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +PARTITION BY LIST COLUMNS(Country) +(PARTITION p1 VALUES IN ('Germany','Austria','Switzerland','Poland'), + PARTITION p2 VALUES IN ('USA','Canada','Mexico'), + PARTITION p3 VALUES IN ('Spain','Portugal','Italy'), + PARTITION p4 VALUES IN ('UK','Ireland'), + PARTITION p5 VALUES IN ('France','Belgium'), + PARTITION p6 VALUES IN ('Sweden','Finland','Denmark','Norway'), + PARTITION p7 VALUES IN ('Venezuela','Argentina','Brazil') +); + +INSERT INTO t1 (CustomerID, City, Country) VALUES +('ANATR','México D.F','Mexico'), +('ANTON','México D.F','Mexico'), +('BOTTM','Tsawassen','Canada'), +('CENTC','México D.F','Mexico'), +('GREAL','Eugene','USA'), +('HUNGC','Elgin','USA'), +('LAUGB','Vancouver','Canada'), +('LAZYK','Walla Walla','USA'), +('LETSS','San Francisco','USA'), +('LONEP','Portland','USA'); + +SELECT * FROM t1 WHERE Country = 'USA'; +DROP TABLE t1; + # # Test ALTER TABLE ADD/DROP PARTITION IF EXISTS # diff --git a/mysql-test/t/partition_innodb.test b/mysql-test/t/partition_innodb.test index 43f409731a6..1e2aacd474a 100644 --- a/mysql-test/t/partition_innodb.test +++ 
b/mysql-test/t/partition_innodb.test @@ -777,6 +777,34 @@ drop table t3; drop table t1,t2; --echo # +--echo # MySQL Bug#71095: Wrong results with PARTITION BY LIST COLUMNS() +--echo # +create table t1(c1 int, c2 int, c3 int, c4 int, +primary key(c1,c2)) engine=InnoDB +partition by list columns(c2) +(partition p1 values in (1,2) engine=InnoDB, +partition p2 values in (3,4) engine=InnoDB); + +insert into t1 values (1,1,1,1),(2,3,1,1); +select * from t1 where c1=2 and c2=3; +drop table t1; + +--echo # +--echo # MySQL Bug#72803: Wrong "Impossible where" with LIST partitioning +--echo # also MDEV-6240: Wrong "Impossible where" with LIST partitioning +--echo # +CREATE TABLE t1 ( d DATE) ENGINE = InnoDB +PARTITION BY LIST COLUMNS (d) +( + PARTITION p0 VALUES IN ('1990-01-01','1991-01-01'), + PARTITION p1 VALUES IN ('1981-01-01') +); + +INSERT INTO t1 (d) VALUES ('1991-01-01'); +SELECT * FROM t1 WHERE d = '1991-01-01'; +DROP TABLE t1; + +--echo # --echo # MDEV-5963: InnoDB: Assertion failure in file row0sel.cc line 2503, --echo # Failing assertion: 0 with "key ptr now exceeds key end by 762 bytes" --echo # (independent testcase for Oracle Bug#13947868) diff --git a/mysql-test/t/partition_pruning.test b/mysql-test/t/partition_pruning.test index 4c97bab454d..06ef99e1e70 100644 --- a/mysql-test/t/partition_pruning.test +++ b/mysql-test/t/partition_pruning.test @@ -1414,6 +1414,54 @@ explain partitions select * from t1 where a between 10 and 10+33; drop table t0, t1; --echo # +--echo # Bug#71095: Wrong results with PARTITION BY LIST COLUMNS() +--echo # +CREATE TABLE t1 +(c1 int, + c2 int, + c3 int, + c4 int, + PRIMARY KEY (c1,c2)) +PARTITION BY LIST COLUMNS (c2) +(PARTITION p1 VALUES IN (1,2), + PARTITION p2 VALUES IN (3,4)); +INSERT INTO t1 VALUES (1, 1, 1, 1), (2, 3, 1, 1); +INSERT INTO t1 VALUES (1, 2, 1, 1), (2, 4, 1, 1); +SELECT * FROM t1 WHERE c1 = 1 AND c2 < 1; +SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 1; +SELECT * FROM t1 WHERE c1 = 1 AND c2 = 1; +SELECT * FROM t1 WHERE c1 = 1 AND c2 >= 1; +SELECT * FROM t1 WHERE c1 = 1 AND c2 > 1; +SELECT * FROM t1 WHERE c1 = 1 AND c2 < 3; +SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 3; +SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 3; +SELECT * FROM t1 WHERE c1 = 2 AND c2 = 3; +SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 3; +SELECT * FROM t1 WHERE c1 = 2 AND c2 > 3; +SELECT * FROM t1 WHERE c1 = 2 AND c2 < 4; +SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 4; +SELECT * FROM t1 WHERE c1 = 2 AND c2 = 4; +SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 4; +SELECT * FROM t1 WHERE c1 = 2 AND c2 > 4; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 < 1; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 1; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 = 1; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 >= 1; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 > 1; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 < 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 = 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 > 3; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 < 4; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 4; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 = 4; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 4; +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 > 4; +DROP TABLE 
t1; + +--echo # --echo # MDEV-6239: Partition pruning is not working as expected in an inner query --echo # diff --git a/mysql-test/t/plugin_loaderr.test b/mysql-test/t/plugin_loaderr.test index e319e2fb54d..7b98a94afd4 100644 --- a/mysql-test/t/plugin_loaderr.test +++ b/mysql-test/t/plugin_loaderr.test @@ -1,4 +1,6 @@ +--source include/not_embedded.inc + # We used an invalid command-line option and InnoDB failed to start. # Ignore all related warnings call mtr.add_suppression("InnoDB"); @@ -8,3 +10,17 @@ SELECT PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE,PLUGIN_LIBRARY,PLUGIN_LIBRARY_VERSION,LOAD_OPTION FROM INFORMATION_SCHEMA.PLUGINS WHERE plugin_name = 'innodb'; +--echo # +--echo # MDEV-6351 --plugin=force has no effect for built-in plugins +--echo # +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--shutdown_server +--source include/wait_until_disconnected.inc + +--error 1 +--exec $MYSQLD_CMD --innodb=force --innodb-page-size=6000 + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--enable_reconnect +--source include/wait_until_connected_again.inc +--disable_reconnect diff --git a/mysql-test/t/rpl_mysqldump_slave.test b/mysql-test/t/rpl_mysqldump_slave.test index 77fc4a050cc..345bdb82535 100644 --- a/mysql-test/t/rpl_mysqldump_slave.test +++ b/mysql-test/t/rpl_mysqldump_slave.test @@ -36,4 +36,53 @@ start slave; --exec $MYSQL_DUMP_SLAVE --compact --dump-slave no_such_db start slave; + +--echo *** Test mysqldump --dump-slave GTID functionality. + +--connection master +SET gtid_seq_no = 1000; +CREATE TABLE t1 (a INT PRIMARY KEY); +DROP TABLE t1; +--sync_slave_with_master + +--connection slave +# Inject a local transaction on the slave to check that this is not considered +# for --dump-slave. +CREATE TABLE t2 (a INT PRIMARY KEY); +DROP TABLE t2; + +--echo +--echo 1. --dump-slave=1 +--echo +--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ +--exec $MYSQL_DUMP_SLAVE --compact --dump-slave=1 --gtid test + +--echo +--echo 2. --dump-slave=2 +--echo +--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ +--exec $MYSQL_DUMP_SLAVE --compact --dump-slave=2 --gtid test + + +--echo *** Test mysqldump --master-data GTID functionality. +--echo +--echo 1. --master-data=1 +--echo +--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ +--exec $MYSQL_DUMP_SLAVE --compact --master-data=1 --gtid test + +--echo +--echo 2. --master-data=2 +--echo +--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ +--exec $MYSQL_DUMP_SLAVE --compact --master-data=2 --gtid test + +--echo +--echo 3. 
--master-data --single-transaction +--echo +--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ +--exec $MYSQL_DUMP_SLAVE --compact --master-data --single-transaction --gtid test + + + --source include/rpl_end.inc diff --git a/mysql-test/t/sighup-6580.test b/mysql-test/t/sighup-6580.test new file mode 100644 index 00000000000..c89b3d20a0a --- /dev/null +++ b/mysql-test/t/sighup-6580.test @@ -0,0 +1,10 @@ +# +# MDEV-6580 Assertion `thd' failed in my_malloc_size_cb_func upon writing status report into error log +# +source include/not_windows.inc; +source include/not_embedded.inc; +let $pid=`select @@pid_file`; +exec kill -1 `cat $pid`; + +select 'ok'; + diff --git a/mysql-test/t/sp-bugs.test b/mysql-test/t/sp-bugs.test index 8e6a25709aa..4671aee11e1 100644 --- a/mysql-test/t/sp-bugs.test +++ b/mysql-test/t/sp-bugs.test @@ -294,3 +294,16 @@ DELIMITER ;$$ CALL test_5531(1); DROP PROCEDURE test_5531; DROP TABLE t1; + +# +# MDEV-6601 Assertion `!thd->in_active_multi_stmt_transa ction() || thd->in_multi_stmt_transaction_mode()' failed on executing a stored procedure with commit +# +delimiter |; +create procedure sp() begin + commit; +end| +delimiter ;| +start transaction; +call sp(); +drop procedure sp; + diff --git a/mysql-test/t/sp-bugs2.test b/mysql-test/t/sp-bugs2.test new file mode 100644 index 00000000000..2579e3485c6 --- /dev/null +++ b/mysql-test/t/sp-bugs2.test @@ -0,0 +1,29 @@ +# +# MDEV-6610 Assertion `thd->is_error() || thd->killed' failed in mysql_execute_command on executing an SP with repeated CREATE TABLE .. SELECT +# +CREATE TABLE t1 (i INT); +SET @a = 2; + +CREATE TABLE IF NOT EXISTS t2 (i INT) ENGINE = MyISAM + AS SELECT * FROM t1; +CREATE TABLE IF NOT EXISTS t2 (i INT) ENGINE = MyISAM + AS SELECT * FROM t1; +DROP TABLE t2; + +--delimiter | +CREATE PROCEDURE sp() +BEGIN +REPEAT + CREATE TABLE IF NOT EXISTS t2 (i INT) ENGINE = MyISAM + AS SELECT * FROM t1; + SET @a = @a - 1; +UNTIL @a = 0 +END REPEAT ; +END | +--delimiter ; + +CALL sp(); + +DROP PROCEDURE sp; +DROP TABLE t1, t2; + diff --git a/mysql-test/t/stat_tables-enospc.test b/mysql-test/t/stat_tables-enospc.test new file mode 100644 index 00000000000..12e42f6adc0 --- /dev/null +++ b/mysql-test/t/stat_tables-enospc.test @@ -0,0 +1,23 @@ +# +# MDEV-6181 EITS could eat all tmpdir space and hang +# +# test that ANALYZE TABLE is immediately aborted when going out of disk space +--source include/have_debug.inc +call mtr.add_suppression("No space left on device"); +create table t1 (a varchar(255), b varchar(255), c varchar(255)); +--disable_query_log +let $i=10000; +while ($i) { + insert t1 values (repeat(format(rand(),10), 20), + repeat(format(rand(),10), 20), + repeat(format(rand(),10), 20)); + dec $i; +} +--enable_query_log +set use_stat_tables=PREFERABLY, optimizer_use_condition_selectivity=3; +set debug_dbug='+d,simulate_file_write_error'; +--replace_regex /'.*'/'tmp-file'/ +analyze table t1; +set debug_dbug=''; +drop table t1; + diff --git a/mysql-test/t/subselect_sj_mat.test b/mysql-test/t/subselect_sj_mat.test index 91b69a6a09c..912e9d5befd 100644 --- a/mysql-test/t/subselect_sj_mat.test +++ b/mysql-test/t/subselect_sj_mat.test @@ -1808,5 +1808,38 @@ EXECUTE stmt; DROP TABLE t1, t2; DROP VIEW v2; +--echo # +--echo # MDEV-6289 : Unexpected results when querying information_schema +--echo # +CREATE TABLE t1 ( + id int(11) unsigned NOT NULL AUTO_INCREMENT, + db varchar(254) NOT NULL DEFAULT '', + PRIMARY KEY (id), + UNIQUE KEY db (db) +) DEFAULT CHARSET=utf8; +INSERT INTO t1 (db) VALUES 
('mysqltest1'),('mysqltest2'),('mysqltest3'),('mysqltest4'); + +--disable_warnings +drop database if exists mysqltest1; +drop database if exists mysqltest2; +drop database if exists mysqltest3; +drop database if exists mysqltest4; +--enable_warnings +create database mysqltest1; +create database mysqltest2; +create database mysqltest3; +create database mysqltest4; + +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; + +EXPLAIN EXTENDED +SELECT db FROM t1 WHERE db IN (SELECT SCHEMA_NAME FROM information_schema.schemata) ORDER BY db DESC; + +drop table t1; +drop database mysqltest1; +drop database mysqltest2; +drop database mysqltest3; +drop database mysqltest4; + --echo # End of 5.5 tests diff --git a/mysql-test/t/table_options-5867.test b/mysql-test/t/table_options-5867.test new file mode 100644 index 00000000000..153ec08e675 --- /dev/null +++ b/mysql-test/t/table_options-5867.test @@ -0,0 +1,30 @@ +# +# MDEV-5867 ALTER TABLE t1 ENGINE=InnoDB keeps bad options when t1 ENGINE is CONNECT +# +# verify that SHOW CREATE TABLE hides unknown options when IGNORE_BAD_TABLE_OPTIONS is not set + +--source include/have_example_plugin.inc +--source include/not_embedded.inc + +install soname 'ha_example'; + +set sql_mode='ignore_bad_table_options'; +create table t1 ( + a int complex='c,f,f,f' invalid=3 +) engine=example ull=10000 str='dskj' one_or_two='one' yesno=0 + foobar=barfoo; + +create table t2 (a int, key (a) some_option=2014); + +show create table t1; +show create table t2; + +set sql_mode=''; + +show create table t1; +show create table t2; + +drop table t1, t2; + +uninstall soname 'ha_example'; + diff --git a/mysql-test/t/type_bit.test b/mysql-test/t/type_bit.test index 8bedf9357ef..2ca608e76ff 100644 --- a/mysql-test/t/type_bit.test +++ b/mysql-test/t/type_bit.test @@ -362,11 +362,11 @@ f2 bit(14) NOT NULL default b'11110000111100' SHOW CREATE TABLE t1; DROP TABLE t1; ---error ER_INVALID_DEFAULT CREATE TABLE IF NOT EXISTS t1 ( f1 bit(2) NOT NULL default b'' ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_general_ci; - +SHOW CREATE TABLE t1; +DROP TABLE t1; # # Bug#31399 Wrong query result when doing join buffering over BIT fields diff --git a/mysql-test/t/type_bit_innodb.test b/mysql-test/t/type_bit_innodb.test index 7ba90bf08fa..27eaeda0f99 100644 --- a/mysql-test/t/type_bit_innodb.test +++ b/mysql-test/t/type_bit_innodb.test @@ -148,3 +148,12 @@ select * from t1; drop table t1; --echo End of 5.0 tests + +# +# MDEV-6052 Inconsistent results with bit type +# +create table t1(f1 bit(2) not null default b'10',f2 bit(14) not null default b'11110000111100'); +insert into t1 (f1) values (default); +insert into t1 values (b'',b''),('',''); +select hex(f1), hex(f2) from t1; +drop table t1; diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test index 877509a9fc0..a5d7dae606f 100644 --- a/mysql-test/t/union.test +++ b/mysql-test/t/union.test @@ -1273,6 +1273,36 @@ SELECT(SELECT 1 AS a ORDER BY a) AS dev; SELECT(SELECT 1 AS a LIMIT 1) AS dev; SELECT(SELECT 1 AS a FROM dual ORDER BY a DESC LIMIT 1) AS dev; + +--echo # +--echo # Bug #17059925 : UNIONS COMPUTES ROWS_EXAMINED INCORRECTLY +--echo # + +## Save current state of slow log variables +SET @old_slow_query_log= @@global.slow_query_log; +SET @old_log_output= @@global.log_output; +SET @old_long_query_time= @@long_query_time; +SET GLOBAL log_output= "TABLE"; +SET GLOBAL slow_query_log= ON; +SET SESSION long_query_time= 0; + +CREATE TABLE t17059925 (a INT); +CREATE TABLE t2 (b INT); 
+CREATE TABLE t3 (c INT); +INSERT INTO t17059925 VALUES (1), (2), (3); +INSERT INTO t2 VALUES (4), (5), (6); +INSERT INTO t3 VALUES (7), (8), (9); +TRUNCATE table mysql.slow_log; +--sorted_result +SELECT * FROM t17059925 UNION SELECT * FROM t2 UNION SELECT * FROM t3; +SELECT sql_text, rows_examined FROM mysql.slow_log WHERE sql_text LIKE '%SELECT%t17059925%'; +DROP TABLE t17059925, t2, t3; + +## Reset to initial values +SET @@long_query_time= @old_long_query_time; +SET @@global.log_output= @old_log_output; +SET @@global.slow_query_log= @old_slow_query_log; + --echo # --echo # lp:1010729: Unexpected syntax error from UNION --echo # (bug #54382) with single-table join nest diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index e7e621081d6..79cd5e1b24d 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -1527,4 +1527,12 @@ set session rand_seed1=DEFAULT; --error ER_BAD_FIELD_ERROR set autocommit = values(v); +# +# MDEV-6673 I_S.SESSION_VARIABLES shows global values +# +set session sql_mode=ansi_quotes; +select * from information_schema.session_variables where variable_name='sql_mode'; +show global status like 'foobar'; +select * from information_schema.session_variables where variable_name='sql_mode'; + --echo End of 5.5 tests diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index d1d4b936aba..6029ad471f6 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -4700,6 +4700,47 @@ DROP DATABASE IF EXISTS nodb; --error ER_BAD_DB_ERROR CREATE VIEW nodb.a AS SELECT 1; + +--echo # +--echo # BUG#14117018 - MYSQL SERVER CREATES INVALID VIEW DEFINITION +--echo # BUG#18405221 - SHOW CREATE VIEW OUTPUT INCORRECT +--echo # + +CREATE VIEW v1 AS (SELECT '' FROM DUAL); +CREATE VIEW v2 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL + (SELECT '' FROM DUAL); +CREATE VIEW v3 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL + (SELECT '' FROM DUAL) UNION ALL + (SELECT '' FROM DUAL); +CREATE VIEW v4 AS (SELECT 'BUG#14117018' AS col1 FROM DUAL) UNION ALL + (SELECT '' AS col2 FROM DUAL) UNION ALL + (SELECT '' FROM DUAL); + +# In the second (and later) UNIONed queries, duplicate column names are allowed +CREATE VIEW v5 AS (SELECT 'buggy' AS col1, 'fix' as col2 FROM DUAL) UNION ALL + (SELECT 'buggy' as a, 'fix' as a FROM DUAL); + +--echo # Name for the column in select1 is set properly with or +--echo # without this fix. +SHOW CREATE VIEW v1; + +--echo # Name for the column in select2 is set with this fix. +--echo # Without this fix, name would not have set for the +--echo # columns in select2. +SHOW CREATE VIEW v2; + +--echo # Name for the field item in select2 & select3 is set with this fix. +--echo # Without this fix, name would not have set for the +--echo # columns in select2 & select3. +SHOW CREATE VIEW v3; + +--echo # Name for the field item in select3 is set with this fix. +--echo # Without this fix, name would not have set for the +--echo # columns in select3. +SHOW CREATE VIEW v4; + +DROP VIEW v1, v2, v3, v4, v5; + # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. 
--source include/wait_until_count_sessions.inc @@ -5231,6 +5272,69 @@ drop view v1; drop table t1,t2,t3; SET optimizer_switch=@save_optimizer_switch_MDEV_3874; +# +# MDEV-5515: sub-bug test of 3rd execution crash +# + +CREATE TABLE `t1` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `f0` int(11) unsigned NOT NULL DEFAULT '0', + `f1` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`id`), + UNIQUE KEY `id` (`id`) +); + +CREATE TABLE `t2` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `f02` bigint(20) unsigned NOT NULL DEFAULT '0', + `f03` int(11) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`id`), + UNIQUE KEY `id` (`id`) +); + +CREATE ALGORITHM=UNDEFINED SQL SECURITY DEFINER VIEW `v1` AS + SELECT + `t1`.`f0` AS `f0`, + `t1`.`f1` AS `f1`, + `t2`.`f02` AS `f02`, + `t2`.`f03` AS `f03` + FROM + (`t1` LEFT JOIN `t2` ON((`t1`.`id` = `t2`.`f02`))); + +--delimiter | +CREATE FUNCTION `f1`( + p0 BIGINT(20) UNSIGNED + ) + RETURNS bigint(20) unsigned + DETERMINISTIC + CONTAINS SQL + SQL SECURITY DEFINER + COMMENT '' +BEGIN + +DECLARE k0 INTEGER UNSIGNED DEFAULT 0; +DECLARE lResult INTEGER UNSIGNED DEFAULT 0; + + SET k0 = 0; + WHILE k0 < 1 DO + SELECT COUNT(*) as `f00` INTO lResult FROM `v1` WHERE `v1`.`f0` = p0; -- BUG + SET k0 = k0 + 1; + END WHILE; + + RETURN(k0); +END| +--delimiter ; + + +SELECT `f1`(1); +SELECT `f1`(1); +SELECT `f1`(1); +SELECT `f1`(1); + +DROP FUNCTION f1; +DROP VIEW v1; +DROP TABLE t1, t2; + --echo # ----------------------------------------------------------------- --echo # -- End of 5.5 tests. --echo # ----------------------------------------------------------------- diff --git a/mysql-test/valgrind.supp b/mysql-test/valgrind.supp index 09af8903ad4..29080597040 100644 --- a/mysql-test/valgrind.supp +++ b/mysql-test/valgrind.supp @@ -412,6 +412,17 @@ fun:__libc_start_main } +# +# dl_init reports leaked memory in memalign on OpenSuse 12.3 + +{ + memory "loss" from _dl_init + Memcheck:Leak + fun:memalign + ... + fun:call_init + fun:_dl_init +} # # dlclose can allocate memory for error message, the memory will be @@ -1123,6 +1134,17 @@ { + OpenSSL still reachable. 
+ Memcheck:Leak + fun:*alloc + fun:CRYPTO_malloc + obj:*libssl* + fun:SSL_COMP_get_compression_methods + fun:SSL_library_init +} + + +{ Problem with udf and libresolve Memcheck:Cond obj:*/libresolv*.so diff --git a/mysys/CMakeLists.txt b/mysys/CMakeLists.txt index 7f076b10e78..d432c22b966 100644 --- a/mysys/CMakeLists.txt +++ b/mysys/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates +# Copyright (c) 2006, 2014, Oracle and/or its affiliates # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -95,7 +95,6 @@ ADD_EXECUTABLE(thr_timer thr_timer.c) TARGET_LINK_LIBRARIES(thr_timer mysys) SET_TARGET_PROPERTIES(thr_timer PROPERTIES COMPILE_FLAGS "-DMAIN") -INSTALL_DEBUG_SYMBOLS(mysys) IF(MSVC) INSTALL_DEBUG_TARGET(mysys DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() diff --git a/mysys/charset.c b/mysys/charset.c index b7e535136a5..ad3eb78ae0e 100644 --- a/mysys/charset.c +++ b/mysys/charset.c @@ -483,6 +483,50 @@ void add_compiled_collation(struct charset_info_st *cs) static my_pthread_once_t charsets_initialized= MY_PTHREAD_ONCE_INIT; static my_pthread_once_t charsets_template= MY_PTHREAD_ONCE_INIT; +typedef struct +{ + ulonglong use_count; +} MY_COLLATION_STATISTICS; + + +static MY_COLLATION_STATISTICS my_collation_statistics[MY_ALL_CHARSETS_SIZE]; + + +my_bool my_collation_is_known_id(uint id) +{ + return id > 0 && id < array_elements(all_charsets) && all_charsets[id] ? + TRUE : FALSE; +} + + +/* + Collation use statistics functions do not lock + counters to avoid mutex contention. This can lose + some counter increments with high thread concurrency. + But this should be Ok, as we don't need exact numbers. +*/ +static inline void my_collation_statistics_inc_use_count(uint id) +{ + DBUG_ASSERT(my_collation_is_known_id(id)); + my_collation_statistics[id].use_count++; +} + + +ulonglong my_collation_statistics_get_use_count(uint id) +{ + DBUG_ASSERT(my_collation_is_known_id(id)); + return my_collation_statistics[id].use_count; +} + + +const char *my_collation_get_tailoring(uint id) +{ + /* all_charsets[id]->tailoring is never changed after server startup. 
*/ + DBUG_ASSERT(my_collation_is_known_id(id)); + return all_charsets[id]->tailoring; +} + + static void init_available_charsets(void) { char fname[FN_REFLEN + sizeof(MY_CHARSET_INDEX)]; @@ -490,6 +534,7 @@ static void init_available_charsets(void) MY_CHARSET_LOADER loader; bzero((char*) &all_charsets,sizeof(all_charsets)); + bzero((char*) &my_collation_statistics, sizeof(my_collation_statistics)); init_compiled_charsets(MYF(0)); /* Copy compiled charsets */ @@ -608,7 +653,10 @@ get_internal_charset(MY_CHARSET_LOADER *loader, uint cs_number, myf flags) if ((cs= (struct charset_info_st*) all_charsets[cs_number])) { if (cs->state & MY_CS_READY) /* if CS is already initialized */ - return cs; + { + my_collation_statistics_inc_use_count(cs_number); + return cs; + } /* To make things thread safe we are not allowing other threads to interfere @@ -636,6 +684,7 @@ get_internal_charset(MY_CHARSET_LOADER *loader, uint cs_number, myf flags) else cs->state|= MY_CS_READY; } + my_collation_statistics_inc_use_count(cs_number); } else cs= NULL; diff --git a/mysys/ma_dyncol.c b/mysys/ma_dyncol.c index 3b5e05f1b01..c0508b97922 100644 --- a/mysys/ma_dyncol.c +++ b/mysys/ma_dyncol.c @@ -1610,7 +1610,7 @@ dynamic_new_column_store(DYNAMIC_COLUMN *str, my_bool new_str) { struct st_service_funcs *fmt= fmt_data + hdr->format; - void **columns_order; + void **UNINIT_VAR(columns_order); uchar *element; uint i; enum enum_dyncol_func_result rc= ER_DYNCOL_RESOURCE; diff --git a/mysys/mf_iocache2.c b/mysys/mf_iocache2.c index 22def2e0923..06dfc9f2079 100644 --- a/mysys/mf_iocache2.c +++ b/mysys/mf_iocache2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. 
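The collation-use counters added in the mysys/charset.c hunk above are deliberately updated without any locking, so concurrent sessions can overwrite each other's increments; the patch accepts this because only approximate numbers are needed. A minimal standalone sketch of that trade-off (plain C11 with pthreads, not MariaDB code; the iteration count is arbitrary):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERATIONS 1000000

static unsigned long approx_count;    /* plain increment, like the patch: may lose updates */
static atomic_ulong  exact_count;     /* what a lock-free *exact* counter would need */

static void *worker(void *arg)
{
  (void) arg;
  for (long i= 0; i < ITERATIONS; i++)
  {
    approx_count++;                        /* racy read-modify-write */
    atomic_fetch_add(&exact_count, 1);     /* never loses an increment */
  }
  return NULL;
}

int main(void)
{
  pthread_t t1, t2;
  pthread_create(&t1, NULL, worker, NULL);
  pthread_create(&t2, NULL, worker, NULL);
  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  /* approx_count usually ends up below 2*ITERATIONS, exact_count never does */
  printf("approx=%lu exact=%lu expected=%d\n",
         approx_count, (unsigned long) exact_count, 2 * ITERATIONS);
  return 0;
}

An exact counter would need the atomic above or a mutex around every charset lookup, which is the contention the comment in the hunk says it wants to avoid.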
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -456,6 +456,13 @@ process_flags: goto err; } } + else if (*fmt == 'c') /* char type parameter */ + { + char par[2]; + par[0] = va_arg(args, int); + if (my_b_write(info, (uchar*) par, 1)) + goto err; + } else if (*fmt == 'b') /* Sized buffer parameter, only precision makes sense */ { char *par = va_arg(args, char *); diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c index d4c4f8c9997..5505693ce2c 100644 --- a/mysys/mf_keycache.c +++ b/mysys/mf_keycache.c @@ -149,7 +149,8 @@ typedef struct st_keycache_wqueue struct st_my_thread_var *last_thread; /* circular list of waiting threads */ } KEYCACHE_WQUEUE; -#define CHANGED_BLOCKS_HASH 128 /* must be power of 2 */ +/* Default size of hash for changed files */ +#define MIN_CHANGED_BLOCKS_HASH_SIZE 128 /* Control block for a simple (non-partitioned) key cache */ @@ -165,6 +166,7 @@ typedef struct st_simple_key_cache_cb ulong age_threshold; /* age threshold for hot blocks */ ulonglong keycache_time; /* total number of block link operations */ uint hash_entries; /* max number of entries in the hash table */ + uint changed_blocks_hash_size; /* Number of hash buckets for file blocks */ int hash_links; /* max number of hash links */ int hash_links_used; /* number of hash links currently used */ int disk_blocks; /* max number of blocks in the cache */ @@ -191,8 +193,8 @@ typedef struct st_simple_key_cache_cb KEYCACHE_WQUEUE waiting_for_resize_cnt; KEYCACHE_WQUEUE waiting_for_hash_link; /* waiting for a free hash link */ KEYCACHE_WQUEUE waiting_for_block; /* requests waiting for a free block */ - BLOCK_LINK *changed_blocks[CHANGED_BLOCKS_HASH]; /* hash for dirty file bl.*/ - BLOCK_LINK *file_blocks[CHANGED_BLOCKS_HASH]; /* hash for other file bl.*/ + BLOCK_LINK **changed_blocks; /* hash for dirty file bl.*/ + BLOCK_LINK **file_blocks; /* hash for other file bl.*/ /* Statistics variables. These are reset in reset_key_cache_counters(). 
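The key cache rework that starts above replaces the fixed CHANGED_BLOCKS_HASH with a per-cache changed_blocks_hash_size, rounded up to a power of two so that the FILE_HASH(f, cache) macro below can pick a bucket with a simple bit mask. A rough standalone sketch of that sizing and masking (plain C; the numbers are arbitrary and round_up_pow2 only approximates what my_round_up_to_next_power does):

#include <stdio.h>

/* Round up to the next power of two; illustration only, not the mysys helper. */
static unsigned int round_up_pow2(unsigned int v)
{
  unsigned int p= 1;
  while (p < v)
    p<<= 1;
  return p;
}

int main(void)
{
  /* 128 mirrors MIN_CHANGED_BLOCKS_HASH_SIZE; 300 is an arbitrary request */
  unsigned int size= round_up_pow2(300 > 128 ? 300 : 128);   /* -> 512 */
  unsigned int file= 12345;                                  /* pretend file descriptor */

  /* FILE_HASH(f, cache)-style bucket selection: only valid because size is a
     power of two, so (size - 1) is an all-ones bit mask. */
  unsigned int bucket= file & (size - 1);

  printf("size=%u bucket=%u\n", size, bucket);
  return 0;
}

Masking with (size - 1) is equivalent to a modulo only when size is a power of two, which is why init_simple_key_cache() rounds the requested bucket count up instead of using it as given.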
*/ ulong global_blocks_changed; /* number of currently dirty blocks */ @@ -331,7 +333,7 @@ static void test_key_cache(SIMPLE_KEY_CACHE_CB *keycache, #define KEYCACHE_HASH(f, pos) \ ((KEYCACHE_BASE_EXPR(f, pos) / keycache->hash_factor) & \ (keycache->hash_entries-1)) -#define FILE_HASH(f) ((uint) (f) & (CHANGED_BLOCKS_HASH-1)) +#define FILE_HASH(f, cache) ((uint) (f) & (cache->changed_blocks_hash_size-1)) #define DEFAULT_KEYCACHE_DEBUG_LOG "keycache_debug.log" @@ -468,9 +470,10 @@ static inline uint next_power(uint value) */ static -int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_size, +int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, + uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold) + uint age_threshold, uint changed_blocks_hash_size) { ulong blocks, hash_links; size_t length; @@ -515,6 +518,11 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si blocks= (ulong) (use_mem / (sizeof(BLOCK_LINK) + 2 * sizeof(HASH_LINK) + sizeof(HASH_LINK*) * 5/4 + key_cache_block_size)); + + /* Changed blocks hash needs to be a power of 2 */ + changed_blocks_hash_size= my_round_up_to_next_power(MY_MAX(changed_blocks_hash_size, + MIN_CHANGED_BLOCKS_HASH_SIZE)); + /* It doesn't make sense to have too few blocks (less than 8) */ if (blocks >= 8) { @@ -531,8 +539,9 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si while ((length= (ALIGN_SIZE(blocks * sizeof(BLOCK_LINK)) + ALIGN_SIZE(hash_links * sizeof(HASH_LINK)) + ALIGN_SIZE(sizeof(HASH_LINK*) * - keycache->hash_entries))) + - ((size_t) blocks * keycache->key_cache_block_size) > use_mem) + keycache->hash_entries) + + sizeof(BLOCK_LINK*)* (changed_blocks_hash_size*2))) + + ((size_t) blocks * keycache->key_cache_block_size) > use_mem && blocks > 8) blocks--; /* Allocate memory for cache page buffers */ if ((keycache->block_mem= @@ -543,8 +552,17 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si Allocate memory for blocks, hash_links and hash entries; For each block 2 hash links are allocated */ - if ((keycache->block_root= (BLOCK_LINK*) my_malloc(length, - MYF(0)))) + if (my_multi_malloc(MYF(MY_ZEROFILL), + &keycache->block_root, blocks * sizeof(BLOCK_LINK), + &keycache->hash_root, + sizeof(HASH_LINK*) * keycache->hash_entries, + &keycache->hash_link_root, + hash_links * sizeof(HASH_LINK), + &keycache->changed_blocks, + sizeof(BLOCK_LINK*) * changed_blocks_hash_size, + &keycache->file_blocks, + sizeof(BLOCK_LINK*) * changed_blocks_hash_size, + NullS)) break; my_large_free(keycache->block_mem); keycache->block_mem= 0; @@ -561,17 +579,6 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si keycache->blocks_unused= blocks; keycache->disk_blocks= (int) blocks; keycache->hash_links= hash_links; - keycache->hash_root= (HASH_LINK**) ((char*) keycache->block_root + - ALIGN_SIZE(blocks*sizeof(BLOCK_LINK))); - keycache->hash_link_root= (HASH_LINK*) ((char*) keycache->hash_root + - ALIGN_SIZE((sizeof(HASH_LINK*) * - keycache->hash_entries))); - bzero((uchar*) keycache->block_root, - keycache->disk_blocks * sizeof(BLOCK_LINK)); - bzero((uchar*) keycache->hash_root, - keycache->hash_entries * sizeof(HASH_LINK*)); - bzero((uchar*) keycache->hash_link_root, - keycache->hash_links * sizeof(HASH_LINK)); keycache->hash_links_used= 0; keycache->free_hash_list= NULL; keycache->blocks_used= keycache->blocks_changed= 0; @@ -591,7 +598,7 @@ int 
init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si keycache->age_threshold= (age_threshold ? blocks * age_threshold / 100 : blocks); - + keycache->changed_blocks_hash_size= changed_blocks_hash_size; keycache->can_be_used= 1; keycache->waiting_for_hash_link.last_thread= NULL; @@ -602,10 +609,6 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_si keycache->disk_blocks, (long) keycache->block_root, keycache->hash_entries, (long) keycache->hash_root, keycache->hash_links, (long) keycache->hash_link_root)); - bzero((uchar*) keycache->changed_blocks, - sizeof(keycache->changed_blocks[0]) * CHANGED_BLOCKS_HASH); - bzero((uchar*) keycache->file_blocks, - sizeof(keycache->file_blocks[0]) * CHANGED_BLOCKS_HASH); } else { @@ -832,9 +835,10 @@ void finish_resize_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, */ static -int resize_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_size, +int resize_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, + uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold) + uint age_threshold, uint changed_blocks_hash_size) { int blocks= 0; DBUG_ENTER("resize_simple_key_cache"); @@ -852,7 +856,8 @@ int resize_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, uint key_cache_block_ /* The following will work even if use_mem is 0 */ blocks= init_simple_key_cache(keycache, key_cache_block_size, use_mem, - division_limit, age_threshold); + division_limit, age_threshold, + changed_blocks_hash_size); finish: finish_resize_simple_key_cache(keycache, 0); @@ -1248,7 +1253,7 @@ static void link_to_file_list(SIMPLE_KEY_CACHE_CB *keycache, DBUG_ASSERT(block->hash_link->file == file); if (unlink_block) unlink_changed(block); - link_changed(block, &keycache->file_blocks[FILE_HASH(file)]); + link_changed(block, &keycache->file_blocks[FILE_HASH(file, keycache)]); if (block->status & BLOCK_CHANGED) { block->status&= ~BLOCK_CHANGED; @@ -1289,7 +1294,7 @@ static void link_to_changed_list(SIMPLE_KEY_CACHE_CB *keycache, unlink_changed(block); link_changed(block, - &keycache->changed_blocks[FILE_HASH(block->hash_link->file)]); + &keycache->changed_blocks[FILE_HASH(block->hash_link->file, keycache)]); block->status|=BLOCK_CHANGED; keycache->blocks_changed++; keycache->global_blocks_changed++; @@ -3901,7 +3906,7 @@ static int flush_key_blocks_int(SIMPLE_KEY_CACHE_CB *keycache, to flush all dirty pages with minimum seek moves */ count= 0; - for (block= keycache->changed_blocks[FILE_HASH(file)] ; + for (block= keycache->changed_blocks[FILE_HASH(file, keycache)] ; block ; block= block->next_changed) { @@ -3934,7 +3939,7 @@ restart: last_in_flush= NULL; last_for_update= NULL; end= (pos= cache)+count; - for (block= keycache->changed_blocks[FILE_HASH(file)] ; + for (block= keycache->changed_blocks[FILE_HASH(file, keycache)] ; block ; block= next) { @@ -4156,7 +4161,7 @@ restart: do { found= 0; - for (block= keycache->file_blocks[FILE_HASH(file)] ; + for (block= keycache->file_blocks[FILE_HASH(file, keycache)] ; block ; block= next) { @@ -4397,6 +4402,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) uint total_found; uint found; uint idx; + uint changed_blocks_hash_size= keycache->changed_blocks_hash_size; DBUG_ENTER("flush_all_key_blocks"); do @@ -4412,7 +4418,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) { found= 0; /* Step over the whole changed_blocks hash array. 
*/ - for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++) + for (idx= 0; idx < changed_blocks_hash_size; idx++) { /* If an array element is non-empty, use the first block from its @@ -4423,7 +4429,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) same hash bucket, one of them will be flushed per iteration of the outer loop of phase 1. */ - if ((block= keycache->changed_blocks[idx])) + while ((block= keycache->changed_blocks[idx])) { found++; /* @@ -4435,7 +4441,6 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) DBUG_RETURN(1); } } - } while (found); /* @@ -4450,7 +4455,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) { found= 0; /* Step over the whole file_blocks hash array. */ - for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++) + for (idx= 0; idx < changed_blocks_hash_size; idx++) { /* If an array element is non-empty, use the first block from its @@ -4460,7 +4465,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) same hash bucket, one of them will be flushed per iteration of the outer loop of phase 2. */ - if ((block= keycache->file_blocks[idx])) + while ((block= keycache->file_blocks[idx])) { total_found++; found++; @@ -4469,7 +4474,6 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) DBUG_RETURN(1); } } - } while (found); /* @@ -4482,7 +4486,7 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) #ifndef DBUG_OFF /* Now there should not exist any block any more. */ - for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++) + for (idx= 0; idx < changed_blocks_hash_size; idx++) { DBUG_ASSERT(!keycache->changed_blocks[idx]); DBUG_ASSERT(!keycache->file_blocks[idx]); @@ -5028,15 +5032,18 @@ static SIMPLE_KEY_CACHE_CB age_threshold age threshold (may be zero) DESCRIPTION - This function is the implementation of the init_key_cache interface function - that is employed by partitioned key caches. - The function builds and initializes an array of simple key caches, and then - initializes the control block structure of the type PARTITIONED_KEY_CACHE_CB - that is used for a partitioned key cache. The parameter keycache is - supposed to point to this structure. The number of partitions in the - partitioned key cache to be built must be passed through the field - 'partitions' of this structure. The parameter key_cache_block_size specifies - the size of the blocks in the the simple key caches to be built. + This function is the implementation of the init_key_cache + interface function that is employed by partitioned key caches. + + The function builds and initializes an array of simple key caches, + and then initializes the control block structure of the type + PARTITIONED_KEY_CACHE_CB that is used for a partitioned key + cache. The parameter keycache is supposed to point to this + structure. The number of partitions in the partitioned key cache + to be built must be passed through the field 'partitions' of this + structure. + The parameter key_cache_block_size specifies the size of the + blocks in the the simple key caches to be built. The parameters division_limit and age_threshold determine the initial values of those characteristics of the simple key caches that are used for midpoint insertion strategy. 
The parameter use_mem specifies the total @@ -5059,7 +5066,7 @@ static int init_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold) + uint age_threshold, uint changed_blocks_hash_size) { int i; size_t mem_per_cache; @@ -5103,7 +5110,8 @@ int init_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, } cnt= init_simple_key_cache(partition, key_cache_block_size, mem_per_cache, - division_limit, age_threshold); + division_limit, age_threshold, + changed_blocks_hash_size); if (cnt <= 0) { end_simple_key_cache(partition, 1); @@ -5222,7 +5230,8 @@ static int resize_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold) + uint age_threshold, + uint changed_blocks_hash_size) { uint i; uint partitions= keycache->partitions; @@ -5241,7 +5250,8 @@ int resize_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, } if (!err) blocks= init_partitioned_key_cache(keycache, key_cache_block_size, - use_mem, division_limit, age_threshold); + use_mem, division_limit, age_threshold, + changed_blocks_hash_size); if (blocks > 0) { for (i= 0; i < partitions; i++) @@ -5816,6 +5826,7 @@ static int repartition_key_cache_internal(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size, uint partitions, my_bool use_op_lock); /* @@ -5828,8 +5839,11 @@ int repartition_key_cache_internal(KEY_CACHE *keycache, use_mem total memory to use for cache buffers/structures division_limit division limit (may be zero) age_threshold age threshold (may be zero) - partitions number of partitions in the key cache - use_op_lock if TRUE use keycache->op_lock, otherwise - ignore it + changed_blocks_hash_size Number of hash buckets to hold a link of different + files. Should be proportional to number of different + files sused. + partitions Number of partitions in the key cache + use_op_lock if TRUE use keycache->op_lock, otherwise - ignore it DESCRIPTION The function performs the actions required from init_key_cache(). @@ -5850,7 +5864,8 @@ int repartition_key_cache_internal(KEY_CACHE *keycache, static int init_key_cache_internal(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold, uint partitions, + uint age_threshold, uint changed_blocks_hash_size, + uint partitions, my_bool use_op_lock) { void *keycache_cb; @@ -5901,7 +5916,7 @@ int init_key_cache_internal(KEY_CACHE *keycache, uint key_cache_block_size, keycache->can_be_used= 0; blocks= keycache->interface_funcs->init(keycache_cb, key_cache_block_size, use_mem, division_limit, - age_threshold); + age_threshold, changed_blocks_hash_size); keycache->partitions= partitions ? 
((PARTITIONED_KEY_CACHE_CB *) keycache_cb)->partitions : 0; @@ -5956,10 +5971,12 @@ int init_key_cache_internal(KEY_CACHE *keycache, uint key_cache_block_size, int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold, uint partitions) + uint age_threshold, uint changed_blocks_hash_size, + uint partitions) { return init_key_cache_internal(keycache, key_cache_block_size, use_mem, - division_limit, age_threshold, partitions, 1); + division_limit, age_threshold, + changed_blocks_hash_size, partitions, 1); } @@ -5998,7 +6015,8 @@ int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, */ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, - size_t use_mem, uint division_limit, uint age_threshold) + size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size) { int blocks= -1; if (keycache->key_cache_inited) @@ -6008,6 +6026,7 @@ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, blocks= repartition_key_cache_internal(keycache, key_cache_block_size, use_mem, division_limit, age_threshold, + changed_blocks_hash_size, (uint) keycache->param_partitions, 0); else @@ -6015,7 +6034,8 @@ int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, blocks= keycache->interface_funcs->resize(keycache->keycache_cb, key_cache_block_size, use_mem, division_limit, - age_threshold); + age_threshold, + changed_blocks_hash_size); if (keycache->partitions) keycache->partitions= @@ -6453,6 +6473,7 @@ static int repartition_key_cache_internal(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, uint age_threshold, + uint changed_blocks_hash_size, uint partitions, my_bool use_op_lock) { uint blocks= -1; @@ -6462,10 +6483,12 @@ int repartition_key_cache_internal(KEY_CACHE *keycache, pthread_mutex_lock(&keycache->op_lock); keycache->interface_funcs->resize(keycache->keycache_cb, key_cache_block_size, 0, - division_limit, age_threshold); + division_limit, age_threshold, + changed_blocks_hash_size); end_key_cache_internal(keycache, 1, 0); blocks= init_key_cache_internal(keycache, key_cache_block_size, use_mem, - division_limit, age_threshold, partitions, + division_limit, age_threshold, + changed_blocks_hash_size, partitions, 0); if (use_op_lock) pthread_mutex_unlock(&keycache->op_lock); @@ -6510,10 +6533,12 @@ int repartition_key_cache_internal(KEY_CACHE *keycache, int repartition_key_cache(KEY_CACHE *keycache, uint key_cache_block_size, size_t use_mem, uint division_limit, - uint age_threshold, uint partitions) + uint age_threshold, uint changed_blocks_hash_size, + uint partitions) { return repartition_key_cache_internal(keycache, key_cache_block_size, use_mem, division_limit, age_threshold, + changed_blocks_hash_size, partitions, 1); } diff --git a/mysys/my_default.c b/mysys/my_default.c index 1e4038d17fb..8eb99d21b73 100644 --- a/mysys/my_default.c +++ b/mysys/my_default.c @@ -138,9 +138,8 @@ static int search_default_file_with_ext(Process_option_func func, - Windows: GetWindowsDirectory() - Windows: C:/ - Windows: Directory above where the executable is located - - Unix: /etc/ - - Unix: /etc/mysql/ - - Unix: --sysconfdir=<path> (compile-time option) + - Unix: /etc/ or the value of DEFAULT_SYSCONFDIR, if defined + - Unix: /etc/mysql/ unless DEFAULT_SYSCONFDIR is defined - ALL: getenv("MYSQL_HOME") - ALL: --defaults-extra-file=<path> (run-time option) - Unix: ~/ @@ -1224,12 +1223,12 @@ static const char 
**init_default_directories(MEM_ROOT *alloc) #else - errors += add_directory(alloc, "/etc/", dirs); - errors += add_directory(alloc, "/etc/mysql/", dirs); - #if defined(DEFAULT_SYSCONFDIR) if (DEFAULT_SYSCONFDIR[0]) errors += add_directory(alloc, DEFAULT_SYSCONFDIR, dirs); +#else + errors += add_directory(alloc, "/etc/", dirs); + errors += add_directory(alloc, "/etc/mysql/", dirs); #endif /* DEFAULT_SYSCONFDIR */ #endif diff --git a/mysys/my_thr_init.c b/mysys/my_thr_init.c index 5007cb01689..d9dbacc8524 100644 --- a/mysys/my_thr_init.c +++ b/mysys/my_thr_init.c @@ -38,22 +38,6 @@ mysql_mutex_t LOCK_localtime_r; #ifdef _MSC_VER static void install_sigabrt_handler(); #endif -#ifdef TARGET_OS_LINUX - -/* - Dummy thread spawned in my_thread_global_init() below to avoid - race conditions in NPTL pthread_exit code. -*/ - -static pthread_handler_t -nptl_pthread_exit_hack_handler(void *arg __attribute((unused))) -{ - /* Do nothing! */ - pthread_exit(0); - return 0; -} - -#endif /* TARGET_OS_LINUX */ static uint get_thread_lib(void); @@ -197,33 +181,6 @@ my_bool my_thread_global_init(void) thd_lib_detected= get_thread_lib(); -#ifdef TARGET_OS_LINUX - /* - BUG#24507: Race conditions inside current NPTL pthread_exit() - implementation. - - To avoid a possible segmentation fault during concurrent - executions of pthread_exit(), a dummy thread is spawned which - initializes internal variables of pthread lib. See bug description - for a full explanation. - - TODO: Remove this code when fixed versions of glibc6 are in common - use. - */ - if (thd_lib_detected == THD_LIB_NPTL) - { - pthread_t dummy_thread; - pthread_attr_t dummy_thread_attr; - - pthread_attr_init(&dummy_thread_attr); - pthread_attr_setdetachstate(&dummy_thread_attr, PTHREAD_CREATE_JOINABLE); - - if (pthread_create(&dummy_thread,&dummy_thread_attr, - nptl_pthread_exit_hack_handler, NULL) == 0) - (void)pthread_join(dummy_thread, NULL); - } -#endif /* TARGET_OS_LINUX */ - my_thread_init_common_mutex(); return 0; diff --git a/plugin/auth_socket/CMakeLists.txt b/plugin/auth_socket/CMakeLists.txt index 2c5a9c89fcb..5630cac33ce 100644 --- a/plugin/auth_socket/CMakeLists.txt +++ b/plugin/auth_socket/CMakeLists.txt @@ -22,21 +22,47 @@ int main() { getsockopt(0, SOL_SOCKET, SO_PEERCRED, &cred, 0); }" HAVE_PEERCRED) -IF (NOT HAVE_PEERCRED) - # Hi, OpenBSD! - CHECK_CXX_SOURCE_COMPILES( - "#include <sys/types.h> - #include <sys/socket.h> - int main() { - struct sockpeercred cred; - getsockopt(0, SOL_SOCKET, SO_PEERCRED, &cred, 0); - }" HAVE_SOCKPEERCRED) - ADD_DEFINITIONS(-Ducred=sockpeercred) -ENDIF() +IF (HAVE_PEERCRED) + ADD_DEFINITIONS(-DHAVE_PEERCRED) + SET(ok 1) +ELSE() + +# Hi, OpenBSD! +CHECK_CXX_SOURCE_COMPILES( +"#include <sys/types.h> +#include <sys/socket.h> +int main() { + struct sockpeercred cred; + getsockopt(0, SOL_SOCKET, SO_PEERCRED, &cred, 0); + }" HAVE_SOCKPEERCRED) -IF(HAVE_PEERCRED OR HAVE_SOCKPEERCRED) - SET(AUTH_SOCKET_OK 1) +IF (HAVE_SOCKPEERCRED) + ADD_DEFINITIONS(-DHAVE_SOCKPEERCRED) + SET(ok 1) +ELSE() + +# FreeBSD, is that you? +CHECK_CXX_SOURCE_COMPILES( +"#include <sys/types.h> +#include <sys/socket.h> +#include <sys/un.h> +#include <sys/ucred.h> +int main() { + struct xucred cred; + getsockopt(0, 0, LOCAL_PEERCRED, &cred, 0); + }" HAVE_XUCRED) + +IF (HAVE_XUCRED) + ADD_DEFINITIONS(-DHAVE_XUCRED) + SET(ok 1) +ELSE() + +# Who else? Anyone? +# C'mon, show your creativity, be different! ifdef's are fun, aren't they? 
+ +ENDIF() +ENDIF() ENDIF() -MYSQL_ADD_PLUGIN(auth_socket auth_socket.c ONLY_IF AUTH_SOCKET_OK MODULE_ONLY) +MYSQL_ADD_PLUGIN(auth_socket auth_socket.c ONLY_IF ok MODULE_ONLY) diff --git a/plugin/auth_socket/auth_socket.c b/plugin/auth_socket/auth_socket.c index 779998395b0..601b76b6b5c 100644 --- a/plugin/auth_socket/auth_socket.c +++ b/plugin/auth_socket/auth_socket.c @@ -27,9 +27,29 @@ #define _GNU_SOURCE 1 /* for struct ucred */ #include <mysql/plugin_auth.h> -#include <sys/socket.h> -#include <pwd.h> #include <string.h> +#include <pwd.h> +#include <sys/socket.h> +#include <sys/types.h> + +#ifdef HAVE_PEERCRED +#define level SOL_SOCKET + +#elif defined HAVE_SOCKPEERCRED +#define level SOL_SOCKET +#define ucred sockpeercred + +#elif defined HAVE_XUCRED +#include <sys/un.h> +#include <sys/ucred.h> +#define level 0 +#define SO_PEERCRED LOCAL_PEERCRED +#define uid cr_uid +#define ucred xucred + +#else +#error impossible +#endif /** perform the unix socket based authentication @@ -63,7 +83,7 @@ static int socket_auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info) return CR_ERROR; /* get the UID of the client process */ - if (getsockopt(vio_info.socket, SOL_SOCKET, SO_PEERCRED, &cred, &cred_len)) + if (getsockopt(vio_info.socket, level, SO_PEERCRED, &cred, &cred_len)) return CR_ERROR; if (cred_len != sizeof(cred)) diff --git a/plugin/feedback/feedback.cc b/plugin/feedback/feedback.cc index 54459ae6f17..f644bd597d9 100644 --- a/plugin/feedback/feedback.cc +++ b/plugin/feedback/feedback.cc @@ -217,7 +217,8 @@ int fill_feedback(THD *thd, TABLE_LIST *tables, COND *unused) tables->schema_table= i_s_feedback; res= res || fill_plugin_version(thd, tables) || fill_misc_data(thd, tables) - || fill_linux_info(thd, tables); + || fill_linux_info(thd, tables) + || fill_collation_statistics(thd, tables); return res; } diff --git a/plugin/feedback/feedback.h b/plugin/feedback/feedback.h index c5acbb5ef72..c2091afdedc 100644 --- a/plugin/feedback/feedback.h +++ b/plugin/feedback/feedback.h @@ -22,6 +22,7 @@ int fill_feedback(THD *thd, TABLE_LIST *tables, COND *cond); int fill_plugin_version(THD *thd, TABLE_LIST *tables); int fill_misc_data(THD *thd, TABLE_LIST *tables); int fill_linux_info(THD *thd, TABLE_LIST *tables); +int fill_collation_statistics(THD *thd, TABLE_LIST *tables); static const int SERVER_UID_SIZE= 29; extern char server_uid_buf[SERVER_UID_SIZE+1], *user_info; diff --git a/plugin/feedback/utils.cc b/plugin/feedback/utils.cc index 0510140aee9..b83b69be0ce 100644 --- a/plugin/feedback/utils.cc +++ b/plugin/feedback/utils.cc @@ -383,6 +383,25 @@ int fill_misc_data(THD *thd, TABLE_LIST *tables) return 0; } +int fill_collation_statistics(THD *thd, TABLE_LIST *tables) +{ + TABLE *table= tables->table; + for (uint id= 1; id < MY_ALL_CHARSETS_SIZE; id++) + { + ulonglong count; + if (my_collation_is_known_id(id) && + (count= my_collation_statistics_get_use_count(id))) + { + char name[MY_CS_NAME_SIZE + 32]; + size_t namelen= my_snprintf(name, sizeof(name), + "Collation used %s", + get_charset_name(id)); + INSERT2(name, namelen, (count, UNSIGNED)); + } + } + return 0; +}; + /** calculates the server unique identifier diff --git a/plugin/metadata_lock_info/metadata_lock_info.cc b/plugin/metadata_lock_info/metadata_lock_info.cc index 2a2d5e58e2b..b45ea012617 100644 --- a/plugin/metadata_lock_info/metadata_lock_info.cc +++ b/plugin/metadata_lock_info/metadata_lock_info.cc @@ -163,7 +163,7 @@ maria_declare_plugin(metadata_lock_info) NULL, NULL, NULL, - MariaDB_PLUGIN_MATURITY_BETA, + 
MariaDB_PLUGIN_MATURITY_GAMMA, } maria_declare_plugin_end; #else diff --git a/plugin/query_response_time/query_response_time.cc b/plugin/query_response_time/query_response_time.cc index 7ea93515e4e..2c426b0ce5c 100644 --- a/plugin/query_response_time/query_response_time.cc +++ b/plugin/query_response_time/query_response_time.cc @@ -227,7 +227,7 @@ public: char time[TIME_STRING_BUFFER_LENGTH]; char total[TOTAL_STRING_BUFFER_LENGTH]; if(i == bound_count()) - { + { assert(sizeof(TIME_OVERFLOW) <= TIME_STRING_BUFFER_LENGTH); assert(sizeof(TIME_OVERFLOW) <= TOTAL_STRING_BUFFER_LENGTH); memcpy(time,TIME_OVERFLOW,sizeof(TIME_OVERFLOW)); diff --git a/plugin/semisync/semisync_master_plugin.cc b/plugin/semisync/semisync_master_plugin.cc index b8240c80ef3..9eae7f03c34 100644 --- a/plugin/semisync/semisync_master_plugin.cc +++ b/plugin/semisync/semisync_master_plugin.cc @@ -441,7 +441,7 @@ maria_declare_plugin(semisync_master) semi_sync_master_status_vars, /* status variables */ semi_sync_master_system_vars, /* system variables */ "1.0", - MariaDB_PLUGIN_MATURITY_UNKNOWN + MariaDB_PLUGIN_MATURITY_GAMMA } maria_declare_plugin_end; diff --git a/plugin/semisync/semisync_slave_plugin.cc b/plugin/semisync/semisync_slave_plugin.cc index b98888ecd32..572ead214de 100644 --- a/plugin/semisync/semisync_slave_plugin.cc +++ b/plugin/semisync/semisync_slave_plugin.cc @@ -227,7 +227,7 @@ maria_declare_plugin(semisync_slave) semi_sync_slave_status_vars, /* status variables */ semi_sync_slave_system_vars, /* system variables */ "1.0", - MariaDB_PLUGIN_MATURITY_UNKNOWN + MariaDB_PLUGIN_MATURITY_GAMMA } maria_declare_plugin_end; diff --git a/plugin/win_auth_client/CMakeLists.txt b/plugin/win_auth_client/CMakeLists.txt index a017410252d..75ee55117bd 100644 --- a/plugin/win_auth_client/CMakeLists.txt +++ b/plugin/win_auth_client/CMakeLists.txt @@ -31,7 +31,6 @@ IF(WIN32) LINK_LIBRARIES Secur32 MODULE_ONLY COMPONENT SharedLibraries) - #INSTALL_DEBUG_SYMBOLS(auth_win_client) #IF(MSVC) # INSTALL_DEBUG_TARGET(auth_win_client DESTINATION ${INSTALL_LIBDIR}/debug) #ENDIF() diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index e46da850157..c0fc67c0564 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -221,12 +221,17 @@ INSTALL_SCRIPT( ENDIF() SET(prefix "${CMAKE_INSTALL_PREFIX}") -SET(sysconfdir ${prefix}) +IF(INSTALL_SYSCONFDIR) + SET(sysconfdir ${DEFAULT_SYSCONFDIR}) +ELSE() + SET(sysconfdir "/etc") +ENDIF() SET(bindir ${prefix}/${INSTALL_BINDIR}) SET(libexecdir ${prefix}/${INSTALL_SBINDIR}) SET(scriptdir ${prefix}/${INSTALL_BINDIR}) SET(datadir ${prefix}/${INSTALL_MYSQLSHAREDIR}) SET(pkgdatadir ${prefix}/${INSTALL_MYSQLSHAREDIR}) +SET(libsubdir ${INSTALL_LIBDIR}) SET(pkgincludedir ${prefix}/${INSTALL_INCLUDEDIR}) SET(pkglibdir ${prefix}/${INSTALL_LIBDIR}) SET(pkgplugindir ${prefix}/${INSTALL_PLUGINDIR}) diff --git a/scripts/mysql_config.sh b/scripts/mysql_config.sh index de874ef0424..52f6d563fe5 100644 --- a/scripts/mysql_config.sh +++ b/scripts/mysql_config.sh @@ -76,7 +76,8 @@ get_full_path () me=`get_full_path $0` -basedir=`echo $me | sed -e 's;/bin/mysql_config;;'` +# Script might have been renamed but assume mysql_<something>config<something> +basedir=`echo $me | sed -e 's;/bin/mysql_.*config.*;;'` ldata='@localstatedir@' execdir='@libexecdir@' @@ -85,11 +86,11 @@ bindir='@bindir@' # If installed, search for the compiled in directory first (might be "lib64") pkglibdir='@pkglibdir@' pkglibdir_rel=`echo $pkglibdir | sed -e "s;^$basedir/;;"` -fix_path pkglibdir $pkglibdir_rel lib64/mysql lib64 
lib/mysql lib +fix_path pkglibdir $pkglibdir_rel @libsubdir@/mysql @libsubdir@ plugindir='@pkgplugindir@' plugindir_rel=`echo $plugindir | sed -e "s;^$basedir/;;"` -fix_path plugindir $plugindir_rel lib/mysql/plugin lib/plugin +fix_path plugindir $plugindir_rel @libsubdir@/mysql/plugin @libsubdir@/plugin pkgincludedir='@pkgincludedir@' fix_path pkgincludedir include/mysql @@ -131,10 +132,10 @@ Options: pkglibdir [$pkglibdir] plugindir [$plugindir] EOF - exit 0 + exit $1 } -if test $# -le 0; then usage; fi +if test $# -le 0; then usage 0 ; fi while test $# -gt 0; do case $1 in @@ -153,10 +154,10 @@ while test $# -gt 0; do pkgincludedir) echo "$pkgincludedir" ;; pkglibdir) echo "$pkglibdir" ;; plugindir) echo "$plugindir" ;; - *) usage ;; + *) usage 1 >&2 ;; esac ;; - *) usage ;; + *) usage 1 >&2 ;; esac shift diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql index 96e4103b5bd..a7837b40e99 100644 --- a/scripts/mysql_system_tables_fix.sql +++ b/scripts/mysql_system_tables_fix.sql @@ -650,6 +650,22 @@ DROP TABLE tmp_proxies_priv; # Convering the host name to lower case for existing users UPDATE user SET host=LOWER( host ) WHERE LOWER( host ) <> host; +# update timestamp fields in the innodb stat tables +set @str="alter table mysql.innodb_index_stats modify last_update timestamp not null default current_timestamp on update current_timestamp"; +set @str=if(@have_innodb <> 0, @str, "set @dummy = 0"); +prepare stmt from @str; +execute stmt; + +set @str=replace(@str, "innodb_index_stats", "innodb_table_stats"); +prepare stmt from @str; +execute stmt; + +SET @innodb_index_stats_fk= (select count(*) from information_schema.referential_constraints where constraint_schema='mysql' and table_name = 'innodb_index_stats' and referenced_table_name = 'innodb_table_stats' and constraint_name = 'innodb_index_stats_ibfk_1'); +SET @str=IF(@innodb_index_stats_fk > 0 and @have_innodb > 0, "ALTER TABLE mysql.innodb_index_stats DROP FOREIGN KEY `innodb_index_stats_ibfk_1`", "SET @dummy = 0"); +PREPARE stmt FROM @str; +EXECUTE stmt; +DROP PREPARE stmt; + # MDEV-4332 longer user names alter table user modify User char(80) binary not null default ''; alter table db modify User char(80) binary not null default ''; @@ -669,12 +685,5 @@ alter table tables_priv modify Grantor char(141) COLLATE utf8_bin not null # This should not be needed, but gives us some extra testing that the above # changes was correct -set @have_innodb= (select count(engine) from information_schema.engines where engine='INNODB' and support != 'NO'); -SET @innodb_index_stats_fk= (select count(*) from information_schema.referential_constraints where constraint_schema='mysql' and table_name = 'innodb_index_stats' and referenced_table_name = 'innodb_table_stats' and constraint_name = 'innodb_index_stats_ibfk_1'); -SET @str=IF(@innodb_index_stats_fk > 0 and @have_innodb > 0, "ALTER TABLE mysql.innodb_index_stats DROP FOREIGN KEY `innodb_index_stats_ibfk_1`", "SET @dummy = 0"); -PREPARE stmt FROM @str; -EXECUTE stmt; -DROP PREPARE stmt; - flush privileges; diff --git a/scripts/mysqlaccess.conf b/scripts/mysqlaccess.conf index faf47da5f6c..faf47da5f6c 100755..100644 --- a/scripts/mysqlaccess.conf +++ b/scripts/mysqlaccess.conf diff --git a/scripts/mysqlaccess.sh b/scripts/mysqlaccess.sh index f422b6a7dc8..6fc090ab972 100644 --- a/scripts/mysqlaccess.sh +++ b/scripts/mysqlaccess.sh @@ -261,12 +261,12 @@ Release Notes: * log-file for debug-output : /tmp/mysqlaccess.log * default values are read from a configuration file 
$script.conf first this file is looked for in the current directory; if not - found it is looked for in /etc/ + found it is looked for in @sysconfdir@ Note that when default-values are given, these can't get overriden by empty (blanc) values! * CGI-BIN version with HTML and forms interface. Simply place the script in an ScriptAliased directory, make the configuration file - available in the that directory or in /etc, and point your browser + available in the that directory or in @sysconfdir@, and point your browser to the right URL. * copy the grant-rules to temporary tables, where you are safe to play with them. @@ -480,12 +480,12 @@ MySQLaccess::Report::Print_Header(); if (-f "./$script_conf") { require "./$script_conf"; } + elsif (-f "@prefix@/$script_conf") { + require "@prefix@/$script_conf"; + } elsif (-f "@sysconfdir@/$script_conf") { require "@sysconfdir@/$script_conf"; } - elsif (-f "/etc/$script_conf") { - require "/etc/$script_conf"; - } # **************************** # Read in all parameters @@ -950,8 +950,8 @@ sub MergeConfigFile { # ================================= sub MergeConfigFiles { my ($name,$pass,$uid,$gid,$quota,$comment,$gcos,$dir,$shell) = getpwuid $<; + MergeConfigFile("@prefix@/my.cnf"); MergeConfigFile("@sysconfdir@/my.cnf"); - MergeConfigFile("/etc/my.cnf"); MergeConfigFile("$dir/.my.cnf"); } diff --git a/scripts/mysqld_multi.sh b/scripts/mysqld_multi.sh index 4dd5eae003d..e3f8e50122c 100644 --- a/scripts/mysqld_multi.sh +++ b/scripts/mysqld_multi.sh @@ -503,9 +503,9 @@ sub list_defaults_files my %seen; # Don't list the same file more than once return grep { defined $_ and not $seen{$_}++ and -f $_ and -r $_ } - ('/etc/my.cnf', - '/etc/mysql/my.cnf', - '@sysconfdir@/my.cnf', + ('@sysconfdir@/my.cnf', + '@sysconfdir@/mysql/my.cnf', + '@prefix@/my.cnf', ($ENV{MYSQL_HOME} ? "$ENV{MYSQL_HOME}/my.cnf" : undef), $opt{'extra-file'}, ($ENV{HOME} ? "$ENV{HOME}/.my.cnf" : undef)); @@ -636,7 +636,7 @@ sub example { print <<EOF; # This is an example of a my.cnf file for $my_progname. -# Usually this file is located in home dir ~/.my.cnf or /etc/my.cnf +# Usually this file is located in home dir ~/.my.cnf or @sysconfdir@/my.cnf # # SOME IMPORTANT NOTES FOLLOW: # @@ -709,7 +709,7 @@ sub example # (as per Linux/Unix standard). You may even replace the # /etc/init.d/mysql.server script with it. # -# Before using, you must create a my.cnf file either in @sysconfdir@/my.cnf +# Before using, you must create a my.cnf file either in @prefix@/my.cnf # or /root/.my.cnf and add the [mysqld_multi] and [mysqld#] groups. # # The script can be found from support-files/mysqld_multi.server.sh diff --git a/scripts/mysqld_safe.sh b/scripts/mysqld_safe.sh index 4e7b098c8d8..f91d936c315 100644 --- a/scripts/mysqld_safe.sh +++ b/scripts/mysqld_safe.sh @@ -273,16 +273,6 @@ wsrep_recover_position() { } parse_arguments() { - # We only need to pass arguments through to the server if we don't - # handle them here. So, we collect unrecognized options (passed on - # the command line) into the args variable. 
- pick_args= - if test "$1" = PICK-ARGS-FROM-ARGV - then - pick_args=1 - shift - fi - for arg do val=`echo "$arg" | sed -e "s;--[^=]*=;;"` case "$arg" in @@ -340,11 +330,10 @@ parse_arguments() { --help) usage ;; *) - if test -n "$pick_args" - then - append_arg_to_args "$arg" - fi - ;; + case "$unrecognized_handling" in + collect) append_arg_to_args "$arg" ;; + complain) log_error "unknown option '$arg'" ;; + esac esac done } @@ -601,8 +590,16 @@ then SET_USER=0 fi +# If arguments come from [mysqld_safe] section of my.cnf +# we complain about unrecognized options +unrecognized_handling=complain parse_arguments `$print_defaults $defaults --loose-verbose mysqld_safe safe_mysqld mariadb_safe` -parse_arguments PICK-ARGS-FROM-ARGV "$@" + +# We only need to pass arguments through to the server if we don't +# handle them here. So, we collect unrecognized options (passed on +# the command line) into the args variable. +unrecognized_handling=collect +parse_arguments "$@" # diff --git a/scripts/mysqlhotcopy.sh b/scripts/mysqlhotcopy.sh index 1ef743725e5..b6ff7e1b28d 100644 --- a/scripts/mysqlhotcopy.sh +++ b/scripts/mysqlhotcopy.sh @@ -56,6 +56,9 @@ WARNING: THIS PROGRAM IS STILL IN BETA. Comments/patches welcome. # Documentation continued at end of file +# fix CORE::GLOBAL::die to return a predictable exit code +BEGIN { *CORE::GLOBAL::die= sub { warn @_; exit 1; }; } + my $VERSION = "1.23"; my $opt_tmpdir = $ENV{TMPDIR} || "/tmp"; diff --git a/scripts/mytop.sh b/scripts/mytop.sh index fe7765988fb..17a87e9efd2 100644 --- a/scripts/mytop.sh +++ b/scripts/mytop.sh @@ -70,7 +70,6 @@ sub GetShowStatus(); sub cmd_s; sub cmd_S; sub cmd_q; -sub FindProg($); ## Default Config Values @@ -1366,9 +1365,9 @@ sub GetInnoDBStatus() { if (not $config{pager}) { - if (not $config{pager} = FindProg('less')) + if (not $config{pager} = my_which('less')) { - $config{pager} = FindProg('more'); + $config{pager} = my_which('more'); } } @@ -1467,9 +1466,9 @@ sub GetShowVariables() { if (not $config{pager}) { - if (not $config{pager} = FindProg('less')) + if (not $config{pager} = my_which('less')) { - $config{pager} = FindProg('more'); + $config{pager} = my_which('more'); } } @@ -1825,25 +1824,6 @@ sub Execute($) return $sth; } -sub FindProg($) -{ - my $prog = shift; - my $found = undef; - my @search_dirs = ("/bin", "/usr/bin", "/usr/sbin", - "/usr/local/bin", "/usr/local/sbin"); - - for (@search_dirs) - { - my $loc = "$_/$prog"; - if (-e $loc) - { - $found = $loc; - last; - } - } - return $found; -} - #### #### my_which is used, because we can't assume that every system has the #### which -command. my_which can take only one argument at a time. 
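The sql/create_options.cc hunk below adds is_engine_option_known(), which checks an option name against an engine's declared table-option rules (this is what lets the table_options-5867 test earlier distinguish engine-defined options from leftover unknown ones). A simplified standalone sketch of that lookup, using plain strncasecmp and a made-up rules array instead of the server's my_strnncoll and ha_create_table_option:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <strings.h>

/* Simplified stand-in for ha_create_table_option: a list of option names
   terminated by a NULL name, like the real rules array. */
struct option_rule
{
  const char *name;
  size_t      name_length;
};

static bool option_is_known(const char *name, size_t length,
                            const struct option_rule *rules)
{
  if (!rules)
    return false;
  for (; rules->name; rules++)
  {
    if (rules->name_length == length &&
        strncasecmp(rules->name, name, length) == 0)
      return true;
  }
  return false;
}

int main(void)
{
  /* Hypothetical rules, loosely modelled on the ha_example options used by
     the table_options-5867 test. */
  static const struct option_rule rules[]=
  { { "ULL", 3 }, { "STR", 3 }, { "ONE_OR_TWO", 10 }, { NULL, 0 } };

  printf("ull     -> %d\n", option_is_known("ull", 3, rules));      /* 1 */
  printf("complex -> %d\n", option_is_known("complex", 7, rules));  /* 0 */
  return 0;
}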
diff --git a/sql-bench/graph-compare-results.sh b/sql-bench/graph-compare-results.sh index ddc9080acd6..ddc9080acd6 100644..100755 --- a/sql-bench/graph-compare-results.sh +++ b/sql-bench/graph-compare-results.sh diff --git a/sql-common/client.c b/sql-common/client.c index 72760560903..d5c03f6761d 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -764,7 +764,7 @@ cli_safe_read(MYSQL *mysql) restart: if (net->vio != 0) - len=my_net_read(net); + len= my_net_read_packet(net, 0); if (len == packet_error || len == 0) { diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 32499662a7c..1c41ff481aa 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -51,7 +51,8 @@ ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h SET_SOURCE_FILES_PROPERTIES(${GEN_SOURCES} PROPERTIES GENERATED 1) -ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER -DHAVE_POOL_OF_THREADS) +ADD_DEFINITIONS(-DMYSQL_SERVER -DHAVE_EVENT_SCHEDULER) + IF(SSL_DEFINES) ADD_DEFINITIONS(${SSL_DEFINES}) ENDIF() @@ -119,10 +120,16 @@ SET (SQL_SOURCE ${MYSYS_LIBWRAP_SOURCE} ) -IF(WIN32) - SET(SQL_SOURCE ${SQL_SOURCE} threadpool_win.cc) -ELSE() - SET(SQL_SOURCE ${SQL_SOURCE} threadpool_unix.cc) +IF (CMAKE_SYSTEM_NAME MATCHES "Linux" OR + CMAKE_SYSTEM_NAME MATCHES "Windows" OR + CMAKE_SYSTEM_NAME MATCHES "SunOS" OR + HAVE_KQUEUE) + ADD_DEFINITIONS(-DHAVE_POOL_OF_THREADS) + IF(WIN32) + SET(SQL_SOURCE ${SQL_SOURCE} threadpool_win.cc) + ELSE() + SET(SQL_SOURCE ${SQL_SOURCE} threadpool_unix.cc) + ENDIF() ENDIF() MYSQL_ADD_PLUGIN(partition ha_partition.cc STORAGE_ENGINE DEFAULT STATIC_ONLY @@ -241,7 +248,9 @@ RUN_BISON( ) # Gen_lex_hash -ADD_EXECUTABLE(gen_lex_hash gen_lex_hash.cc) +IF(NOT CMAKE_CROSSCOMPILING) + ADD_EXECUTABLE(gen_lex_hash gen_lex_hash.cc) +ENDIF() ADD_CUSTOM_COMMAND( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h diff --git a/sql/create_options.cc b/sql/create_options.cc index efae87e7533..5800003ed49 100644 --- a/sql/create_options.cc +++ b/sql/create_options.cc @@ -775,3 +775,20 @@ engine_option_value *merge_engine_table_options(engine_option_value *first, &first, &end); DBUG_RETURN(first); } + +bool is_engine_option_known(engine_option_value *opt, + ha_create_table_option *rules) +{ + if (!rules) + return false; + + for (; rules->name; rules++) + { + if (!my_strnncoll(system_charset_info, + (uchar*)rules->name, rules->name_length, + (uchar*)opt->name.str, opt->name.length)) + return true; + } + return false; +} + diff --git a/sql/create_options.h b/sql/create_options.h index d6b48822c49..eb21f291ff4 100644 --- a/sql/create_options.h +++ b/sql/create_options.h @@ -99,4 +99,6 @@ uchar *engine_table_options_frm_image(uchar *buff, bool engine_options_differ(void *old_struct, void *new_struct, ha_create_table_option *rules); +bool is_engine_option_known(engine_option_value *opt, + ha_create_table_option *rules); #endif diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc index 44d89887c3b..4e2d5d3f5bf 100644 --- a/sql/event_parse_data.cc +++ b/sql/event_parse_data.cc @@ -564,7 +564,7 @@ Event_parse_data::init_definer(THD *thd) void Event_parse_data::check_originator_id(THD *thd) { /* Disable replicated events on slave. 
*/ - if (IF_WSREP(WSREP(thd) && thd->wsrep_applier, 0) || + if ((WSREP(thd) && IF_WSREP(thd->wsrep_applier, 0)) || (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL) || (thd->system_thread == SYSTEM_THREAD_SLAVE_IO)) { diff --git a/sql/field.h b/sql/field.h index fed6084fda2..f1679379fe8 100644 --- a/sql/field.h +++ b/sql/field.h @@ -183,6 +183,29 @@ inline bool is_temporal_type(enum_field_types type) return mysql_type_to_time_type(type) != MYSQL_TIMESTAMP_ERROR; } + +/** + Tests if field type is temporal and has time part, + i.e. represents TIME, DATETIME or TIMESTAMP types in SQL. + + @param type Field type, as returned by field->type(). + @retval true If field type is temporal type with time part. + @retval false If field type is not temporal type with time part. +*/ +inline bool is_temporal_type_with_time(enum_field_types type) +{ + switch (type) + { + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_TIMESTAMP: + return true; + default: + return false; + } +} + + /* Virtual_column_info is the class to contain additional characteristics that is specific for a virtual/computed diff --git a/sql/filesort.cc b/sql/filesort.cc index 5ca6be2a2f4..23cfd6a1817 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -225,6 +225,8 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, table, num_rows, memory_available)) { DBUG_PRINT("info", ("filesort PQ is applicable")); + thd->query_plan_flags|= QPLAN_FILESORT_PRIORITY_QUEUE; + status_var_increment(thd->status_var.filesort_pq_sorts_); const size_t compare_length= param.sort_length; if (pq.init(param.max_rows, true, // max_at_top @@ -719,6 +721,9 @@ static ha_rows find_all_keys(Sort_param *param, SQL_SELECT *select, /* Temporary set for register_used_fields and register_field_in_read_map */ sort_form->read_set= &sort_form->tmp_set; register_used_fields(param); + if (quick_select) + select->quick->add_used_key_part_to_set(sort_form->read_set); + Item *sort_cond= !select ? 0 : !select->pre_idx_push_select_cond ? select->cond : select->pre_idx_push_select_cond; diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index da7f3aeff89..a63ec65c020 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -8586,8 +8586,7 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment, ulonglong first_value_part, max_first_value; handler **file= m_file; first_value_part= max_first_value= *first_value; - /* Must lock and find highest value among all partitions. */ - lock_auto_increment(); + /* Must find highest value among all partitions. 
*/ do { /* Only nb_desired_values = 1 makes sense */ @@ -8598,7 +8597,6 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment, *first_value= first_value_part; /* log that the error was between table/partition handler */ sql_print_error("Partition failed to reserve auto_increment value"); - unlock_auto_increment(); DBUG_VOID_RETURN; } DBUG_PRINT("info", ("first_value_part: %lu", (ulong) first_value_part)); @@ -8606,7 +8604,6 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment, } while (*(++file)); *first_value= max_first_value; *nb_reserved_values= 1; - unlock_auto_increment(); } else { diff --git a/sql/handler.cc b/sql/handler.cc index a24f18f4863..2251b2498e2 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -609,7 +609,19 @@ int ha_initialize_handlerton(st_plugin_int *plugin) savepoint_alloc_size+= tmp; hton2plugin[hton->slot]=plugin; if (hton->prepare) + { total_ha_2pc++; + if (tc_log && tc_log != get_tc_log_implementation()) + { + total_ha_2pc--; + hton->prepare= 0; + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, + "Cannot enable tc-log at run-time. " + "XA features of %s are disabled", + plugin->name.str); + } + } break; } /* fall through */ @@ -3231,15 +3243,10 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment, if (error) { if (error == HA_ERR_END_OF_FILE || error == HA_ERR_KEY_NOT_FOUND) - { - /* No entry found, start with 1. */ - nr= 1; - } + /* No entry found, that's fine */; else - { - DBUG_ASSERT(0); - nr= ULONGLONG_MAX; - } + print_error(error, MYF(0)); + nr= 1; } else nr= ((ulonglong) table->next_number_field-> @@ -4736,11 +4743,13 @@ int ha_init_key_cache(const char *name, KEY_CACHE *key_cache, void *unused uint division_limit= (uint)key_cache->param_division_limit; uint age_threshold= (uint)key_cache->param_age_threshold; uint partitions= (uint)key_cache->param_partitions; + uint changed_blocks_hash_size= (uint)key_cache->changed_blocks_hash_size; mysql_mutex_unlock(&LOCK_global_system_variables); DBUG_RETURN(!init_key_cache(key_cache, tmp_block_size, tmp_buff_size, division_limit, age_threshold, + changed_blocks_hash_size, partitions)); } DBUG_RETURN(0); @@ -4761,10 +4770,12 @@ int ha_resize_key_cache(KEY_CACHE *key_cache) long tmp_block_size= (long) key_cache->param_block_size; uint division_limit= (uint)key_cache->param_division_limit; uint age_threshold= (uint)key_cache->param_age_threshold; + uint changed_blocks_hash_size= (uint)key_cache->changed_blocks_hash_size; mysql_mutex_unlock(&LOCK_global_system_variables); DBUG_RETURN(!resize_key_cache(key_cache, tmp_block_size, tmp_buff_size, - division_limit, age_threshold)); + division_limit, age_threshold, + changed_blocks_hash_size)); } DBUG_RETURN(0); } @@ -4804,10 +4815,12 @@ int ha_repartition_key_cache(KEY_CACHE *key_cache) uint division_limit= (uint)key_cache->param_division_limit; uint age_threshold= (uint)key_cache->param_age_threshold; uint partitions= (uint)key_cache->param_partitions; + uint changed_blocks_hash_size= (uint)key_cache->changed_blocks_hash_size; mysql_mutex_unlock(&LOCK_global_system_variables); DBUG_RETURN(!repartition_key_cache(key_cache, tmp_block_size, tmp_buff_size, division_limit, age_threshold, + changed_blocks_hash_size, partitions)); } DBUG_RETURN(0); diff --git a/sql/item.cc b/sql/item.cc index 0fefa54d849..8fc87149bc9 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -686,7 +686,7 @@ void Item::cleanup() { DBUG_ENTER("Item::cleanup"); DBUG_PRINT("enter", ("this: %p", this)); - 
fixed=0; + fixed= 0; marker= 0; join_tab_idx= MAX_TABLES; if (orig_name) @@ -1073,10 +1073,15 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs) name_length= 0; return; } - if (cs->ctype) - { - const char *str_start= str; + const char *str_start= str; + if (!cs->ctype || cs->mbminlen > 1) + { + str+= cs->cset->scan(cs, str, str + length, MY_SEQ_SPACES); + length-= str - str_start; + } + else + { /* This will probably need a better implementation in the future: a function in CHARSET_INFO structure. @@ -1086,21 +1091,21 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs) length--; str++; } - if (str != str_start && !is_autogenerated_name) - { - char buff[SAFE_NAME_LEN]; - strmake(buff, str_start, - MY_MIN(sizeof(buff)-1, length + (int) (str-str_start))); - - if (length == 0) - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_NAME_BECOMES_EMPTY, ER(ER_NAME_BECOMES_EMPTY), - buff); - else - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_REMOVED_SPACES, ER(ER_REMOVED_SPACES), - buff); - } + } + if (str != str_start && !is_autogenerated_name) + { + char buff[SAFE_NAME_LEN]; + strmake(buff, str_start, + MY_MIN(sizeof(buff)-1, length + (int) (str-str_start))); + + if (length == 0) + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_NAME_BECOMES_EMPTY, ER(ER_NAME_BECOMES_EMPTY), + buff); + else + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_REMOVED_SPACES, ER(ER_REMOVED_SPACES), + buff); } if (!my_charset_same(cs, system_charset_info)) { @@ -1166,6 +1171,8 @@ bool Item::eq(const Item *item, bool binary_cmp) const Item *Item::safe_charset_converter(CHARSET_INFO *tocs) { + if (!needs_charset_converter(tocs)) + return this; Item_func_conv_charset *conv= new Item_func_conv_charset(this, tocs, 1); return conv->safe ? conv : NULL; } @@ -1192,123 +1199,55 @@ Item *Item_num::safe_charset_converter(CHARSET_INFO *tocs) if (!(tocs->state & MY_CS_NONASCII)) return this; - Item_string *conv; - uint conv_errors; - char buf[64], buf2[64]; - String tmp(buf, sizeof(buf), &my_charset_bin); - String cstr(buf2, sizeof(buf2), &my_charset_bin); - String *ostr= val_str(&tmp); - char *ptr; - cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors); - if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(), - cstr.charset(), - collation.derivation))) - { - /* - Safe conversion is not possible (or EOM). - We could not convert a string into the requested character set - without data loss. The target charset does not cover all the - characters from the string. Operation cannot be done correctly. 
- */ - return NULL; - } - if (!(ptr= current_thd->strmake(cstr.ptr(), cstr.length()))) - return NULL; - conv->str_value.set(ptr, cstr.length(), cstr.charset()); - /* Ensure that no one is going to change the result string */ - conv->str_value.mark_as_const(); - conv->fix_char_length(max_char_length()); - return conv; -} - - -Item *Item_static_float_func::safe_charset_converter(CHARSET_INFO *tocs) -{ - Item_string *conv; - char buf[64]; - String *s, tmp(buf, sizeof(buf), &my_charset_bin); - s= val_str(&tmp); - if ((conv= new Item_static_string_func(func_name, s->ptr(), s->length(), - s->charset()))) - { - conv->str_value.copy(); - conv->str_value.mark_as_const(); - } + Item *conv; + if ((conv= const_charset_converter(tocs, true))) + conv->fix_char_length(max_char_length()); return conv; } -Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs) -{ - return charset_converter(tocs, true); -} - - /** - Convert a string item into the requested character set. + Create character set converter for constant items + using Item_null, Item_string or Item_static_string_func. @param tocs Character set to to convert the string to. @param lossless Whether data loss is acceptable. + @param func_name Function name, or NULL. - @return A new item representing the converted string. + @return this, if conversion is not needed, + NULL, if safe conversion is not possible, or + a new item representing the converted constant. */ -Item *Item_string::charset_converter(CHARSET_INFO *tocs, bool lossless) +Item *Item::const_charset_converter(CHARSET_INFO *tocs, + bool lossless, + const char *func_name) { - Item_string *conv; - uint conv_errors; - char *ptr; - String tmp, cstr, *ostr= val_str(&tmp); - cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors); - conv_errors= lossless && conv_errors; - if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(), - cstr.charset(), - collation.derivation))) - { - /* - Safe conversion is not possible (or EOM). - We could not convert a string into the requested character set - without data loss. The target charset does not cover all the - characters from the string. Operation cannot be done correctly. 
- */ - return NULL; - } - if (!(ptr= current_thd->strmake(cstr.ptr(), cstr.length()))) - return NULL; - conv->str_value.set(ptr, cstr.length(), cstr.charset()); - /* Ensure that no one is going to change the result string */ - conv->str_value.mark_as_const(); - return conv; -} + DBUG_ASSERT(const_item()); + DBUG_ASSERT(fixed); + StringBuffer<64>tmp; + String *s= val_str(&tmp); + if (!s) + return new Item_null((char *) func_name, tocs); -Item *Item_param::safe_charset_converter(CHARSET_INFO *tocs) -{ - if (const_item()) + if (!needs_charset_converter(s->length(), tocs)) { - uint cnv_errors; - String *ostr= val_str(&cnvstr); - cnvitem->str_value.copy(ostr->ptr(), ostr->length(), - ostr->charset(), tocs, &cnv_errors); - if (cnv_errors) - return NULL; - cnvitem->str_value.mark_as_const(); - cnvitem->max_length= cnvitem->str_value.numchars() * tocs->mbmaxlen; - return cnvitem; + if (collation.collation == &my_charset_bin && tocs != &my_charset_bin && + !this->check_well_formed_result(s, true)) + return NULL; + return this; } - return Item::safe_charset_converter(tocs); -} - -Item *Item_static_string_func::safe_charset_converter(CHARSET_INFO *tocs) -{ - Item_string *conv; uint conv_errors; - String tmp, cstr, *ostr= val_str(&tmp); - cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors); - if (conv_errors || - !(conv= new Item_static_string_func(func_name, - cstr.ptr(), cstr.length(), - cstr.charset(), - collation.derivation))) + Item_string *conv= func_name ? + new Item_static_string_func(func_name, + s, tocs, &conv_errors, + collation.derivation, + collation.repertoire) : + new Item_string(s, tocs, &conv_errors, + collation.derivation, + collation.repertoire); + + if (!conv || (conv_errors && lossless)) { /* Safe conversion is not possible (or EOM). @@ -1318,23 +1257,28 @@ Item *Item_static_string_func::safe_charset_converter(CHARSET_INFO *tocs) */ return NULL; } - conv->str_value.copy(); - /* Ensure that no one is going to change the result string */ - conv->str_value.mark_as_const(); + if (s->charset() == &my_charset_bin && tocs != &my_charset_bin && + !conv->check_well_formed_result(true)) + return NULL; return conv; } -bool Item_string::eq(const Item *item, bool binary_cmp) const +Item *Item_param::safe_charset_converter(CHARSET_INFO *tocs) { - if (type() == item->type() && item->basic_const_item()) - { - if (binary_cmp) - return !stringcmp(&str_value, &item->str_value); - return (collation.collation == item->collation.collation && - !sortcmp(&str_value, &item->str_value, collation.collation)); - } - return 0; + /* + Return "this" if in prepare. result_type may change at execition time, + to it's possible that the converter will not be needed at all: + + PREPARE stmt FROM 'SELECT * FROM t1 WHERE field = ?'; + SET @@arg= 1; + EXECUTE stms USING @arg; + + In the above example result_type is STRING_RESULT at prepare time, + and INT_RESULT at execution time. + */ + return !const_item() || state == NULL_VALUE ? 
+ this : const_charset_converter(tocs, true); } @@ -2123,7 +2067,7 @@ bool agg_item_collations(DTCollation &c, const char *fname, bool unknown_cs= 0; c.set(av[0]->collation); - for (i= 1, arg= &av[item_sep]; i < count; i++, arg++) + for (i= 1, arg= &av[item_sep]; i < count; i++, arg+= item_sep) { if (c.aggregate((*arg)->collation, flags)) { @@ -2202,33 +2146,10 @@ bool agg_item_set_converter(DTCollation &coll, const char *fname, for (i= 0, arg= args; i < nargs; i++, arg+= item_sep) { - Item* conv; - uint32 dummy_offset; - if (!String::needs_conversion(1, (*arg)->collation.collation, - coll.collation, - &dummy_offset)) - continue; - - /* - No needs to add converter if an "arg" is NUMERIC or DATETIME - value (which is pure ASCII) and at the same time target DTCollation - is ASCII-compatible. For example, no needs to rewrite: - SELECT * FROM t1 WHERE datetime_field = '2010-01-01'; - to - SELECT * FROM t1 WHERE CONVERT(datetime_field USING cs) = '2010-01-01'; - - TODO: avoid conversion of any values with - repertoire ASCII and 7bit-ASCII-compatible, - not only numeric/datetime origin. - */ - if ((*arg)->collation.derivation == DERIVATION_NUMERIC && - (*arg)->collation.repertoire == MY_REPERTOIRE_ASCII && - !((*arg)->collation.collation->state & MY_CS_NONASCII) && - !(coll.collation->state & MY_CS_NONASCII)) + Item* conv= (*arg)->safe_charset_converter(coll.collation); + if (conv == *arg) continue; - - if (!(conv= (*arg)->safe_charset_converter(coll.collation)) && - ((*arg)->collation.repertoire == MY_REPERTOIRE_ASCII)) + if (!conv && ((*arg)->collation.repertoire == MY_REPERTOIRE_ASCII)) conv= new Item_func_conv_charset(*arg, coll.collation, 1); if (!conv) @@ -3014,7 +2935,7 @@ String *Item_float::val_str(String *str) { // following assert is redundant, because fixed=1 assigned in constructor DBUG_ASSERT(fixed == 1); - str->set_real(value,decimals,&my_charset_bin); + str->set_real(value, decimals, &my_charset_numeric); return str; } @@ -3173,10 +3094,6 @@ my_decimal *Item_string::val_decimal(my_decimal *decimal_value) } -bool Item_null::eq(const Item *item, bool binary_cmp) const -{ return item->type() == type(); } - - double Item_null::val_real() { // following assert is redundant, because fixed=1 assigned in constructor @@ -3245,8 +3162,6 @@ Item_param::Item_param(uint pos_in_query_arg) : value is set. */ maybe_null= 1; - cnvitem= new Item_string("", 0, &my_charset_bin, DERIVATION_COERCIBLE); - cnvstr.set(cnvbuf, sizeof(cnvbuf), &my_charset_bin); } @@ -3806,18 +3721,14 @@ bool Item_param::convert_str_value(THD *thd) str_value.set_charset(value.cs_info.final_character_set_of_str_value); /* Here str_value is guaranteed to be in final_character_set_of_str_value */ - max_length= str_value.numchars() * str_value.charset()->mbmaxlen; - - /* For the strings converted to numeric form within some functions */ - decimals= NOT_FIXED_DEC; /* str_value_ptr is returned from val_str(). It must be not alloced to prevent it's modification by val_str() invoker. 
*/ str_value_ptr.set(str_value.ptr(), str_value.length(), str_value.charset()); - /* Synchronize item charset with value charset */ - collation.set(str_value.charset(), DERIVATION_COERCIBLE); + /* Synchronize item charset and length with value charset */ + fix_charset_and_length_from_str_value(DERIVATION_COERCIBLE); } return rc; } @@ -3847,7 +3758,8 @@ Item_param::clone_item() case STRING_VALUE: case LONG_DATA_VALUE: return new Item_string(name, str_value.c_ptr_quick(), str_value.length(), - str_value.charset()); + str_value.charset(), + collation.derivation, collation.repertoire); case TIME_VALUE: break; case NO_VALUE: @@ -3859,30 +3771,21 @@ Item_param::clone_item() bool -Item_param::eq(const Item *arg, bool binary_cmp) const +Item_param::eq(const Item *item, bool binary_cmp) const { - Item *item; - if (!basic_const_item() || !arg->basic_const_item() || arg->type() != type()) + if (!basic_const_item()) return FALSE; - /* - We need to cast off const to call val_int(). This should be OK for - a basic constant. - */ - item= (Item*) arg; switch (state) { case NULL_VALUE: - return TRUE; + return null_eq(item); case INT_VALUE: - return value.integer == item->val_int() && - unsigned_flag == item->unsigned_flag; + return int_eq(value.integer, item); case REAL_VALUE: - return value.real == item->val_real(); + return real_eq(value.real, item); case STRING_VALUE: case LONG_DATA_VALUE: - if (binary_cmp) - return !stringcmp(&str_value, &item->str_value); - return !sortcmp(&str_value, &item->str_value, collation.collation); + return str_eq(&str_value, item, binary_cmp); default: break; } @@ -5380,13 +5283,6 @@ bool Item_field::vcol_in_partition_func_processor(uchar *int_arg) } -Item *Item_field::safe_charset_converter(CHARSET_INFO *tocs) -{ - no_const_subst= 1; - return Item::safe_charset_converter(tocs); -} - - void Item_field::cleanup() { DBUG_ENTER("Item_field::cleanup"); @@ -5692,10 +5588,7 @@ String *Item::check_well_formed_result(String *str, bool send_error) { /* Check whether we got a well-formed string */ CHARSET_INFO *cs= str->charset(); - int well_formed_error; - uint wlen= cs->cset->well_formed_len(cs, - str->ptr(), str->ptr() + str->length(), - str->length(), &well_formed_error); + uint wlen= str->well_formed_length(); if (wlen < str->length()) { THD *thd= current_thd; @@ -6183,24 +6076,6 @@ int Item_decimal::save_in_field(Field *field, bool no_conversions) } -bool Item_int::eq(const Item *arg, bool binary_cmp) const -{ - /* No need to check for null value as basic constant can't be NULL */ - if (arg->basic_const_item() && arg->type() == type()) - { - /* - We need to cast off const to call val_int(). This should be OK for - a basic constant. - */ - Item *item= (Item*) arg; - return (item->val_int() == value && - ((longlong) value >= 0 || - (item->unsigned_flag == unsigned_flag))); - } - return FALSE; -} - - Item *Item_int_with_ref::clone_item() { DBUG_ASSERT(ref->const_item()); @@ -6318,27 +6193,6 @@ void Item_float::print(String *str, enum_query_type query_type) } -/* - hex item - In string context this is a binary string. - In number context this is a longlong value. -*/ - -bool Item_float::eq(const Item *arg, bool binary_cmp) const -{ - if (arg->basic_const_item() && arg->type() == type()) - { - /* - We need to cast off const to call val_int(). This should be OK for - a basic constant. - */ - Item *item= (Item*) arg; - return item->val_real() == value; - } - return FALSE; -} - - inline uint char_val(char X) { return (uint) (X >= '0' && X <= '9' ? 
X-'0' : @@ -6394,8 +6248,6 @@ int Item_hex_hybrid::save_in_field(Field *field, bool no_conversions) ulonglong nr; uint32 length= str_value.length(); - if (!length) - return 1; if (length > 8) { @@ -6435,32 +6287,6 @@ void Item_hex_string::print(String *str, enum_query_type query_type) } -bool Item_hex_constant::eq(const Item *arg, bool binary_cmp) const -{ - if (arg->basic_const_item() && arg->type() == type() && - arg->cast_to_int_type() == cast_to_int_type()) - { - if (binary_cmp) - return !stringcmp(&str_value, &arg->str_value); - return !sortcmp(&str_value, &arg->str_value, collation.collation); - } - return FALSE; -} - - -Item *Item_hex_constant::safe_charset_converter(CHARSET_INFO *tocs) -{ - Item_string *conv; - String tmp, *str= val_str(&tmp); - - if (!(conv= new Item_string(str->ptr(), str->length(), tocs))) - return NULL; - conv->str_value.copy(); - conv->str_value.mark_as_const(); - return conv; -} - - /* bin item. In string context this is a binary string. diff --git a/sql/item.h b/sql/item.h index 59c30737a10..ff0c786ab94 100644 --- a/sql/item.h +++ b/sql/item.h @@ -679,11 +679,20 @@ public: /* Reuse size, only used by SP local variable assignment, otherwize 0 */ uint rsize; +protected: /* str_values's main purpose is to be used to cache the value in save_in_field */ String str_value; + +public: + /* + Cache val_str() into the own buffer, e.g. to evaluate constant + expressions with subqueries in the ORDER/GROUP clauses. + */ + String *val_str() { return val_str(&str_value); } + char * name; /* Name from select */ /* Original item name (if it was renamed)*/ char * orig_name; @@ -1099,9 +1108,47 @@ public: virtual cond_result eq_cmp_result() const { return COND_OK; } inline uint float_length(uint decimals_par) const { return decimals != NOT_FIXED_DEC ? (DBL_DIG+2+decimals_par) : DBL_DIG+8;} + /* Returns total number of decimal digits */ virtual uint decimal_precision() const; + /* Returns the number of integer part digits only */ inline int decimal_int_part() const { return my_decimal_int_part(decimal_precision(), decimals); } + /* + Returns the number of fractional digits only. + NOT_FIXED_DEC is replaced to the maximum possible number + of fractional digits, taking into account the data type. + */ + uint decimal_scale() const + { + return decimals < NOT_FIXED_DEC ? decimals : + is_temporal_type_with_time(field_type()) ? + TIME_SECOND_PART_DIGITS : + MY_MIN(max_length, DECIMAL_MAX_SCALE); + } + /* + Returns how many digits a divisor adds into a division result. + This is important when the integer part of the divisor can be 0. + In this example: + SELECT 1 / 0.000001; -> 1000000.0000 + the divisor adds 5 digits into the result precision. + + Currently this method only replaces NOT_FIXED_DEC to + TIME_SECOND_PART_DIGITS for temporal data types. + This method can be made virtual, to create more efficient (smaller) + data types for division results. + For example, in + SELECT 1/1.000001; + the divisor could provide no additional precision into the result, + so could any other items that are know to return a result + with non-zero integer part. + */ + uint divisor_precision_increment() const + { + return decimals < NOT_FIXED_DEC ? decimals : + is_temporal_type_with_time(field_type()) ? 
+ TIME_SECOND_PART_DIGITS : + decimals; + } /** TIME or DATETIME precision of the item: 0..6 */ @@ -1258,7 +1305,6 @@ public: virtual bool intro_version(uchar *int_arg) { return 0; } virtual bool remove_dependence_processor(uchar * arg) { return 0; } - virtual bool remove_fixed(uchar * arg) { fixed= 0; return 0; } virtual bool cleanup_processor(uchar *arg); virtual bool collect_item_field_processor(uchar * arg) { return 0; } virtual bool add_field_to_set_processor(uchar * arg) { return 0; } @@ -1490,6 +1536,48 @@ public: virtual Item *expr_cache_insert_transformer(uchar *thd_arg) { return this; } virtual bool expr_cache_is_needed(THD *) { return FALSE; } virtual Item *safe_charset_converter(CHARSET_INFO *tocs); + bool needs_charset_converter(uint32 length, CHARSET_INFO *tocs) + { + /* + This will return "true" if conversion happens: + - between two non-binary different character sets + - from "binary" to "unsafe" character set + (those that can have non-well-formed string) + - from "binary" to UCS2-alike character set with mbminlen>1, + when prefix left-padding is needed for an incomplete character: + binary 0xFF -> ucs2 0x00FF) + */ + if (!String::needs_conversion_on_storage(length, + collation.collation, tocs)) + return false; + /* + No needs to add converter if an "arg" is NUMERIC or DATETIME + value (which is pure ASCII) and at the same time target DTCollation + is ASCII-compatible. For example, no needs to rewrite: + SELECT * FROM t1 WHERE datetime_field = '2010-01-01'; + to + SELECT * FROM t1 WHERE CONVERT(datetime_field USING cs) = '2010-01-01'; + + TODO: avoid conversion of any values with + repertoire ASCII and 7bit-ASCII-compatible, + not only numeric/datetime origin. + */ + if (collation.derivation == DERIVATION_NUMERIC && + collation.repertoire == MY_REPERTOIRE_ASCII && + !(collation.collation->state & MY_CS_NONASCII) && + !(tocs->state & MY_CS_NONASCII)) + return false; + return true; + } + bool needs_charset_converter(CHARSET_INFO *tocs) + { + // Pass 1 as length to force conversion if tocs->mbminlen>1. + return needs_charset_converter(1, tocs); + } + Item *const_charset_converter(CHARSET_INFO *tocs, bool lossless, + const char *func_name); + Item *const_charset_converter(CHARSET_INFO *tocs, bool lossless) + { return const_charset_converter(tocs, lossless, NULL); } void delete_self() { cleanup(); @@ -1649,12 +1737,102 @@ public: }; class sp_head; +class Item_string; -class Item_basic_constant :public Item + +/** + A common class for Item_basic_constant and Item_param +*/ +class Item_basic_value :public Item +{ + bool is_basic_value(const Item *item, Type type_arg) const + { + return item->basic_const_item() && item->type() == type_arg; + } + bool is_basic_value(Type type_arg) const + { + return basic_const_item() && type() == type_arg; + } + bool str_eq(const String *value, + const String *other, CHARSET_INFO *cs, bool binary_cmp) const + { + return binary_cmp ? + value->bin_eq(other) : + collation.collation == cs && value->eq(other, collation.collation); + } + +protected: + // Value metadata, e.g. 
to make string processing easier + class Metadata: private MY_STRING_METADATA + { + public: + Metadata(const String *str) + { + my_string_metadata_get(this, str->charset(), str->ptr(), str->length()); + } + Metadata(const String *str, uint repertoire) + { + MY_STRING_METADATA::repertoire= repertoire; + MY_STRING_METADATA::char_length= str->numchars(); + } + uint repertoire() const { return MY_STRING_METADATA::repertoire; } + size_t char_length() const { return MY_STRING_METADATA::char_length; } + }; + void fix_charset_and_length_from_str_value(Derivation dv, Metadata metadata) + { + /* + We have to have a different max_length than 'length' here to + ensure that we get the right length if we do use the item + to create a new table. In this case max_length must be the maximum + number of chars for a string of this type because we in Create_field:: + divide the max_length with mbmaxlen). + */ + collation.set(str_value.charset(), dv, metadata.repertoire()); + fix_char_length(metadata.char_length()); + decimals= NOT_FIXED_DEC; + } + void fix_charset_and_length_from_str_value(Derivation dv) + { + fix_charset_and_length_from_str_value(dv, Metadata(&str_value)); + } + Item_basic_value(): Item() {} + /* + In the xxx_eq() methods below we need to cast off "const" to + call val_xxx(). This is OK for Item_basic_constant and Item_param. + */ + bool null_eq(const Item *item) const + { + DBUG_ASSERT(is_basic_value(NULL_ITEM)); + return item->type() == NULL_ITEM; + } + bool str_eq(const String *value, const Item *item, bool binary_cmp) const + { + DBUG_ASSERT(is_basic_value(STRING_ITEM)); + return is_basic_value(item, STRING_ITEM) && + str_eq(value, ((Item_basic_value*)item)->val_str(NULL), + item->collation.collation, binary_cmp); + } + bool real_eq(double value, const Item *item) const + { + DBUG_ASSERT(is_basic_value(REAL_ITEM)); + return is_basic_value(item, REAL_ITEM) && + value == ((Item_basic_value*)item)->val_real(); + } + bool int_eq(longlong value, const Item *item) const + { + DBUG_ASSERT(is_basic_value(INT_ITEM)); + return is_basic_value(item, INT_ITEM) && + value == ((Item_basic_value*)item)->val_int() && + (value >= 0 || item->unsigned_flag == unsigned_flag); + } +}; + + +class Item_basic_constant :public Item_basic_value { table_map used_table_map; public: - Item_basic_constant(): Item(), used_table_map(0) {}; + Item_basic_constant(): Item_basic_value(), used_table_map(0) {}; void set_used_tables(table_map map) { used_table_map= map; } table_map used_tables() const { return used_table_map; } /* to prevent drop fixed flag (no need parent cleanup call) */ @@ -2195,7 +2373,6 @@ public: Item *replace_equal_field(uchar *arg); inline uint32 max_disp_length() { return field->max_display_length(); } Item_field *field_for_view_update() { return this; } - Item *safe_charset_converter(CHARSET_INFO *tocs); int fix_outer_field(THD *thd, Field **field, Item **reference); virtual Item *update_value_transformer(uchar *select_arg); virtual void print(String *str, enum_query_type query_type); @@ -2219,16 +2396,16 @@ public: class Item_null :public Item_basic_constant { public: - Item_null(char *name_par=0) + Item_null(char *name_par=0, CHARSET_INFO *cs= &my_charset_bin) { maybe_null= null_value= TRUE; max_length= 0; name= name_par ? 
name_par : (char*) "NULL"; fixed= 1; - collation.set(&my_charset_bin, DERIVATION_IGNORABLE); + collation.set(cs, DERIVATION_IGNORABLE); } enum Type type() const { return NULL_ITEM; } - bool eq(const Item *item, bool binary_cmp) const; + bool eq(const Item *item, bool binary_cmp) const { return null_eq(item); } double val_real(); longlong val_int(); String *val_str(String *str); @@ -2271,14 +2448,10 @@ public: /* Item represents one placeholder ('?') of prepared statement */ -class Item_param :public Item, +class Item_param :public Item_basic_value, private Settable_routine_parameter, public Rewritable_query_parameter { - char cnvbuf[MAX_FIELD_WIDTH]; - String cnvstr; - Item *cnvitem; - public: enum enum_item_param_state { @@ -2457,7 +2630,8 @@ public: Item_num *neg() { value= -value; return this; } uint decimal_precision() const { return (uint) (max_length - MY_TEST(value < 0)); } - bool eq(const Item *, bool binary_cmp) const; + bool eq(const Item *item, bool binary_cmp) const + { return int_eq(value, item); } bool check_partition_func_processor(uchar *bool_arg) { return FALSE;} bool check_vcol_func_processor(uchar *arg) { return FALSE;} }; @@ -2578,7 +2752,8 @@ public: { return new Item_float(name, value, decimals, max_length); } Item_num *neg() { value= -value; return this; } virtual void print(String *str, enum_query_type query_type); - bool eq(const Item *, bool binary_cmp) const; + bool eq(const Item *item, bool binary_cmp) const + { return real_eq(value, item); } }; @@ -2596,70 +2771,98 @@ public: str->append(func_name); } - Item *safe_charset_converter(CHARSET_INFO *tocs); + Item *safe_charset_converter(CHARSET_INFO *tocs) + { + return const_charset_converter(tocs, true, func_name); + } }; class Item_string :public Item_basic_constant { -public: - Item_string(const char *str,uint length, - CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE, - uint repertoire= MY_REPERTOIRE_UNICODE30) - : m_cs_specified(FALSE) + bool m_cs_specified; +protected: + /** + Set the value of m_cs_specified attribute. + + m_cs_specified attribute shows whether character-set-introducer was + explicitly specified in the original query for this text literal or + not. The attribute makes sense (is used) only for views. + + This operation is to be called from the parser during parsing an input + query. + */ + inline void set_cs_specified(bool cs_specified) { - str_value.set_or_copy_aligned(str, length, cs); - collation.set(cs, dv, repertoire); - /* - We have to have a different max_length than 'length' here to - ensure that we get the right length if we do use the item - to create a new table. In this case max_length must be the maximum - number of chars for a string of this type because we in Create_field:: - divide the max_length with mbmaxlen). 
- */ - max_length= str_value.numchars()*cs->mbmaxlen; - set_name(str, length, cs); - decimals=NOT_FIXED_DEC; + m_cs_specified= cs_specified; + } + void fix_from_value(Derivation dv, const Metadata metadata) + { + fix_charset_and_length_from_str_value(dv, metadata); // it is constant => can be used without fix_fields (and frequently used) fixed= 1; } + void fix_and_set_name_from_value(Derivation dv, const Metadata metadata) + { + fix_from_value(dv, metadata); + set_name(str_value.ptr(), str_value.length(), str_value.charset()); + } +protected: /* Just create an item and do not fill string representation */ Item_string(CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE) : m_cs_specified(FALSE) { collation.set(cs, dv); max_length= 0; - set_name(NULL, 0, cs); + set_name(NULL, 0, system_charset_info); decimals= NOT_FIXED_DEC; fixed= 1; } - Item_string(const char *name_par, const char *str, uint length, - CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE, - uint repertoire= MY_REPERTOIRE_UNICODE30) +public: + // Constructors with the item name set from its value + Item_string(const char *str, uint length, CHARSET_INFO *cs, + Derivation dv, uint repertoire) : m_cs_specified(FALSE) { str_value.set_or_copy_aligned(str, length, cs); - collation.set(cs, dv, repertoire); - max_length= str_value.numchars()*cs->mbmaxlen; - set_name(name_par, 0, cs); - decimals=NOT_FIXED_DEC; - // it is constant => can be used without fix_fields (and frequently used) - fixed= 1; + fix_and_set_name_from_value(dv, Metadata(&str_value, repertoire)); } - /* - This is used in stored procedures to avoid memory leaks and - does a deep copy of its argument. - */ - void set_str_with_copy(const char *str_arg, uint length_arg) + Item_string(const char *str, uint length, + CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE) + : m_cs_specified(FALSE) { - str_value.copy(str_arg, length_arg, collation.collation); - max_length= str_value.numchars() * collation.collation->mbmaxlen; + str_value.set_or_copy_aligned(str, length, cs); + fix_and_set_name_from_value(dv, Metadata(&str_value)); } - void set_repertoire_from_value() + Item_string(const String *str, CHARSET_INFO *tocs, uint *conv_errors, + Derivation dv, uint repertoire) + :m_cs_specified(false) { - collation.repertoire= my_string_repertoire(str_value.charset(), - str_value.ptr(), - str_value.length()); + if (str_value.copy(str, tocs, conv_errors)) + str_value.set("", 0, tocs); // EOM ? 
+ str_value.mark_as_const(); + fix_and_set_name_from_value(dv, Metadata(&str_value, repertoire)); + } + // Constructors with an externally provided item name + Item_string(const char *name_par, const char *str, uint length, + CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE) + :m_cs_specified(false) + { + str_value.set_or_copy_aligned(str, length, cs); + fix_from_value(dv, Metadata(&str_value)); + set_name(name_par, 0, system_charset_info); + } + Item_string(const char *name_par, const char *str, uint length, + CHARSET_INFO *cs, Derivation dv, uint repertoire) + :m_cs_specified(false) + { + str_value.set_or_copy_aligned(str, length, cs); + fix_from_value(dv, Metadata(&str_value, repertoire)); + set_name(name_par, 0, system_charset_info); + } + void print_value(String *to) const + { + str_value.print(to); } enum Type type() const { return STRING_ITEM; } double val_real(); @@ -2674,14 +2877,19 @@ public: enum Item_result result_type () const { return STRING_RESULT; } enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; } bool basic_const_item() const { return 1; } - bool eq(const Item *item, bool binary_cmp) const; + bool eq(const Item *item, bool binary_cmp) const + { + return str_eq(&str_value, item, binary_cmp); + } Item *clone_item() { return new Item_string(name, str_value.ptr(), - str_value.length(), collation.collation); + str_value.length(), collation.collation); + } + Item *safe_charset_converter(CHARSET_INFO *tocs) + { + return const_charset_converter(tocs, true); } - Item *safe_charset_converter(CHARSET_INFO *tocs); - Item *charset_converter(CHARSET_INFO *tocs, bool lossless); inline void append(char *str, uint length) { str_value.append(str, length); @@ -2715,23 +2923,79 @@ public: return m_cs_specified; } - /** - Set the value of m_cs_specified attribute. + String *check_well_formed_result(bool send_error) + { return Item::check_well_formed_result(&str_value, send_error); } - m_cs_specified attribute shows whether character-set-introducer was - explicitly specified in the original query for this text literal or - not. The attribute makes sense (is used) only for views. + enum_field_types odbc_temporal_literal_type(const LEX_STRING *type_str) const + { + /* + If string is a reasonably short pure ASCII string literal, + try to parse known ODBC style date, time or timestamp literals, + e.g: + SELECT {d'2001-01-01'}; + SELECT {t'10:20:30'}; + SELECT {ts'2001-01-01 10:20:30'}; + */ + if (collation.repertoire == MY_REPERTOIRE_ASCII && + str_value.length() < MAX_DATE_STRING_REP_LENGTH * 4) + { + if (type_str->length == 1) + { + if (type_str->str[0] == 'd') /* {d'2001-01-01'} */ + return MYSQL_TYPE_DATE; + else if (type_str->str[0] == 't') /* {t'10:20:30'} */ + return MYSQL_TYPE_TIME; + } + else if (type_str->length == 2) /* {ts'2001-01-01 10:20:30'} */ + { + if (type_str->str[0] == 't' && type_str->str[1] == 's') + return MYSQL_TYPE_DATETIME; + } + } + return MYSQL_TYPE_STRING; // Not a temporal literal + } +}; - This operation is to be called from the parser during parsing an input - query. 
- */ - inline void set_cs_specified(bool cs_specified) + +class Item_string_with_introducer :public Item_string +{ +public: + Item_string_with_introducer(const char *str, uint length, CHARSET_INFO *cs) + :Item_string(str, length, cs) { - m_cs_specified= cs_specified; + set_cs_specified(true); } + Item_string_with_introducer(const String *str, CHARSET_INFO *tocs) + :Item_string(str->ptr(), str->length(), tocs) + { + set_cs_specified(true); + } +}; -private: - bool m_cs_specified; + +class Item_string_sys :public Item_string +{ +public: + Item_string_sys(const char *str, uint length) + :Item_string(str, length, system_charset_info) + { } + Item_string_sys(const char *str) + :Item_string(str, strlen(str), system_charset_info) + { } +}; + + +class Item_string_ascii :public Item_string +{ +public: + Item_string_ascii(const char *str, uint length) + :Item_string(str, length, &my_charset_latin1, + DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII) + { } + Item_string_ascii(const char *str) + :Item_string(str, strlen(str), &my_charset_latin1, + DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII) + { } }; @@ -2751,7 +3015,17 @@ public: Derivation dv= DERIVATION_COERCIBLE) :Item_string(NullS, str, length, cs, dv), func_name(name_par) {} - Item *safe_charset_converter(CHARSET_INFO *tocs); + Item_static_string_func(const char *name_par, + const String *str, + CHARSET_INFO *tocs, uint *conv_errors, + Derivation dv, uint repertoire) + :Item_string(str, tocs, conv_errors, dv, repertoire), + func_name(name_par) + {} + Item *safe_charset_converter(CHARSET_INFO *tocs) + { + return const_charset_converter(tocs, true, func_name); + } virtual inline void print(String *str, enum_query_type query_type) { @@ -2854,11 +3128,19 @@ public: enum Type type() const { return VARBIN_ITEM; } enum Item_result result_type () const { return STRING_RESULT; } enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; } - virtual Item *safe_charset_converter(CHARSET_INFO *tocs); + virtual Item *safe_charset_converter(CHARSET_INFO *tocs) + { + return const_charset_converter(tocs, true); + } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *arg) { return FALSE;} bool basic_const_item() const { return 1; } - bool eq(const Item *item, bool binary_cmp) const; + bool eq(const Item *item, bool binary_cmp) const + { + return item->basic_const_item() && item->type() == type() && + item->cast_to_int_type() == cast_to_int_type() && + str_value.bin_eq(&((Item_hex_constant*)item)->str_value); + } String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; } }; @@ -3654,7 +3936,7 @@ public: { ref= &outer_ref; set_properties(); - fixed= 0; + fixed= 0; /* reset flag set in set_properties() */ } Item_outer_ref(Name_resolution_context *context_arg, Item **item, const char *table_name_arg, const char *field_name_arg, diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index cd53ee731f7..f09cb76b75c 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -883,6 +883,18 @@ class in_string :public in_vector { char buff[STRING_BUFFER_USUAL_SIZE]; String tmp; + class Item_string_for_in_vector: public Item_string + { + public: + Item_string_for_in_vector(CHARSET_INFO *cs): + Item_string(cs) + { } + void set_value(const String *str) + { + str_value= *str; + collation.set(str->charset()); + } + }; public: in_string(uint elements,qsort2_cmp cmp_func, CHARSET_INFO *cs); ~in_string(); @@ -890,13 +902,13 @@ public: uchar *get_value(Item *item); Item* create_item() { - return new 
Item_string(collation); + return new Item_string_for_in_vector(collation); } void value_to_item(uint pos, Item *item) { String *str=((String*) base)+pos; - Item_string *to= (Item_string*)item; - to->str_value= *str; + Item_string_for_in_vector *to= (Item_string_for_in_vector*) item; + to->set_value(str); } Item_result result_type() { return STRING_RESULT; } }; diff --git a/sql/item_create.cc b/sql/item_create.cc index 193c7deb207..fa8249c3321 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -1264,6 +1264,21 @@ protected: }; +#if defined(HAVE_SPATIAL) && !defined(DBUG_OFF) +class Create_func_gis_debug : public Create_func_arg1 +{ + public: + virtual Item *create_1_arg(THD *thd, Item *arg1); + + static Create_func_gis_debug s_singleton; + + protected: + Create_func_gis_debug() {} + virtual ~Create_func_gis_debug() {} +}; +#endif + + #ifdef HAVE_SPATIAL class Create_func_glength : public Create_func_arg1 { @@ -4159,6 +4174,17 @@ Create_func_get_lock::create_2_arg(THD *thd, Item *arg1, Item *arg2) } +#if defined(HAVE_SPATIAL) && !defined(DBUG_OFF) +Create_func_gis_debug Create_func_gis_debug::s_singleton; + +Item* +Create_func_gis_debug::create_1_arg(THD *thd, Item *arg1) +{ + return new (thd->mem_root) Item_func_gis_debug(arg1); +} +#endif + + #ifdef HAVE_SPATIAL Create_func_glength Create_func_glength::s_singleton; @@ -5209,26 +5235,7 @@ Create_func_space Create_func_space::s_singleton; Item* Create_func_space::create_1_arg(THD *thd, Item *arg1) { - /** - TODO: Fix Bug#23637 - The parsed item tree should not depend on - <code>thd->variables.collation_connection</code>. - */ - CHARSET_INFO *cs= thd->variables.collation_connection; - Item *sp; - - if (cs->mbminlen > 1) - { - uint dummy_errors; - sp= new (thd->mem_root) Item_string("", 0, cs, DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); - sp->str_value.copy(" ", 1, &my_charset_latin1, cs, &dummy_errors); - } - else - { - sp= new (thd->mem_root) Item_string(" ", 1, cs, DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); - } - - return new (thd->mem_root) Item_func_repeat(sp, arg1); + return new (thd->mem_root) Item_func_space(arg1); } @@ -5854,6 +5861,9 @@ static Native_func_registry func_array[] = { { C_STRING_WITH_LEN("ST_GEOMETRYTYPE") }, GEOM_BUILDER(Create_func_geometry_type)}, { { C_STRING_WITH_LEN("ST_GEOMFROMTEXT") }, GEOM_BUILDER(Create_func_geometry_from_text)}, { { C_STRING_WITH_LEN("ST_GEOMFROMWKB") }, GEOM_BUILDER(Create_func_geometry_from_wkb)}, +#ifndef DBUG_OFF + { { C_STRING_WITH_LEN("ST_GIS_DEBUG") }, GEOM_BUILDER(Create_func_gis_debug)}, +#endif { { C_STRING_WITH_LEN("ST_EQUALS") }, GEOM_BUILDER(Create_func_equals)}, { { C_STRING_WITH_LEN("ST_INTERIORRINGN") }, GEOM_BUILDER(Create_func_interiorringn)}, { { C_STRING_WITH_LEN("ST_INTERSECTS") }, GEOM_BUILDER(Create_func_intersects)}, diff --git a/sql/item_create.h b/sql/item_create.h index 5f1a8c6006d..05fe48f656a 100644 --- a/sql/item_create.h +++ b/sql/item_create.h @@ -173,6 +173,15 @@ Item *create_temporal_literal(THD *thd, CHARSET_INFO *cs, enum_field_types type, bool send_error); +inline +Item *create_temporal_literal(THD *thd, const String *str, + enum_field_types type, + bool send_error) +{ + return create_temporal_literal(thd, + str->ptr(), str->length(), str->charset(), + type, send_error); +} int item_create_init(); void item_create_cleanup(); diff --git a/sql/item_func.cc b/sql/item_func.cc index 2b89aa04295..4ec0466bda8 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -593,7 +593,7 @@ my_decimal *Item_real_func::val_decimal(my_decimal *decimal_value) } 
-void Item_func::fix_num_length_and_dec() +void Item_udf_func::fix_num_length_and_dec() { uint fl_length= 0; decimals=0; @@ -611,11 +611,6 @@ void Item_func::fix_num_length_and_dec() } -void Item_func_numhybrid::fix_num_length_and_dec() -{} - - - /** Count max_length and decimals for temporal functions. @@ -803,9 +798,9 @@ bool Item_func_connection_id::fix_fields(THD *thd, Item **ref) function of two arguments. */ -void Item_num_op::find_num_type(void) +void Item_num_op::fix_length_and_dec(void) { - DBUG_ENTER("Item_num_op::find_num_type"); + DBUG_ENTER("Item_num_op::fix_length_and_dec"); DBUG_PRINT("info", ("name %s", func_name())); DBUG_ASSERT(arg_count == 2); Item_result r0= args[0]->cast_to_int_type(); @@ -849,22 +844,26 @@ void Item_num_op::find_num_type(void) type depends only on the first argument) */ -void Item_func_num1::find_num_type() +void Item_func_num1::fix_length_and_dec() { - DBUG_ENTER("Item_func_num1::find_num_type"); + DBUG_ENTER("Item_func_num1::fix_length_and_dec"); DBUG_PRINT("info", ("name %s", func_name())); switch (cached_result_type= args[0]->cast_to_int_type()) { case INT_RESULT: + max_length= args[0]->max_length; unsigned_flag= args[0]->unsigned_flag; break; case STRING_RESULT: case REAL_RESULT: cached_result_type= REAL_RESULT; + decimals= args[0]->decimals; // Preserve NOT_FIXED_DEC max_length= float_length(decimals); break; case TIME_RESULT: cached_result_type= DECIMAL_RESULT; case DECIMAL_RESULT: + decimals= args[0]->decimal_scale(); // Do not preserve NOT_FIXED_DEC + max_length= args[0]->max_length; break; case ROW_RESULT: case IMPOSSIBLE_RESULT: @@ -879,20 +878,6 @@ void Item_func_num1::find_num_type() } -void Item_func_num1::fix_num_length_and_dec() -{ - decimals= args[0]->decimals; - max_length= args[0]->max_length; -} - - -void Item_func_numhybrid::fix_length_and_dec() -{ - fix_num_length_and_dec(); - find_num_type(); -} - - String *Item_func_hybrid_result_type::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -1537,11 +1522,14 @@ my_decimal *Item_func_plus::decimal_op(my_decimal *decimal_value) */ void Item_func_additive_op::result_precision() { - decimals= MY_MAX(args[0]->decimals, args[1]->decimals); - int arg1_int= args[0]->decimal_precision() - args[0]->decimals; - int arg2_int= args[1]->decimal_precision() - args[1]->decimals; + decimals= MY_MAX(args[0]->decimal_scale(), args[1]->decimal_scale()); + int arg1_int= args[0]->decimal_precision() - args[0]->decimal_scale(); + int arg2_int= args[1]->decimal_precision() - args[1]->decimal_scale(); int precision= MY_MAX(arg1_int, arg2_int) + 1 + decimals; + DBUG_ASSERT(arg1_int >= 0); + DBUG_ASSERT(arg2_int >= 0); + /* Integer operations keep unsigned_flag if one of arguments is unsigned */ if (result_type() == INT_RESULT) unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag; @@ -1778,7 +1766,8 @@ void Item_func_mul::result_precision() unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag; else unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag; - decimals= MY_MIN(args[0]->decimals + args[1]->decimals, DECIMAL_MAX_SCALE); + decimals= MY_MIN(args[0]->decimal_scale() + args[1]->decimal_scale(), + DECIMAL_MAX_SCALE); uint est_prec = args[0]->decimal_precision() + args[1]->decimal_precision(); uint precision= MY_MIN(est_prec, DECIMAL_MAX_PRECISION); max_length= my_decimal_precision_to_length_no_truncation(precision, decimals, @@ -1832,8 +1821,20 @@ my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value) void Item_func_div::result_precision() { + /* + We need to add 
args[1]->divisor_precision_increment(), + to properly handle the cases like this: + SELECT 5.05 / 0.014; -> 360.714286 + i.e. when the divisor has a zero integer part + and non-zero digits appear only after the decimal point. + Precision in this example is calculated as + args[0]->decimal_precision() + // 3 + args[1]->divisor_precision_increment() + // 3 + prec_increment // 4 + which gives 10 decimals digits. + */ uint precision=MY_MIN(args[0]->decimal_precision() + - args[1]->decimals + prec_increment, + args[1]->divisor_precision_increment() + prec_increment, DECIMAL_MAX_PRECISION); /* Integer operations keep unsigned_flag if one of arguments is unsigned */ @@ -1841,7 +1842,7 @@ void Item_func_div::result_precision() unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag; else unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag; - decimals= MY_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE); + decimals= MY_MIN(args[0]->decimal_scale() + prec_increment, DECIMAL_MAX_SCALE); max_length= my_decimal_precision_to_length_no_truncation(precision, decimals, unsigned_flag); } @@ -2047,7 +2048,7 @@ my_decimal *Item_func_mod::decimal_op(my_decimal *decimal_value) void Item_func_mod::result_precision() { - decimals= MY_MAX(args[0]->decimals, args[1]->decimals); + decimals= MY_MAX(args[0]->decimal_scale(), args[1]->decimal_scale()); max_length= MY_MAX(args[0]->max_length, args[1]->max_length); } @@ -2103,18 +2104,12 @@ my_decimal *Item_func_neg::decimal_op(my_decimal *decimal_value) } -void Item_func_neg::fix_num_length_and_dec() -{ - decimals= args[0]->decimals; - /* 1 add because sign can appear */ - max_length= args[0]->max_length + 1; -} - - void Item_func_neg::fix_length_and_dec() { DBUG_ENTER("Item_func_neg::fix_length_and_dec"); Item_func_num1::fix_length_and_dec(); + /* 1 add because sign can appear */ + max_length= args[0]->max_length + 1; /* If this is in integer context keep the context as integer if possible @@ -2421,8 +2416,12 @@ void Item_func_integer::fix_length_and_dec() decimals=0; } -void Item_func_int_val::fix_num_length_and_dec() + +void Item_func_int_val::fix_length_and_dec() { + DBUG_ENTER("Item_func_int_val::fix_length_and_dec"); + DBUG_PRINT("info", ("name %s", func_name())); + ulonglong tmp_max_length= (ulonglong ) args[0]->max_length - (args[0]->decimals ? args[0]->decimals + 1 : 0) + 2; max_length= tmp_max_length > (ulonglong) 4294967295U ? 
@@ -2430,13 +2429,7 @@ void Item_func_int_val::fix_num_length_and_dec() uint tmp= float_length(decimals); set_if_smaller(max_length,tmp); decimals= 0; -} - -void Item_func_int_val::find_num_type() -{ - DBUG_ENTER("Item_func_int_val::find_num_type"); - DBUG_PRINT("info", ("name %s", func_name())); switch (cached_result_type= args[0]->cast_to_int_type()) { case STRING_RESULT: @@ -2973,7 +2966,7 @@ bool Item_func_min_max::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) { ltime->time_type= MYSQL_TIMESTAMP_TIME; ltime->hour+= (ltime->month * 32 + ltime->day) * 24; - ltime->month= ltime->day= 0; + ltime->year= ltime->month= ltime->day= 0; if (adjust_time_range_with_warn(ltime, std::min<uint>(decimals, TIME_SECOND_PART_DIGITS))) return (null_value= true); @@ -3907,12 +3900,6 @@ String *Item_func_udf_decimal::val_str(String *str) } -void Item_func_udf_decimal::fix_length_and_dec() -{ - fix_num_length_and_dec(); -} - - /* Default max_length is max argument length */ void Item_func_udf_str::fix_length_and_dec() @@ -3987,9 +3974,13 @@ longlong Item_master_pos_wait::val_int() else connection_name= thd->variables.default_master_connection; - if (!(mi= master_info_index->get_master_info(&connection_name, - Sql_condition::WARN_LEVEL_WARN))) + mysql_mutex_lock(&LOCK_active_mi); + mi= master_info_index->get_master_info(&connection_name, + Sql_condition::WARN_LEVEL_WARN); + mysql_mutex_unlock(&LOCK_active_mi); + if (!mi) goto err; + if ((event_count = mi->rli.wait_for_pos(thd, log_name, pos, timeout)) == -2) { null_value = 1; diff --git a/sql/item_func.h b/sql/item_func.h index 1696898812d..18265f672dd 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -154,7 +154,6 @@ public: virtual void print(String *str, enum_query_type query_type); void print_op(String *str, enum_query_type query_type); void print_args(String *str, uint from, enum_query_type query_type); - virtual void fix_num_length_and_dec(); void count_only_length(Item **item, uint nitems); void count_real_length(); void count_decimal_length(); @@ -541,9 +540,6 @@ public: Item_func_numhybrid(List<Item> &list) :Item_func_hybrid_result_type(list) { } - void fix_length_and_dec(); - void fix_num_length_and_dec(); - virtual void find_num_type()= 0; /* To be called from fix_length_and_dec */ String *str_op(String *str) { DBUG_ASSERT(0); return 0; } bool date_op(MYSQL_TIME *ltime, uint fuzzydate) { DBUG_ASSERT(0); return true; } }; @@ -555,9 +551,7 @@ class Item_func_num1: public Item_func_numhybrid public: Item_func_num1(Item *a) :Item_func_numhybrid(a) {} Item_func_num1(Item *a, Item *b) :Item_func_numhybrid(a, b) {} - - void fix_num_length_and_dec(); - void find_num_type(); + void fix_length_and_dec(); }; @@ -573,7 +567,7 @@ class Item_num_op :public Item_func_numhybrid print_op(str, query_type); } - void find_num_type(); + void fix_length_and_dec(); }; @@ -795,7 +789,6 @@ public: const char *func_name() const { return "-"; } enum Functype functype() const { return NEG_FUNC; } void fix_length_and_dec(); - void fix_num_length_and_dec(); uint decimal_precision() const { return args[0]->decimal_precision(); } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *int_arg) { return FALSE;} @@ -962,8 +955,7 @@ class Item_func_int_val :public Item_func_num1 { public: Item_func_int_val(Item *a) :Item_func_num1(a) {} - void fix_num_length_and_dec(); - void find_num_type(); + void fix_length_and_dec(); }; @@ -1376,6 +1368,7 @@ public: fixed= 1; return res; } + void fix_num_length_and_dec(); void 
update_used_tables() { /* @@ -1489,7 +1482,7 @@ public: my_decimal *val_decimal(my_decimal *); String *val_str(String *str); enum Item_result result_type () const { return DECIMAL_RESULT; } - void fix_length_and_dec(); + void fix_length_and_dec() { fix_num_length_and_dec(); } }; diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 1deda83907c..d9200b3e8d3 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -1739,4 +1739,12 @@ mem_error: } +#ifndef DBUG_OFF +longlong Item_func_gis_debug::val_int() +{ + /* For now this is just a stub. TODO: implement the internal GIS debuggign */ + return 0; +} +#endif + #endif /*HAVE_SPATIAL*/ diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index 2d715dc8765..6d52661e5c9 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -496,6 +496,18 @@ public: const char *func_name() const { return "st_distance"; } }; + +#ifndef DBUG_OFF +class Item_func_gis_debug: public Item_int_func +{ + public: + Item_func_gis_debug(Item *a) :Item_int_func(a) { null_value= false; } + const char *func_name() const { return "st_gis_debug"; } + longlong val_int(); +}; +#endif + + #define GEOM_NEW(thd, obj_constructor) new (thd->mem_root) obj_constructor #else /*HAVE_SPATIAL*/ diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index fa6ba706718..9a3c7589db2 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -179,16 +179,27 @@ String *Item_func_md5::val_str_ascii(String *str) } +/* + The MD5()/SHA() functions treat their parameter as being a case sensitive. + Thus we set binary collation on it so different instances of MD5() will be + compared properly. +*/ +static CHARSET_INFO *get_checksum_charset(const char *csname) +{ + CHARSET_INFO *cs= get_charset_by_csname(csname, MY_CS_BINSORT, MYF(0)); + if (!cs) + { + // Charset has no binary collation: use my_charset_bin. + cs= &my_charset_bin; + } + return cs; +} + + void Item_func_md5::fix_length_and_dec() { - /* - The MD5() function treats its parameter as being a case sensitive. Thus - we set binary collation on it so different instances of MD5() will be - compared properly. - */ - args[0]->collation.set( - get_charset_by_csname(args[0]->collation.collation->csname, - MY_CS_BINSORT,MYF(0)), DERIVATION_COERCIBLE); + CHARSET_INFO *cs= get_checksum_charset(args[0]->collation.collation->csname); + args[0]->collation.set(cs, DERIVATION_COERCIBLE); fix_length_and_charset(32, default_charset()); } @@ -218,14 +229,8 @@ String *Item_func_sha::val_str_ascii(String *str) void Item_func_sha::fix_length_and_dec() { - /* - The SHA() function treats its parameter as being a case sensitive. Thus - we set binary collation on it so different instances of MD5() will be - compared properly. - */ - args[0]->collation.set( - get_charset_by_csname(args[0]->collation.collation->csname, - MY_CS_BINSORT,MYF(0)), DERIVATION_COERCIBLE); + CHARSET_INFO *cs= get_checksum_charset(args[0]->collation.collation->csname); + args[0]->collation.set(cs, DERIVATION_COERCIBLE); // size of hex representation of hash fix_length_and_charset(SHA1_HASH_SIZE * 2, default_charset()); } @@ -348,18 +353,9 @@ void Item_func_sha2::fix_length_and_dec() ER(ER_WRONG_PARAMETERS_TO_NATIVE_FCT), "sha2"); } - /* - The SHA2() function treats its parameter as being a case sensitive. - Thus we set binary collation on it so different instances of SHA2() - will be compared properly. 
- */ + CHARSET_INFO *cs= get_checksum_charset(args[0]->collation.collation->csname); + args[0]->collation.set(cs, DERIVATION_COERCIBLE); - args[0]->collation.set( - get_charset_by_csname( - args[0]->collation.collation->csname, - MY_CS_BINSORT, - MYF(0)), - DERIVATION_COERCIBLE); #else push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, @@ -513,39 +509,42 @@ void Item_func_from_base64::fix_length_and_dec() String *Item_func_from_base64::val_str(String *str) { String *res= args[0]->val_str_ascii(str); - bool too_long= false; int length; const char *end_ptr; - if (!res || - res->length() > (uint) base64_decode_max_arg_length() || - (too_long= - ((uint) (length= base64_needed_decoded_length((int) res->length())) > - current_thd->variables.max_allowed_packet)) || - tmp_value.alloc((uint) length) || - (length= base64_decode(res->ptr(), (int) res->length(), + if (!res) + goto err; + + if (res->length() > (uint) base64_decode_max_arg_length() || + ((uint) (length= base64_needed_decoded_length((int) res->length())) > + current_thd->variables.max_allowed_packet)) + { + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), + current_thd->variables.max_allowed_packet); + goto err; + } + + if (tmp_value.alloc((uint) length)) + goto err; + + if ((length= base64_decode(res->ptr(), (int) res->length(), (char *) tmp_value.ptr(), &end_ptr, 0)) < 0 || end_ptr < res->ptr() + res->length()) { - null_value= 1; // NULL input, too long input, OOM, or badly formed input - if (too_long) - { - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_WARN_ALLOWED_PACKET_OVERFLOWED, - ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), - current_thd->variables.max_allowed_packet); - } - else if (res && length < 0) - { - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_BAD_BASE64_DATA, ER(ER_BAD_BASE64_DATA), - end_ptr - res->ptr()); - } - return 0; + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_BAD_BASE64_DATA, ER(ER_BAD_BASE64_DATA), + end_ptr - res->ptr()); + goto err; } + tmp_value.length((uint) length); null_value= 0; return &tmp_value; +err: + null_value= 1; // NULL input, too long input, OOM, or badly formed input + return 0; } /////////////////////////////////////////////////////////////////////////////// @@ -598,7 +597,7 @@ String *Item_func_decode_histogram::val_str(String *str) val= p[i] / ((double)((1 << 8) - 1)); break; case DOUBLE_PREC_HB: - val= ((uint16 *)(p + i))[0] / ((double)((1 << 16) - 1)); + val= uint2korr(p + i) / ((double)((1 << 16) - 1)); i++; break; default: @@ -1962,7 +1961,7 @@ String *Item_func_ltrim::val_str(String *str) if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) - return res; + return non_trimmed_value(res); ptr= (char*) res->ptr(); end= ptr+res->length(); @@ -1981,9 +1980,8 @@ String *Item_func_ltrim::val_str(String *str) end+=remove_length; } if (ptr == res->ptr()) - return res; - tmp_value.set(*res,(uint) (ptr - res->ptr()),(uint) (end-ptr)); - return &tmp_value; + return non_trimmed_value(res); + return trimmed_value(res, (uint32) (ptr - res->ptr()), (uint32) (end - ptr)); } @@ -2009,7 +2007,7 @@ String *Item_func_rtrim::val_str(String *str) if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) - return res; + return non_trimmed_value(res); ptr= (char*) res->ptr(); end= ptr+res->length(); @@ -2021,11 +2019,11 @@ String 
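The decode_histogram change above replaces a raw "((uint16 *)(p + i))[0]" read with uint2korr(p + i), that is, an explicit two-byte little-endian read that does not depend on host byte order or alignment. A standalone sketch of that idea, with read_le16 as a hypothetical stand-in for uint2korr; not server code.

#include <cstdint>
#include <cstdio>

static inline uint16_t read_le16(const unsigned char *p)
{
  // Byte-wise read: no alignment requirement, endian-independent result.
  return (uint16_t)(p[0] | (p[1] << 8));
}

int main()
{
  unsigned char buf[] = {0x01, 0x34, 0x12, 0xff};   // value 0x1234 at offset 1
  // A cast like *(uint16_t*)(buf + 1) would be misaligned and endian-dependent.
  double val = read_le16(buf + 1) / (double)((1 << 16) - 1);
  std::printf("%u -> %.6f\n", read_le16(buf + 1), val);
  return 0;
}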
*Item_func_rtrim::val_str(String *str) { char chr=(*remove_str)[0]; #ifdef USE_MB - if (use_mb(res->charset())) + if (use_mb(collation.collation)) { while (ptr < end) { - if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l,p=ptr; + if ((l= my_ismbchar(collation.collation, ptr, end))) ptr+= l, p=ptr; else ++ptr; } ptr=p; @@ -2038,12 +2036,12 @@ String *Item_func_rtrim::val_str(String *str) { const char *r_ptr=remove_str->ptr(); #ifdef USE_MB - if (use_mb(res->charset())) + if (use_mb(collation.collation)) { loop: while (ptr + remove_length < end) { - if ((l=my_ismbchar(res->charset(), ptr,end))) ptr+=l; + if ((l= my_ismbchar(collation.collation, ptr, end))) ptr+= l; else ++ptr; } if (ptr + remove_length == end && !memcmp(ptr,r_ptr,remove_length)) @@ -2062,9 +2060,8 @@ String *Item_func_rtrim::val_str(String *str) } } if (end == res->ptr()+res->length()) - return res; - tmp_value.set(*res,0,(uint) (end-res->ptr())); - return &tmp_value; + return non_trimmed_value(res); + return trimmed_value(res, 0, (uint32) (end - res->ptr())); } @@ -2091,37 +2088,22 @@ String *Item_func_trim::val_str(String *str) if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) - return res; + return non_trimmed_value(res); ptr= (char*) res->ptr(); end= ptr+res->length(); r_ptr= remove_str->ptr(); + while (ptr+remove_length <= end && !memcmp(ptr,r_ptr,remove_length)) + ptr+=remove_length; #ifdef USE_MB - if (use_mb(res->charset())) + if (use_mb(collation.collation)) { - while (ptr + remove_length <= end) - { - uint num_bytes= 0; - while (num_bytes < remove_length) - { - uint len; - if ((len= my_ismbchar(res->charset(), ptr + num_bytes, end))) - num_bytes+= len; - else - ++num_bytes; - } - if (num_bytes != remove_length) - break; - if (memcmp(ptr, r_ptr, remove_length)) - break; - ptr+= remove_length; - } char *p=ptr; register uint32 l; loop: while (ptr + remove_length < end) { - if ((l= my_ismbchar(res->charset(), ptr,end))) + if ((l= my_ismbchar(collation.collation, ptr, end))) ptr+= l; else ++ptr; @@ -2137,16 +2119,13 @@ String *Item_func_trim::val_str(String *str) else #endif /* USE_MB */ { - while (ptr+remove_length <= end && !memcmp(ptr,r_ptr,remove_length)) - ptr+=remove_length; while (ptr + remove_length <= end && !memcmp(end-remove_length,r_ptr,remove_length)) end-=remove_length; } if (ptr == res->ptr() && end == ptr+res->length()) - return res; - tmp_value.set(*res,(uint) (ptr - res->ptr()),(uint) (end-ptr)); - return &tmp_value; + return non_trimmed_value(res); + return trimmed_value(res, (uint32) (ptr - res->ptr()), (uint32) (end - ptr)); } void Item_func_trim::fix_length_and_dec() @@ -2348,32 +2327,6 @@ void Item_func_decode::crypto_transform(String *res) } -Item *Item_func_sysconst::safe_charset_converter(CHARSET_INFO *tocs) -{ - Item_string *conv; - uint conv_errors; - String tmp, cstr, *ostr= val_str(&tmp); - if (null_value) - { - Item *null_item= new Item_null((char *) fully_qualified_func_name()); - null_item->collation.set (tocs); - return null_item; - } - cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors); - if (conv_errors || - !(conv= new Item_static_string_func(fully_qualified_func_name(), - cstr.ptr(), cstr.length(), - cstr.charset(), - collation.derivation))) - { - return NULL; - } - conv->str_value.copy(); - conv->str_value.mark_as_const(); - return conv; -} - - String *Item_func_database::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -3045,6 +2998,75 @@ err: } +void Item_func_space::fix_length_and_dec() +{ + 
collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); + if (args[0]->const_item()) + { + /* must be longlong to avoid truncation */ + longlong count= args[0]->val_int(); + if (args[0]->null_value) + goto end; + /* + Assumes that the maximum length of a String is < INT_MAX32. + Set here so that rest of code sees out-of-bound value as such. + */ + if (count > INT_MAX32) + count= INT_MAX32; + fix_char_length_ulonglong(count); + return; + } + +end: + max_length= MAX_BLOB_WIDTH; + maybe_null= 1; +} + + +String *Item_func_space::val_str(String *str) +{ + uint tot_length; + longlong count= args[0]->val_int(); + const CHARSET_INFO *cs= collation.collation; + + if (args[0]->null_value) + goto err; // string and/or delim are null + null_value= 0; + + if (count <= 0 && (count == 0 || !args[0]->unsigned_flag)) + return make_empty_result(); + /* + Assumes that the maximum length of a String is < INT_MAX32. + Bounds check on count: If this is triggered, we will error. + */ + if ((ulonglong) count > INT_MAX32) + count= INT_MAX32; + + // Safe length check + tot_length= (uint) count * cs->mbminlen; + if (tot_length > current_thd->variables.max_allowed_packet) + { + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, + ER_WARN_ALLOWED_PACKET_OVERFLOWED, + ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), + func_name(), + current_thd->variables.max_allowed_packet); + goto err; + } + + if (str->alloc(tot_length)) + goto err; + str->length(tot_length); + str->set_charset(cs); + cs->cset->fill(cs, (char*) str->ptr(), tot_length, ' '); + return str; + +err: + null_value= 1; + return 0; +} + + void Item_func_binlog_gtid_pos::fix_length_and_dec() { collation.set(system_charset_info); @@ -3443,7 +3465,7 @@ void Item_func_set_collation::print(String *str, enum_query_type query_type) str->append(STRING_WITH_LEN(" collate ")); DBUG_ASSERT(args[1]->basic_const_item() && args[1]->type() == Item::STRING_ITEM); - args[1]->str_value.print(str); + ((Item_string *)args[1])->print_value(str); str->append(')'); } diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index f3d5c064423..8377a20e0a4 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -349,6 +349,21 @@ class Item_func_trim :public Item_str_func protected: String tmp_value; String remove; + String *trimmed_value(String *res, uint32 offset, uint32 length) + { + tmp_value.set(*res, offset, length); + /* + Make sure to return correct charset and collation: + TRIM(0x000000 FROM _ucs2 0x0061) + should set charset to "binary" rather than to "ucs2". 
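The new Item_func_space::val_str() above clamps the requested count, refuses anything beyond max_allowed_packet instead of allocating it, and then fills the buffer once. A minimal sketch of that guarded-fill pattern, assuming hypothetical make_spaces/kMaxAllowedPacket names and dropping the mbminlen and warning machinery; it is an illustration, not the server implementation.

#include <climits>
#include <cstdio>
#include <optional>
#include <string>

static const size_t kMaxAllowedPacket = 16 * 1024 * 1024;  // stand-in limit

static std::optional<std::string> make_spaces(long long count)
{
  if (count <= 0)
    return std::string();              // SPACE(0) and negative counts: empty
  if (count > INT_MAX)
    count = INT_MAX;                   // clamp instead of overflowing
  size_t total = (size_t)count;        // real code also multiplies by mbminlen
  if (total > kMaxAllowedPacket)
    return std::nullopt;               // real code warns and returns NULL
  return std::string(total, ' ');      // single allocation, then fill
}

int main()
{
  auto small = make_spaces(5);
  auto huge  = make_spaces(1LL << 40);
  std::printf("%zu %d\n", small ? small->size() : 0, (bool)huge);  // 5 0
  return 0;
}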
+ */ + tmp_value.set_charset(collation.collation); + return &tmp_value; + } + String *non_trimmed_value(String *res) + { + return trimmed_value(res, 0, res->length()); + } public: Item_func_trim(Item *a,Item *b) :Item_str_func(a,b) {} Item_func_trim(Item *a) :Item_str_func(a) {} @@ -527,7 +542,10 @@ class Item_func_sysconst :public Item_str_func public: Item_func_sysconst() { collation.set(system_charset_info,DERIVATION_SYSCONST); } - Item *safe_charset_converter(CHARSET_INFO *tocs); + Item *safe_charset_converter(CHARSET_INFO *tocs) + { + return const_charset_converter(tocs, true, fully_qualified_func_name()); + } /* Used to create correct Item name in new converted item in safe_charset_converter, return string representation of this function @@ -701,6 +719,16 @@ public: }; +class Item_func_space :public Item_str_func +{ +public: + Item_func_space(Item *arg1):Item_str_func(arg1) {} + String *val_str(String *); + void fix_length_and_dec(); + const char *func_name() const { return "space"; } +}; + + class Item_func_binlog_gtid_pos :public Item_str_func { String tmp_value; diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 38bb3121ed8..7db7b014d28 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -3654,8 +3654,9 @@ int subselect_single_select_engine::exec() pushed down into the subquery. Those optimizations are ref[_or_null] acceses. Change them to be full table scans. */ - for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_CONST_TABLES); tab; - tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) + JOIN_TAB *tab; + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); + tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { if (tab && tab->keyuse) { diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 62db351150b..2dadf8b8835 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -3197,19 +3197,13 @@ Item_func_group_concat(Name_resolution_context *context_arg, /* We need to allocate: args - arg_count_field+arg_count_order - (for possible order items in temporare tables) + (for possible order items in temporary tables) order - arg_count_order */ - if (!(args= (Item**) sql_alloc(sizeof(Item*) * arg_count + + if (!(args= (Item**) sql_alloc(sizeof(Item*) * arg_count * 2 + sizeof(ORDER*)*arg_count_order))) return; - if (!(orig_args= (Item **) sql_alloc(sizeof(Item *) * arg_count))) - { - args= NULL; - return; - } - order= (ORDER**)(args + arg_count); /* fill args items of show and sort */ @@ -3230,6 +3224,9 @@ Item_func_group_concat(Name_resolution_context *context_arg, order_item->item= arg_ptr++; } } + + /* orig_args is only used for print() */ + orig_args= (Item**) (order + arg_count_order); memcpy(orig_args, args, sizeof(Item*) * arg_count); } @@ -3313,6 +3310,7 @@ void Item_func_group_concat::cleanup() } DBUG_ASSERT(tree == 0); } + DBUG_VOID_RETURN; } diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 5fddad56028..4a8bb4cc77d 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1816,7 +1816,7 @@ void Item_func_date_format::fix_length_and_dec() if (arg1->type() == STRING_ITEM) { // Optimize the normal case fixed_length=1; - max_length= format_length(&arg1->str_value) * + max_length= format_length(arg1->val_str(NULL)) * collation.collation->mbmaxlen; } else diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 29badddad8e..cb8b59501a4 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -413,16 +413,15 @@ protected: public: Item_func_seconds_hybrid() :Item_func_numhybrid() {} Item_func_seconds_hybrid(Item 
*a) :Item_func_numhybrid(a) {} - void fix_num_length_and_dec() + void fix_length_and_dec() { if (arg_count) decimals= args[0]->temporal_precision(arg0_expected_type()); set_if_smaller(decimals, TIME_SECOND_PART_DIGITS); max_length=17 + (decimals ? decimals + 1 : 0); maybe_null= true; + cached_result_type= decimals ? DECIMAL_RESULT : INT_RESULT; } - void find_num_type() - { cached_result_type= decimals ? DECIMAL_RESULT : INT_RESULT; } double real_op() { DBUG_ASSERT(0); return 0; } String *str_op(String *str) { DBUG_ASSERT(0); return 0; } bool date_op(MYSQL_TIME *ltime, uint fuzzydate) { DBUG_ASSERT(0); return true; } @@ -470,11 +469,6 @@ protected: public: Item_func_time_to_sec(Item *item) :Item_func_seconds_hybrid(item) {} const char *func_name() const { return "time_to_sec"; } - void fix_num_length_and_dec() - { - maybe_null= true; - Item_func_seconds_hybrid::fix_num_length_and_dec(); - } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *int_arg) { return FALSE;} bool check_valid_arguments_processor(uchar *int_arg) diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index 759b929ff82..932f4245c27 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -532,6 +532,32 @@ public: }; +/** + A string whose value may be changed during execution. +*/ +class Item_string_xml_non_const: public Item_string +{ +public: + Item_string_xml_non_const(const char *str, uint length, CHARSET_INFO *cs) + :Item_string(str, length, cs) + { } + bool const_item() const { return false ; } + bool basic_const_item() const { return false; } + void set_value(const char *str, uint length, CHARSET_INFO *cs) + { + str_value.set(str, length, cs); + } + Item *safe_charset_converter(CHARSET_INFO *tocs) + { + /* + Item_string::safe_charset_converter() does not accept non-constants. + Note, conversion is not really needed here anyway. + */ + return this; + } +}; + + class Item_nodeset_to_const_comparator :public Item_bool_func { String *pxml; @@ -550,7 +576,8 @@ public: longlong val_int() { Item_func *comp= (Item_func*)args[1]; - Item_string *fake= (Item_string*)(comp->arguments()[0]); + Item_string_xml_non_const *fake= + (Item_string_xml_non_const*)(comp->arguments()[0]); String *res= args[0]->val_nodeset(&tmp_nodeset); MY_XPATH_FLT *fltbeg= (MY_XPATH_FLT*) res->ptr(); MY_XPATH_FLT *fltend= (MY_XPATH_FLT*) (res->ptr() + res->length()); @@ -568,8 +595,8 @@ public: if ((node->parent == flt->num) && (node->type == MY_XML_NODE_TEXT)) { - fake->str_value.set(node->beg, node->end - node->beg, - collation.collation); + fake->set_value(node->beg, node->end - node->beg, + collation.collation); if (args[1]->val_int()) return 1; } @@ -956,14 +983,12 @@ static Item *create_comparator(MY_XPATH *xpath, { /* Compare a node set to a scalar value. - We just create a fake Item_string() argument, + We just create a fake Item_string_xml_non_const() argument, which will be filled to the partular value in a loop through all of the nodes in the node set. 
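The item_xmlfunc.cc hunks above replace the old "fake Item_string with RAND_TABLE_BIT" trick with Item_string_xml_non_const, a holder that reports itself as non-constant so nothing caches its first value while the comparator rebinds it once per node. A standalone sketch of that shape, with hypothetical Value/NonConstValue/contains_match names; not server code.

#include <string>
#include <vector>
#include <iostream>

struct Value {
  virtual ~Value() = default;
  virtual bool const_item() const { return true; }    // safe to cache
  virtual std::string str() const = 0;
};

struct NonConstValue : Value {
  std::string current;
  bool const_item() const override { return false; }  // never cache this one
  std::string str() const override { return current; }
  void set_value(std::string s) { current = std::move(s); }
};

// Re-evaluates "node == wanted" for every node, rebinding the placeholder.
static bool contains_match(const std::vector<std::string> &nodes,
                           NonConstValue &placeholder,
                           const std::string &wanted)
{
  for (const auto &n : nodes) {
    placeholder.set_value(n);        // fill the fake item for this node
    if (placeholder.str() == wanted) // stands in for args[1]->val_int()
      return true;
  }
  return false;
}

int main()
{
  NonConstValue fake;
  std::vector<std::string> nodes = {"a", "b", "c"};
  std::cout << contains_match(nodes, fake, "b") << "\n";   // 1
}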
*/ - Item_string *fake= new Item_string("", 0, xpath->cs); - /* Don't cache fake because its value will be changed during comparison.*/ - fake->set_used_tables(RAND_TABLE_BIT); + Item_string *fake= new Item_string_xml_non_const("", 0, xpath->cs); Item_nodeset_func *nodeset; Item *scalar, *comp; if (a->type() == Item::XPATH_NODESET) diff --git a/sql/log.cc b/sql/log.cc index dcdf2bcc74d..4e50f57e656 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2396,7 +2396,7 @@ static int find_uniq_filename(char *name) file_info= dir_info->dir_entry; for (i= dir_info->number_of_files ; i-- ; file_info++) { - if (memcmp(file_info->name, start, length) == 0 && + if (strncmp(file_info->name, start, length) == 0 && test_if_number(file_info->name+length, &number,0)) { set_if_bigger(max_found,(ulong) number); @@ -2673,11 +2673,13 @@ int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name) { if (!fn_ext(log_name)[0]) { - if (find_uniq_filename(new_name)) + if (DBUG_EVALUATE_IF("binlog_inject_new_name_error", TRUE, FALSE) || + find_uniq_filename(new_name)) { - my_printf_error(ER_NO_UNIQUE_LOGFILE, ER(ER_NO_UNIQUE_LOGFILE), - MYF(ME_FATALERROR), log_name); - sql_print_error(ER(ER_NO_UNIQUE_LOGFILE), log_name); + if (current_thd) + my_printf_error(ER_NO_UNIQUE_LOGFILE, ER(ER_NO_UNIQUE_LOGFILE), + MYF(ME_FATALERROR), log_name); + sql_print_error(ER_DEFAULT(ER_NO_UNIQUE_LOGFILE), log_name); return 1; } } @@ -2930,7 +2932,8 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time, my_b_printf(&log_file, "# Full_scan: %s Full_join: %s " "Tmp_table: %s Tmp_table_on_disk: %s\n" - "# Filesort: %s Filesort_on_disk: %s Merge_passes: %lu\n", + "# Filesort: %s Filesort_on_disk: %s Merge_passes: %lu " + "Priority_queue: %s\n", ((thd->query_plan_flags & QPLAN_FULL_SCAN) ? "Yes" : "No"), ((thd->query_plan_flags & QPLAN_FULL_JOIN) ? "Yes" : "No"), ((thd->query_plan_flags & QPLAN_TMP_TABLE) ? "Yes" : "No"), @@ -2938,7 +2941,10 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time, ((thd->query_plan_flags & QPLAN_FILESORT) ? "Yes" : "No"), ((thd->query_plan_flags & QPLAN_FILESORT_DISK) ? "Yes" : "No"), - thd->query_plan_fsort_passes) == (size_t) -1) + thd->query_plan_fsort_passes, + ((thd->query_plan_flags & QPLAN_FILESORT_PRIORITY_QUEUE) ? 
+ "Yes" : "No") + ) == (size_t) -1) tmp_errno= errno; if (thd->variables.log_slow_verbosity & LOG_SLOW_VERBOSITY_EXPLAIN && thd->lex->explain) @@ -4121,6 +4127,7 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included) { int error; char *to_purge_if_included= NULL; + inuse_relaylog *ir; DBUG_ENTER("purge_first_log"); DBUG_ASSERT(is_open()); @@ -4128,7 +4135,31 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included) DBUG_ASSERT(!strcmp(rli->linfo.log_file_name,rli->event_relay_log_name)); mysql_mutex_lock(&LOCK_index); - to_purge_if_included= my_strdup(rli->group_relay_log_name, MYF(0)); + + ir= rli->inuse_relaylog_list; + while (ir) + { + inuse_relaylog *next= ir->next; + if (!ir->completed || ir->dequeued_count < ir->queued_count) + { + included= false; + break; + } + if (!included && !strcmp(ir->name, rli->group_relay_log_name)) + break; + if (!next) + { + rli->last_inuse_relaylog= NULL; + included= 1; + to_purge_if_included= my_strdup(ir->name, MYF(0)); + } + my_atomic_rwlock_destroy(&ir->inuse_relaylog_atomic_lock); + my_free(ir); + ir= next; + } + rli->inuse_relaylog_list= ir; + if (ir) + to_purge_if_included= my_strdup(ir->name, MYF(0)); /* Read the next log file name from the index file and pass it back to @@ -6845,7 +6876,7 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry) /* Interrupted by kill. */ DEBUG_SYNC(orig_entry->thd, "group_commit_waiting_for_prior_killed"); wfc->wakeup_error= orig_entry->thd->killed_errno(); - if (wfc->wakeup_error) + if (!wfc->wakeup_error) wfc->wakeup_error= ER_QUERY_INTERRUPTED; my_message(wfc->wakeup_error, ER(wfc->wakeup_error), MYF(0)); DBUG_RETURN(-1); @@ -6856,12 +6887,6 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry) else mysql_mutex_unlock(&wfc->LOCK_wait_commit); } - if (wfc && wfc->wakeup_error) - { - my_error(ER_PRIOR_COMMIT_FAILED, MYF(0)); - DBUG_RETURN(-1); - } - /* If the transaction we were waiting for has already put us into the group commit queue (and possibly already done the entire binlog commit for us), @@ -6870,6 +6895,12 @@ MYSQL_BIN_LOG::queue_for_group_commit(group_commit_entry *orig_entry) if (orig_entry->queued_by_other) DBUG_RETURN(0); + if (wfc && wfc->wakeup_error) + { + my_error(ER_PRIOR_COMMIT_FAILED, MYF(0)); + DBUG_RETURN(-1); + } + /* Now enqueue ourselves in the group commit queue. 
*/ DEBUG_SYNC(orig_entry->thd, "commit_before_enqueue"); orig_entry->thd->clear_wakeup_ready(); @@ -7473,6 +7504,13 @@ MYSQL_BIN_LOG::write_transaction_or_stmt(group_commit_entry *entry, } } + DBUG_EXECUTE_IF("inject_error_writing_xid", + { + entry->error_cache= NULL; + entry->commit_errno= 28; + DBUG_RETURN(ER_ERROR_ON_WRITE); + }); + if (entry->end_event->write(&log_file)) { entry->error_cache= NULL; @@ -9076,6 +9114,8 @@ binlog_background_thread(void *arg __attribute__((unused))) thd->thread_id= thread_id++; mysql_mutex_unlock(&LOCK_thread_count); thd->store_globals(); + thd->security_ctx->skip_grants(); + thd->set_command(COM_DAEMON); /* Load the slave replication GTID state from the mysql.gtid_slave_pos @@ -9379,7 +9419,7 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name, file= -1; } - if (0 == strcmp(linfo->log_file_name, last_log_name)) + if (!strcmp(linfo->log_file_name, last_log_name)) break; // No more files to do if ((file= open_binlog(&log, linfo->log_file_name, &errmsg)) < 0) { @@ -9636,7 +9676,7 @@ set_binlog_snapshot_file(const char *src) Copy out current values of status variables, for SHOW STATUS or information_schema.global_status. - This is called only under LOCK_status, so we can fill in a static array. + This is called only under LOCK_show_status, so we can fill in a static array. */ void TC_LOG_BINLOG::set_status_variables(THD *thd) diff --git a/sql/log_event.cc b/sql/log_event.cc index 600a98916a9..76170778bad 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -190,6 +190,28 @@ static const char *HA_ERR(int i) return "No Error!"; } + +/* + Return true if an error caught during event execution is a temporary error + that will cause automatic retry of the event group during parallel + replication, false otherwise. + + In parallel replication, conflicting transactions can occasionally cause + deadlocks; such errors are handled automatically by rolling back re-trying + the transactions, so should not pollute the error log. +*/ +static bool +is_parallel_retry_error(rpl_group_info *rgi, int err) +{ + if (!rgi->is_parallel_exec) + return false; + if (rgi->killed_for_retry && + (err == ER_QUERY_INTERRUPTED || err == ER_CONNECTION_KILLED)) + return true; + return has_temporary_error(rgi->thd); +} + + /** Error reporting facility for Rows_log_event::do_apply_event @@ -204,7 +226,7 @@ static const char *HA_ERR(int i) */ static void inline slave_rows_error_report(enum loglevel level, int ha_error, - Relay_log_info const *rli, THD *thd, + rpl_group_info *rgi, THD *thd, TABLE *table, const char * type, const char *log_name, ulong pos) { @@ -214,8 +236,19 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error, uint len; Diagnostics_area::Sql_condition_iterator it= thd->get_stmt_da()->sql_conditions(); + Relay_log_info const *rli= rgi->rli; const Sql_condition *err; buff[0]= 0; + int errcode= thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0; + + /* + In parallel replication, deadlocks or other temporary errors can happen + occasionally in normal operation, they will be handled correctly and + automatically by re-trying the transactions. So do not pollute the error + log with messages about them. + */ + if (is_parallel_retry_error(rgi, errcode)) + return; for (err= it++, slider= buff; err && slider < buff_end - 1; slider += len, err= it++) @@ -226,7 +259,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error, } if (ha_error != 0) - rli->report(level, thd->is_error() ? 
thd->get_stmt_da()->sql_errno() : 0, + rli->report(level, errcode, rgi->gtid_info(), "Could not execute %s event on table %s.%s;" "%s handler error %s; " "the event's master log %s, end_log_pos %lu", @@ -234,7 +267,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error, buff, handler_error == NULL ? "<unknown>" : handler_error, log_name, pos); else - rli->report(level, thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0, + rli->report(level, errcode, rgi->gtid_info(), "Could not execute %s event on table %s.%s;" "%s the event's master log %s, end_log_pos %lu", type, table->s->db.str, table->s->table_name.str, @@ -974,8 +1007,9 @@ Log_event::do_shall_skip(rpl_group_info *rgi) Relay_log_info *rli= rgi->rli; DBUG_PRINT("info", ("ev->server_id: %lu, ::server_id: %lu," " rli->replicate_same_server_id: %d," - " rli->slave_skip_counter: %lu", - (ulong) server_id, (ulong) global_system_variables.server_id, + " rli->slave_skip_counter: %llu", + (ulong) server_id, + (ulong) global_system_variables.server_id, rli->replicate_same_server_id, rli->slave_skip_counter)); if ((server_id == global_system_variables.server_id && @@ -4199,25 +4233,31 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, Record any GTID in the same transaction, so slave state is transactionally consistent. */ - if (current_stmt_is_commit && (sub_id= rgi->gtid_sub_id)) + if (current_stmt_is_commit) { - /* Clear the GTID from the RLI so we don't accidentally reuse it. */ - rgi->gtid_sub_id= 0; - - gtid= rgi->current_gtid; thd->variables.option_bits&= ~OPTION_GTID_BEGIN; - if (rpl_global_gtid_slave_state.record_gtid(thd, >id, sub_id, true, false)) + if (rgi->gtid_pending) { - rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, - "Error during COMMIT: failed to update GTID state in " - "%s.%s: %d: %s", - "mysql", rpl_gtid_slave_state_table_name.str, - thd->get_stmt_da()->sql_errno(), - thd->get_stmt_da()->message()); - trans_rollback(thd); - sub_id= 0; - thd->is_slave_error= 1; - goto end; + sub_id= rgi->gtid_sub_id; + rgi->gtid_pending= false; + + gtid= rgi->current_gtid; + if (rpl_global_gtid_slave_state.record_gtid(thd, >id, sub_id, true, false)) + { + int errcode= thd->get_stmt_da()->sql_errno(); + if (!is_parallel_retry_error(rgi, errcode)) + rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, + rgi->gtid_info(), + "Error during COMMIT: failed to update GTID state in " + "%s.%s: %d: %s", + "mysql", rpl_gtid_slave_state_table_name.str, + errcode, + thd->get_stmt_da()->message()); + trans_rollback(thd); + sub_id= 0; + thd->is_slave_error= 1; + goto end; + } } } @@ -4270,7 +4310,7 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, clear_all_errors(thd, const_cast<Relay_log_info*>(rli)); /* Can ignore query */ else { - rli->report(ERROR_LEVEL, expected_error, + rli->report(ERROR_LEVEL, expected_error, rgi->gtid_info(), "\ Query partially completed on the master (error on master: %d) \ and was aborted. There is a chance that your master is inconsistent at this \ @@ -4326,7 +4366,7 @@ compare_errors: !ignored_error_code(actual_error) && !ignored_error_code(expected_error)) { - rli->report(ERROR_LEVEL, 0, + rli->report(ERROR_LEVEL, 0, rgi->gtid_info(), "\ Query caused different errors on master and slave. \ Error on master: message (format)='%s' error code=%d ; \ @@ -4349,18 +4389,21 @@ Default database: '%s'. 
Query: '%s'", { DBUG_PRINT("info",("error ignored")); clear_all_errors(thd, const_cast<Relay_log_info*>(rli)); - thd->reset_killed(); + if (actual_error == ER_QUERY_INTERRUPTED || + actual_error == ER_CONNECTION_KILLED) + thd->reset_killed(); } /* Other cases: mostly we expected no error and get one. */ else if (thd->is_slave_error || thd->is_fatal_error) { - rli->report(ERROR_LEVEL, actual_error, - "Error '%s' on query. Default database: '%s'. Query: '%s'", - (actual_error ? thd->get_stmt_da()->message() : - "unexpected success or fatal error"), - print_slave_db_safe(thd->db), query_arg); + if (!is_parallel_retry_error(rgi, actual_error)) + rli->report(ERROR_LEVEL, actual_error, rgi->gtid_info(), + "Error '%s' on query. Default database: '%s'. Query: '%s'", + (actual_error ? thd->get_stmt_da()->message() : + "unexpected success or fatal error"), + print_slave_db_safe(thd->db), query_arg); thd->is_slave_error= 1; } @@ -5028,7 +5071,7 @@ int Format_description_log_event::do_apply_event(rpl_group_info *rgi) if (!is_artificial_event() && created && thd->transaction.all.ha_list) { /* This is not an error (XA is safe), just an information */ - rli->report(INFORMATION_LEVEL, 0, + rli->report(INFORMATION_LEVEL, 0, NULL, "Rolling back unfinished transaction (no COMMIT " "or ROLLBACK in relay log). A probable cause is that " "the master died while writing the transaction to " @@ -5969,7 +6012,7 @@ error: sql_errno=ER_UNKNOWN_ERROR; err=ER(sql_errno); } - rli->report(ERROR_LEVEL, sql_errno,"\ + rli->report(ERROR_LEVEL, sql_errno, rgi->gtid_info(), "\ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", err, (char*)table_name, print_slave_db_safe(remember_db)); free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); @@ -5986,7 +6029,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", (char*)table_name, print_slave_db_safe(remember_db)); - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(), ER(ER_SLAVE_FATAL_ERROR), buf); DBUG_RETURN(1); } @@ -6476,12 +6519,10 @@ Gtid_log_event::do_apply_event(rpl_group_info *rgi) thd->variables.server_id= this->server_id; thd->variables.gtid_domain_id= this->domain_id; thd->variables.gtid_seq_no= this->seq_no; + mysql_reset_thd_for_next_command(thd); if (opt_gtid_strict_mode && opt_bin_log && opt_log_slave_updates) { - /* Need to reset prior "ok" status to give an error. */ - thd->clear_error(); - thd->get_stmt_da()->reset_diagnostics_area(); if (mysql_bin_log.check_strict_gtid_sequence(this->domain_id, this->server_id, this->seq_no)) return 1; @@ -7259,28 +7300,41 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi) bool res; int err; rpl_gtid gtid; - uint64 sub_id; + uint64 sub_id= 0; Relay_log_info const *rli= rgi->rli; /* + XID_EVENT works like a COMMIT statement. And it also updates the + mysql.gtid_slave_pos table with the GTID of the current transaction. + + Therefore, it acts much like a normal SQL statement, so we need to do + mysql_reset_thd_for_next_command() as if starting a new statement. + */ + mysql_reset_thd_for_next_command(thd); + /* Record any GTID in the same transaction, so slave state is transactionally consistent. */ - if ((sub_id= rgi->gtid_sub_id)) + if (rgi->gtid_pending) { - /* Clear the GTID from the RLI so we don't accidentally reuse it. 
*/ - rgi->gtid_sub_id= 0; + sub_id= rgi->gtid_sub_id; + rgi->gtid_pending= false; gtid= rgi->current_gtid; err= rpl_global_gtid_slave_state.record_gtid(thd, >id, sub_id, true, false); if (err) { - rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, - "Error during XID COMMIT: failed to update GTID state in " - "%s.%s: %d: %s", - "mysql", rpl_gtid_slave_state_table_name.str, - thd->get_stmt_da()->sql_errno(), - thd->get_stmt_da()->message()); + int ec= thd->get_stmt_da()->sql_errno(); + /* + Do not report an error if this is really a kill due to a deadlock. + In this case, the transaction will be re-tried instead. + */ + if (!is_parallel_retry_error(rgi, ec)) + rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, rgi->gtid_info(), + "Error during XID COMMIT: failed to update GTID state in " + "%s.%s: %d: %s", + "mysql", rpl_gtid_slave_state_table_name.str, ec, + thd->get_stmt_da()->message()); trans_rollback(thd); thd->is_slave_error= 1; return err; @@ -8311,7 +8365,7 @@ int Create_file_log_event::do_apply_event(rpl_group_info *rgi) init_io_cache(&file, fd, IO_SIZE, WRITE_CACHE, (my_off_t)0, 0, MYF(MY_WME|MY_NABP))) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in Create_file event: could not open file '%s'", fname_buf); goto err; @@ -8323,7 +8377,7 @@ int Create_file_log_event::do_apply_event(rpl_group_info *rgi) if (write_base(&file)) { strmov(ext, ".info"); // to have it right in the error message - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in Create_file event: could not write to file '%s'", fname_buf); goto err; @@ -8339,14 +8393,14 @@ int Create_file_log_event::do_apply_event(rpl_group_info *rgi) O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW, MYF(MY_WME))) < 0) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in Create_file event: could not open file '%s'", fname_buf); goto err; } if (mysql_file_write(fd, (uchar*) block, block_len, MYF(MY_WME+MY_NABP))) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in Create_file event: write to '%s' failed", fname_buf); goto err; @@ -8495,7 +8549,7 @@ int Append_block_log_event::do_apply_event(rpl_group_info *rgi) O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW, MYF(MY_WME))) < 0) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in %s event: could not create file '%s'", get_type_str(), fname); goto err; @@ -8506,7 +8560,7 @@ int Append_block_log_event::do_apply_event(rpl_group_info *rgi) O_WRONLY | O_APPEND | O_BINARY | O_NOFOLLOW, MYF(MY_WME))) < 0) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in %s event: could not open file '%s'", get_type_str(), fname); goto err; @@ -8519,7 +8573,7 @@ int Append_block_log_event::do_apply_event(rpl_group_info *rgi) if (mysql_file_write(fd, (uchar*) block, block_len, MYF(MY_WME+MY_NABP))) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in %s event: write to '%s' failed", get_type_str(), fname); goto err; @@ -8736,7 +8790,7 @@ int Execute_load_log_event::do_apply_event(rpl_group_info *rgi) init_io_cache(&file, fd, IO_SIZE, READ_CACHE, (my_off_t)0, 0, MYF(MY_WME|MY_NABP))) { - rli->report(ERROR_LEVEL, my_errno, + rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(), "Error in Exec_load event: could not open file '%s'", fname); goto err; @@ -8748,7 
+8802,7 @@ int Execute_load_log_event::do_apply_event(rpl_group_info *rgi) opt_slave_sql_verify_checksum)) || lev->get_type_code() != NEW_LOAD_EVENT) { - rli->report(ERROR_LEVEL, 0, "Error in Exec_load event: " + rli->report(ERROR_LEVEL, 0, rgi->gtid_info(), "Error in Exec_load event: " "file '%s' appears corrupted", fname); goto err; } @@ -8774,7 +8828,7 @@ int Execute_load_log_event::do_apply_event(rpl_group_info *rgi) char *tmp= my_strdup(rli->last_error().message, MYF(MY_WME)); if (tmp) { - rli->report(ERROR_LEVEL, rli->last_error().number, + rli->report(ERROR_LEVEL, rli->last_error().number, rgi->gtid_info(), "%s. Failed executing load from '%s'", tmp, fname); my_free(tmp); } @@ -8946,9 +9000,9 @@ void Execute_load_query_log_event::print(FILE* file, if (local_fname) { my_b_write(&cache, (uchar*) query, fn_pos_start); - my_b_write_string(&cache, " LOCAL INFILE \'"); - my_b_printf(&cache, "%s", local_fname); - my_b_write_string(&cache, "\'"); + my_b_write_string(&cache, " LOCAL INFILE "); + pretty_print_str(&cache, local_fname, strlen(local_fname)); + if (dup_handling == LOAD_DUP_REPLACE) my_b_write_string(&cache, " REPLACE"); my_b_write_string(&cache, " INTO"); @@ -9007,7 +9061,7 @@ Execute_load_query_log_event::do_apply_event(rpl_group_info *rgi) /* Replace filename and LOCAL keyword in query before executing it */ if (buf == NULL) { - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(), ER(ER_SLAVE_FATAL_ERROR), "Not enough memory"); return 1; } @@ -9625,7 +9679,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) (long long)wsrep_thd_trx_seqno(thd)); } #endif - if (thd->is_slave_error || thd->is_fatal_error) + if ((thd->is_slave_error || thd->is_fatal_error) && + !is_parallel_retry_error(rgi, actual_error)) { /* Error reporting borrowed from Query_log_event with many excessive @@ -9633,7 +9688,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) We should not honour --slave-skip-errors at this point as we are having severe errors which should not be skiped. */ - rli->report(ERROR_LEVEL, actual_error, + rli->report(ERROR_LEVEL, actual_error, rgi->gtid_info(), "Error executing row event: '%s'", (actual_error ? 
thd->get_stmt_da()->message() : "unexpected success or fatal error")); @@ -9674,8 +9729,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) { DBUG_ASSERT(ptr->m_tabledef_valid); TABLE *conv_table; - if (!ptr->m_tabledef.compatible_with(thd, const_cast<Relay_log_info*>(rli), - ptr->table, &conv_table)) + if (!ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table)) { DBUG_PRINT("debug", ("Table: %s.%s is not compatible with master", ptr->table->s->db.str, @@ -9831,7 +9885,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) if (idempotent_error || ignored_error) { if (global_system_variables.log_warnings) - slave_rows_error_report(WARNING_LEVEL, error, rli, thd, table, + slave_rows_error_report(WARNING_LEVEL, error, rgi, thd, table, get_type_str(), RPL_LOG_NAME, (ulong) log_pos); clear_all_errors(thd, const_cast<Relay_log_info*>(rli)); @@ -9887,7 +9941,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) { if (global_system_variables.log_warnings) - slave_rows_error_report(WARNING_LEVEL, error, rli, thd, table, + slave_rows_error_report(WARNING_LEVEL, error, rgi, thd, table, get_type_str(), RPL_LOG_NAME, (ulong) log_pos); clear_all_errors(thd, const_cast<Relay_log_info*>(rli)); @@ -9898,7 +9952,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) if (error) { - slave_rows_error_report(ERROR_LEVEL, error, rli, thd, table, + slave_rows_error_report(ERROR_LEVEL, error, rgi, thd, table, get_type_str(), RPL_LOG_NAME, (ulong) log_pos); /* @@ -9920,7 +9974,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) if (get_flags(STMT_END_F) && (error= rows_event_stmt_cleanup(rgi, thd))) slave_rows_error_report(ERROR_LEVEL, thd->is_error() ? 0 : error, - rli, thd, table, + rgi, thd, table, get_type_str(), RPL_LOG_NAME, (ulong) log_pos); DBUG_RETURN(error); @@ -10899,7 +10953,7 @@ int Table_map_log_event::do_apply_event(rpl_group_info *rgi) table_list->table_id); if (thd->slave_thread) - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(), ER(ER_SLAVE_FATAL_ERROR), buf); else /* @@ -12407,6 +12461,8 @@ Incident_log_event::Incident_log_event(const char *buf, uint event_len, DBUG_PRINT("info",("event_len: %u; common_header_len: %d; post_header_len: %d", event_len, common_header_len, post_header_len)); + m_message.str= NULL; + m_message.length= 0; int incident_number= uint2korr(buf + common_header_len); if (incident_number >= INCIDENT_COUNT || incident_number <= INCIDENT_NONE) @@ -12423,7 +12479,13 @@ Incident_log_event::Incident_log_event(const char *buf, uint event_len, uint8 len= 0; // Assignment to keep compiler happy const char *str= NULL; // Assignment to keep compiler happy read_str(&ptr, str_end, &str, &len); - m_message.str= const_cast<char*>(str); + if (!(m_message.str= (char*) my_malloc(len+1, MYF(MY_WME)))) + { + /* Mark this event invalid */ + m_incident= INCIDENT_NONE; + DBUG_VOID_RETURN; + } + strmake(m_message.str, str, len); m_message.length= len; DBUG_PRINT("info", ("m_incident: %d", m_incident)); DBUG_VOID_RETURN; @@ -12432,6 +12494,8 @@ Incident_log_event::Incident_log_event(const char *buf, uint event_len, Incident_log_event::~Incident_log_event() { + if (m_message.str) + my_free(m_message.str); } @@ -12527,7 +12591,14 @@ Incident_log_event::do_apply_event(rpl_group_info *rgi) { Relay_log_info const *rli= rgi->rli; DBUG_ENTER("Incident_log_event::do_apply_event"); - rli->report(ERROR_LEVEL, ER_SLAVE_INCIDENT, + + if (ignored_error_code(ER_SLAVE_INCIDENT)) + { + 
DBUG_PRINT("info", ("Ignoring Incident")); + DBUG_RETURN(0); + } + + rli->report(ERROR_LEVEL, ER_SLAVE_INCIDENT, NULL, ER(ER_SLAVE_INCIDENT), description(), m_message.length > 0 ? m_message.str : "<none>"); diff --git a/sql/log_event.h b/sql/log_event.h index 212215d97b6..16329ab925b 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -4690,7 +4690,16 @@ public: { DBUG_ENTER("Incident_log_event::Incident_log_event"); DBUG_PRINT("enter", ("m_incident: %d", m_incident)); - m_message= msg; + m_message.str= NULL; + m_message.length= 0; + if (!(m_message.str= (char*) my_malloc(msg.length+1, MYF(MY_WME)))) + { + /* Mark this event invalid */ + m_incident= INCIDENT_NONE; + DBUG_VOID_RETURN; + } + strmake(m_message.str, msg.str, msg.length); + m_message.length= msg.length; set_direct_logging(); /* Replicate the incident irregardless of @@skip_replication. */ flags&= ~LOG_EVENT_SKIP_REPLICATION_F; diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index eaa882518f5..88617e2263f 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -108,7 +108,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi) Error reporting borrowed from Query_log_event with many excessive simplifications (we don't honour --slave-skip-errors) */ - rli->report(ERROR_LEVEL, actual_error, + rli->report(ERROR_LEVEL, actual_error, NULL, "Error '%s' on opening tables", (actual_error ? ev_thd->get_stmt_da()->message() : "unexpected success or fatal error")); @@ -133,8 +133,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi) { DBUG_ASSERT(ptr->m_tabledef_valid); TABLE *conv_table; - if (!ptr->m_tabledef.compatible_with(thd, const_cast<Relay_log_info*>(rli), - ptr->table, &conv_table)) + if (!ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table)) { ev_thd->is_slave_error= 1; rgi->slave_close_thread_tables(ev_thd); @@ -234,7 +233,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi) break; default: - rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), + rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL, "Error in %s event: row application failed. %s", ev->get_type_str(), ev_thd->is_error() ? ev_thd->get_stmt_da()->message() : ""); @@ -251,7 +250,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi) if (error) { /* error has occured during the transaction */ - rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), + rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL, "Error in %s event: error during transaction execution " "on table %s.%s. %s", ev->get_type_str(), table->s->db.str, @@ -1402,7 +1401,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) simplifications (we don't honour --slave-skip-errors) */ uint actual_error= thd->net.last_errno; - rli->report(ERROR_LEVEL, actual_error, + rli->report(ERROR_LEVEL, actual_error, NULL, "Error '%s' in %s event: when locking tables", (actual_error ? 
thd->net.last_error : "unexpected success or fatal error"), @@ -1411,7 +1410,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) } else { - rli->report(ERROR_LEVEL, error, + rli->report(ERROR_LEVEL, error, NULL, "Error in %s event: when locking tables", get_type_str()); } @@ -1433,8 +1432,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) ptr= static_cast<RPL_TABLE_LIST*>(ptr->next_global), i++) { TABLE *conv_table; - if (ptr->m_tabledef.compatible_with(thd, const_cast<Relay_log_info*>(rli), - ptr->table, &conv_table)) + if (ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table)) { thd->is_slave_error= 1; rgi->slave_close_thread_tables(thd); @@ -1558,7 +1556,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) break; default: - rli->report(ERROR_LEVEL, thd->net.last_errno, + rli->report(ERROR_LEVEL, thd->net.last_errno, NULL, "Error in %s event: row application failed. %s", get_type_str(), thd->net.last_error ? thd->net.last_error : ""); @@ -1596,7 +1594,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) if (error) { /* error has occured during the transaction */ - rli->report(ERROR_LEVEL, thd->net.last_errno, + rli->report(ERROR_LEVEL, thd->net.last_errno, NULL, "Error in %s event: error during transaction execution " "on table %s.%s. %s", get_type_str(), table->s->db.str, @@ -1679,7 +1677,7 @@ int Old_rows_log_event::do_apply_event(rpl_group_info *rgi) */ DBUG_ASSERT(! thd->transaction_rollback_request); if ((error= (binlog_error ? trans_rollback_stmt(thd) : trans_commit_stmt(thd)))) - rli->report(ERROR_LEVEL, error, + rli->report(ERROR_LEVEL, error, NULL, "Error in %s event: commit of row events failed, " "table `%s`.`%s`", get_type_str(), m_table->s->db.str, diff --git a/sql/log_slow.h b/sql/log_slow.h index e8faf79a047..2ae07da97c3 100644 --- a/sql/log_slow.h +++ b/sql/log_slow.h @@ -31,6 +31,8 @@ #define QPLAN_QC_NO 1 << 6 #define QPLAN_TMP_DISK 1 << 7 #define QPLAN_TMP_TABLE 1 << 8 +#define QPLAN_FILESORT_PRIORITY_QUEUE 1 << 9 + /* ... 
*/ #define QPLAN_MAX ((ulong) 1) << 31 /* reserved as placeholder */ diff --git a/sql/mf_iocache.cc b/sql/mf_iocache.cc index d8848c1ee35..3ed9261f630 100644 --- a/sql/mf_iocache.cc +++ b/sql/mf_iocache.cc @@ -57,7 +57,7 @@ int _my_b_net_read(register IO_CACHE *info, uchar *Buffer, if (!info->end_of_file) DBUG_RETURN(1); /* because my_b_get (no _) takes 1 byte at a time */ - read_length=my_net_read(net); + read_length= my_net_read_packet(net, 0); if (read_length == packet_error) { info->error= -1; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 94503d507fe..fa4f92b26dd 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -375,6 +375,7 @@ static bool binlog_format_used= false; LEX_STRING opt_init_connect, opt_init_slave; mysql_cond_t COND_thread_cache; static mysql_cond_t COND_flush_thread_cache; +mysql_cond_t COND_slave_init; static DYNAMIC_ARRAY all_options; /* Global variables */ @@ -521,6 +522,7 @@ ulong binlog_stmt_cache_use= 0, binlog_stmt_cache_disk_use= 0; ulong max_connections, max_connect_errors; ulong extra_max_connections; ulong slave_retried_transactions; +ulong feature_files_opened_with_delayed_keys; ulonglong denied_connections; my_decimal decimal_zero; @@ -706,12 +708,12 @@ pthread_key(MEM_ROOT**,THR_MALLOC); pthread_key(THD*, THR_THD); mysql_mutex_t LOCK_thread_count, LOCK_thread_cache; mysql_mutex_t - LOCK_status, LOCK_error_log, LOCK_short_uuid_generator, + LOCK_status, LOCK_show_status, LOCK_error_log, LOCK_short_uuid_generator, LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_global_system_variables, LOCK_user_conn, LOCK_slave_list, LOCK_active_mi, - LOCK_connection_count, LOCK_error_messages; + LOCK_connection_count, LOCK_error_messages, LOCK_slave_init; mysql_mutex_t LOCK_stats, LOCK_global_user_client_stats, LOCK_global_table_stats, LOCK_global_index_stats; @@ -863,7 +865,8 @@ PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list, key_LOCK_gdl, key_LOCK_global_system_variables, key_LOCK_manager, key_LOCK_prepared_stmt_count, - key_LOCK_rpl_status, key_LOCK_server_started, key_LOCK_status, + key_LOCK_rpl_status, key_LOCK_server_started, + key_LOCK_status, key_LOCK_show_status, key_LOCK_system_variables_hash, key_LOCK_thd_data, key_LOCK_user_conn, key_LOCK_uuid_short_generator, key_LOG_LOCK_log, key_master_info_data_lock, key_master_info_run_lock, @@ -885,7 +888,8 @@ PSI_mutex_key key_LOCK_stats, key_LOCK_wakeup_ready, key_LOCK_wait_commit; PSI_mutex_key key_LOCK_gtid_waiting; -PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered; +PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered, + key_LOCK_slave_init; PSI_mutex_key key_TABLE_SHARE_LOCK_share; static PSI_mutex_info all_server_mutexes[]= @@ -922,6 +926,7 @@ static PSI_mutex_info all_server_mutexes[]= { &key_LOCK_rpl_status, "LOCK_rpl_status", PSI_FLAG_GLOBAL}, { &key_LOCK_server_started, "LOCK_server_started", PSI_FLAG_GLOBAL}, { &key_LOCK_status, "LOCK_status", PSI_FLAG_GLOBAL}, + { &key_LOCK_show_status, "LOCK_show_status", PSI_FLAG_GLOBAL}, { &key_LOCK_system_variables_hash, "LOCK_system_variables_hash", PSI_FLAG_GLOBAL}, { &key_LOCK_stats, "LOCK_stats", PSI_FLAG_GLOBAL}, { &key_LOCK_global_user_client_stats, "LOCK_global_user_client_stats", PSI_FLAG_GLOBAL}, @@ -948,6 +953,7 @@ static PSI_mutex_info all_server_mutexes[]= { &key_LOCK_error_messages, "LOCK_error_messages", PSI_FLAG_GLOBAL}, { &key_LOCK_prepare_ordered, "LOCK_prepare_ordered", PSI_FLAG_GLOBAL}, { &key_LOCK_commit_ordered, "LOCK_commit_ordered", PSI_FLAG_GLOBAL}, + { &key_LOCK_slave_init, 
"LOCK_slave_init", PSI_FLAG_GLOBAL}, { &key_LOG_INFO_lock, "LOG_INFO::lock", 0}, { &key_LOCK_thread_count, "LOCK_thread_count", PSI_FLAG_GLOBAL}, { &key_LOCK_thread_cache, "LOCK_thread_cache", PSI_FLAG_GLOBAL}, @@ -1002,7 +1008,7 @@ PSI_cond_key key_TC_LOG_MMAP_COND_queue_busy; PSI_cond_key key_COND_rpl_thread_queue, key_COND_rpl_thread, key_COND_rpl_thread_pool, key_COND_parallel_entry, key_COND_group_commit_orderer, - key_COND_prepare_ordered; + key_COND_prepare_ordered, key_COND_slave_init; PSI_cond_key key_COND_wait_gtid, key_COND_gtid_ignore_duplicates; static PSI_cond_info all_server_conds[]= @@ -1051,6 +1057,7 @@ static PSI_cond_info all_server_conds[]= { &key_COND_parallel_entry, "COND_parallel_entry", 0}, { &key_COND_group_commit_orderer, "COND_group_commit_orderer", 0}, { &key_COND_prepare_ordered, "COND_prepare_ordered", 0}, + { &key_COND_slave_init, "COND_slave_init", 0}, { &key_COND_wait_gtid, "COND_wait_gtid", 0}, { &key_COND_gtid_ignore_duplicates, "COND_gtid_ignore_duplicates", 0} }; @@ -1116,65 +1123,60 @@ void net_before_header_psi(struct st_net *net, void *user_data, size_t /* unused thd= static_cast<THD*> (user_data); DBUG_ASSERT(thd != NULL); - if (thd->m_server_idle) - { - /* - The server is IDLE, waiting for the next command. - Technically, it is a wait on a socket, which may take a long time, - because the call is blocking. - Disable the socket instrumentation, to avoid recording a SOCKET event. - Instead, start explicitly an IDLE event. - */ - MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_IDLE); - MYSQL_START_IDLE_WAIT(thd->m_idle_psi, &thd->m_idle_state); - } + /* + We only come where when the server is IDLE, waiting for the next command. + Technically, it is a wait on a socket, which may take a long time, + because the call is blocking. + Disable the socket instrumentation, to avoid recording a SOCKET event. + Instead, start explicitly an IDLE event. + */ + MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_IDLE); + MYSQL_START_IDLE_WAIT(thd->m_idle_psi, &thd->m_idle_state); } -void net_after_header_psi(struct st_net *net, void *user_data, size_t /* unused: count */, my_bool rc) +void net_after_header_psi(struct st_net *net, void *user_data, + size_t /* unused: count */, my_bool rc) { THD *thd; thd= static_cast<THD*> (user_data); DBUG_ASSERT(thd != NULL); - if (thd->m_server_idle) - { - /* - The server just got data for a network packet header, - from the network layer. - The IDLE event is now complete, since we now have a message to process. - We need to: - - start a new STATEMENT event - - start a new STAGE event, within this statement, - - start recording SOCKET WAITS events, within this stage. - The proper order is critical to get events numbered correctly, - and nested in the proper parent. - */ - MYSQL_END_IDLE_WAIT(thd->m_idle_psi); - - if (! rc) - { - thd->m_statement_psi= MYSQL_START_STATEMENT(&thd->m_statement_state, - stmt_info_new_packet.m_key, - thd->db, thd->db_length, - thd->charset()); + /* + The server just got data for a network packet header, + from the network layer. + The IDLE event is now complete, since we now have a message to process. + We need to: + - start a new STATEMENT event + - start a new STAGE event, within this statement, + - start recording SOCKET WAITS events, within this stage. + The proper order is critical to get events numbered correctly, + and nested in the proper parent. + */ + MYSQL_END_IDLE_WAIT(thd->m_idle_psi); - THD_STAGE_INFO(thd, stage_init); - } + if (! 
rc) + { + thd->m_statement_psi= MYSQL_START_STATEMENT(&thd->m_statement_state, + stmt_info_new_packet.m_key, + thd->db, thd->db_length, + thd->charset()); - /* - TODO: consider recording a SOCKET event for the bytes just read, - by also passing count here. - */ - MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_ACTIVE); + THD_STAGE_INFO(thd, stage_init); } + + /* + TODO: consider recording a SOCKET event for the bytes just read, + by also passing count here. + */ + MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_ACTIVE); } + void init_net_server_extension(THD *thd) { /* Start with a clean state for connection events. */ thd->m_idle_psi= NULL; thd->m_statement_psi= NULL; - thd->m_server_idle= false; /* Hook up the NET_SERVER callback in the net layer. */ thd->m_net_server_extension.m_user_data= thd; thd->m_net_server_extension.m_before_header= net_before_header_psi; @@ -2201,6 +2203,7 @@ static void clean_up_mutexes() mysql_mutex_destroy(&LOCK_thread_count); mysql_mutex_destroy(&LOCK_thread_cache); mysql_mutex_destroy(&LOCK_status); + mysql_mutex_destroy(&LOCK_show_status); mysql_mutex_destroy(&LOCK_delayed_insert); mysql_mutex_destroy(&LOCK_delayed_status); mysql_mutex_destroy(&LOCK_delayed_create); @@ -2238,6 +2241,8 @@ static void clean_up_mutexes() mysql_mutex_destroy(&LOCK_prepare_ordered); mysql_cond_destroy(&COND_prepare_ordered); mysql_mutex_destroy(&LOCK_commit_ordered); + mysql_mutex_destroy(&LOCK_slave_init); + mysql_cond_destroy(&COND_slave_init); DBUG_VOID_RETURN; } @@ -2440,6 +2445,7 @@ static MYSQL_SOCKET activate_tcp_port(uint port) int error; int arg; char port_buf[NI_MAXSERV]; + const char *real_bind_addr_str; MYSQL_SOCKET ip_sock= MYSQL_INVALID_SOCKET; DBUG_ENTER("activate_tcp_port"); DBUG_PRINT("general",("IP Socket is %d",port)); @@ -2448,16 +2454,36 @@ static MYSQL_SOCKET activate_tcp_port(uint port) hints.ai_flags= AI_PASSIVE; hints.ai_socktype= SOCK_STREAM; hints.ai_family= AF_UNSPEC; + + if (my_bind_addr_str && strcmp(my_bind_addr_str, "*") == 0) + real_bind_addr_str= NULL; // windows doesn't seem to support * here + else + real_bind_addr_str= my_bind_addr_str; my_snprintf(port_buf, NI_MAXSERV, "%d", port); - error= getaddrinfo(my_bind_addr_str, port_buf, &hints, &ai); + error= getaddrinfo(real_bind_addr_str, port_buf, &hints, &ai); if (error != 0) { DBUG_PRINT("error",("Got error: %d from getaddrinfo()", error)); - sql_perror(ER_DEFAULT(ER_IPSOCK_ERROR)); /* purecov: tested */ + + sql_print_error("%s: %s", ER_DEFAULT(ER_IPSOCK_ERROR), gai_strerror(error)); unireg_abort(1); /* purecov: tested */ } + /* + special case: for wildcard addresses prefer ipv6 over ipv4, + because we later switch off IPV6_V6ONLY, so ipv6 wildcard + addresses will work for ipv4 too + */ + if (!real_bind_addr_str && ai->ai_family == AF_INET && ai->ai_next + && ai->ai_next->ai_family == AF_INET6) + { + a= ai; + ai= ai->ai_next; + a->ai_next= ai->ai_next; + ai->ai_next= a; + } + for (a= ai; a != NULL; a= a->ai_next) { ip_sock= mysql_socket_socket(key_socket_tcpip, a->ai_family, @@ -4463,6 +4489,7 @@ static int init_thread_environment() mysql_mutex_init(key_LOCK_thread_count, &LOCK_thread_count, MY_MUTEX_INIT_FAST); mysql_mutex_init(key_LOCK_thread_cache, &LOCK_thread_cache, MY_MUTEX_INIT_FAST); mysql_mutex_init(key_LOCK_status, &LOCK_status, MY_MUTEX_INIT_FAST); + mysql_mutex_init(key_LOCK_show_status, &LOCK_show_status, MY_MUTEX_INIT_SLOW); mysql_mutex_init(key_LOCK_delayed_insert, &LOCK_delayed_insert, MY_MUTEX_INIT_FAST); 
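The activate_tcp_port() hunk above treats --bind-address=* as a NULL node for getaddrinfo() and, for wildcard binds, moves an AF_INET6 result ahead of AF_INET so one listener (with IPV6_V6ONLY cleared later) can serve both families. A standalone POSIX sketch of that handling with error checking reduced to the bare minimum; it mirrors the pattern, not the server's exact code.

#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <cstring>
#include <cstdio>

int main()
{
  const char *bind_addr = "*";                   // server option value
  const char *node = std::strcmp(bind_addr, "*") == 0 ? nullptr : bind_addr;

  addrinfo hints{}, *ai = nullptr;
  hints.ai_flags = AI_PASSIVE;                   // wildcard when node == NULL
  hints.ai_socktype = SOCK_STREAM;
  hints.ai_family = AF_UNSPEC;

  if (getaddrinfo(node, "3306", &hints, &ai) != 0)
    return 1;

  // Wildcard bind: if IPv4 comes first but an IPv6 entry follows, swap them
  // so the IPv6 wildcard (which can also accept IPv4) is tried first.
  if (!node && ai && ai->ai_family == AF_INET && ai->ai_next &&
      ai->ai_next->ai_family == AF_INET6) {
    addrinfo *v4 = ai;
    ai = ai->ai_next;                            // IPv6 entry becomes the head
    v4->ai_next = ai->ai_next;
    ai->ai_next = v4;
  }

  for (addrinfo *a = ai; a; a = a->ai_next)
    std::printf("candidate family %d\n", a->ai_family);

  freeaddrinfo(ai);
  return 0;
}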
mysql_mutex_init(key_LOCK_delayed_status, @@ -4497,6 +4524,9 @@ static int init_thread_environment() mysql_cond_init(key_COND_prepare_ordered, &COND_prepare_ordered, NULL); mysql_mutex_init(key_LOCK_commit_ordered, &LOCK_commit_ordered, MY_MUTEX_INIT_SLOW); + mysql_mutex_init(key_LOCK_slave_init, &LOCK_slave_init, + MY_MUTEX_INIT_SLOW); + mysql_cond_init(key_COND_slave_init, &COND_slave_init, NULL); #ifdef HAVE_OPENSSL mysql_mutex_init(key_LOCK_des_key_file, @@ -4985,6 +5015,8 @@ a file name for --log-bin-index option", opt_binlog_index_name); if (ha_init_errors()) DBUG_RETURN(1); + tc_log= 0; // ha_initialize_handlerton() needs that + if (plugin_init(&remaining_argc, remaining_argv, (opt_noacl ? PLUGIN_INIT_SKIP_PLUGIN_TABLE : 0) | (opt_abort ? PLUGIN_INIT_SKIP_INITIALIZATION : 0))) @@ -5111,12 +5143,6 @@ a file name for --log-bin-index option", opt_binlog_index_name); tc_log= get_tc_log_implementation(); - WSREP_DEBUG("Initial TC log open: %s", - (tc_log == &mysql_bin_log) ? "binlog" : - (tc_log == &tc_log_mmap) ? "mmap" : - (tc_log == &tc_log_dummy) ? "dummy" : "unknown" - ); - if (tc_log->open(opt_bin_log ? opt_bin_logname : opt_tc_log_file)) { sql_print_error("Can't init tc log"); @@ -6452,7 +6478,8 @@ void handle_connections_sockets() (void) mysql_socket_close(new_sock); /* The connection was refused by TCP wrappers. - There are no details (by client IP) available to update the host_cache. + There are no details (by client IP) available to update the + host_cache. */ statistic_increment(connection_errors_tcpwrap, &LOCK_status); continue; @@ -6961,7 +6988,7 @@ struct my_option my_long_options[]= {"autocommit", 0, "Set default value for autocommit (0 or 1)", &opt_autocommit, &opt_autocommit, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, NULL}, - {"bind-address", OPT_BIND_ADDRESS, "IP address to bind to.", + {"bind-address", 0, "IP address to bind to.", &my_bind_addr_str, &my_bind_addr_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"binlog-do-db", OPT_BINLOG_DO_DB, @@ -7457,7 +7484,6 @@ static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff) var->type= SHOW_MY_BOOL; var->value= buff; - mysql_mutex_unlock(&LOCK_status); mysql_mutex_lock(&LOCK_active_mi); if (master_info_index) { @@ -7469,7 +7495,6 @@ static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff) mi->rli.slave_running); } mysql_mutex_unlock(&LOCK_active_mi); - mysql_mutex_lock(&LOCK_status); if (mi) *((my_bool *)buff)= tmp; else @@ -7486,7 +7511,6 @@ static int show_slave_received_heartbeats(THD *thd, SHOW_VAR *var, char *buff) var->type= SHOW_LONGLONG; var->value= buff; - mysql_mutex_unlock(&LOCK_status); mysql_mutex_lock(&LOCK_active_mi); if (master_info_index) { @@ -7497,7 +7521,6 @@ static int show_slave_received_heartbeats(THD *thd, SHOW_VAR *var, char *buff) tmp= mi->received_heartbeats; } mysql_mutex_unlock(&LOCK_active_mi); - mysql_mutex_lock(&LOCK_status); if (mi) *((longlong *)buff)= tmp; else @@ -7514,7 +7537,6 @@ static int show_heartbeat_period(THD *thd, SHOW_VAR *var, char *buff) var->type= SHOW_CHAR; var->value= buff; - mysql_mutex_unlock(&LOCK_status); mysql_mutex_lock(&LOCK_active_mi); if (master_info_index) { @@ -7525,7 +7547,6 @@ static int show_heartbeat_period(THD *thd, SHOW_VAR *var, char *buff) tmp= mi->heartbeat_period; } mysql_mutex_unlock(&LOCK_active_mi); - mysql_mutex_lock(&LOCK_status); if (mi) sprintf(buff, "%.3f", tmp); else @@ -8058,6 +8079,7 @@ SHOW_VAR status_vars[]= { {"Empty_queries", (char*) offsetof(STATUS_VAR, empty_queries), SHOW_LONG_STATUS}, {"Executed_events", (char*) 
&executed_events, SHOW_LONG_NOFLUSH }, {"Executed_triggers", (char*) offsetof(STATUS_VAR, executed_triggers), SHOW_LONG_STATUS}, + {"Feature_delay_key_write", (char*) &feature_files_opened_with_delayed_keys, SHOW_LONG }, {"Feature_dynamic_columns", (char*) offsetof(STATUS_VAR, feature_dynamic_columns), SHOW_LONG_STATUS}, {"Feature_fulltext", (char*) offsetof(STATUS_VAR, feature_fulltext), SHOW_LONG_STATUS}, {"Feature_gis", (char*) offsetof(STATUS_VAR, feature_gis), SHOW_LONG_STATUS}, @@ -8141,6 +8163,7 @@ SHOW_VAR status_vars[]= { {"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG}, {"Slow_queries", (char*) offsetof(STATUS_VAR, long_query_count), SHOW_LONG_STATUS}, {"Sort_merge_passes", (char*) offsetof(STATUS_VAR, filesort_merge_passes_), SHOW_LONG_STATUS}, + {"Sort_priority_queue_sorts",(char*) offsetof(STATUS_VAR, filesort_pq_sorts_), SHOW_LONG_STATUS}, {"Sort_range", (char*) offsetof(STATUS_VAR, filesort_range_count_), SHOW_LONG_STATUS}, {"Sort_rows", (char*) offsetof(STATUS_VAR, filesort_rows_), SHOW_LONG_STATUS}, {"Sort_scan", (char*) offsetof(STATUS_VAR, filesort_scan_count_), SHOW_LONG_STATUS}, @@ -8949,6 +8972,7 @@ mysql_getopt_value(const char *name, uint length, case OPT_KEY_CACHE_DIVISION_LIMIT: case OPT_KEY_CACHE_AGE_THRESHOLD: case OPT_KEY_CACHE_PARTITIONS: + case OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE: { KEY_CACHE *key_cache; if (!(key_cache= get_or_create_key_cache(name, length))) @@ -8968,6 +8992,8 @@ mysql_getopt_value(const char *name, uint length, return &key_cache->param_age_threshold; case OPT_KEY_CACHE_PARTITIONS: return (uchar**) &key_cache->param_partitions; + case OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE: + return (uchar**) &key_cache->changed_blocks_hash_size; } } case OPT_REPLICATE_DO_DB: diff --git a/sql/mysqld.h b/sql/mysqld.h index d68831a0d4f..37ef449874c 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -254,7 +254,8 @@ extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list, key_LOCK_gdl, key_LOCK_global_system_variables, key_LOCK_logger, key_LOCK_manager, key_LOCK_prepared_stmt_count, - key_LOCK_rpl_status, key_LOCK_server_started, key_LOCK_status, + key_LOCK_rpl_status, key_LOCK_server_started, + key_LOCK_status, key_LOCK_show_status, key_LOCK_thd_data, key_LOCK_user_conn, key_LOG_LOCK_log, key_master_info_data_lock, key_master_info_run_lock, @@ -513,12 +514,13 @@ extern MYSQL_PLUGIN_IMPORT key_map key_map_full; /* Should be threaded Server mutex locks and condition variables. 
*/ extern mysql_mutex_t - LOCK_item_func_sleep, LOCK_status, + LOCK_item_func_sleep, LOCK_status, LOCK_show_status, LOCK_error_log, LOCK_delayed_insert, LOCK_short_uuid_generator, LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone, LOCK_slave_list, LOCK_active_mi, LOCK_manager, LOCK_global_system_variables, LOCK_user_conn, - LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count; + LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count, + LOCK_slave_init; extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_thread_count; #ifdef HAVE_OPENSSL extern mysql_mutex_t LOCK_des_key_file; @@ -529,6 +531,7 @@ extern mysql_rwlock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave; extern mysql_rwlock_t LOCK_system_variables_hash; extern mysql_cond_t COND_thread_count; extern mysql_cond_t COND_manager; +extern mysql_cond_t COND_slave_init; extern int32 thread_running; extern int32 thread_count; extern my_atomic_rwlock_t thread_running_lock, thread_count_lock; @@ -548,7 +551,6 @@ extern MYSQL_PLUGIN_IMPORT pthread_key(THD*, THR_THD); enum options_mysqld { OPT_to_set_the_start_number=256, - OPT_BIND_ADDRESS, OPT_BINLOG_DO_DB, OPT_BINLOG_FORMAT, OPT_BINLOG_IGNORE_DB, @@ -556,7 +558,6 @@ enum options_mysqld OPT_BOOTSTRAP, OPT_CONSOLE, OPT_DEBUG_SYNC_TIMEOUT, - OPT_DELAY_KEY_WRITE_ALL, OPT_DEPRECATED_OPTION, OPT_IGNORE_DB_DIRECTORY, OPT_ISAM_LOG, @@ -565,6 +566,7 @@ enum options_mysqld OPT_KEY_CACHE_BLOCK_SIZE, OPT_KEY_CACHE_DIVISION_LIMIT, OPT_KEY_CACHE_PARTITIONS, + OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE, OPT_LOG_BASENAME, OPT_LOG_ERROR, OPT_LOWER_CASE_TABLE_NAMES, @@ -572,7 +574,6 @@ enum options_mysqld OPT_PLUGIN_LOAD, OPT_PLUGIN_LOAD_ADD, OPT_PFS_INSTRUMENT, - OPT_POOL_OF_THREADS, OPT_REPLICATE_DO_DB, OPT_REPLICATE_DO_TABLE, OPT_REPLICATE_IGNORE_DB, @@ -583,10 +584,7 @@ enum options_mysqld OPT_SAFE, OPT_SERVER_ID, OPT_SKIP_HOST_CACHE, - OPT_SKIP_LOCK, OPT_SKIP_RESOLVE, - OPT_SKIP_STACK_TRACE, - OPT_SKIP_SYMLINKS, OPT_SSL_CA, OPT_SSL_CAPATH, OPT_SSL_CERT, @@ -594,7 +592,6 @@ enum options_mysqld OPT_SSL_CRL, OPT_SSL_CRLPATH, OPT_SSL_KEY, - OPT_UPDATE_LOG, OPT_WANT_CORE, OPT_MYSQL_COMPATIBILITY, OPT_MYSQL_TO_BE_IMPLEMENTED, diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 546542fa207..eb34fcc2d77 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -824,7 +824,8 @@ static my_bool my_net_skip_rest(NET *net, uint32 remain, thr_alarm_t *alarmed, */ static ulong -my_real_read(NET *net, size_t *complen) +my_real_read(NET *net, size_t *complen, + my_bool header __attribute__((unused))) { uchar *pos; size_t length; @@ -839,14 +840,16 @@ my_real_read(NET *net, size_t *complen) NET_HEADER_SIZE); #ifdef MYSQL_SERVER size_t count= remain; - struct st_net_server *server_extension; - server_extension= static_cast<st_net_server*> (net->extension); - if (server_extension != NULL) + struct st_net_server *server_extension= 0; + + if (header) { - void *user_data= server_extension->m_user_data; - DBUG_ASSERT(server_extension->m_before_header != NULL); - DBUG_ASSERT(server_extension->m_after_header != NULL); - server_extension->m_before_header(net, user_data, count); + server_extension= static_cast<st_net_server*> (net->extension); + if (server_extension != NULL) + { + void *user_data= server_extension->m_user_data; + server_extension->m_before_header(net, user_data, count); + } } #endif @@ -1042,6 +1045,16 @@ end: } +/* Old interface. 
See my_net_read_packet() for function description */ + +#undef my_net_read + +ulong my_net_read(NET *net) +{ + return my_net_read_packet(net, 0); +} + + /** Read a packet from the client/server and return it without the internal package header. @@ -1053,13 +1066,17 @@ end: If the packet was compressed, its uncompressed and the length of the uncompressed packet is returned. + read_from_server is set when the server is reading a new command + from the client. + @return The function returns the length of the found packet or packet_error. net->read_pos points to the read data. */ + ulong -my_net_read(NET *net) +my_net_read_packet(NET *net, my_bool read_from_server) { size_t len, complen; @@ -1069,7 +1086,7 @@ my_net_read(NET *net) if (!net->compress) { #endif - len = my_real_read(net,&complen); + len = my_real_read(net,&complen, read_from_server); if (len == MAX_PACKET_LENGTH) { /* First packet of a multi-packet. Concatenate the packets */ @@ -1079,7 +1096,7 @@ my_net_read(NET *net) { net->where_b += len; total_length += len; - len = my_real_read(net,&complen); + len = my_real_read(net,&complen, 0); } while (len == MAX_PACKET_LENGTH); if (len != packet_error) len+= total_length; @@ -1171,11 +1188,13 @@ my_net_read(NET *net) } net->where_b=buf_length; - if ((packet_len = my_real_read(net,&complen)) == packet_error) + if ((packet_len = my_real_read(net,&complen, read_from_server)) + == packet_error) { MYSQL_NET_READ_DONE(1, 0); return packet_error; } + read_from_server= 0; if (my_uncompress(net->buff + net->where_b, packet_len, &complen)) { diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 0dac6e56c1b..1cea800fbbc 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2008, 2013, Monty Program Ab. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. + Copyright (c) 2008, 2014, Monty Program Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -359,31 +359,54 @@ public: elements(1),use_count(1),left(0),right(0), next_key_part(0), color(BLACK), type(type_arg) {} - inline bool is_same(SEL_ARG *arg) + /** + returns true if a range predicate is equal. Use all_same() + to check for equality of all the predicates on this keypart. 
+ */ + inline bool is_same(const SEL_ARG *arg) const { if (type != arg->type || part != arg->part) - return 0; + return false; if (type != KEY_RANGE) - return 1; + return true; return cmp_min_to_min(arg) == 0 && cmp_max_to_max(arg) == 0; } + /** + returns true if all the predicates in the keypart tree are equal + */ + bool all_same(const SEL_ARG *arg) const + { + if (type != arg->type || part != arg->part) + return false; + if (type != KEY_RANGE) + return true; + if (arg == this) + return true; + const SEL_ARG *cmp_arg= arg->first(); + const SEL_ARG *cur_arg= first(); + for (; cur_arg && cmp_arg && cur_arg->is_same(cmp_arg); + cur_arg= cur_arg->next, cmp_arg= cmp_arg->next) ; + if (cur_arg || cmp_arg) + return false; + return true; + } inline void merge_flags(SEL_ARG *arg) { maybe_flag|=arg->maybe_flag; } inline void maybe_smaller() { maybe_flag=1; } /* Return true iff it's a single-point null interval */ inline bool is_null_interval() { return maybe_null && max_value[0] == 1; } - inline int cmp_min_to_min(SEL_ARG* arg) + inline int cmp_min_to_min(const SEL_ARG* arg) const { return sel_cmp(field,min_value, arg->min_value, min_flag, arg->min_flag); } - inline int cmp_min_to_max(SEL_ARG* arg) + inline int cmp_min_to_max(const SEL_ARG* arg) const { return sel_cmp(field,min_value, arg->max_value, min_flag, arg->max_flag); } - inline int cmp_max_to_max(SEL_ARG* arg) + inline int cmp_max_to_max(const SEL_ARG* arg) const { return sel_cmp(field,max_value, arg->max_value, max_flag, arg->max_flag); } - inline int cmp_max_to_min(SEL_ARG* arg) + inline int cmp_max_to_min(const SEL_ARG* arg) const { return sel_cmp(field,max_value, arg->min_value, max_flag, arg->min_flag); } @@ -563,6 +586,7 @@ public: void test_use_count(SEL_ARG *root); #endif SEL_ARG *first(); + const SEL_ARG *first() const; SEL_ARG *last(); void make_root(); inline bool simple_key() @@ -652,6 +676,18 @@ public: SEL_ARG *clone_tree(RANGE_OPT_PARAM *param); }; +/** + Helper function to compare two SEL_ARG's. +*/ +static bool all_same(const SEL_ARG *sa1, const SEL_ARG *sa2) +{ + if (sa1 == NULL && sa2 == NULL) + return true; + if ((sa1 != NULL && sa2 == NULL) || (sa1 == NULL && sa2 != NULL)) + return false; + return sa1->all_same(sa2); +} + class SEL_IMERGE; #define CLONE_KEY1_MAYBE 1 @@ -2501,6 +2537,13 @@ SEL_ARG *SEL_ARG::clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent, return tmp; } +/** + This gives the first SEL_ARG in the interval list, and the minimal element + in the red-black tree + + @return + SEL_ARG first SEL_ARG in the interval list +*/ SEL_ARG *SEL_ARG::first() { SEL_ARG *next_arg=this; @@ -2511,6 +2554,11 @@ SEL_ARG *SEL_ARG::first() return next_arg; } +const SEL_ARG *SEL_ARG::first() const +{ + return const_cast<SEL_ARG*>(this)->first(); +} + SEL_ARG *SEL_ARG::last() { SEL_ARG *next_arg=this; @@ -11124,6 +11172,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, uint part; bool create_err= FALSE; Cost_estimate cost; + uint max_used_key_len; old_root= thd->mem_root; /* The following call may change thd->mem_root */ @@ -11150,12 +11199,13 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, range->min_length= range->max_length= ref->key_length; range->min_keypart_map= range->max_keypart_map= make_prev_keypart_map(ref->key_parts); - range->flag= (ref->key_length == key_info->key_length ? 
EQ_RANGE : 0); + range->flag= EQ_RANGE; if (!(quick->key_parts=key_part=(KEY_PART *) alloc_root(&quick->alloc,sizeof(KEY_PART)*ref->key_parts))) goto err; - + + max_used_key_len=0; for (part=0 ; part < ref->key_parts ;part++,key_part++) { key_part->part=part; @@ -11164,7 +11214,12 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, key_part->store_length= key_info->key_part[part].store_length; key_part->null_bit= key_info->key_part[part].null_bit; key_part->flag= (uint8) key_info->key_part[part].key_part_flag; + + max_used_key_len +=key_info->key_part[part].store_length; } + + quick->max_used_key_length= max_used_key_len; + if (insert_dynamic(&quick->ranges,(uchar*)&range)) goto err; @@ -12407,6 +12462,66 @@ void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names, } +void QUICK_RANGE_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set) +{ + uint key_len; + KEY_PART *part= key_parts; + for (key_len=0; key_len < max_used_key_length; + key_len += (part++)->store_length) + { + bitmap_set_bit(col_set, part->field->field_index); + } +} + + +void QUICK_GROUP_MIN_MAX_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set) +{ + uint key_len; + KEY_PART_INFO *part= index_info->key_part; + for (key_len=0; key_len < max_used_key_length; + key_len += (part++)->store_length) + { + bitmap_set_bit(col_set, part->field->field_index); + } +} + + +void QUICK_ROR_INTERSECT_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set) +{ + List_iterator_fast<QUICK_SELECT_WITH_RECORD> it(quick_selects); + QUICK_SELECT_WITH_RECORD *quick; + while ((quick= it++)) + { + quick->quick->add_used_key_part_to_set(col_set); + } +} + + +void QUICK_INDEX_SORT_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set) +{ + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + quick->add_used_key_part_to_set(col_set); + } + if (pk_quick_select) + pk_quick_select->add_used_key_part_to_set(col_set); +} + + +void QUICK_ROR_UNION_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set) +{ + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + + while ((quick= it++)) + { + quick->add_used_key_part_to_set(col_set); + } +} + + /******************************************************************************* * Implementation of QUICK_GROUP_MIN_MAX_SELECT *******************************************************************************/ @@ -12414,6 +12529,8 @@ void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names, static inline uint get_field_keypart(KEY *index, Field *field); static inline SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, PARAM *param, uint *param_idx); +static bool get_sel_arg_for_keypart(Field *field, SEL_ARG *index_range_tree, + SEL_ARG **cur_range); static bool get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, KEY_PART_INFO *first_non_group_part, KEY_PART_INFO *min_max_arg_part, @@ -12479,6 +12596,16 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, never stored after a unique key lookup in the clustered index and furhter index_next/prev calls can not be used. So loose index scan optimization can not be used in this case. + SA7. If Q has both AGG_FUNC(DISTINCT ...) and MIN/MAX() functions then this + access method is not used. + For above queries MIN/MAX() aggregation has to be done at + nested_loops_join (end_send_group). But with current design MIN/MAX() + is always set as part of loose index scan. 
Because of this mismatch + MIN() and MAX() values will be set incorrectly. For such queries to + work we need a new interface for loose index scan. This new interface + should only fetch records with min and max values and let + end_send_group to do aggregation. Until then do not use + loose_index_scan. GA1. If Q has a GROUP BY clause, then GA is a prefix of I. That is, if G_i = A_j => i = j. GA2. If Q has a DISTINCT clause, then there is a permutation of SA that @@ -12510,6 +12637,8 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, above tests. By transitivity then it also follows that each WA_i participates in the index I (if this was already tested for GA, NGA and C). + WA2. If there is a predicate on C, then it must be in conjunction + to all predicates on all earlier keyparts in I. C) Overall query form: SELECT EXPR([A_1,...,A_k], [B_1,...,B_m], [MIN(C)], [MAX(C)]) @@ -12644,6 +12773,13 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) DBUG_RETURN(NULL); } } + + /* Check (SA7). */ + if (is_agg_distinct && (have_max || have_min)) + { + DBUG_RETURN(NULL); + } + /* Check (SA5). */ if (join->select_distinct) { @@ -12933,6 +13069,25 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) } } + /** + Test WA2:If there are conditions on a column C participating in + MIN/MAX, those conditions must be conjunctions to all earlier + keyparts. Otherwise, Loose Index Scan cannot be used. + */ + if (tree && min_max_arg_item) + { + uint dummy; + SEL_ARG *index_range_tree= get_index_range_tree(cur_index, tree, param, + &dummy); + SEL_ARG *cur_range= NULL; + if (get_sel_arg_for_keypart(min_max_arg_part->field, + index_range_tree, &cur_range) || + (cur_range && cur_range->type != SEL_ARG::KEY_RANGE)) + { + goto next_index; + } + } + /* If we got to this point, cur_index_info passes the test. */ key_infix_parts= cur_key_infix_len ? (uint) (first_non_infix_part - first_non_group_part) : 0; @@ -13250,73 +13405,75 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item, /* - Get SEL_ARG tree, if any, for the keypart covering non grouping - attribute (NGA) field 'nga_field'. + Get the SEL_ARG tree 'tree' for the keypart covering 'field', if + any. 'tree' must be a unique conjunction to ALL predicates in earlier + keyparts of 'keypart_tree'. + + E.g., if 'keypart_tree' is for a composite index (kp1,kp2) and kp2 + covers 'field', all these conditions satisfies the requirement: - This function enforces the NGA3 test: If 'keypart_tree' contains a - condition for 'nga_field', there can only be one range. In the - opposite case, this function returns with error and 'cur_range' - should not be used. + 1. "(kp1=2 OR kp1=3) AND kp2=10" => returns "kp2=10" + 2. "(kp1=2 AND kp2=10) OR (kp1=3 AND kp2=10)" => returns "kp2=10" + 3. "(kp1=2 AND (kp2=10 OR kp2=11)) OR (kp1=3 AND (kp2=10 OR kp2=11))" + => returns "kp2=10 OR kp2=11" - Note that the NGA1 and NGA2 requirements, like whether or not the - range predicate for 'nga_field' is equality, is not tested by this - function. + whereas these do not + 1. "(kp1=2 AND kp2=10) OR kp1=3" + 2. "(kp1=2 AND kp2=10) OR (kp1=3 AND kp2=11)" + 3. "(kp1=2 AND kp2=10) OR (kp1=3 AND (kp2=10 OR kp2=11))" - @param[in] nga_field The NGA field we want the SEL_ARG tree for + This function effectively tests requirement WA2. In combination with + a test that the returned tree has no more than one range it is also + a test of NGA3. 
+ + @param[in] field The field we want the SEL_ARG tree for @param[in] keypart_tree Root node of the SEL_ARG* tree for the index @param[out] cur_range The SEL_ARG tree, if any, for the keypart covering field 'keypart_field' - @retval true 'keypart_tree' contained a predicate for 'nga_field' but - multiple ranges exists. 'cur_range' should not be used. + @retval true 'keypart_tree' contained a predicate for 'field' that + is not conjunction to all predicates on earlier keyparts @retval false otherwise */ static bool -get_sel_arg_for_keypart(Field *nga_field, +get_sel_arg_for_keypart(Field *field, SEL_ARG *keypart_tree, SEL_ARG **cur_range) { - if(keypart_tree == NULL) + if (keypart_tree == NULL) return false; - if(keypart_tree->field->eq(nga_field)) + if (keypart_tree->field->eq(field)) { - /* - Enforce NGA3: If a condition for nga_field has been found, only - a single range is allowed. - */ - if (keypart_tree->prev || keypart_tree->next) - return true; // There are multiple ranges - *cur_range= keypart_tree; return false; } - SEL_ARG *found_tree= NULL; + SEL_ARG *tree_first_range= NULL; SEL_ARG *first_kp= keypart_tree->first(); - for (SEL_ARG *cur_kp= first_kp; cur_kp && !found_tree; - cur_kp= cur_kp->next) + for (SEL_ARG *cur_kp= first_kp; cur_kp; cur_kp= cur_kp->next) { + SEL_ARG *curr_tree= NULL; if (cur_kp->next_key_part) { - if (get_sel_arg_for_keypart(nga_field, + if (get_sel_arg_for_keypart(field, cur_kp->next_key_part, - &found_tree)) + &curr_tree)) return true; - } /* - Enforce NGA3: If a condition for nga_field has been found,only - a single range is allowed. - */ - if (found_tree && first_kp->next) - return true; // There are multiple ranges + Check if the SEL_ARG tree for 'field' is identical for all ranges in + 'keypart_tree + */ + if (cur_kp == first_kp) + tree_first_range= curr_tree; + else if (!all_same(tree_first_range, curr_tree)) + return true; } - *cur_range= found_tree; + *cur_range= tree_first_range; return false; } - /* Extract a sequence of constants from a conjunction of equality predicates. @@ -13339,7 +13496,8 @@ get_sel_arg_for_keypart(Field *nga_field, (const_ci = NG_i).. In addition, there can only be one range when there is such a gap. Thus all the NGF_i attributes must fill the 'gap' between the last group-by - attribute and the MIN/MAX attribute in the index (if present). If these + attribute and the MIN/MAX attribute in the index (if present). Also ensure + that there is only a single range on NGF_i (NGA3). If these conditions hold, copy each constant from its corresponding predicate into key_infix, in the order its NG_i attribute appears in the index, and update key_infix_len with the total length of the key parts in key_infix. @@ -13348,7 +13506,6 @@ get_sel_arg_for_keypart(Field *nga_field, TRUE if the index passes the test FALSE o/w */ - static bool get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, KEY_PART_INFO *first_non_group_part, @@ -13368,32 +13525,42 @@ get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, { cur_range= NULL; /* - Find the range tree for the current keypart. We assume that - index_range_tree points to the first keypart in the index. + Check NGA3: + 1. get_sel_arg_for_keypart gets the range tree for the 'field' and also + checks for a unique conjunction of this tree with all the predicates + on the earlier keyparts in the index. + 2. Check for multiple ranges on the found keypart tree. + + We assume that index_range_tree points to the leftmost keypart in + the index. 
*/ - if(get_sel_arg_for_keypart(cur_part->field, index_range_tree, &cur_range)) + if (get_sel_arg_for_keypart(cur_part->field, index_range_tree, + &cur_range)) + return false; + + if (cur_range && cur_range->elements > 1) return false; if (!cur_range || cur_range->type != SEL_ARG::KEY_RANGE) { if (min_max_arg_part) - return FALSE; /* The current keypart has no range predicates at all. */ + return false; /* The current keypart has no range predicates at all. */ else { *first_non_infix_part= cur_part; - return TRUE; + return true; } } if ((cur_range->min_flag & NO_MIN_RANGE) || (cur_range->max_flag & NO_MAX_RANGE) || (cur_range->min_flag & NEAR_MIN) || (cur_range->max_flag & NEAR_MAX)) - return FALSE; + return false; uint field_length= cur_part->store_length; if (cur_range->maybe_null && cur_range->min_value[0] && cur_range->max_value[0]) - { + { /* cur_range specifies 'IS NULL'. In this case the argument points to a "null value" (is_null_string) that may not always be long @@ -13412,7 +13579,7 @@ get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, *key_infix_len+= field_length; } else - return FALSE; + return false; } if (!min_max_arg_part && (cur_part == last_part)) diff --git a/sql/opt_range.h b/sql/opt_range.h index f602408ea82..a5488d6124d 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -389,6 +389,13 @@ public: Returns a QUICK_SELECT with reverse order of to the index. */ virtual QUICK_SELECT_I *make_reverse(uint used_key_parts_arg) { return NULL; } + + /* + Add the key columns used by the quick select into table's read set. + + This is used by an optimization in filesort. + */ + virtual void add_used_key_part_to_set(MY_BITMAP *col_set)=0; }; @@ -479,6 +486,9 @@ public: #endif virtual void replace_handler(handler *new_file) { file= new_file; } QUICK_SELECT_I *make_reverse(uint used_key_parts_arg); + + virtual void add_used_key_part_to_set(MY_BITMAP *col_set); + private: /* Default copy ctor used by QUICK_SELECT_DESC */ friend class TRP_ROR_INTERSECT; @@ -640,6 +650,8 @@ public: virtual int read_keys_and_merge()= 0; /* used to get rows collected in Unique */ READ_RECORD read_record; + + virtual void add_used_key_part_to_set(MY_BITMAP *col_set); }; @@ -714,6 +726,7 @@ public: void add_keys_and_lengths(String *key_names, String *used_lengths); Explain_quick_select *get_explain(MEM_ROOT *alloc); bool is_keys_used(const MY_BITMAP *fields); + void add_used_key_part_to_set(MY_BITMAP *col_set); #ifndef DBUG_OFF void dbug_dump(int indent, bool verbose); #endif @@ -793,6 +806,7 @@ public: void add_keys_and_lengths(String *key_names, String *used_lengths); Explain_quick_select *get_explain(MEM_ROOT *alloc); bool is_keys_used(const MY_BITMAP *fields); + void add_used_key_part_to_set(MY_BITMAP *col_set); #ifndef DBUG_OFF void dbug_dump(int indent, bool verbose); #endif @@ -935,6 +949,7 @@ public: bool unique_key_range() { return false; } int get_type() { return QS_TYPE_GROUP_MIN_MAX; } void add_keys_and_lengths(String *key_names, String *used_lengths); + void add_used_key_part_to_set(MY_BITMAP *col_set); #ifndef DBUG_OFF void dbug_dump(int indent, bool verbose); #endif diff --git a/sql/records.cc b/sql/records.cc index 0c6ecca9a58..242bf8dc3b2 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -287,8 +287,7 @@ bool init_read_record(READ_RECORD *info,THD *thd, TABLE *table, thd->variables.read_buff_size); } /* Condition pushdown to storage engine */ - if ((table->file->ha_table_flags() & - HA_CAN_TABLE_CONDITION_PUSHDOWN) && + if ((table->file->ha_table_flags() & 
HA_CAN_TABLE_CONDITION_PUSHDOWN) && select && select->cond && (select->cond->used_tables() & table->map) && !table->file->pushed_cond) diff --git a/sql/replication.h b/sql/replication.h index 510e56a3085..9f9cc9eadfc 100644 --- a/sql/replication.h +++ b/sql/replication.h @@ -16,6 +16,20 @@ #ifndef REPLICATION_H #define REPLICATION_H +/*************************************************************************** + NOTE: plugin locking. + This API was created specifically for the semisync plugin and its locking + logic is also matches semisync plugin usage pattern. In particular, a plugin + is locked on Binlog_transmit_observer::transmit_start and is unlocked after + Binlog_transmit_observer::transmit_stop. All other master observable events + happen between these two and don't lock the plugin at all. This works well + for the semisync_master plugin. + + Also a plugin is locked on Binlog_relay_IO_observer::thread_start + and unlocked after Binlog_relay_IO_observer::thread_stop. This works well for + the semisync_slave plugin. +***************************************************************************/ + #include <mysql.h> typedef struct st_mysql MYSQL; diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index 105bdad6f97..c8d5e2a2db0 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -65,16 +65,16 @@ rpl_slave_state::update_state_hash(uint64 sub_id, rpl_gtid *gtid, int rpl_slave_state::record_and_update_gtid(THD *thd, rpl_group_info *rgi) { - uint64 sub_id; DBUG_ENTER("rpl_slave_state::record_and_update_gtid"); /* Update the GTID position, if we have it and did not already update it in a GTID transaction. */ - if ((sub_id= rgi->gtid_sub_id)) + if (rgi->gtid_pending) { - rgi->gtid_sub_id= 0; + uint64 sub_id= rgi->gtid_sub_id; + rgi->gtid_pending= false; if (rgi->gtid_ignore_duplicate_state!=rpl_group_info::GTID_DUPLICATE_IGNORE) { if (record_gtid(thd, &rgi->current_gtid, sub_id, false, false)) @@ -120,7 +120,7 @@ rpl_slave_state::check_duplicate_gtid(rpl_gtid *gtid, rpl_group_info *rgi) uint32 seq_no= gtid->seq_no; rpl_slave_state::element *elem; int res; - bool did_enter_cond; + bool did_enter_cond= false; PSI_stage_info old_stage; THD *thd; Relay_log_info *rli= rgi->rli; @@ -138,7 +138,6 @@ rpl_slave_state::check_duplicate_gtid(rpl_gtid *gtid, rpl_group_info *rgi) each lock release and re-take. */ - did_enter_cond= false; for (;;) { if (elem->highest_seq_no >= seq_no) @@ -667,7 +666,7 @@ end: if (table_opened) { - if (err) + if (err || (err= ha_commit_trans(thd, FALSE))) { /* If error, we need to put any remaining elist back into the HASH so we @@ -681,13 +680,8 @@ end: } ha_rollback_trans(thd, FALSE); - close_thread_tables(thd); - } - else - { - ha_commit_trans(thd, FALSE); - close_thread_tables(thd); } + close_thread_tables(thd); if (in_transaction) thd->mdl_context.release_statement_locks(); else diff --git a/sql/rpl_handler.cc b/sql/rpl_handler.cc index a706fcd37ee..34d3df23435 100644 --- a/sql/rpl_handler.cc +++ b/sql/rpl_handler.cc @@ -170,40 +170,16 @@ void delegates_destroy() /* This macro is used by almost all the Delegate methods to iterate over all the observers running given callback function of the - delegate . - - Add observer plugins to the thd->lex list, after each statement, all - plugins add to thd->lex will be automatically unlocked. + delegate. 
*/ -#define FOREACH_OBSERVER(r, f, thd, args) \ +#define FOREACH_OBSERVER(r, f, do_lock, args) \ param.server_id= thd->variables.server_id; \ - /* - Use a struct to make sure that they are allocated adjacent, check - delete_dynamic(). - */ \ - struct { \ - DYNAMIC_ARRAY plugins; \ - /* preallocate 8 slots */ \ - plugin_ref plugins_buffer[8]; \ - } s; \ - DYNAMIC_ARRAY *plugins= &s.plugins; \ - plugin_ref *plugins_buffer= s.plugins_buffer; \ - init_dynamic_array2(plugins, sizeof(plugin_ref), \ - plugins_buffer, 8, 8, MYF(0)); \ read_lock(); \ Observer_info_iterator iter= observer_info_iter(); \ Observer_info *info= iter++; \ for (; info; info= iter++) \ { \ - plugin_ref plugin= \ - my_plugin_lock(0, info->plugin); \ - if (!plugin) \ - { \ - /* plugin is not intialized or deleted, this is not an error */ \ - r= 0; \ - break; \ - } \ - insert_dynamic(plugins, (uchar *)&plugin); \ + if (do_lock) plugin_lock(thd, plugin_int_to_ref(info->plugin_int)); \ if (((Observer *)info->observer)->f \ && ((Observer *)info->observer)->f args) \ { \ @@ -213,17 +189,7 @@ void delegates_destroy() break; \ } \ } \ - unlock(); \ - /* - Unlock plugins should be done after we released the Delegate lock - to avoid possible deadlock when this is the last user of the - plugin, and when we unlock the plugin, it will try to - deinitialize the plugin, which will try to lock the Delegate in - order to remove the observers. - */ \ - plugin_unlock_list(0, (plugin_ref*)plugins->buffer, \ - plugins->elements); \ - delete_dynamic(plugins) + unlock(); int Trans_delegate::after_commit(THD *thd, bool all) @@ -240,7 +206,7 @@ int Trans_delegate::after_commit(THD *thd, bool all) param.log_pos= log_info ? log_info->log_pos : 0; int ret= 0; - FOREACH_OBSERVER(ret, after_commit, thd, (&param)); + FOREACH_OBSERVER(ret, after_commit, false, (&param)); /* This is the end of a real transaction or autocommit statement, we @@ -268,7 +234,7 @@ int Trans_delegate::after_rollback(THD *thd, bool all) param.log_pos= log_info ?
log_info->log_pos : 0; int ret= 0; - FOREACH_OBSERVER(ret, after_rollback, thd, (&param)); + FOREACH_OBSERVER(ret, after_rollback, false, (&param)); /* This is the end of a real transaction or autocommit statement, we @@ -307,7 +273,7 @@ int Binlog_storage_delegate::after_flush(THD *thd, log_info->log_pos = log_pos; int ret= 0; - FOREACH_OBSERVER(ret, after_flush, thd, + FOREACH_OBSERVER(ret, after_flush, false, (&param, log_info->log_file, log_info->log_pos, flags)); return ret; } @@ -321,7 +287,7 @@ int Binlog_transmit_delegate::transmit_start(THD *thd, ushort flags, param.flags= flags; int ret= 0; - FOREACH_OBSERVER(ret, transmit_start, thd, (&param, log_file, log_pos)); + FOREACH_OBSERVER(ret, transmit_start, true, (&param, log_file, log_pos)); return ret; } @@ -331,7 +297,7 @@ int Binlog_transmit_delegate::transmit_stop(THD *thd, ushort flags) param.flags= flags; int ret= 0; - FOREACH_OBSERVER(ret, transmit_stop, thd, (&param)); + FOREACH_OBSERVER(ret, transmit_stop, false, (&param)); return ret; } @@ -356,13 +322,6 @@ int Binlog_transmit_delegate::reserve_header(THD *thd, ushort flags, Observer_info *info= iter++; for (; info; info= iter++) { - plugin_ref plugin= - my_plugin_lock(thd, info->plugin); - if (!plugin) - { - ret= 1; - break; - } hlen= 0; if (((Observer *)info->observer)->reserve_header && ((Observer *)info->observer)->reserve_header(&param, header, RESERVE_HEADER_SIZE, &hlen)) { ret= 1; - plugin_unlock(thd, plugin); break; } - plugin_unlock(thd, plugin); if (hlen == 0) continue; if (hlen > RESERVE_HEADER_SIZE || packet->append((char *)header, hlen)) @@ -396,7 +353,7 @@ int Binlog_transmit_delegate::before_send_event(THD *thd, ushort flags, param.flags= flags; int ret= 0; - FOREACH_OBSERVER(ret, before_send_event, thd, + FOREACH_OBSERVER(ret, before_send_event, false, (&param, (uchar *)packet->c_ptr(), packet->length(), log_file+dirname_length(log_file), log_pos)); @@ -410,7 +367,7 @@ int Binlog_transmit_delegate::after_send_event(THD *thd, ushort flags, param.flags= flags; int ret= 0; - FOREACH_OBSERVER(ret, after_send_event, thd, + FOREACH_OBSERVER(ret, after_send_event, false, (&param, packet->c_ptr(), packet->length())); return ret; } @@ -422,7 +379,7 @@ int Binlog_transmit_delegate::after_reset_master(THD *thd, ushort flags) param.flags= flags; int ret= 0; - FOREACH_OBSERVER(ret, after_reset_master, thd, (&param)); + FOREACH_OBSERVER(ret, after_reset_master, false, (&param)); return ret; } @@ -443,7 +400,7 @@ int Binlog_relay_IO_delegate::thread_start(THD *thd, Master_info *mi) init_param(&param, mi); int ret= 0; - FOREACH_OBSERVER(ret, thread_start, thd, (&param)); + FOREACH_OBSERVER(ret, thread_start, true, (&param)); return ret; } @@ -455,7 +412,7 @@ int Binlog_relay_IO_delegate::thread_stop(THD *thd, Master_info *mi) init_param(&param, mi); int ret= 0; - FOREACH_OBSERVER(ret, thread_stop, thd, (&param)); + FOREACH_OBSERVER(ret, thread_stop, false, (&param)); return ret; } @@ -467,7 +424,7 @@ int Binlog_relay_IO_delegate::before_request_transmit(THD *thd, init_param(&param, mi); int ret= 0; - FOREACH_OBSERVER(ret, before_request_transmit, thd, (&param, (uint32)flags)); + FOREACH_OBSERVER(ret, before_request_transmit, false, (&param, (uint32)flags)); return ret; } @@ -480,7 +437,7 @@ int Binlog_relay_IO_delegate::after_read_event(THD *thd, Master_info *mi, init_param(&param, mi); int ret= 0; - FOREACH_OBSERVER(ret, after_read_event, thd, + FOREACH_OBSERVER(ret, after_read_event, false, (&param, packet, len, event_buf, event_len)); return ret; } @@ -498,7 +455,7 @@ int
Binlog_relay_IO_delegate::after_queue_event(THD *thd, Master_info *mi, flags |= BINLOG_STORAGE_IS_SYNCED; int ret= 0; - FOREACH_OBSERVER(ret, after_queue_event, thd, + FOREACH_OBSERVER(ret, after_queue_event, false, (&param, event_buf, event_len, flags)); return ret; } @@ -510,7 +467,7 @@ int Binlog_relay_IO_delegate::after_reset_slave(THD *thd, Master_info *mi) init_param(&param, mi); int ret= 0; - FOREACH_OBSERVER(ret, after_reset_slave, thd, (&param)); + FOREACH_OBSERVER(ret, after_reset_slave, false, (&param)); return ret; } #endif /* HAVE_REPLICATION */ diff --git a/sql/rpl_handler.h b/sql/rpl_handler.h index e028fb49808..e262ebdbd6b 100644 --- a/sql/rpl_handler.h +++ b/sql/rpl_handler.h @@ -26,13 +26,10 @@ class Observer_info { public: void *observer; st_plugin_int *plugin_int; - plugin_ref plugin; Observer_info(void *ob, st_plugin_int *p) :observer(ob), plugin_int(p) - { - plugin= plugin_int_to_ref(plugin_int); - } + { } }; class Delegate { diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index 977dec96982..055dd09ac5c 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -38,6 +38,7 @@ Master_info::Master_info(LEX_STRING *connection_name_arg, connect_retry(DEFAULT_CONNECT_RETRY), inited(0), abort_slave(0), slave_running(0), slave_run_id(0), sync_counter(0), heartbeat_period(0), received_heartbeats(0), master_id(0), + prev_master_id(0), using_gtid(USE_GTID_NO), events_queued_since_last_gtid(0), gtid_reconnect_event_skip_count(0), gtid_event_seen(false) { @@ -890,6 +891,9 @@ bool Master_info_index::init_all_master_info() File index_file_nr; DBUG_ENTER("init_all_master_info"); + mysql_mutex_assert_owner(&LOCK_active_mi); + DBUG_ASSERT(master_info_index); + if ((index_file_nr= my_open(index_file_name, O_RDWR | O_CREAT | O_BINARY , MYF(MY_WME | ME_NOREFRESH))) < 0 || @@ -1089,6 +1093,10 @@ Master_info_index::get_master_info(const LEX_STRING *connection_name, ("connection_name: '%.*s'", (int) connection_name->length, connection_name->str)); + mysql_mutex_assert_owner(&LOCK_active_mi); + if (!this) // master_info_index is set to NULL on server shutdown + return NULL; + /* Make name lower case for comparison */ res= strmake(buff, connection_name->str, connection_name->length); my_casedn_str(system_charset_info, buff); @@ -1116,6 +1124,9 @@ bool Master_info_index::check_duplicate_master_info(LEX_STRING *name_arg, Master_info *mi; DBUG_ENTER("check_duplicate_master_info"); + mysql_mutex_assert_owner(&LOCK_active_mi); + DBUG_ASSERT(master_info_index); + /* Get full host and port name */ if ((mi= master_info_index->get_master_info(name_arg, Sql_condition::WARN_LEVEL_NOTE))) @@ -1238,6 +1249,8 @@ bool Master_info_index::give_error_if_slave_running() { DBUG_ENTER("warn_if_slave_running"); mysql_mutex_assert_owner(&LOCK_active_mi); + if (!this) // master_info_index is set to NULL on server shutdown + return TRUE; for (uint i= 0; i< master_info_hash.records; ++i) { diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h index f20c2e21a5f..7dbe6d9b534 100644 --- a/sql/rpl_mi.h +++ b/sql/rpl_mi.h @@ -136,6 +136,12 @@ class Master_info : public Slave_reporting_capability DYNAMIC_ARRAY ignore_server_ids; ulong master_id; /* + At reconnect and until the first rotate event is seen, prev_master_id is + the value of master_id during the previous connection, used to detect + silent change of master server during reconnects. + */ + ulong prev_master_id; + /* Which kind of GTID position (if any) is used when connecting to master.
Note that you can not change the numeric values of these, they are used diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 90ee2360eb7..9b91206ca75 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -4,18 +4,8 @@ #include "rpl_mi.h" #include "debug_sync.h" - /* Code for optional parallel execution of replicated events on the slave. - - ToDo list: - - - Retry of failed transactions is not yet implemented for the parallel case. - - - All the waits (eg. in struct wait_for_commit and in - rpl_parallel_thread_pool::get_thread()) need to be killable. And on kill, - everything needs to be correctly rolled back and stopped in all threads, - to ensure a consistent slave replication state. */ struct rpl_parallel_thread_pool global_rpl_thread_pool; @@ -31,20 +21,22 @@ rpt_handle_event(rpl_parallel_thread::queued_event *qev, rpl_group_info *rgi= qev->rgi; Relay_log_info *rli= rgi->rli; THD *thd= rgi->thd; + Log_event *ev; + + DBUG_ASSERT(qev->typ == rpl_parallel_thread::queued_event::QUEUED_EVENT); + ev= qev->ev; - thd->rgi_slave= rgi; thd->system_thread_info.rpl_sql_info->rpl_filter = rli->mi->rpl_filter; + ev->thd= thd; - /* ToDo: Access to thd, and what about rli, split out a parallel part? */ - mysql_mutex_lock(&rli->data_lock); - qev->ev->thd= thd; strcpy(rgi->event_relay_log_name_buf, qev->event_relay_log_name); rgi->event_relay_log_name= rgi->event_relay_log_name_buf; rgi->event_relay_log_pos= qev->event_relay_log_pos; rgi->future_event_relay_log_pos= qev->future_event_relay_log_pos; strcpy(rgi->future_event_master_log_name, qev->future_event_master_log_name); - err= apply_event_and_update_pos(qev->ev, thd, rgi, rpt); - thd->rgi_slave= NULL; + mysql_mutex_lock(&rli->data_lock); + /* Mutex will be released in apply_event_and_update_pos(). */ + err= apply_event_and_update_pos(ev, thd, rgi, rpt); thread_safe_increment64(&rli->executed_entries, &slave_executed_entries_lock); @@ -58,6 +50,8 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev) { int cmp; Relay_log_info *rli; + rpl_parallel_entry *e; + /* Events that are not part of an event group, such as Format Description, Stop, GTID List and such, are executed directly in the driver SQL thread, @@ -68,6 +62,13 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev) if ((thd->variables.option_bits & OPTION_BEGIN) && opt_using_transactions) return; + + /* Do not update position if an earlier event group caused an error abort. 
*/ + DBUG_ASSERT(qev->typ == rpl_parallel_thread::queued_event::QUEUED_POS_UPDATE); + e= qev->entry_for_queued; + if (e->stop_on_error_sub_id < (uint64)ULONGLONG_MAX || e->force_abort) + return; + rli= qev->rgi->rli; mysql_mutex_lock(&rli->data_lock); cmp= strcmp(rli->group_relay_log_name, qev->event_relay_log_name); @@ -165,6 +166,7 @@ finish_event_group(THD *thd, uint64 sub_id, rpl_parallel_entry *entry, mysql_mutex_unlock(&entry->LOCK_parallel_entry); thd->clear_error(); + thd->reset_killed(); thd->get_stmt_da()->reset_diagnostics_area(); wfc->wakeup_subsequent_commits(rgi->worker_error); } @@ -197,6 +199,290 @@ unlock_or_exit_cond(THD *thd, mysql_mutex_t *lock, bool *did_enter_cond, } +static void +register_wait_for_prior_event_group_commit(rpl_group_info *rgi, + rpl_parallel_entry *entry) +{ + mysql_mutex_assert_owner(&entry->LOCK_parallel_entry); + if (rgi->wait_commit_sub_id > entry->last_committed_sub_id) + { + /* + Register that the commit of this event group must wait for the + commit of the previous event group to complete before it may + complete itself, so that we preserve commit order. + */ + wait_for_commit *waitee= + &rgi->wait_commit_group_info->commit_orderer; + rgi->commit_orderer.register_wait_for_prior_commit(waitee); + } +} + + +#ifndef DBUG_OFF +static int +dbug_simulate_tmp_error(rpl_group_info *rgi, THD *thd) +{ + if (rgi->current_gtid.domain_id == 0 && rgi->current_gtid.seq_no == 100 && + rgi->retry_event_count == 4) + { + thd->clear_error(); + thd->get_stmt_da()->reset_diagnostics_area(); + my_error(ER_LOCK_DEADLOCK, MYF(0)); + return 1; + } + return 0; +} +#endif + + +/* + If we detect a deadlock due to eg. storage engine locks that conflict with + the fixed commit order, then the later transaction will be killed + asynchroneously to allow the former to complete its commit. + + In this case, we convert the 'killed' error into a deadlock error, and retry + the later transaction. */ +static void +convert_kill_to_deadlock_error(rpl_group_info *rgi) +{ + THD *thd= rgi->thd; + int err_code; + + if (!thd->get_stmt_da()->is_error()) + return; + err_code= thd->get_stmt_da()->sql_errno(); + if ((err_code == ER_QUERY_INTERRUPTED || err_code == ER_CONNECTION_KILLED) && + rgi->killed_for_retry) + { + thd->clear_error(); + my_error(ER_LOCK_DEADLOCK, MYF(0)); + rgi->killed_for_retry= false; + thd->reset_killed(); + } +} + + +static bool +is_group_ending(Log_event *ev, Log_event_type event_type) +{ + return event_type == XID_EVENT || + (event_type == QUERY_EVENT && + (((Query_log_event *)ev)->is_commit() || + ((Query_log_event *)ev)->is_rollback())); +} + + +static int +retry_event_group(rpl_group_info *rgi, rpl_parallel_thread *rpt, + rpl_parallel_thread::queued_event *orig_qev) +{ + IO_CACHE rlog; + LOG_INFO linfo; + File fd= (File)-1; + const char *errmsg= NULL; + inuse_relaylog *ir= rgi->relay_log; + uint64 event_count; + uint64 events_to_execute= rgi->retry_event_count; + Relay_log_info *rli= rgi->rli; + int err; + ulonglong cur_offset, old_offset; + char log_name[FN_REFLEN]; + THD *thd= rgi->thd; + rpl_parallel_entry *entry= rgi->parallel_entry; + ulong retries= 0; + +do_retry: + event_count= 0; + err= 0; + + /* + If we already started committing before getting the deadlock (or other + error) that caused us to need to retry, we have already signalled + subsequent transactions that we have started committing. 
This is + potentially a problem, as now we will rollback, and if subsequent + transactions would start to execute now, they could see an unexpected + state of the database and get eg. key not found or duplicate key error. + + However, to get a deadlock in the first place, there must have been + another earlier transaction that is waiting for us. Thus that other + transaction has _not_ yet started to commit, and any subsequent + transactions will still be waiting at this point. + + So here, we decrement back the count of transactions that started + committing (if we already incremented it), undoing the effect of an + earlier mark_start_commit(). Then later, when the retry succeeds and we + commit again, we can do a new mark_start_commit() and eventually wake up + subsequent transactions at the proper time. + + We need to do the unmark before the rollback, to be sure that the + transaction we deadlocked with will not signal that it started to commit + until after the unmark. + */ + rgi->unmark_start_commit(); + + /* + We might get the deadlock error that causes the retry during commit, while + sitting in wait_for_prior_commit(). If this happens, we will have a + pending error in the wait_for_commit object. So clear this by + unregistering (and later re-registering) the wait. + */ + if(thd->wait_for_commit_ptr) + thd->wait_for_commit_ptr->unregister_wait_for_prior_commit(); + rgi->cleanup_context(thd, 1); + + /* + If we retry due to a deadlock kill that occured during the commit step, we + might have already updated (but not committed) an update of table + mysql.gtid_slave_pos, and cleared the gtid_pending flag. Now we have + rolled back any such update, so we must set the gtid_pending flag back to + true so that we will do a new update when/if we succeed with the retry. + */ + rgi->gtid_pending= true; + + mysql_mutex_lock(&rli->data_lock); + ++rli->retried_trans; + statistic_increment(slave_retried_transactions, LOCK_status); + mysql_mutex_unlock(&rli->data_lock); + + mysql_mutex_lock(&entry->LOCK_parallel_entry); + register_wait_for_prior_event_group_commit(rgi, entry); + mysql_mutex_unlock(&entry->LOCK_parallel_entry); + + strmake_buf(log_name, ir->name); + if ((fd= open_binlog(&rlog, log_name, &errmsg)) <0) + { + err= 1; + goto err; + } + cur_offset= rgi->retry_start_offset; + my_b_seek(&rlog, cur_offset); + + do + { + Log_event_type event_type; + Log_event *ev; + rpl_parallel_thread::queued_event *qev; + + /* The loop is here so we can try again the next relay log file on EOF. */ + for (;;) + { + old_offset= cur_offset; + ev= Log_event::read_log_event(&rlog, 0, + rli->relay_log.description_event_for_exec /* ToDo: this needs fixing */, + opt_slave_sql_verify_checksum); + cur_offset= my_b_tell(&rlog); + + if (ev) + break; + if (rlog.error < 0) + { + errmsg= "slave SQL thread aborted because of I/O error"; + err= 1; + goto err; + } + if (rlog.error > 0) + { + sql_print_error("Slave SQL thread: I/O error reading " + "event(errno: %d cur_log->error: %d)", + my_errno, rlog.error); + errmsg= "Aborting slave SQL thread because of partial event read"; + err= 1; + goto err; + } + /* EOF. Move to the next relay log. */ + end_io_cache(&rlog); + mysql_file_close(fd, MYF(MY_WME)); + fd= (File)-1; + + /* Find the next relay log file. 
*/ + if((err= rli->relay_log.find_log_pos(&linfo, log_name, 1)) || + (err= rli->relay_log.find_next_log(&linfo, 1))) + { + char buff[22]; + sql_print_error("next log error: %d offset: %s log: %s", + err, + llstr(linfo.index_file_offset, buff), + log_name); + goto err; + } + strmake_buf(log_name ,linfo.log_file_name); + + if ((fd= open_binlog(&rlog, log_name, &errmsg)) <0) + { + err= 1; + goto err; + } + /* Loop to try again on the new log file. */ + } + + event_type= ev->get_type_code(); + if (!Log_event::is_group_event(event_type)) + { + delete ev; + continue; + } + ev->thd= thd; + + mysql_mutex_lock(&rpt->LOCK_rpl_thread); + qev= rpt->retry_get_qev(ev, orig_qev, log_name, cur_offset, + cur_offset - old_offset); + mysql_mutex_unlock(&rpt->LOCK_rpl_thread); + if (!qev) + { + delete ev; + my_error(ER_OUT_OF_RESOURCES, MYF(0)); + err= 1; + goto err; + } + if (is_group_ending(ev, event_type)) + rgi->mark_start_commit(); + + err= rpt_handle_event(qev, rpt); + ++event_count; + mysql_mutex_lock(&rpt->LOCK_rpl_thread); + rpt->free_qev(qev); + mysql_mutex_unlock(&rpt->LOCK_rpl_thread); + + delete_or_keep_event_post_apply(rgi, event_type, ev); + DBUG_EXECUTE_IF("rpl_parallel_simulate_double_temp_err_gtid_0_x_100", + if (retries == 0) err= dbug_simulate_tmp_error(rgi, thd);); + DBUG_EXECUTE_IF("rpl_parallel_simulate_infinite_temp_err_gtid_0_x_100", + err= dbug_simulate_tmp_error(rgi, thd);); + if (err) + { + convert_kill_to_deadlock_error(rgi); + if (has_temporary_error(thd)) + { + ++retries; + if (retries < slave_trans_retries) + { + end_io_cache(&rlog); + mysql_file_close(fd, MYF(MY_WME)); + fd= (File)-1; + goto do_retry; + } + sql_print_error("Slave worker thread retried transaction %lu time(s) " + "in vain, giving up. Consider raising the value of " + "the slave_transaction_retries variable.", + slave_trans_retries); + } + goto err; + } + } while (event_count < events_to_execute); + +err: + + if (fd >= 0) + { + end_io_cache(&rlog); + mysql_file_close(fd, MYF(MY_WME)); + } + if (errmsg) + sql_print_error("Error reading relay log event: %s", errmsg); + return err; +} + + pthread_handler_t handle_rpl_parallel_thread(void *arg) { @@ -215,6 +501,8 @@ handle_rpl_parallel_thread(void *arg) rpl_sql_thread_info sql_info(NULL); size_t total_event_size; int err; + inuse_relaylog *last_ir; + uint64 accumulated_ir_count; struct rpl_parallel_thread *rpt= (struct rpl_parallel_thread *)arg; @@ -244,39 +532,6 @@ handle_rpl_parallel_thread(void *arg) thd->set_time(); thd->variables.lock_wait_timeout= LONG_TIMEOUT; thd->system_thread_info.rpl_sql_info= &sql_info; - /* - For now, we need to run the replication parallel worker threads in - READ COMMITTED. This is needed because gap locks are not symmetric. - For example, a gap lock from a DELETE blocks an insert intention lock, - but not vice versa. So an INSERT followed by DELETE can group commit - on the master, but if we are unlucky with thread scheduling we can - then deadlock on the slave because the INSERT ends up waiting for a - gap lock from the DELETE (and the DELETE in turn waits for the INSERT - in wait_for_prior_commit()). See also MDEV-5914. - - It should be mostly safe to run in READ COMMITTED in the slave anyway. - The commit order is already fixed from on the master, so we do not - risk logging into the binlog in an incorrect order between worker - threads (one that would cause different results if executed on a - lower-level slave that uses this slave as a master). 
The only - potential problem is with transactions run in a different master - connection (using multi-source replication), or run directly on the - slave by an application; when using READ COMMITTED we are not - guaranteed serialisability of binlogged statements. - - In practice, this is unlikely to be an issue. In GTID mode, such - parallel transactions from multi-source or application must in any - case use a different replication domain, in which case binlog order - by definition must be independent between the different domain. Even - in non-GTID mode, normally one will assume that the external - transactions are not conflicting with those applied by the slave, so - that isolation level should make no difference. It would be rather - strange if the result of applying query events from one master would - depend on the timing and nature of other queries executed from - different multi-source connections or done directly on the slave by - an application. Still, something to be aware of. - */ - thd->variables.tx_isolation= ISO_READ_COMMITTED; mysql_mutex_lock(&rpt->LOCK_rpl_thread); rpt->thd= thd; @@ -323,7 +578,7 @@ handle_rpl_parallel_thread(void *arg) bool end_of_group, group_ending; total_event_size+= events->event_size; - if (!events->ev) + if (events->typ == rpl_parallel_thread::queued_event::QUEUED_POS_UPDATE) { handle_queued_pos_update(thd, events); events->next= qevs_to_free; @@ -331,8 +586,33 @@ handle_rpl_parallel_thread(void *arg) events= next; continue; } + else if (events->typ == + rpl_parallel_thread::queued_event::QUEUED_MASTER_RESTART) + { + if (in_event_group) + { + /* + Master restarted (crashed) in the middle of an event group. + So we need to roll back and discard that event group. + */ + group_rgi->cleanup_context(thd, 1); + in_event_group= false; + finish_event_group(thd, group_rgi->gtid_sub_id, + events->entry_for_queued, group_rgi); + + group_rgi->next= rgis_to_free; + rgis_to_free= group_rgi; + thd->rgi_slave= group_rgi= NULL; + } + + events->next= qevs_to_free; + qevs_to_free= events; + events= next; + continue; + } + DBUG_ASSERT(events->typ==rpl_parallel_thread::queued_event::QUEUED_EVENT); - group_rgi= rgi; + thd->rgi_slave= group_rgi= rgi; gco= rgi->gco; /* Handle a new event group, which will be initiated by a GTID event. */ if ((event_type= events->ev->get_type_code()) == GTID_EVENT) @@ -341,7 +621,6 @@ handle_rpl_parallel_thread(void *arg) PSI_stage_info old_stage; uint64 wait_count; - thd->tx_isolation= (enum_tx_isolation)thd->variables.tx_isolation; in_event_group= true; /* If the standalone flag is set, then this event group consists of a @@ -352,9 +631,7 @@ handle_rpl_parallel_thread(void *arg) (0 != (static_cast<Gtid_log_event *>(events->ev)->flags2 & Gtid_log_event::FL_STANDALONE)); - /* Save this, as it gets cleared when the event group commits. 
*/ event_gtid_sub_id= rgi->gtid_sub_id; - rgi->thd= thd; /* @@ -388,7 +665,7 @@ handle_rpl_parallel_thread(void *arg) { DEBUG_SYNC(thd, "rpl_parallel_start_waiting_for_prior_killed"); thd->send_kill_message(); - slave_output_error_info(rgi->rli, thd); + slave_output_error_info(rgi, thd); signal_error_to_sql_driver_thread(thd, rgi, 1); /* Even though we were killed, we need to continue waiting for the @@ -430,17 +707,9 @@ handle_rpl_parallel_thread(void *arg) if (unlikely(entry->stop_on_error_sub_id <= rgi->wait_commit_sub_id)) skip_event_group= true; - else if (rgi->wait_commit_sub_id > entry->last_committed_sub_id) - { - /* - Register that the commit of this event group must wait for the - commit of the previous event group to complete before it may - complete itself, so that we preserve commit order. - */ - wait_for_commit *waitee= - &rgi->wait_commit_group_info->commit_orderer; - rgi->commit_orderer.register_wait_for_prior_commit(waitee); - } + else + register_wait_for_prior_event_group_commit(rgi, entry); + unlock_or_exit_cond(thd, &entry->LOCK_parallel_entry, &did_enter_cond, &old_stage); @@ -467,7 +736,7 @@ handle_rpl_parallel_thread(void *arg) if (res < 0) { /* Error. */ - slave_output_error_info(rgi->rli, thd); + slave_output_error_info(rgi, thd); signal_error_to_sql_driver_thread(thd, rgi, 1); } else if (!res) @@ -482,11 +751,8 @@ handle_rpl_parallel_thread(void *arg) } } - group_ending= event_type == XID_EVENT || - (event_type == QUERY_EVENT && - (((Query_log_event *)events->ev)->is_commit() || - ((Query_log_event *)events->ev)->is_rollback())); - if (group_ending) + group_ending= is_group_ending(events->ev, event_type); + if (group_ending && likely(!rgi->worker_error)) { DEBUG_SYNC(thd, "rpl_parallel_before_mark_start_commit"); rgi->mark_start_commit(); @@ -498,24 +764,42 @@ handle_rpl_parallel_thread(void *arg) processing between the event groups as a simple way to ensure that everything is stopped and cleaned up correctly. 
*/ - if (!rgi->worker_error && !skip_event_group) + if (likely(!rgi->worker_error) && !skip_event_group) + { + ++rgi->retry_event_count; err= rpt_handle_event(events, rpt); + delete_or_keep_event_post_apply(rgi, event_type, events->ev); + DBUG_EXECUTE_IF("rpl_parallel_simulate_temp_err_gtid_0_x_100", + err= dbug_simulate_tmp_error(rgi, thd);); + if (err) + { + convert_kill_to_deadlock_error(rgi); + if (has_temporary_error(thd) && slave_trans_retries > 0) + err= retry_event_group(rgi, rpt, events); + } + } else + { + delete events->ev; err= thd->wait_for_prior_commit(); + } end_of_group= in_event_group && ((group_standalone && !Log_event::is_part_of_group(event_type)) || group_ending); - delete_or_keep_event_post_apply(rgi, event_type, events->ev); events->next= qevs_to_free; qevs_to_free= events; - if (unlikely(err) && !rgi->worker_error) + if (unlikely(err)) { - slave_output_error_info(rgi->rli, thd); - signal_error_to_sql_driver_thread(thd, rgi, err); + if (!rgi->worker_error) + { + slave_output_error_info(rgi, thd); + signal_error_to_sql_driver_thread(thd, rgi, err); + } + thd->reset_killed(); } if (end_of_group) { @@ -523,7 +807,7 @@ handle_rpl_parallel_thread(void *arg) finish_event_group(thd, event_gtid_sub_id, entry, rgi); rgi->next= rgis_to_free; rgis_to_free= rgi; - group_rgi= rgi= NULL; + thd->rgi_slave= group_rgi= rgi= NULL; skip_event_group= false; DEBUG_SYNC(thd, "rpl_parallel_end_of_group"); } @@ -548,12 +832,34 @@ handle_rpl_parallel_thread(void *arg) rpt->free_rgi(rgis_to_free); rgis_to_free= next; } + last_ir= NULL; + accumulated_ir_count= 0; while (qevs_to_free) { rpl_parallel_thread::queued_event *next= qevs_to_free->next; + inuse_relaylog *ir= qevs_to_free->ir; + /* Batch up refcount update to reduce use of synchronised operations. 
*/ + if (last_ir != ir) + { + if (last_ir) + { + my_atomic_rwlock_wrlock(&last_ir->inuse_relaylog_atomic_lock); + my_atomic_add64(&last_ir->dequeued_count, accumulated_ir_count); + my_atomic_rwlock_wrunlock(&last_ir->inuse_relaylog_atomic_lock); + accumulated_ir_count= 0; + } + last_ir= ir; + } + ++accumulated_ir_count; rpt->free_qev(qevs_to_free); qevs_to_free= next; } + if (last_ir) + { + my_atomic_rwlock_wrlock(&last_ir->inuse_relaylog_atomic_lock); + my_atomic_add64(&last_ir->dequeued_count, accumulated_ir_count); + my_atomic_rwlock_wrunlock(&last_ir->inuse_relaylog_atomic_lock); + } if ((events= rpt->event_queue) != NULL) { @@ -584,7 +890,7 @@ handle_rpl_parallel_thread(void *arg) in_event_group= false; mysql_mutex_lock(&rpt->LOCK_rpl_thread); rpt->free_rgi(group_rgi); - group_rgi= NULL; + thd->rgi_slave= group_rgi= NULL; skip_event_group= false; } if (!in_event_group) @@ -802,8 +1108,7 @@ err: rpl_parallel_thread::queued_event * -rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size, - Relay_log_info *rli) +rpl_parallel_thread::get_qev_common(Log_event *ev, ulonglong event_size) { queued_event *qev; mysql_mutex_assert_owner(&LOCK_rpl_thread); @@ -814,9 +1119,21 @@ rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size, my_error(ER_OUTOFMEMORY, MYF(0), (int)sizeof(*qev)); return NULL; } + qev->typ= rpl_parallel_thread::queued_event::QUEUED_EVENT; qev->ev= ev; qev->event_size= event_size; qev->next= NULL; + return qev; +} + + +rpl_parallel_thread::queued_event * +rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size, + Relay_log_info *rli) +{ + queued_event *qev= get_qev_common(ev, event_size); + if (!qev) + return NULL; strcpy(qev->event_relay_log_name, rli->event_relay_log_name); qev->event_relay_log_pos= rli->event_relay_log_pos; qev->future_event_relay_log_pos= rli->future_event_relay_log_pos; @@ -825,6 +1142,24 @@ rpl_parallel_thread::get_qev(Log_event *ev, ulonglong event_size, } +rpl_parallel_thread::queued_event * +rpl_parallel_thread::retry_get_qev(Log_event *ev, queued_event *orig_qev, + const char *relay_log_name, + ulonglong event_pos, ulonglong event_size) +{ + queued_event *qev= get_qev_common(ev, event_size); + if (!qev) + return NULL; + qev->rgi= orig_qev->rgi; + strcpy(qev->event_relay_log_name, relay_log_name); + qev->event_relay_log_pos= event_pos; + qev->future_event_relay_log_pos= event_pos+event_size; + strcpy(qev->future_event_master_log_name, + orig_qev->future_event_master_log_name); + return qev; +} + + void rpl_parallel_thread::free_qev(rpl_parallel_thread::queued_event *qev) { @@ -836,7 +1171,7 @@ rpl_parallel_thread::free_qev(rpl_parallel_thread::queued_event *qev) rpl_group_info* rpl_parallel_thread::get_rgi(Relay_log_info *rli, Gtid_log_event *gtid_ev, - rpl_parallel_entry *e) + rpl_parallel_entry *e, ulonglong event_size) { rpl_group_info *rgi; mysql_mutex_assert_owner(&LOCK_rpl_thread); @@ -864,6 +1199,10 @@ rpl_parallel_thread::get_rgi(Relay_log_info *rli, Gtid_log_event *gtid_ev, return NULL; } rgi->parallel_entry= e; + rgi->relay_log= rli->last_inuse_relaylog; + rgi->retry_start_offset= rli->future_event_relay_log_pos-event_size; + rgi->retry_event_count= 0; + rgi->killed_for_retry= false; return rgi; } @@ -1018,10 +1357,11 @@ rpl_parallel_thread_pool::release_thread(rpl_parallel_thread *rpt) if it is still available. Otherwise a new worker thread is allocated. 
*/ rpl_parallel_thread * -rpl_parallel_entry::choose_thread(Relay_log_info *rli, bool *did_enter_cond, +rpl_parallel_entry::choose_thread(rpl_group_info *rgi, bool *did_enter_cond, PSI_stage_info *old_stage, bool reuse) { uint32 idx; + Relay_log_info *rli= rgi->rli; rpl_parallel_thread *thr; idx= rpl_thread_idx; @@ -1066,7 +1406,7 @@ rpl_parallel_entry::choose_thread(Relay_log_info *rli, bool *did_enter_cond, debug_sync_set_action(rli->sql_driver_thd, STRING_WITH_LEN("now SIGNAL wait_queue_killed")); };); - slave_output_error_info(rli, rli->sql_driver_thd); + slave_output_error_info(rgi, rli->sql_driver_thd); return NULL; } else @@ -1300,6 +1640,91 @@ rpl_parallel::workers_idle() } +int +rpl_parallel_entry::queue_master_restart(rpl_group_info *rgi, + Format_description_log_event *fdev) +{ + uint32 idx; + rpl_parallel_thread *thr; + rpl_parallel_thread::queued_event *qev; + Relay_log_info *rli= rgi->rli; + + /* + We only need to queue the server restart if we still have a thread working + on a (potentially partial) event group. + + If the last thread we queued for has finished, then it cannot have any + partial event group that needs aborting. + + Thus there is no need for the full complexity of choose_thread(). We only + need to check if we have a current worker thread, and queue for it if so. + */ + idx= rpl_thread_idx; + thr= rpl_threads[idx]; + if (!thr) + return 0; + mysql_mutex_lock(&thr->LOCK_rpl_thread); + if (thr->current_owner != &rpl_threads[idx]) + { + /* No active worker thread, so no need to queue the master restart. */ + mysql_mutex_unlock(&thr->LOCK_rpl_thread); + return 0; + } + + if (!(qev= thr->get_qev(fdev, 0, rli))) + { + mysql_mutex_unlock(&thr->LOCK_rpl_thread); + return 1; + } + + qev->rgi= rgi; + qev->typ= rpl_parallel_thread::queued_event::QUEUED_MASTER_RESTART; + qev->entry_for_queued= this; + qev->ir= rli->last_inuse_relaylog; + ++qev->ir->queued_count; + thr->enqueue(qev); + mysql_mutex_unlock(&thr->LOCK_rpl_thread); + return 0; +} + + +int +rpl_parallel::wait_for_workers_idle(THD *thd) +{ + uint32 i, max_i; + + /* + The domain_hash is only accessed by the SQL driver thread, so it is safe + to iterate over without a lock. + */ + max_i= domain_hash.records; + for (i= 0; i < max_i; ++i) + { + bool active; + wait_for_commit my_orderer; + struct rpl_parallel_entry *e; + + e= (struct rpl_parallel_entry *)my_hash_element(&domain_hash, i); + mysql_mutex_lock(&e->LOCK_parallel_entry); + if ((active= (e->current_sub_id > e->last_committed_sub_id))) + { + wait_for_commit *waitee= &e->current_group_info->commit_orderer; + my_orderer.register_wait_for_prior_commit(waitee); + thd->wait_for_commit_ptr= &my_orderer; + } + mysql_mutex_unlock(&e->LOCK_parallel_entry); + if (active) + { + int err= my_orderer.wait_for_prior_commit(thd); + thd->wait_for_commit_ptr= NULL; + if (err) + return err; + } + } + return 0; +} + + /* This is used when we get an error during processing in do_event(); We will not queue any event to the thread, but we still need to wake it up @@ -1367,6 +1792,33 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, /* ToDo: what to do with this lock?!? */ mysql_mutex_unlock(&rli->data_lock); + if (typ == FORMAT_DESCRIPTION_EVENT) + { + Format_description_log_event *fdev= + static_cast<Format_description_log_event *>(ev); + if (fdev->created) + { + /* + This format description event marks a new binlog after a master server + restart. We are going to close all temporary tables to clean up any + possible left-overs after a prior master crash. 
+ + Thus we need to wait for all prior events to execute to completion, + in case they need access to any of the temporary tables. + + We also need to notify the worker thread running the prior incomplete + event group (if any), as such event group signifies an incompletely + written group cut short by a master crash, and must be rolled back. + */ + if (current->queue_master_restart(serial_rgi, fdev) || + wait_for_workers_idle(rli->sql_driver_thd)) + { + delete ev; + return 1; + } + } + } + /* Stop queueing additional event groups once the SQL thread is requested to stop. @@ -1390,15 +1842,9 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, if (typ == GTID_EVENT) { - uint32 domain_id; - if (likely(typ == GTID_EVENT)) - { - Gtid_log_event *gtid_ev= static_cast<Gtid_log_event *>(ev); - domain_id= (rli->mi->using_gtid == Master_info::USE_GTID_NO ? - 0 : gtid_ev->domain_id); - } - else - domain_id= 0; + Gtid_log_event *gtid_ev= static_cast<Gtid_log_event *>(ev); + uint32 domain_id= (rli->mi->using_gtid == Master_info::USE_GTID_NO ? + 0 : gtid_ev->domain_id); if (!(e= find(domain_id))) { my_error(ER_OUT_OF_RESOURCES, MYF(MY_WME)); @@ -1417,7 +1863,8 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, instead re-use a thread that we queued for previously. */ cur_thread= - e->choose_thread(rli, &did_enter_cond, &old_stage, typ != GTID_EVENT); + e->choose_thread(serial_rgi, &did_enter_cond, &old_stage, + typ != GTID_EVENT); if (!cur_thread) { /* This means we were killed. The error is already signalled. */ @@ -1437,7 +1884,7 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, { Gtid_log_event *gtid_ev= static_cast<Gtid_log_event *>(ev); - if (!(rgi= cur_thread->get_rgi(rli, gtid_ev, e))) + if (!(rgi= cur_thread->get_rgi(rli, gtid_ev, e, event_size))) { cur_thread->free_qev(qev); abandon_worker_thread(rli->sql_driver_thd, cur_thread, @@ -1527,7 +1974,7 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, return 1; } /* - Queue an empty event, so that the position will be updated in a + Queue a position update, so that the position will be updated in a reasonable way relative to other events: - If the currently executing events are queued serially for a single @@ -1538,7 +1985,8 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, least the position will not be updated until one of them has reached the current point. */ - qev->ev= NULL; + qev->typ= rpl_parallel_thread::queued_event::QUEUED_POS_UPDATE; + qev->entry_for_queued= e; } else { @@ -1549,6 +1997,8 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev, Queue the event for processing. */ rli->event_relay_log_pos= rli->future_event_relay_log_pos; + qev->ir= rli->last_inuse_relaylog; + ++qev->ir->queued_count; cur_thread->enqueue(qev); unlock_or_exit_cond(rli->sql_driver_thd, &cur_thread->LOCK_rpl_thread, &did_enter_cond, &old_stage); diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h index c4bb407e5eb..b114ee4ebcb 100644 --- a/sql/rpl_parallel.h +++ b/sql/rpl_parallel.h @@ -9,6 +9,7 @@ struct rpl_parallel_entry; struct rpl_parallel_thread_pool; class Relay_log_info; +struct inuse_relaylog; /* @@ -71,8 +72,22 @@ struct rpl_parallel_thread { rpl_parallel_entry *current_entry; struct queued_event { queued_event *next; - Log_event *ev; + /* + queued_event can hold either an event to be executed, or just a binlog + position to be updated without any associated event. 
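A hypothetical consumer of such a queue entry dispatches on the tag before touching the union; a sketch only, not the actual worker loop:

void handle(rpl_parallel_thread::queued_event *qev)
{
  switch (qev->typ)
  {
  case rpl_parallel_thread::queued_event::QUEUED_EVENT:
    /* qev->ev is valid: apply the replication event. */
    break;
  case rpl_parallel_thread::queued_event::QUEUED_POS_UPDATE:
    /* qev->entry_for_queued is valid: only advance the recorded position. */
    break;
  case rpl_parallel_thread::queued_event::QUEUED_MASTER_RESTART:
    /* qev->entry_for_queued is valid: abort a partial event group cut short
       by a master restart. */
    break;
  }
}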
+ */ + enum queued_event_t { + QUEUED_EVENT, + QUEUED_POS_UPDATE, + QUEUED_MASTER_RESTART + } typ; + union { + Log_event *ev; /* QUEUED_EVENT */ + rpl_parallel_entry *entry_for_queued; /* QUEUED_POS_UPDATE and + QUEUED_MASTER_RESTART */ + }; rpl_group_info *rgi; + inuse_relaylog *ir; ulonglong future_event_relay_log_pos; char event_relay_log_name[FN_REFLEN]; char future_event_master_log_name[FN_REFLEN]; @@ -106,11 +121,15 @@ struct rpl_parallel_thread { queued_size-= dequeue_size; } + queued_event *get_qev_common(Log_event *ev, ulonglong event_size); queued_event *get_qev(Log_event *ev, ulonglong event_size, Relay_log_info *rli); + queued_event *retry_get_qev(Log_event *ev, queued_event *orig_qev, + const char *relay_log_name, + ulonglong event_pos, ulonglong event_size); void free_qev(queued_event *qev); rpl_group_info *get_rgi(Relay_log_info *rli, Gtid_log_event *gtid_ev, - rpl_parallel_entry *e); + rpl_parallel_entry *e, ulonglong event_size); void free_rgi(rpl_group_info *rgi); group_commit_orderer *get_gco(uint64 wait_count, group_commit_orderer *prev); void free_gco(group_commit_orderer *gco); @@ -176,7 +195,7 @@ struct rpl_parallel_entry { Event groups commit in order, so the rpl_group_info for an event group will be alive (at least) as long as - rpl_grou_info::gtid_sub_id > last_committed_sub_id. This can be used to + rpl_group_info::gtid_sub_id > last_committed_sub_id. This can be used to safely refer back to previous event groups if they are still executing, and ignore them if they completed, without requiring explicit synchronisation between the threads. @@ -208,10 +227,10 @@ struct rpl_parallel_entry { /* The group_commit_orderer object for the events currently being queued. */ group_commit_orderer *current_gco; - rpl_parallel_thread * choose_thread(Relay_log_info *rli, bool *did_enter_cond, + rpl_parallel_thread * choose_thread(rpl_group_info *rgi, bool *did_enter_cond, PSI_stage_info *old_stage, bool reuse); - group_commit_orderer *get_gco(); - void free_gco(group_commit_orderer *gco); + int queue_master_restart(rpl_group_info *rgi, + Format_description_log_event *fdev); }; struct rpl_parallel { HASH domain_hash; @@ -225,6 +244,7 @@ struct rpl_parallel { void wait_for_done(THD *thd, Relay_log_info *rli); void stop_during_until(); bool workers_idle(); + int wait_for_workers_idle(THD *thd); int do_event(rpl_group_info *serial_rgi, Log_event *ev, ulonglong event_size); }; diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc index b1cca04d947..5d1ef671159 100644 --- a/sql/rpl_record.cc +++ b/sql/rpl_record.cc @@ -332,6 +332,7 @@ unpack_row(rpl_group_info *rgi, } rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT, + rgi->gtid_info(), "Could not read field '%s' of table '%s.%s'", f->field_name, table->s->db.str, table->s->table_name.str); diff --git a/sql/rpl_record_old.cc b/sql/rpl_record_old.cc index 5afa529a63c..8b43b268c17 100644 --- a/sql/rpl_record_old.cc +++ b/sql/rpl_record_old.cc @@ -141,7 +141,7 @@ unpack_row_old(rpl_group_info *rgi, f->move_field_offset(-offset); if (!ptr) { - rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT, + rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT, NULL, "Could not read field `%s` of table `%s`.`%s`", f->field_name, table->s->db.str, table->s->table_name.str); @@ -183,7 +183,7 @@ unpack_row_old(rpl_group_info *rgi, if (event_type == WRITE_ROWS_EVENT && ((*field_ptr)->flags & mask) == mask) { - rgi->rli->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD, + rgi->rli->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD, NULL, "Field `%s` of table 
`%s`.`%s` " "has no default value and cannot be NULL", (*field_ptr)->field_name, table->s->db.str, diff --git a/sql/rpl_reporting.cc b/sql/rpl_reporting.cc index 96fe6242ac3..eb362941f3e 100644 --- a/sql/rpl_reporting.cc +++ b/sql/rpl_reporting.cc @@ -28,6 +28,7 @@ Slave_reporting_capability::Slave_reporting_capability(char const *thread_name) void Slave_reporting_capability::report(loglevel level, int err_code, + const char *extra_info, const char *msg, ...) const { void (*report_function)(const char *, ...); @@ -67,9 +68,10 @@ Slave_reporting_capability::report(loglevel level, int err_code, va_end(args); /* If the msg string ends with '.', do not add a ',' it would be ugly */ - report_function("Slave %s: %s%s Internal MariaDB error code: %d", + report_function("Slave %s: %s%s %s%sInternal MariaDB error code: %d", m_thread_name, pbuff, (pbuff[0] && *(strend(pbuff)-1) == '.') ? "" : ",", + (extra_info ? extra_info : ""), (extra_info ? ", " : ""), err_code); } diff --git a/sql/rpl_reporting.h b/sql/rpl_reporting.h index 2b5e0527b9b..d90b7ad6650 100644 --- a/sql/rpl_reporting.h +++ b/sql/rpl_reporting.h @@ -52,8 +52,9 @@ public: code, but can contain more information), in printf() format. */ - void report(loglevel level, int err_code, const char *msg, ...) const - ATTRIBUTE_FORMAT(printf, 4, 5); + void report(loglevel level, int err_code, const char *extra_info, + const char *msg, ...) const + ATTRIBUTE_FORMAT(printf, 5, 6); /** Clear errors. They will not show up under <code>SHOW SLAVE diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index a162d1d79f8..754b877f654 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -52,6 +52,7 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery) info_fd(-1), cur_log_fd(-1), relay_log(&sync_relaylog_period), sync_counter(0), is_relay_log_recovery(is_slave_recovery), save_temporary_tables(0), mi(0), + inuse_relaylog_list(0), last_inuse_relaylog(0), cur_log_old_open_count(0), group_relay_log_pos(0), event_relay_log_pos(0), #if HAVE_valgrind @@ -98,8 +99,18 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery) Relay_log_info::~Relay_log_info() { + inuse_relaylog *cur; DBUG_ENTER("Relay_log_info::~Relay_log_info"); + cur= inuse_relaylog_list; + while (cur) + { + DBUG_ASSERT(cur->queued_count == cur->dequeued_count); + inuse_relaylog *next= cur->next; + my_atomic_rwlock_destroy(&cur->inuse_relaylog_atomic_lock); + my_free(cur); + cur= next; + } mysql_mutex_destroy(&run_lock); mysql_mutex_destroy(&data_lock); mysql_mutex_destroy(&log_space_lock); @@ -305,20 +316,80 @@ Failed to open the existing relay log info file '%s' (errno %d)", } rli->info_fd = info_fd; - int relay_log_pos, master_log_pos; + int relay_log_pos, master_log_pos, lines; + char *first_non_digit; + /* + In MySQL 5.6, there is a MASTER_DELAY option to CHANGE MASTER. This is + not yet merged into MariaDB (as of 10.0.13). However, we detect the + presense of the new option in relay-log.info, as a placeholder for + possible later merge of the feature, and to maintain file format + compatibility with MySQL 5.6+. + */ + int dummy_sql_delay; + + /* + Starting from MySQL 5.6.x, relay-log.info has a new format. + Now, its first line contains the number of lines in the file. + By reading this number we can determine which version our master.info + comes from. We can't simply count the lines in the file, since + versions before 5.6.x could generate files with more lines than + needed. 
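For illustration only (the file names and numbers below are made up), the two layouts that must be told apart by their first line look roughly like this:

    pre-5.6 layout                   5.6-style layout
    ./slave-relay-bin.000003         5
    4                                ./slave-relay-bin.000003
    master-bin.000002                4
    107                              master-bin.000002
                                     107
                                     0        (MASTER_DELAY placeholder)

A first line that parses as a bare integer of at least LINES_IN_RELAY_LOG_INFO_WITH_DELAY selects the new layout; an empty first line, or a log name containing '.', selects the old one.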
If first line doesn't contain a number, or if it + contains a number less than LINES_IN_RELAY_LOG_INFO_WITH_DELAY, + then the file is treated like a file from pre-5.6.x version. + There is no ambiguity when reading an old master.info: before + 5.6.x, the first line contained the binlog's name, which is + either empty or has an extension (contains a '.'), so can't be + confused with an integer. + + So we're just reading first line and trying to figure which + version is this. + */ + + /* + The first row is temporarily stored in mi->master_log_name, if + it is line count and not binlog name (new format) it will be + overwritten by the second row later. + */ if (init_strvar_from_file(rli->group_relay_log_name, sizeof(rli->group_relay_log_name), + &rli->info_file, "")) + { + msg="Error reading slave log configuration"; + goto err; + } + + lines= strtoul(rli->group_relay_log_name, &first_non_digit, 10); + + if (rli->group_relay_log_name[0] != '\0' && + *first_non_digit == '\0' && + lines >= LINES_IN_RELAY_LOG_INFO_WITH_DELAY) + { + DBUG_PRINT("info", ("relay_log_info file is in new format.")); + /* Seems to be new format => read relay log name from next line */ + if (init_strvar_from_file(rli->group_relay_log_name, + sizeof(rli->group_relay_log_name), + &rli->info_file, "")) + { + msg="Error reading slave log configuration"; + goto err; + } + } + else + DBUG_PRINT("info", ("relay_log_info file is in old format.")); + + if (init_intvar_from_file(&relay_log_pos, + &rli->info_file, BIN_LOG_HEADER_SIZE) || + init_strvar_from_file(rli->group_master_log_name, + sizeof(rli->group_master_log_name), &rli->info_file, "") || - init_intvar_from_file(&relay_log_pos, - &rli->info_file, BIN_LOG_HEADER_SIZE) || - init_strvar_from_file(rli->group_master_log_name, - sizeof(rli->group_master_log_name), - &rli->info_file, "") || - init_intvar_from_file(&master_log_pos, &rli->info_file, 0)) + init_intvar_from_file(&master_log_pos, &rli->info_file, 0) || + (lines >= LINES_IN_RELAY_LOG_INFO_WITH_DELAY && + init_intvar_from_file(&dummy_sql_delay, &rli->info_file, 0))) { msg="Error reading slave log configuration"; goto err; } + strmake_buf(rli->event_relay_log_name,rli->group_relay_log_name); rli->group_relay_log_pos= rli->event_relay_log_pos= relay_log_pos; rli->group_master_log_pos= master_log_pos; @@ -1024,7 +1095,6 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset, DBUG_ASSERT(rli->slave_running == 0); DBUG_ASSERT(rli->mi->slave_running == 0); - rli->slave_skip_counter=0; mysql_mutex_lock(&rli->data_lock); /* @@ -1243,7 +1313,7 @@ void Relay_log_info::stmt_done(my_off_t event_master_log_pos, inc_group_relay_log_pos(event_master_log_pos, rgi); if (rpl_global_gtid_slave_state.record_and_update_gtid(thd, rgi)) { - report(WARNING_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, + report(WARNING_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, rgi->gtid_info(), "Failed to update GTID state in %s.%s, slave state may become " "inconsistent: %d: %s", "mysql", rpl_gtid_slave_state_table_name.str, @@ -1279,6 +1349,33 @@ void Relay_log_info::stmt_done(my_off_t event_master_log_pos, DBUG_VOID_RETURN; } + +int +Relay_log_info::alloc_inuse_relaylog(const char *name) +{ + inuse_relaylog *ir; + + if (!(ir= (inuse_relaylog *)my_malloc(sizeof(*ir), MYF(MY_WME|MY_ZEROFILL)))) + { + my_error(ER_OUTOFMEMORY, MYF(0), (int)sizeof(*ir)); + return 1; + } + strmake_buf(ir->name, name); + + if (!inuse_relaylog_list) + inuse_relaylog_list= ir; + else + { + last_inuse_relaylog->completed= true; + last_inuse_relaylog->next= ir; + } + 
last_inuse_relaylog= ir; + my_atomic_rwlock_init(&ir->inuse_relaylog_atomic_lock); + + return 0; +} + + #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) int rpl_load_gtid_slave_state(THD *thd) @@ -1465,6 +1562,9 @@ rpl_group_info::reinit(Relay_log_info *rli) tables_to_lock_count= 0; trans_retries= 0; last_event_start_time= 0; + gtid_sub_id= 0; + commit_id= 0; + gtid_pending= false; worker_error= 0; row_stmt_start_timestamp= 0; long_find_row_note_printed= false; @@ -1474,7 +1574,7 @@ rpl_group_info::reinit(Relay_log_info *rli) } rpl_group_info::rpl_group_info(Relay_log_info *rli) - : thd(0), gtid_sub_id(0), wait_commit_sub_id(0), + : thd(0), wait_commit_sub_id(0), wait_commit_group_info(0), parallel_entry(0), deferred_events(NULL), m_annotate_event(0), is_parallel_exec(false) { @@ -1505,9 +1605,11 @@ event_group_new_gtid(rpl_group_info *rgi, Gtid_log_event *gev) return 1; } rgi->gtid_sub_id= sub_id; - rgi->current_gtid.server_id= gev->server_id; rgi->current_gtid.domain_id= gev->domain_id; + rgi->current_gtid.server_id= gev->server_id; rgi->current_gtid.seq_no= gev->seq_no; + rgi->commit_id= gev->commit_id; + rgi->gtid_pending= true; return 0; } @@ -1563,7 +1665,7 @@ delete_or_keep_event_post_apply(rpl_group_info *rgi, void rpl_group_info::cleanup_context(THD *thd, bool error) { - DBUG_ENTER("Relay_log_info::cleanup_context"); + DBUG_ENTER("rpl_group_info::cleanup_context"); DBUG_PRINT("enter", ("error: %d", (int) error)); DBUG_ASSERT(this->thd == thd); @@ -1629,7 +1731,7 @@ void rpl_group_info::cleanup_context(THD *thd, bool error) void rpl_group_info::clear_tables_to_lock() { - DBUG_ENTER("Relay_log_info::clear_tables_to_lock()"); + DBUG_ENTER("rpl_group_info::clear_tables_to_lock()"); #ifndef DBUG_OFF /** When replicating in RBR and MyISAM Merge tables are involved @@ -1676,7 +1778,7 @@ void rpl_group_info::clear_tables_to_lock() void rpl_group_info::slave_close_thread_tables(THD *thd) { - DBUG_ENTER("Relay_log_info::slave_close_thread_tables(THD *thd)"); + DBUG_ENTER("rpl_group_info::slave_close_thread_tables(THD *thd)"); thd->get_stmt_da()->set_overwrite_status(true); thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd); thd->get_stmt_da()->set_overwrite_status(false); @@ -1745,6 +1847,54 @@ rpl_group_info::mark_start_commit() } +/* + Format the current GTID as a string suitable for printing in error messages. + + The string is stored in a buffer inside rpl_group_info, so remains valid + until next call to gtid_info() or until destruction of rpl_group_info. + + If no GTID is available, then NULL is returned. +*/ +char * +rpl_group_info::gtid_info() +{ + if (!gtid_sub_id || !current_gtid.seq_no) + return NULL; + my_snprintf(gtid_info_buf, sizeof(gtid_info_buf), "Gtid %u-%u-%llu", + current_gtid.domain_id, current_gtid.server_id, + current_gtid.seq_no); + return gtid_info_buf; +} + + +/* + Undo the effect of a prior mark_start_commit(). + + This is only used for retrying a transaction in parallel replication, after + we have encountered a deadlock or other temporary error. + + When we get such a deadlock, it means that the current group of transactions + did not yet all start committing (else they would not have deadlocked). So + we will not yet have woken up anything in the next group, our rgi->gco is + still live, and we can simply decrement the counter (to be incremented again + later, when the retry succeeds and reaches the commit step). 
+*/ +void +rpl_group_info::unmark_start_commit() +{ + rpl_parallel_entry *e; + + if (!did_mark_start_commit) + return; + + e= this->parallel_entry; + mysql_mutex_lock(&e->LOCK_parallel_entry); + --e->count_committing_event_groups; + mysql_mutex_unlock(&e->LOCK_parallel_entry); + did_mark_start_commit= false; +} + + rpl_sql_thread_info::rpl_sql_thread_info(Rpl_filter *filter) : rpl_filter(filter) { diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h index 137571ab820..3a8d87030ad 100644 --- a/sql/rpl_rli.h +++ b/sql/rpl_rli.h @@ -28,6 +28,12 @@ struct RPL_TABLE_LIST; class Master_info; class Rpl_filter; + +enum { + LINES_IN_RELAY_LOG_INFO_WITH_DELAY= 5 +}; + + /**************************************************************************** Replication SQL Thread @@ -55,6 +61,7 @@ class Rpl_filter; *****************************************************************************/ struct rpl_group_info; +struct inuse_relaylog; class Relay_log_info : public Slave_reporting_capability { @@ -158,6 +165,13 @@ public: Master_info *mi; /* + List of active relay log files. + (This can be more than one in case of parallel replication). + */ + inuse_relaylog *inuse_relaylog_list; + inuse_relaylog *last_inuse_relaylog; + + /* Needed to deal properly with cur_log getting closed and re-opened with a different log under our feet */ @@ -237,10 +251,11 @@ public: errors, and have been manually applied by DBA already. Must be ulong as it's refered to from set_var.cc */ - volatile ulong slave_skip_counter; + volatile ulonglong slave_skip_counter; + ulonglong max_relay_log_size; + volatile ulong abort_pos_wait; /* Incremented on change master */ volatile ulong slave_run_id; /* Incremented on slave start */ - ulong max_relay_log_size; mysql_mutex_t log_space_lock; mysql_cond_t log_space_cond; /* @@ -392,6 +407,7 @@ public: void stmt_done(my_off_t event_log_pos, time_t event_creation_time, THD *thd, rpl_group_info *rgi); + int alloc_inuse_relaylog(const char *name); /** Is the replication inside a group? @@ -458,6 +474,41 @@ private: /* + In parallel replication, if we need to re-try a transaction due to a + deadlock or other temporary error, we may need to go back and re-read events + out of an earlier relay log. + + This structure keeps track of the relaylogs that are potentially in use. + Each rpl_group_info has a pointer to one of those, corresponding to the + first GTID event. + + A pair of reference count keeps track of how long a relay log is potentially + in use. When the `completed' flag is set, all events have been read out of + the relay log, but the log might still be needed for retry in worker + threads. As worker threads complete an event group, they increment + atomically the `dequeued_count' with number of events queued. Thus, when + completed is set and dequeued_count equals queued_count, the relay log file + is finally done with and can be purged. + + By separating the queued and dequeued count, only the dequeued_count needs + multi-thread synchronisation; the completed flag and queued_count fields + are only accessed by the SQL driver thread and need no synchronisation. +*/ +struct inuse_relaylog { + inuse_relaylog *next; + /* Number of events in this relay log queued for worker threads. */ + int64 queued_count; + /* Number of events completed by worker threads. */ + volatile int64 dequeued_count; + /* Set when all events have been read from a relaylog. 
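The lifetime rule described above reduces to a single predicate; a sketch, not the actual purge code (a real reader would go through the my_atomic_* helpers for dequeued_count):

static bool relaylog_done_with(const inuse_relaylog *ir)
{
  return ir->completed && ir->dequeued_count == ir->queued_count;
}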
*/ + bool completed; + char name[FN_REFLEN]; + /* Lock used to protect inuse_relaylog::dequeued_count */ + my_atomic_rwlock_t inuse_relaylog_atomic_lock; +}; + + +/* This is data for various state needed to be kept for the processing of one event group (transaction) during replication. @@ -483,6 +534,7 @@ struct rpl_group_info */ uint64 gtid_sub_id; rpl_gtid current_gtid; + uint64 commit_id; /* This is used to keep transaction commit order. We will signal this when we commit, and can register it to wait for the @@ -560,6 +612,8 @@ struct rpl_group_info */ char future_event_master_log_name[FN_REFLEN]; bool is_parallel_exec; + /* When gtid_pending is true, we have not yet done record_gtid(). */ + bool gtid_pending; int worker_error; /* Set true when we signalled that we reach the commit phase. Used to avoid @@ -587,6 +641,17 @@ struct rpl_group_info */ time_t row_stmt_start_timestamp; bool long_find_row_note_printed; + /* Needs room for "Gtid D-S-N\x00". */ + char gtid_info_buf[5+10+1+10+1+20+1]; + + /* + Information to be able to re-try an event group in case of a deadlock or + other temporary error. + */ + inuse_relaylog *relay_log; + uint64 retry_start_offset; + uint64 retry_event_count; + bool killed_for_retry; rpl_group_info(Relay_log_info *rli_); ~rpl_group_info(); @@ -675,6 +740,8 @@ struct rpl_group_info void slave_close_thread_tables(THD *); void mark_start_commit_no_lock(); void mark_start_commit(); + char *gtid_info(); + void unmark_start_commit(); time_t get_row_stmt_start_timestamp() { diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index 05227a29775..25dff72090c 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -826,7 +826,7 @@ can_convert_field_to(Field *field, @retval false Master table is not compatible with slave table. */ bool -table_def::compatible_with(THD *thd, Relay_log_info *rli, +table_def::compatible_with(THD *thd, rpl_group_info *rgi, TABLE *table, TABLE **conv_table_var) const { @@ -834,6 +834,7 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli, We only check the initial columns for the tables. */ uint const cols_to_check= MY_MIN(table->s->fields, size()); + Relay_log_info *rli= rgi->rli; TABLE *tmp_table= NULL; for (uint col= 0 ; col < cols_to_check ; ++col) @@ -857,7 +858,7 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli, This will create the full table with all fields. This is necessary to ge the correct field lengths for the record. */ - tmp_table= create_conversion_table(thd, rli, table); + tmp_table= create_conversion_table(thd, rgi, table); if (tmp_table == NULL) return false; /* @@ -885,7 +886,7 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli, String target_type(target_buf, sizeof(target_buf), &my_charset_latin1); show_sql_type(type(col), field_metadata(col), &source_type, field->charset()); field->sql_type(target_type); - rli->report(ERROR_LEVEL, ER_SLAVE_CONVERSION_FAILED, + rli->report(ERROR_LEVEL, ER_SLAVE_CONVERSION_FAILED, rgi->gtid_info(), ER(ER_SLAVE_CONVERSION_FAILED), col, db_name, tbl_name, source_type.c_ptr_safe(), target_type.c_ptr_safe()); @@ -927,12 +928,14 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli, conversion table. 
*/ -TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE *target_table) const +TABLE *table_def::create_conversion_table(THD *thd, rpl_group_info *rgi, + TABLE *target_table) const { DBUG_ENTER("table_def::create_conversion_table"); List<Create_field> field_list; TABLE *conv_table= NULL; + Relay_log_info *rli= rgi->rli; /* At slave, columns may differ. So we should create MY_MIN(columns@master, columns@slave) columns in the @@ -1014,7 +1017,7 @@ TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE * err: if (conv_table == NULL) - rli->report(ERROR_LEVEL, ER_SLAVE_CANT_CREATE_CONVERSION, + rli->report(ERROR_LEVEL, ER_SLAVE_CANT_CREATE_CONVERSION, rgi->gtid_info(), ER(ER_SLAVE_CANT_CREATE_CONVERSION), target_table->s->db.str, target_table->s->table_name.str); diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h index 7568a2d786c..ed0ce16363b 100644 --- a/sql/rpl_utility.h +++ b/sql/rpl_utility.h @@ -30,6 +30,7 @@ class Relay_log_info; class Log_event; +struct rpl_group_info; /** A table definition from the master. @@ -187,7 +188,7 @@ public: @retval 0 if the table definition is compatible with @c table */ #ifndef MYSQL_CLIENT - bool compatible_with(THD *thd, Relay_log_info *rli, TABLE *table, + bool compatible_with(THD *thd, rpl_group_info *rgi, TABLE *table, TABLE **conv_table_var) const; /** @@ -212,7 +213,8 @@ public: @return A pointer to a temporary table with memory allocated in the thread's memroot, NULL if the table could not be created */ - TABLE *create_conversion_table(THD *thd, Relay_log_info *rli, TABLE *target_table) const; + TABLE *create_conversion_table(THD *thd, rpl_group_info *rgi, + TABLE *target_table) const; #endif diff --git a/sql/scheduler.cc b/sql/scheduler.cc index ecf49e633ab..a9b253e478a 100644 --- a/sql/scheduler.cc +++ b/sql/scheduler.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. - Copyright (c) 2012, 2013, Monty Program Ab +/* Copyright (c) 2007, 2013, Oracle and/or its affiliates. + Copyright (c) 2012, 2014, SkySQL Ab. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by
diff --git a/sql/scheduler.h b/sql/scheduler.h index 06c17c7b114..f7aff377eac 100644 --- a/sql/scheduler.h +++ b/sql/scheduler.h
@@ -99,15 +99,13 @@ public: void *data; /* scheduler-specific data structure */ }; -#undef HAVE_POOL_OF_THREADS -#if !defined(EMBEDDED_LIBRARY) && !defined(_AIX) -#define HAVE_POOL_OF_THREADS 1 +#ifdef HAVE_POOL_OF_THREADS void pool_of_threads_scheduler(scheduler_functions* func, ulong *arg_max_connections, uint *arg_connection_count); #else #define pool_of_threads_scheduler(A,B,C) \ one_thread_per_connection_scheduler(A, B, C) -#endif +#endif /*HAVE_POOL_OF_THREADS*/ #endif /* SCHEDULER_INCLUDED */
diff --git a/sql/set_var.h b/sql/set_var.h index e48f394c316..fe2a0d8e953 100644 --- a/sql/set_var.h +++ b/sql/set_var.h
@@ -284,9 +284,7 @@ public: if (value_arg && value_arg->type() == Item::FIELD_ITEM) { Item_field *item= (Item_field*) value_arg; - if (!(value=new Item_string(item->field_name, - (uint) strlen(item->field_name), - system_charset_info))) // names are utf8 + if (!(value=new Item_string_sys(item->field_name))) // names are utf8 value=value_arg; /* Give error message later */ } else
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 233bb835bd8..199a822d022 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt
@@ -3962,7 +3962,7 @@ ER_NEW_ABORTING_CONNECTION 08S01 spa "Abortada conexión %ld para db: '%-.192s' usuario: '%-.48s' servidor: '%-.64s' (%-.64s)" swe "Avbröt länken för tråd %ld till db '%-.192s', användare '%-.48s', host '%-.64s' (%-.64s)" ukr "Перервано з'єднання %ld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s' (%-.64s)" -ER_unused_2 +ER_UNUSED_10 eng "You should never see it" ER_FLUSH_MASTER_BINLOG_CLOSED eng "Binlog closed, cannot RESET MASTER"
@@ -5879,10 +5879,9 @@ ER_EVENT_NEITHER_M_EXPR_NOR_M_AT ger "Kein DATETIME-Ausdruck angegeben" ER_UNUSED_2 - eng "" - + eng "You should never see it" ER_UNUSED_3 - eng "" + eng "You should never see it" ER_EVENT_CANNOT_DELETE eng "Failed to delete the event from mysql.event" ger "Löschen des Events aus mysql.event fehlgeschlagen"
@@ -5910,7 +5909,7 @@ ER_CANT_LOCK_LOG_TABLE eng "You can't use locks with log tables." ger "Log-Tabellen können nicht gesperrt werden." ER_UNUSED_4 - eng "" + eng "You should never see it" ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MariaDB %d, now running %d. Please use mysql_upgrade to fix this error." ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MariaDB %d, jetzt unter %d. Bitte benutzen Sie mysql_upgrade, um den Fehler zu beheben"
@@ -6089,8 +6088,8 @@ ER_TRG_CANT_OPEN_TABLE ER_CANT_CREATE_SROUTINE eng "Cannot create stored routine `%-.64s`. Check warnings" ger "Kann gespeicherte Routine `%-.64s` nicht erzeugen. Beachten Sie die Warnungen" -ER_UNUSED - eng "" +ER_UNUSED_11 + eng "You should never see it" ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT eng "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement." ger "Der BINLOG-Anweisung vom Typ `%s` ging keine BINLOG-Anweisung zur Formatbeschreibung voran."
@@ -6457,7 +6456,7 @@ ER_BINLOG_UNSAFE_INSERT_TWO_KEYS ER_TABLE_IN_FK_CHECK eng "Table is being used in foreign key check."
-ER_unused_1 +ER_UNUSED_1 eng "You should never see it" ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST @@ -6527,7 +6526,7 @@ ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET swe "Hittade en rad som inte passar i nÃ¥gon given partition" ER_UNUSED_5 - eng "" + eng "You should never see it" ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE eng "Failure while changing the type of replication repository: %s." @@ -6993,11 +6992,11 @@ ER_UNKNOWN_OPTION ER_BAD_OPTION_VALUE eng "Incorrect value '%-.64s' for option '%-.64s'" ER_UNUSED_6 - eng "" + eng "You should never see it" ER_UNUSED_7 - eng "" + eng "You should never see it" ER_UNUSED_8 - eng "" + eng "You should never see it" ER_DATA_OVERFLOW 22003 eng "Got overflow when converting '%-.128s' to %-.32s. Value truncated." ER_DATA_TRUNCATED 22003 @@ -7022,8 +7021,8 @@ ER_VIEW_ORDERBY_IGNORED eng "View '%-.192s'.'%-.192s' ORDER BY clause ignored because there is other ORDER BY clause already." ER_CONNECTION_KILLED 70100 eng "Connection was killed" -ER_UNSED - eng "Internal error: '%-.192s'" +ER_UNUSED_12 + eng "You should never see it" ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION eng "Cannot modify @@session.skip_replication inside a transaction" ER_STORED_FUNCTION_PREVENTS_SWITCH_SKIP_REPLICATION @@ -7108,5 +7107,7 @@ ER_IT_IS_A_VIEW 42S02 eng "'%-.192s' is a view" ER_SLAVE_SKIP_NOT_IN_GTID eng "When using GTID, @@sql_slave_skip_counter can not be used. Instead, setting @@gtid_slave_pos explicitly can be used to skip to after a given GTID position." +ER_TABLE_DEFINITION_TOO_BIG + eng "The definition for table %`s is too big" ER_STATEMENT_TIMEOUT 70100 eng "Query execution was interrupted (max_statement_time exceeded)" diff --git a/sql/slave.cc b/sql/slave.cc index ca29410cd1d..6e70f090247 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -302,7 +302,10 @@ handle_slave_init(void *arg __attribute__((unused))) mysql_mutex_lock(&LOCK_thread_count); thd->thread_id= thread_id++; mysql_mutex_unlock(&LOCK_thread_count); + thd->system_thread = SYSTEM_THREAD_SLAVE_INIT; thd->store_globals(); + thd->security_ctx->skip_grants(); + thd->set_command(COM_DAEMON); thd_proc_info(thd, "Loading slave GTID position from table"); if (rpl_load_gtid_slave_state(thd)) @@ -317,15 +320,22 @@ handle_slave_init(void *arg __attribute__((unused))) mysql_mutex_unlock(&LOCK_thread_count); my_thread_end(); - mysql_mutex_lock(&LOCK_thread_count); + mysql_mutex_lock(&LOCK_slave_init); slave_init_thread_running= false; - mysql_cond_broadcast(&COND_thread_count); - mysql_mutex_unlock(&LOCK_thread_count); + mysql_cond_broadcast(&COND_slave_init); + mysql_mutex_unlock(&LOCK_slave_init); return 0; } +/* + Start the slave init thread. + + This thread is used to load the GTID state from mysql.gtid_slave_pos at + server start; reading from table requires valid THD, which is otherwise not + available during server init. 
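The start-and-wait handshake used for this is the standard condition-variable pattern; a generic sketch in plain pthreads (illustrative only — the real code goes through the mysql_mutex_/mysql_cond_ wrappers and a dedicated LOCK_slave_init/COND_slave_init pair):

#include <pthread.h>

static pthread_mutex_t lock= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond= PTHREAD_COND_INITIALIZER;
static bool running= true;

static void *init_worker(void *arg)
{
  (void) arg;
  /* ... one-off initialisation that needs a fully set-up thread ... */
  pthread_mutex_lock(&lock);
  running= false;                  /* publish completion under the mutex */
  pthread_cond_broadcast(&cond);
  pthread_mutex_unlock(&lock);
  return 0;
}

static void start_and_wait()
{
  pthread_t th;
  pthread_create(&th, 0, init_worker, 0);
  pthread_mutex_lock(&lock);
  while (running)                  /* loop to survive spurious wakeups */
    pthread_cond_wait(&cond, &lock);
  pthread_mutex_unlock(&lock);
  pthread_join(th, 0);
}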
+*/ static int run_slave_init_thread() { @@ -339,10 +349,10 @@ run_slave_init_thread() return 1; } - mysql_mutex_lock(&LOCK_thread_count); + mysql_mutex_lock(&LOCK_slave_init); while (slave_init_thread_running) - mysql_cond_wait(&COND_thread_count, &LOCK_thread_count); - mysql_mutex_unlock(&LOCK_thread_count); + mysql_cond_wait(&COND_slave_init, &LOCK_slave_init); + mysql_mutex_unlock(&LOCK_slave_init); return 0; } @@ -1090,21 +1100,21 @@ static bool sql_slave_killed(rpl_group_info *rgi) if (ret == 0) { - rli->report(WARNING_LEVEL, 0, + rli->report(WARNING_LEVEL, 0, rgi->gtid_info(), "Request to stop slave SQL Thread received while " "applying a group that has non-transactional " "changes; waiting for completion of the group ... "); } else { - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(), ER(ER_SLAVE_FATAL_ERROR), msg_stopped); } } else { ret= TRUE; - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(), ER(ER_SLAVE_FATAL_ERROR), msg_stopped); } @@ -1522,7 +1532,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Get master clock failed with error: %s", mysql_error(mysql)); goto network_err; } @@ -1587,7 +1597,7 @@ not always make sense; please check the manual before using it)."; goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Get master SERVER_ID failed with error: %s", mysql_error(mysql)); goto network_err; } @@ -1600,7 +1610,7 @@ when it try to get the value of SERVER_ID variable from master."; } else if (!master_row && master_res) { - mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, + mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, NULL, "Unknown system variable 'SERVER_ID' on master, \ maybe it is a *VERY OLD MASTER*."); } @@ -1660,7 +1670,7 @@ be equal for the Statement-format replication to work"; goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Get master COLLATION_SERVER failed with error: %s", mysql_error(mysql)); goto network_err; } @@ -1674,7 +1684,7 @@ when it try to get the value of COLLATION_SERVER global variable from master."; goto err; } else - mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, + mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, NULL, "Unknown system variable 'COLLATION_SERVER' on master, \ maybe it is a *VERY OLD MASTER*. *NOTE*: slave may experience \ inconsistency if replicated data deals with collation."); @@ -1723,7 +1733,7 @@ be equal for the Statement-format replication to work"; goto slave_killed_err; else if (is_network_error(err_code= mysql_errno(mysql))) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Get master TIME_ZONE failed with error: %s", mysql_error(mysql)); goto network_err; @@ -1731,7 +1741,7 @@ be equal for the Statement-format replication to work"; else if (err_code == ER_UNKNOWN_SYSTEM_VARIABLE) { /* We use ERROR_LEVEL to get the error logged to file */ - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "MySQL master doesn't have a TIME_ZONE variable. 
Note that" "if your timezone is not same between master and slave, your " @@ -1763,15 +1773,35 @@ when it try to get the value of TIME_ZONE global variable from master."; llstr((ulonglong) (mi->heartbeat_period*1000000000UL), llbuf); sprintf(query, query_format, llbuf); - if (mysql_real_query(mysql, query, strlen(query)) - && !check_io_slave_killed(mi, NULL)) + DBUG_EXECUTE_IF("simulate_slave_heartbeat_network_error", + { static ulong dbug_count= 0; + if (++dbug_count < 3) + goto heartbeat_network_error; + }); + if (mysql_real_query(mysql, query, strlen(query))) { - errmsg= "The slave I/O thread stops because SET @master_heartbeat_period " - "on master failed."; - err_code= ER_SLAVE_FATAL_ERROR; - sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql)); - mysql_free_result(mysql_store_result(mysql)); - goto err; + if (check_io_slave_killed(mi, NULL)) + goto slave_killed_err; + + if (is_network_error(mysql_errno(mysql))) + { + IF_DBUG(heartbeat_network_error: , ) + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, + "SET @master_heartbeat_period to master failed with error: %s", + mysql_error(mysql)); + mysql_free_result(mysql_store_result(mysql)); + goto network_err; + } + else + { + /* Fatal error */ + errmsg= "The slave I/O thread stops because a fatal error is encountered " + "when it tries to SET @master_heartbeat_period on master."; + err_code= ER_SLAVE_FATAL_ERROR; + sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql)); + mysql_free_result(mysql_store_result(mysql)); + goto err; + } } mysql_free_result(mysql_store_result(mysql)); } @@ -1808,7 +1838,7 @@ when it try to get the value of TIME_ZONE global variable from master."; if (global_system_variables.log_warnings > 1) { // this is tolerable as OM -> NS is supported - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Notifying master by %s failed with " "error: %s", query, mysql_error(mysql)); } @@ -1817,7 +1847,7 @@ when it try to get the value of TIME_ZONE global variable from master."; { if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Notifying master by %s failed with " "error: %s", query, mysql_error(mysql)); mysql_free_result(mysql_store_result(mysql)); @@ -1853,7 +1883,7 @@ when it try to get the value of TIME_ZONE global variable from master."; goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Get master BINLOG_CHECKSUM failed with error: %s", mysql_error(mysql)); goto network_err; } @@ -1890,7 +1920,7 @@ past_checksum: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting master-side filtering of @@skip_replication failed " "with error: %s", mysql_error(mysql)); goto network_err; @@ -1934,7 +1964,7 @@ past_checksum: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting @mariadb_slave_capability failed with error: %s", mysql_error(mysql)); goto network_err; @@ -2000,7 +2030,7 @@ after_set_capability: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting @slave_connect_state failed with error: %s", mysql_error(mysql)); goto network_err; @@ -2033,7 
+2063,7 @@ after_set_capability: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting @slave_gtid_strict_mode failed with error: %s", mysql_error(mysql)); goto network_err; @@ -2066,7 +2096,7 @@ after_set_capability: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting @slave_gtid_ignore_duplicates failed with " "error: %s", mysql_error(mysql)); goto network_err; @@ -2102,7 +2132,7 @@ after_set_capability: err_code= mysql_errno(mysql); if (is_network_error(err_code)) { - mi->report(ERROR_LEVEL, err_code, + mi->report(ERROR_LEVEL, err_code, NULL, "Setting @slave_until_gtid failed with error: %s", mysql_error(mysql)); goto network_err; @@ -2150,7 +2180,7 @@ after_set_capability: goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { - mi->report(WARNING_LEVEL, mysql_errno(mysql), + mi->report(WARNING_LEVEL, mysql_errno(mysql), NULL, "Get master GTID position failed with error: %s", mysql_error(mysql)); goto network_err; } @@ -2180,7 +2210,7 @@ err: if (master_res) mysql_free_result(master_res); DBUG_ASSERT(err_code != 0); - mi->report(ERROR_LEVEL, err_code, "%s", err_buff); + mi->report(ERROR_LEVEL, err_code, NULL, "%s", err_buff); DBUG_RETURN(1); } @@ -2201,6 +2231,7 @@ slave_killed_err: static bool wait_for_relay_log_space(Relay_log_info* rli) { bool slave_killed=0; + bool ignore_log_space_limit; Master_info* mi = rli->mi; PSI_stage_info old_stage; THD* thd = mi->io_thd; @@ -2216,6 +2247,11 @@ static bool wait_for_relay_log_space(Relay_log_info* rli) !rli->ignore_log_space_limit) mysql_cond_wait(&rli->log_space_cond, &rli->log_space_lock); + ignore_log_space_limit= rli->ignore_log_space_limit; + rli->ignore_log_space_limit= 0; + + thd->EXIT_COND(&old_stage); + /* Makes the IO thread read only one event at a time until the SQL thread is able to purge the relay @@ -2239,7 +2275,8 @@ static bool wait_for_relay_log_space(Relay_log_info* rli) thread sleeps waiting for events. 
*/ - if (rli->ignore_log_space_limit) + + if (ignore_log_space_limit) { #ifndef DBUG_OFF { @@ -2261,11 +2298,8 @@ static bool wait_for_relay_log_space(Relay_log_info* rli) mysql_mutex_unlock(&mi->data_lock); rli->sql_force_rotate_relay= false; } - - rli->ignore_log_space_limit= false; } - thd->EXIT_COND(&old_stage); DBUG_RETURN(slave_killed); } @@ -2302,7 +2336,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi) Rotate_log_event::DUP_NAME); rli->ign_master_log_name_end[0]= 0; if (unlikely(!(bool)rev)) - mi->report(ERROR_LEVEL, ER_SLAVE_CREATE_EVENT_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_CREATE_EVENT_FAILURE, NULL, ER(ER_SLAVE_CREATE_EVENT_FAILURE), "Rotate_event (out of memory?)," " SHOW SLAVE STATUS may be inaccurate"); @@ -2313,7 +2347,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi) Gtid_list_log_event::FLAG_IGN_GTIDS); rli->ign_gtids.reset(); if (unlikely(!(bool)glev)) - mi->report(ERROR_LEVEL, ER_SLAVE_CREATE_EVENT_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_CREATE_EVENT_FAILURE, NULL, ER(ER_SLAVE_CREATE_EVENT_FAILURE), "Gtid_list_event (out of memory?)," " gtid_slave_pos may be inaccurate"); @@ -2326,7 +2360,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi) DBUG_PRINT("info",("writing a Rotate event to track down ignored events")); rev->server_id= 0; // don't be ignored by slave SQL thread if (unlikely(rli->relay_log.append(rev))) - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "failed to write a Rotate event" " to the relay log, SHOW SLAVE STATUS may be" @@ -2339,7 +2373,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi) glev->server_id= 0; // don't be ignored by slave SQL thread glev->set_artificial_event(); // Don't mess up Exec_Master_Log_Pos if (unlikely(rli->relay_log.append(glev))) - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "failed to write a Gtid_list event to the relay log, " "gtid_slave_pos may be inaccurate"); @@ -2424,7 +2458,7 @@ int register_slave_on_master(MYSQL* mysql, Master_info *mi, char buf[256]; my_snprintf(buf, sizeof(buf), "%s (Errno: %d)", mysql_error(mysql), mysql_errno(mysql)); - mi->report(ERROR_LEVEL, ER_SLAVE_MASTER_COM_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_MASTER_COM_FAILURE, NULL, ER(ER_SLAVE_MASTER_COM_FAILURE), "COM_REGISTER_SLAVE", buf); } DBUG_RETURN(1); @@ -2829,7 +2863,8 @@ bool show_all_master_info(THD* thd) if (send_show_master_info_header(thd, 1, gtid_pos.length())) DBUG_RETURN(TRUE); - if (!(elements= master_info_index->master_info_hash.records)) + if (!master_info_index || + !(elements= master_info_index->master_info_hash.records)) goto end; /* @@ -3093,7 +3128,8 @@ static ulong read_event(MYSQL* mysql, Master_info *mi, bool* suppress_warnings) Some errors are temporary in nature, such as ER_LOCK_DEADLOCK and ER_LOCK_WAIT_TIMEOUT. 
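A minimal sketch of that classification (not the actual has_temporary_error() implementation; it only illustrates deciding retry-worthiness from the error code recorded in the THD):

static bool looks_temporary(THD *thd)
{
  if (!thd->is_error())
    return false;
  switch (thd->get_stmt_da()->sql_errno())
  {
  case ER_LOCK_DEADLOCK:
  case ER_LOCK_WAIT_TIMEOUT:
    return true;            /* safe to roll back and retry the event group */
  default:
    return false;
  }
}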
*/ -static int has_temporary_error(THD *thd) +int +has_temporary_error(THD *thd) { DBUG_ENTER("has_temporary_error"); @@ -3274,7 +3310,7 @@ int apply_event_and_update_pos(Log_event* ev, THD* thd, if (error) { char buf[22]; - rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, + rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, rgi->gtid_info(), "It was not possible to update the positions" " of the relay log information: the slave may" " be in an inconsistent state." @@ -3290,7 +3326,7 @@ int apply_event_and_update_pos(Log_event* ev, THD* thd, Make sure we do not errorneously update gtid_slave_pos with a lingering GTID from this failed event group (MDEV-4906). */ - rgi->gtid_sub_id= 0; + rgi->gtid_pending= false; } DBUG_RETURN(exec_res ? 1 : 0); @@ -3501,9 +3537,6 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, if (opt_gtid_ignore_duplicates) { - serial_rgi->current_gtid.domain_id= gev->domain_id; - serial_rgi->current_gtid.server_id= gev->server_id; - serial_rgi->current_gtid.seq_no= gev->seq_no; int res= rpl_global_gtid_slave_state.check_duplicate_gtid (&serial_rgi->current_gtid, serial_rgi); if (res < 0) @@ -3616,7 +3649,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli, DBUG_RETURN(exec_res); } mysql_mutex_unlock(&rli->data_lock); - rli->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_READ_FAILURE, + rli->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_READ_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_READ_FAILURE), "\ Could not parse relay log event entry. The possible reasons are: the master's \ binary log is corrupted (you can check this by running 'mysqlbinlog' on the \ @@ -3711,7 +3744,7 @@ static int try_to_reconnect(THD *thd, MYSQL *mysql, Master_info *mi, */ if (messages[SLAVE_RECON_MSG_COMMAND][0]) { - mi->report(WARNING_LEVEL, ER_SLAVE_MASTER_COM_FAILURE, + mi->report(WARNING_LEVEL, ER_SLAVE_MASTER_COM_FAILURE, NULL, ER(ER_SLAVE_MASTER_COM_FAILURE), messages[SLAVE_RECON_MSG_COMMAND], buf); } @@ -3801,7 +3834,7 @@ pthread_handler_t handle_slave_io(void *arg) /* Load the set of seen GTIDs, if we did not already. */ if (rpl_load_gtid_slave_state(thd)) { - mi->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), + mi->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL, "Unable to load replication GTID slave state from mysql.%s: %s", rpl_gtid_slave_state_table_name.str, thd->get_stmt_da()->message()); @@ -3817,14 +3850,14 @@ pthread_handler_t handle_slave_io(void *arg) if (RUN_HOOK(binlog_relay_io, thread_start, (thd, mi))) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "Failed to run 'thread_start' hook"); goto err; } if (!(mi->mysql = mysql = mysql_init(NULL))) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "error in mysql_init()"); goto err; } @@ -4006,18 +4039,18 @@ Log entry on master is longer than slave_max_allowed_packet (%lu) on \ slave. 
If the entry is correct, restart the server with a higher value of \ slave_max_allowed_packet", slave_max_allowed_packet); - mi->report(ERROR_LEVEL, ER_NET_PACKET_TOO_LARGE, + mi->report(ERROR_LEVEL, ER_NET_PACKET_TOO_LARGE, NULL, "%s", "Got a packet bigger than 'slave_max_allowed_packet' bytes"); goto err; case ER_MASTER_FATAL_ERROR_READING_BINLOG: - mi->report(ERROR_LEVEL, ER_MASTER_FATAL_ERROR_READING_BINLOG, + mi->report(ERROR_LEVEL, ER_MASTER_FATAL_ERROR_READING_BINLOG, NULL, ER(ER_MASTER_FATAL_ERROR_READING_BINLOG), mysql_error_number, mysql_error(mysql)); goto err; case ER_OUT_OF_RESOURCES: sql_print_error("\ Stopping slave I/O thread due to out-of-memory error from master"); - mi->report(ERROR_LEVEL, ER_OUT_OF_RESOURCES, + mi->report(ERROR_LEVEL, ER_OUT_OF_RESOURCES, NULL, "%s", ER(ER_OUT_OF_RESOURCES)); goto err; } @@ -4034,7 +4067,7 @@ Stopping slave I/O thread due to out-of-memory error from master"); (thd, mi,(const char*)mysql->net.read_pos + 1, event_len, &event_buf, &event_len))) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "Failed to run 'after_read_event' hook"); goto err; @@ -4045,7 +4078,7 @@ Stopping slave I/O thread due to out-of-memory error from master"); bool synced= 0; if (queue_event(mi, event_buf, event_len)) { - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "could not queue event from master"); goto err; @@ -4054,7 +4087,7 @@ Stopping slave I/O thread due to out-of-memory error from master"); if (RUN_HOOK(binlog_relay_io, after_queue_event, (thd, mi, event_buf, event_len, synced))) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "Failed to run 'after_queue_event' hook"); goto err; @@ -4151,9 +4184,10 @@ err_during_init: // TODO: make rpl_status part of Master_info change_rpl_status(RPL_ACTIVE_SLAVE,RPL_IDLE_SLAVE); mysql_mutex_lock(&LOCK_thread_count); + thd->unlink(); + mysql_mutex_unlock(&LOCK_thread_count); THD_CHECK_SENTRY(thd); delete thd; - mysql_mutex_unlock(&LOCK_thread_count); mi->abort_slave= 0; mi->slave_running= MYSQL_SLAVE_NOT_RUN; mi->io_thd= 0; @@ -4242,13 +4276,14 @@ end: void -slave_output_error_info(Relay_log_info *rli, THD *thd) +slave_output_error_info(rpl_group_info *rgi, THD *thd) { /* retrieve as much info as possible from the thd and, error codes and warnings and print this to the error log as to allow the user to locate the error */ + Relay_log_info *rli= rgi->rli; uint32 const last_errno= rli->last_error().number; char llbuff[22]; @@ -4265,7 +4300,8 @@ slave_output_error_info(Relay_log_info *rli, THD *thd) This function is reporting an error which was not reported while executing exec_relay_log_event(). 
*/ - rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), "%s", errmsg); + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), + rgi->gtid_info(), "%s", errmsg); } else if (last_errno != thd->get_stmt_da()->sql_errno()) { @@ -4344,6 +4380,7 @@ pthread_handler_t handle_slave_sql(void *arg) char saved_master_log_name[FN_REFLEN]; my_off_t UNINIT_VAR(saved_log_pos); my_off_t UNINIT_VAR(saved_master_log_pos); + String saved_skip_gtid_pos; my_off_t saved_skip= 0; Master_info *mi= ((Master_info*)arg); Relay_log_info* rli = &mi->rli; @@ -4394,7 +4431,7 @@ pthread_handler_t handle_slave_sql(void *arg) will be stuck if we fail here */ mysql_cond_broadcast(&rli->start_cond); - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, "Failed during slave thread initialization"); goto err_during_init; } @@ -4446,16 +4483,20 @@ pthread_handler_t handle_slave_sql(void *arg) mysql_mutex_unlock(&rli->log_space_lock); serial_rgi->gtid_sub_id= 0; + serial_rgi->gtid_pending= false; if (init_relay_log_pos(rli, rli->group_relay_log_name, rli->group_relay_log_pos, 1 /*need data lock*/, &errmsg, 1 /*look for a description_event*/)) { - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, "Error initializing relay log position: %s", errmsg); goto err; } + if (rli->alloc_inuse_relaylog(rli->group_relay_log_name)) + goto err; + strcpy(rli->future_event_master_log_name, rli->group_master_log_name); THD_CHECK_SENTRY(thd); #ifndef DBUG_OFF @@ -4510,7 +4551,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, if (check_temp_dir(rli->slave_patternload_file)) { - rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL, "Unable to use slave's temporary directory %s - %s", slave_load_tmpdir, thd->get_stmt_da()->message()); goto err; @@ -4519,7 +4560,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, /* Load the set of seen GTIDs, if we did not already. */ if (rpl_load_gtid_slave_state(thd)) { - rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL, "Unable to load replication GTID slave state from mysql.%s: %s", rpl_gtid_slave_state_table_name.str, thd->get_stmt_da()->message()); @@ -4538,7 +4579,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, execute_init_command(thd, &opt_init_slave, &LOCK_sys_init_slave); if (thd->is_slave_error) { - rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), NULL, "Slave SQL thread aborted. 
Can't execute init_slave query"); goto err; } @@ -4555,6 +4596,12 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, strmake_buf(saved_master_log_name, rli->group_master_log_name); saved_log_pos= rli->group_relay_log_pos; saved_master_log_pos= rli->group_master_log_pos; + if (mi->using_gtid != Master_info::USE_GTID_NO) + { + saved_skip_gtid_pos.append(STRING_WITH_LEN(", GTID '")); + rpl_append_gtid_state(&saved_skip_gtid_pos, false); + saved_skip_gtid_pos.append(STRING_WITH_LEN("'; ")); + } saved_skip= rli->slave_skip_counter; } if ((rli->until_condition == Relay_log_info::UNTIL_MASTER_POS || @@ -4578,16 +4625,27 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, if (saved_skip && rli->slave_skip_counter == 0) { + String tmp; + if (mi->using_gtid != Master_info::USE_GTID_NO) + { + tmp.append(STRING_WITH_LEN(", GTID '")); + rpl_append_gtid_state(&tmp, false); + tmp.append(STRING_WITH_LEN("'; ")); + } + sql_print_information("'SQL_SLAVE_SKIP_COUNTER=%ld' executed at " "relay_log_file='%s', relay_log_pos='%ld', master_log_name='%s', " - "master_log_pos='%ld' and new position at " + "master_log_pos='%ld'%s and new position at " "relay_log_file='%s', relay_log_pos='%ld', master_log_name='%s', " - "master_log_pos='%ld' ", + "master_log_pos='%ld'%s ", (ulong) saved_skip, saved_log_name, (ulong) saved_log_pos, saved_master_log_name, (ulong) saved_master_log_pos, + saved_skip_gtid_pos.c_ptr_safe(), rli->group_relay_log_name, (ulong) rli->group_relay_log_pos, - rli->group_master_log_name, (ulong) rli->group_master_log_pos); + rli->group_master_log_name, (ulong) rli->group_master_log_pos, + tmp.c_ptr_safe()); saved_skip= 0; + saved_skip_gtid_pos.free(); } if (exec_relay_log_event(thd, rli, serial_rgi)) @@ -4596,7 +4654,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, // do not scare the user if SQL thread was simply killed or stopped if (!sql_slave_killed(serial_rgi)) { - slave_output_error_info(rli, thd); + slave_output_error_info(serial_rgi, thd); if (WSREP_ON && rli->last_error().number == ER_UNKNOWN_COM_ERROR) { wsrep_node_dropped= TRUE; @@ -4791,7 +4849,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev) xev.log_pos = cev->log_pos; if (unlikely(mi->rli.relay_log.append(&xev))) { - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "error writing Exec_load event to relay log"); goto err; @@ -4805,7 +4863,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev) cev->block_len = num_bytes; if (unlikely(mi->rli.relay_log.append(cev))) { - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "error writing Create_file event to relay log"); goto err; @@ -4820,7 +4878,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev) aev.log_pos = cev->log_pos; if (unlikely(mi->rli.relay_log.append(&aev))) { - mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, + mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "error writing Append_block event to relay log"); goto err; @@ -4927,7 +4985,7 @@ static int queue_binlog_ver_1_event(Master_info *mi, const char *buf, { if (unlikely(!(tmp_buf=(char*)my_malloc(event_len+1,MYF(MY_WME))))) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + 
mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "Memory allocation failed"); DBUG_RETURN(1); } @@ -5225,6 +5283,86 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) event_len - BINLOG_CHECKSUM_LEN : event_len, mi->rli.relay_log.description_event_for_queue); + if (unlikely(mi->gtid_reconnect_event_skip_count) && + unlikely(!mi->gtid_event_seen) && + rev.is_artificial_event() && + (mi->prev_master_id != mi->master_id || + strcmp(rev.new_log_ident, mi->master_log_name) != 0)) + { + /* + Artificial Rotate_log_event is the first event we receive at the start + of each master binlog file. It gives the name of the new binlog file. + + Normally, we already have this name from the real rotate event at the + end of the previous binlog file (unless we are making a new connection + using GTID). But if the master server restarted/crashed, there is no + rotate event at the end of the prior binlog file, so the name is new. + + We use this fact to handle a special case of master crashing. If the + master crashed while writing the binlog, it might end with a partial + event group lacking the COMMIT/XID event, which must be rolled + back. If the slave IO thread happens to get a disconnect in the middle + of exactly this event group, it will try to reconnect at the same GTID + and skip already fetched events. However, that GTID did not commit on + the master before the crash, so it does not really exist, and the + master will connect the slave at the next following GTID starting in + the next binlog. This could confuse the slave and make it mix the + start of one event group with the end of another. + + But we detect this case here, by noticing the change of binlog name + which detects the missing rotate event at the end of the previous + binlog file. In this case, we reset the counters to make us not skip + the next event group, and queue an artificial Format Description + event. The previously fetched incomplete event group will then be + rolled back when the Format Description event is executed by the SQL + thread. + + A similar case is if the reconnect somehow connects to a different + master server (like due to a network proxy or IP address takeover). + We detect this case by noticing a change of server_id and in this + case likewise rollback the partially received event group. + */ + Format_description_log_event fdle(4); + + if (mi->prev_master_id != mi->master_id) + sql_print_warning("The server_id of master server changed in the " + "middle of GTID %u-%u-%llu. Assuming a change of " + "master server, so rolling back the previously " + "received partial transaction. Expected: %lu, " + "received: %lu", mi->last_queued_gtid.domain_id, + mi->last_queued_gtid.server_id, + mi->last_queued_gtid.seq_no, + mi->prev_master_id, mi->master_id); + else if (strcmp(rev.new_log_ident, mi->master_log_name) != 0) + sql_print_warning("Unexpected change of master binlog file name in the " + "middle of GTID %u-%u-%llu, assuming that master has " + "crashed and rolling back the transaction. 
Expected: " + "'%s', received: '%s'", + mi->last_queued_gtid.domain_id, + mi->last_queued_gtid.server_id, + mi->last_queued_gtid.seq_no, + mi->master_log_name, rev.new_log_ident); + + mysql_mutex_lock(log_lock); + if (likely(!fdle.write(rli->relay_log.get_log_file()) && + !rli->relay_log.flush_and_sync(NULL))) + { + rli->relay_log.harvest_bytes_written(&rli->log_space_total); + } + else + { + error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE; + mysql_mutex_unlock(log_lock); + goto err; + } + rli->relay_log.signal_update(); + mysql_mutex_unlock(log_lock); + + mi->gtid_reconnect_event_skip_count= 0; + mi->events_queued_since_last_gtid= 0; + } + mi->prev_master_id= mi->master_id; + if (unlikely(process_io_rotate(mi, &rev))) { error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE; @@ -5710,7 +5848,7 @@ err: mysql_mutex_unlock(&mi->data_lock); DBUG_PRINT("info", ("error: %d", error)); if (error) - mi->report(ERROR_LEVEL, error, ER(error), + mi->report(ERROR_LEVEL, error, NULL, ER(error), (error == ER_SLAVE_RELAY_LOG_WRITE_FAILURE)? "could not queue event from master" : error_msg.ptr()); @@ -5817,7 +5955,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi, #ifndef DBUG_OFF mi->events_till_disconnect = disconnect_slave_event_count; #endif - ulong client_flag= CLIENT_REMEMBER_OPTIONS; + ulong client_flag= 0; if (opt_slave_compressed_protocol) client_flag=CLIENT_COMPRESS; /* We will use compression */ @@ -5855,7 +5993,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi, /* we disallow empty users */ if (mi->user == NULL || mi->user[0] == 0) { - mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, + mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, ER(ER_SLAVE_FATAL_ERROR), "Invalid (empty) username when attempting to " "connect to the master server. Connection attempt " @@ -5872,7 +6010,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi, { last_errno=mysql_errno(mysql); suppress_warnings= 0; - mi->report(ERROR_LEVEL, last_errno, + mi->report(ERROR_LEVEL, last_errno, NULL, "error %s to master '%s@%s:%d'" " - retry-time: %d retries: %lu message: %s", (reconnect ? 
"reconnecting" : "connecting"), @@ -6404,6 +6542,7 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size) DBUG_ASSERT(rli->cur_log_fd >= 0); mysql_file_close(rli->cur_log_fd, MYF(MY_WME)); rli->cur_log_fd = -1; + rli->last_inuse_relaylog->completed= true; if (relay_log_purge) { @@ -6532,6 +6671,12 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size) mysql_mutex_unlock(log_lock); goto err; } + if (rli->alloc_inuse_relaylog(rli->linfo.log_file_name)) + { + if (!hot_log) + mysql_mutex_unlock(log_lock); + goto err; + } if (!hot_log) mysql_mutex_unlock(log_lock); continue; @@ -6547,6 +6692,8 @@ static Log_event* next_event(rpl_group_info *rgi, ulonglong *event_size) if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name, &errmsg)) <0) goto err; + if (rli->alloc_inuse_relaylog(rli->linfo.log_file_name)) + goto err; } else { @@ -6685,7 +6832,7 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report, " so slave stops; check error log on slave" " for more info", MYF(0), bug_id); // a verbose message for the error log - rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, + rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, NULL, "According to the master's version ('%s')," " it is probable that master suffers from this bug:" " http://bugs.mysql.com/bug.php?id=%u" diff --git a/sql/slave.h b/sql/slave.h index aa3976f6e6c..e65b4a589a1 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -229,35 +229,30 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset, void set_slave_thread_options(THD* thd); void set_slave_thread_default_charset(THD *thd, rpl_group_info *rgi); int rotate_relay_log(Master_info* mi); +int has_temporary_error(THD *thd); int apply_event_and_update_pos(Log_event* ev, THD* thd, struct rpl_group_info *rgi, rpl_parallel_thread *rpt); pthread_handler_t handle_slave_io(void *arg); -void slave_output_error_info(Relay_log_info *rli, THD *thd); +void slave_output_error_info(rpl_group_info *rgi, THD *thd); pthread_handler_t handle_slave_sql(void *arg); bool net_request_file(NET* net, const char* fname); extern bool volatile abort_loop; -extern Master_info main_mi, *active_mi; /* active_mi for multi-master */ +extern Master_info *active_mi; /* active_mi for multi-master */ extern Master_info *default_master_info; /* To replace active_mi */ extern Master_info_index *master_info_index; extern LEX_STRING default_master_connection_name; -extern LIST master_list; extern my_bool replicate_same_server_id; extern int disconnect_slave_event_count, abort_slave_event_count ; /* the master variables are defaults read from my.cnf or command line */ -extern uint master_port, master_connect_retry, report_port; -extern char * master_user, *master_password, *master_host; +extern uint report_port; extern char *master_info_file, *report_user; extern char *report_host, *report_password; -extern my_bool master_ssl; -extern char *master_ssl_ca, *master_ssl_capath, *master_ssl_cert; -extern char *master_ssl_cipher, *master_ssl_key; - extern I_List<THD> threads; #else diff --git a/sql/sp_head.cc b/sql/sp_head.cc index f8320e830a5..296135c93e7 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -43,6 +43,7 @@ #include "sql_base.h" // close_thread_tables #include "transaction.h" // trans_commit_stmt #include "sql_audit.h" +#include "debug_sync.h" /* Sufficient max length of printed destinations and frame offsets (all uints). 
@@ -1123,6 +1124,8 @@ sp_head::execute(THD *thd, bool merge_da_on_success) Item_change_list old_change_list; String old_packet; uint old_server_status; + const uint status_backup_mask= SERVER_STATUS_CURSOR_EXISTS | + SERVER_STATUS_LAST_ROW_SENT; Reprepare_observer *save_reprepare_observer= thd->m_reprepare_observer; Object_creation_ctx *saved_creation_ctx; Diagnostics_area *da= thd->get_stmt_da(); @@ -1257,7 +1260,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success) It is probably safe to use same thd->convert_buff everywhere. */ old_packet.swap(thd->packet); - old_server_status= thd->server_status; + old_server_status= thd->server_status & status_backup_mask; /* Switch to per-instruction arena here. We can do it since we cleanup @@ -1275,6 +1278,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success) /* Discard the initial part of executing routines. */ thd->profiling.discard_current_query(); #endif + DEBUG_SYNC(thd, "sp_head_execute_before_loop"); do { sp_instr *i; @@ -1379,7 +1383,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success) thd->spcont->pop_all_cursors(); // To avoid memory leaks after an error /* Restore all saved */ - thd->server_status= old_server_status; + thd->server_status= (thd->server_status & ~status_backup_mask) | old_server_status; old_packet.swap(thd->packet); DBUG_ASSERT(thd->change_list.is_empty()); old_change_list.move_elements_to(&thd->change_list); @@ -1852,9 +1856,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, as one select and not resetting THD::user_var_events before each invocation. */ - mysql_mutex_lock(&LOCK_thread_count); - q= global_query_id; - mysql_mutex_unlock(&LOCK_thread_count); + q= get_query_id(); mysql_bin_log.start_union_events(thd, q + 1); binlog_save_options= thd->variables.option_bits; thd->variables.option_bits&= ~OPTION_BIN_LOG; @@ -2290,6 +2292,11 @@ sp_head::restore_lex(THD *thd) */ if (sp_update_sp_used_routines(&m_sroutines, &sublex->sroutines)) DBUG_RETURN(TRUE); + + /* If this substatement is a update query, then mark MODIFIES_DATA */ + if (is_update_query(sublex->sql_command)) + m_flags|= MODIFIES_DATA; + /* Merge tables used by this statement (but not by its functions or procedures) to multiset of tables used by this routine. @@ -3109,7 +3116,10 @@ sp_instr_stmt::execute(THD *thd, uint *nextp) thd->query_name_consts= 0; if (!thd->is_error()) + { + res= 0; thd->get_stmt_da()->reset_diagnostics_area(); + } } DBUG_RETURN(res || thd->is_error()); } diff --git a/sql/sp_head.h b/sql/sp_head.h index cc598186d08..dbdb957aa79 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -161,7 +161,21 @@ public: LOG_SLOW_STATEMENTS= 256, // Used by events LOG_GENERAL_LOG= 512, // Used by events HAS_SQLCOM_RESET= 1024, - HAS_SQLCOM_FLUSH= 2048 + HAS_SQLCOM_FLUSH= 2048, + + /** + Marks routines that directly (i.e. not by calling other routines) + change tables. Note that this flag is set automatically based on + type of statements used in the stored routine and is different + from routine characteristic provided by user in a form of CONTAINS + SQL, READS SQL DATA, MODIFIES SQL DATA clauses. The latter are + accepted by parser but pretty much ignored after that. + We don't rely on them: + a) for compatibility reasons. + b) because in CONTAINS SQL case they don't provide enough + information anyway. 
+ */ + MODIFIES_DATA= 4096 }; stored_procedure_type m_type; @@ -332,11 +346,17 @@ public: int add_instr(sp_instr *instr); - inline uint - instructions() - { - return m_instr.elements; - } + /** + Returns true if any substatement in the routine directly + (not through another routine) modifies data/changes table. + + @sa Comment for MODIFIES_DATA flag. + */ + bool modifies_data() const + { return m_flags & MODIFIES_DATA; } + + inline uint instructions() + { return m_instr.elements; } inline sp_instr * last_instruction() diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 3d3c0bc835a..c6f23b3f1a3 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -297,7 +297,7 @@ public: bool eq(const char *user2, const char *host2) { return !cmp(user2, host2); } - bool wild_eq(const char *user2, const char *host2, const char *ip2 = 0) + bool wild_eq(const char *user2, const char *host2, const char *ip2) { if (strcmp(safe_str(user.str), safe_str(user2))) return false; @@ -1886,8 +1886,8 @@ bool acl_getroot(Security_context *sctx, char *user, char *host, DBUG_RETURN(res); } -int check_user_can_set_role(const char *host, const char *user, - const char *rolename, ulonglong *access) +static int check_user_can_set_role(const char *user, const char *host, + const char *ip, const char *rolename, ulonglong *access) { ACL_ROLE *role; ACL_USER_BASE *acl_user_base; @@ -1930,7 +1930,7 @@ int check_user_can_set_role(const char *host, const char *user, continue; acl_user= (ACL_USER *)acl_user_base; - if (acl_user->wild_eq(user, host)) + if (acl_user->wild_eq(user, host, ip)) { is_granted= TRUE; break; @@ -1958,9 +1958,8 @@ end: int acl_check_setrole(THD *thd, char *rolename, ulonglong *access) { /* Yes! priv_user@host. Don't ask why - that's what check_access() does. */ - return check_user_can_set_role(thd->security_ctx->host, - thd->security_ctx->priv_user, - rolename, access); + return check_user_can_set_role(thd->security_ctx->priv_user, + thd->security_ctx->host, thd->security_ctx->ip, rolename, access); } @@ -2776,7 +2775,7 @@ int acl_set_default_role(THD *thd, const char *host, const char *user, rolename= thd->security_ctx->priv_role; } - if (check_user_can_set_role(host, user, rolename, NULL)) + if (check_user_can_set_role(user, host, host, rolename, NULL)) DBUG_RETURN(result); if (!strcasecmp(rolename, "NONE")) @@ -7665,7 +7664,7 @@ bool mysql_show_grants(THD *thd, LEX_USER *lex_user) } DBUG_ASSERT(rolename || username); - Item_string *field=new Item_string("",0,&my_charset_latin1); + Item_string *field=new Item_string_ascii("", 0); List<Item> field_list; field->name=buff; field->max_length=1024; @@ -8944,6 +8943,7 @@ static int handle_grant_struct(enum enum_acl_lists struct_no, bool drop, acl_user->user.str= strdup_root(&acl_memroot, user_to->user.str); acl_user->user.length= user_to->user.length; acl_user->host.hostname= strdup_root(&acl_memroot, user_to->host.str); + acl_user->hostname_length= user_to->host.length; break; case DB_ACL: @@ -12561,7 +12561,7 @@ maria_declare_plugin(mysql_password) NULL, /* status variables */ NULL, /* system variables */ "1.0", /* String version */ - MariaDB_PLUGIN_MATURITY_BETA /* Maturity */ + MariaDB_PLUGIN_MATURITY_STABLE /* Maturity */ }, { MYSQL_AUTHENTICATION_PLUGIN, /* type constant */ @@ -12576,7 +12576,7 @@ maria_declare_plugin(mysql_password) NULL, /* status variables */ NULL, /* system variables */ "1.0", /* String version */ - MariaDB_PLUGIN_MATURITY_BETA /* Maturity */ + MariaDB_PLUGIN_MATURITY_STABLE /* Maturity */ } maria_declare_plugin_end; diff --git 
a/sql/sql_admin.cc b/sql/sql_admin.cc index 0b610718cd0..aefa88feb43 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -914,7 +914,7 @@ send_result_message: protocol->store(operator_name, system_charset_info); if (result_code) // either mysql_recreate_table or analyze failed { - DBUG_ASSERT(thd->is_error() || thd->killed); + DBUG_ASSERT(thd->is_error()); if (thd->is_error()) { const char *err_msg= thd->get_stmt_da()->message(); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index e51eb1c1a11..0bbcca5e778 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2085,7 +2085,10 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, DBUG_RETURN(TRUE); if (!(flags & MYSQL_OPEN_IGNORE_KILLED) && thd->killed) + { + thd->send_kill_message(); DBUG_RETURN(TRUE); + } /* Check if we're trying to take a write lock in a read only transaction. @@ -2923,6 +2926,7 @@ Locked_tables_list::reopen_tables(THD *thd) size_t reopen_count= 0; MYSQL_LOCK *lock; MYSQL_LOCK *merged_lock; + DBUG_ENTER("Locked_tables_list::reopen_tables"); for (TABLE_LIST *table_list= m_locked_tables; table_list; table_list= table_list->next_global) @@ -2934,7 +2938,7 @@ Locked_tables_list::reopen_tables(THD *thd) if (open_table(thd, table_list, thd->mem_root, &ot_ctx)) { unlink_all_closed_tables(thd, 0, reopen_count); - return TRUE; + DBUG_RETURN(TRUE); } table_list->table->pos_in_locked_tables= table_list; /* See also the comment on lock type in init_locked_tables(). */ @@ -2966,11 +2970,11 @@ Locked_tables_list::reopen_tables(THD *thd) unlink_all_closed_tables(thd, lock, reopen_count); if (! thd->killed) my_error(ER_LOCK_DEADLOCK, MYF(0)); - return TRUE; + DBUG_RETURN(TRUE); } thd->lock= merged_lock; } - return FALSE; + DBUG_RETURN(FALSE); } /** @@ -3515,9 +3519,12 @@ Open_table_context::recover_from_failed_open() /* Return a appropriate read lock type given a table object. - @param thd Thread context - @param prelocking_ctx Prelocking context. - @param table_list Table list element for table to be locked. + @param thd Thread context + @param prelocking_ctx Prelocking context. + @param table_list Table list element for table to be locked. + @param routine_modifies_data + Some routine that is invoked by statement + modifies data. @remark Due to a statement-based replication limitation, statements such as INSERT INTO .. SELECT FROM .. and CREATE TABLE .. SELECT FROM need @@ -3530,9 +3537,13 @@ Open_table_context::recover_from_failed_open() This also applies to SELECT/SET/DO statements which use stored functions. Calls to such functions are going to be logged as a whole and thus should be serialized against concurrent changes - to tables used by those functions. This can be avoided if functions - only read data but doing so requires more complex analysis than it - is done now. + to tables used by those functions. This is avoided when functions + do not modify data but only read it, since in this case nothing is + written to the binary log. Argument routine_modifies_data + denotes the same. So effectively, if the statement is not a + update query and routine_modifies_data is false, then + prelocking_placeholder does not take importance. 
+ Furthermore, this does not apply to I_S and log tables as it's always unsafe to replicate such tables under statement-based replication as the table on the slave might contain other data @@ -3547,7 +3558,8 @@ Open_table_context::recover_from_failed_open() thr_lock_type read_lock_type_for_table(THD *thd, Query_tables_list *prelocking_ctx, - TABLE_LIST *table_list) + TABLE_LIST *table_list, + bool routine_modifies_data) { /* In cases when this function is called for a sub-statement executed in @@ -3561,7 +3573,7 @@ thr_lock_type read_lock_type_for_table(THD *thd, (table_list->table->s->table_category == TABLE_CATEGORY_LOG) || (table_list->table->s->table_category == TABLE_CATEGORY_PERFORMANCE) || !(is_update_query(prelocking_ctx->sql_command) || - table_list->prelocking_placeholder || + (routine_modifies_data && table_list->prelocking_placeholder) || (thd->locked_tables_mode > LTM_LOCK_TABLES))) return TL_READ; else @@ -3574,19 +3586,21 @@ thr_lock_type read_lock_type_for_table(THD *thd, and, if prelocking strategy prescribes so, extend the prelocking set with tables and routines used by it. - @param[in] thd Thread context. - @param[in] prelocking_ctx Prelocking context. - @param[in] rt Element of prelocking set to be processed. - @param[in] prelocking_strategy Strategy which specifies how the - prelocking set should be extended when - one of its elements is processed. - @param[in] has_prelocking_list Indicates that prelocking set/list for - this statement has already been built. - @param[in] ot_ctx Context of open_table used to recover from - locking failures. - @param[out] need_prelocking Set to TRUE if it was detected that this - statement will require prelocked mode for - its execution, not touched otherwise. + @param[in] thd Thread context. + @param[in] prelocking_ctx Prelocking context. + @param[in] rt Element of prelocking set to be processed. + @param[in] prelocking_strategy Strategy which specifies how the + prelocking set should be extended when + one of its elements is processed. + @param[in] has_prelocking_list Indicates that prelocking set/list for + this statement has already been built. + @param[in] ot_ctx Context of open_table used to recover from + locking failures. + @param[out] need_prelocking Set to TRUE if it was detected that this + statement will require prelocked mode for + its execution, not touched otherwise. + @param[out] routine_modifies_data Set to TRUE if it was detected that this + routine does modify table data. @retval FALSE Success. @retval TRUE Failure (Conflicting metadata lock, OOM, other errors). @@ -3598,11 +3612,13 @@ open_and_process_routine(THD *thd, Query_tables_list *prelocking_ctx, Prelocking_strategy *prelocking_strategy, bool has_prelocking_list, Open_table_context *ot_ctx, - bool *need_prelocking) + bool *need_prelocking, bool *routine_modifies_data) { MDL_key::enum_mdl_namespace mdl_type= rt->mdl_request.key.mdl_namespace(); DBUG_ENTER("open_and_process_routine"); + *routine_modifies_data= false; + switch (mdl_type) { case MDL_key::FUNCTION: @@ -3655,10 +3671,13 @@ open_and_process_routine(THD *thd, Query_tables_list *prelocking_ctx, DBUG_RETURN(TRUE); /* 'sp' is NULL when there is no such routine. 
*/ - if (sp && !has_prelocking_list) + if (sp) { - prelocking_strategy->handle_routine(thd, prelocking_ctx, rt, sp, - need_prelocking); + *routine_modifies_data= sp->modifies_data(); + + if (!has_prelocking_list) + prelocking_strategy->handle_routine(thd, prelocking_ctx, rt, sp, + need_prelocking); } } else @@ -4003,16 +4022,7 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables, goto end; } - if (tables->lock_type != TL_UNLOCK && ! thd->locked_tables_mode) - { - if (tables->lock_type == TL_WRITE_DEFAULT) - tables->table->reginfo.lock_type= thd->update_lock_default; - else if (tables->lock_type == TL_READ_DEFAULT) - tables->table->reginfo.lock_type= - read_lock_type_for_table(thd, lex, tables); - else - tables->table->reginfo.lock_type= tables->lock_type; - } + /* Copy grant information from TABLE_LIST instance to TABLE one. */ tables->table->grant= tables->grant; /* Check and update metadata version of a base table. */ @@ -4351,6 +4361,7 @@ bool open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags, Open_table_context ot_ctx(thd, flags); bool error= FALSE; MEM_ROOT new_frm_mem; + bool some_routine_modifies_data= FALSE; bool has_prelocking_list; DBUG_ENTER("open_tables"); @@ -4523,11 +4534,16 @@ restart: sroutine_to_open= &rt->next, rt= rt->next) { bool need_prelocking= false; + bool routine_modifies_data; TABLE_LIST **save_query_tables_last= thd->lex->query_tables_last; error= open_and_process_routine(thd, thd->lex, rt, prelocking_strategy, has_prelocking_list, &ot_ctx, - &need_prelocking); + &need_prelocking, + &routine_modifies_data); + + // Remember if any of SF modifies data. + some_routine_modifies_data|= routine_modifies_data; if (need_prelocking && ! thd->lex->requires_prelocking()) thd->lex->mark_as_requiring_prelocking(save_query_tables_last); @@ -4568,6 +4584,10 @@ restart: the children are detached. Attaching and detaching are always done, even under LOCK TABLES. + We also convert all TL_WRITE_DEFAULT and TL_READ_DEFAULT locks to + appropriate "real" lock types to be used for locking and to be passed + to storage engine. + And start wsrep TOI if needed. */ for (tables= *start; tables; tables= tables->next_global) @@ -4595,6 +4615,19 @@ restart: goto error; } } + + /* Set appropriate TABLE::lock_type. */ + if (tbl && tables->lock_type != TL_UNLOCK && !thd->locked_tables_mode) + { + if (tables->lock_type == TL_WRITE_DEFAULT) + tbl->reginfo.lock_type= thd->update_lock_default; + else if (tables->lock_type == TL_READ_DEFAULT) + tbl->reginfo.lock_type= + read_lock_type_for_table(thd, thd->lex, tables, + some_routine_modifies_data); + else + tbl->reginfo.lock_type= tables->lock_type; + } } error: @@ -4858,11 +4891,15 @@ static bool check_lock_and_start_stmt(THD *thd, engine is important as, for example, InnoDB uses it to determine what kind of row locks should be acquired when executing statement in prelocked mode or under LOCK TABLES with @@innodb_table_locks = 0. + + Last argument routine_modifies_data for read_lock_type_for_table() + is ignored, as prelocking placeholder will never be set here. 
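
An illustrative summary, not part of the patch itself, of how the read_lock_type_for_table() decision above plays out once routine_modifies_data is propagated from open_and_process_routine() through open_tables(); corner cases follow the code shown above:

// Effective lock chosen for a table opened with TL_READ_DEFAULT under
// statement-based binlogging (illustrative comment table only):
//
//   statement itself updates data?   routine_modifies_data && prelocking_placeholder?   resulting lock
//   yes                              (any)                                              TL_READ_NO_INSERT
//   no                               yes                                                TL_READ_NO_INSERT
//   no                               no                                                 TL_READ
//
// Log and performance-schema tables are always opened with TL_READ, and
// prelocked execution (locked_tables_mode above LTM_LOCK_TABLES) still
// forces TL_READ_NO_INSERT, as it did before this change.
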
*/ + DBUG_ASSERT(table_list->prelocking_placeholder == false); if (table_list->lock_type == TL_WRITE_DEFAULT) lock_type= thd->update_lock_default; else if (table_list->lock_type == TL_READ_DEFAULT) - lock_type= read_lock_type_for_table(thd, prelocking_ctx, table_list); + lock_type= read_lock_type_for_table(thd, prelocking_ctx, table_list, true); else lock_type= table_list->lock_type; @@ -5283,6 +5320,7 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count, thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_AUTOINC_NOT_FIRST); } +#ifdef NOT_USED_IN_MARIADB /* INSERT...ON DUPLICATE KEY UPDATE on a table with more than one unique keys can be unsafe. @@ -5308,6 +5346,7 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count, thd->lex->duplicates == DUP_UPDATE) thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_INSERT_TWO_KEYS); } +#endif /* We have to emulate LOCK TABLES if we are statement needs prelocking. */ if (thd->lex->requires_prelocking()) diff --git a/sql/sql_base.h b/sql/sql_base.h index 6f8e9c1c03b..e39ec16028b 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -137,7 +137,8 @@ TABLE *find_write_locked_table(TABLE *list, const char *db, const char *table_name); thr_lock_type read_lock_type_for_table(THD *thd, Query_tables_list *prelocking_ctx, - TABLE_LIST *table_list); + TABLE_LIST *table_list, + bool routine_modifies_data); my_bool mysql_rm_tmp_tables(void); bool rm_temporary_table(handlerton *base, const char *path); diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index cf68ba36997..6001517b0c7 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1711,7 +1711,7 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length) DBUG_ENTER("Query_cache::send_result_to_client"); /* - Testing 'query_cache_size' without a lock here is safe: the thing + Testing without a lock here is safe: the thing we may loose is that the query won't be served from cache, but we save on mutex locking in the case when query cache is disabled. @@ -1731,8 +1731,6 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length) goto err; } - DBUG_ASSERT(query_cache_size != 0); // otherwise cache would be disabled - thd->query_cache_is_applicable= 1; sql= org_sql; sql_end= sql + query_length; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 430bfbf760d..9d675490b28 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2008, 2013, Monty Program Ab. + Copyright (c) 2008, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -895,7 +895,6 @@ THD::THD(bool is_wsrep_applier) accessed_rows_and_keys(0), m_statement_psi(NULL), m_idle_psi(NULL), - m_server_idle(false), thread_id(0), global_disable_checkpoint(0), failed_com_change_user(0), @@ -4346,6 +4345,220 @@ extern "C" int thd_rpl_is_parallel(const MYSQL_THD thd) return thd->rgi_slave && thd->rgi_slave->is_parallel_exec; } +/* + This function can optionally be called to check if thd_report_wait_for() + needs to be called for waits done by a given transaction. + + If this function returns false for a given thd, there is no need to do any + calls to thd_report_wait_for() on that thd. + + This call is optional; it is safe to call thd_report_wait_for() in any case. + This call can be used to save some redundant calls to thd_report_wait_for() + if desired. 
(This is unlikely to matter much unless there are _lots_ of + waits to report, as the overhead of thd_report_wait_for() is small). +*/ +extern "C" int +thd_need_wait_for(const MYSQL_THD thd) +{ + rpl_group_info *rgi; + + if (!thd) + return false; + rgi= thd->rgi_slave; + if (!rgi) + return false; + return rgi->is_parallel_exec; +} + +/* + Used by InnoDB/XtraDB to report that one transaction THD is about to go to + wait for a transactional lock held by another transactions OTHER_THD. + + This is used for parallel replication, where transactions are required to + commit in the same order on the slave as they did on the master. If the + transactions on the slave encounters lock conflicts on the slave that did + not exist on the master, this can cause deadlocks. + + Normally, such conflicts will not occur, because the same conflict would + have prevented the two transactions from committing in parallel on the + master, thus preventing them from running in parallel on the slave in the + first place. However, it is possible in case when the optimizer chooses a + different plan on the slave than on the master (eg. table scan instead of + index scan). + + InnoDB/XtraDB reports lock waits using this call. If a lock wait causes a + deadlock with the pre-determined commit order, we kill the later transaction, + and later re-try it, to resolve the deadlock. + + This call need only receive reports about waits for locks that will remain + until the holding transaction commits. InnoDB/XtraDB auto-increment locks + are released earlier, and so need not be reported. (Such false positives are + not harmful, but could lead to unnecessary kill and retry, so best avoided). +*/ +extern "C" void +thd_report_wait_for(const MYSQL_THD thd, MYSQL_THD other_thd) +{ + rpl_group_info *rgi; + rpl_group_info *other_rgi; + + if (!thd || !other_thd) + return; + rgi= thd->rgi_slave; + other_rgi= other_thd->rgi_slave; + if (!rgi || !other_rgi) + return; + if (!rgi->is_parallel_exec) + return; + if (rgi->rli != other_rgi->rli) + return; + if (!rgi->gtid_sub_id || !other_rgi->gtid_sub_id) + return; + if (rgi->current_gtid.domain_id != other_rgi->current_gtid.domain_id) + return; + if (rgi->gtid_sub_id > other_rgi->gtid_sub_id) + return; + /* + This transaction is about to wait for another transaction that is required + by replication binlog order to commit after. This would cause a deadlock. + + So send a kill to the other transaction, with a temporary error; this will + cause replication to rollback (and later re-try) the other transaction, + releasing the lock for this transaction so replication can proceed. + */ + other_rgi->killed_for_retry= true; + mysql_mutex_lock(&other_thd->LOCK_thd_data); + other_thd->awake(KILL_CONNECTION); + mysql_mutex_unlock(&other_thd->LOCK_thd_data); +} + +/* + This function is called from InnoDB/XtraDB to check if the commit order of + two transactions has already been decided by the upper layer. This happens + in parallel replication, where the commit order is forced to be the same on + the slave as it was originally on the master. + + If this function returns false, it means that such commit order will be + enforced. This allows the storage engine to optionally omit gap lock waits + or similar measures that would otherwise be needed to ensure that + transactions would be serialised in a way that would cause a commit order + that is correct for binlogging for statement-based replication. 
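
A minimal caller-side sketch of how a storage engine's lock-wait path might drive this pair of hooks; lock_wait_report() and its parameters are hypothetical stand-ins, only thd_need_wait_for() and thd_report_wait_for() are the real API introduced here:

/* Hypothetical engine-side helper, shown only to illustrate the intended
   calling pattern of the new hooks (assumes the service declarations are
   visible to the engine). */
static void lock_wait_report(MYSQL_THD waiter_thd, MYSQL_THD holder_thd)
{
  /* Optional fast path: only parallel-replication worker threads ever need
     their waits reported, so skip the work for ordinary connections. */
  if (!thd_need_wait_for(waiter_thd))
    return;
  /* Report that waiter_thd is about to block on a lock held by holder_thd.
     If the waiter must commit before the holder in the replication commit
     order, the server kills the holder with a temporary error so it can be
     rolled back and retried, releasing the lock. */
  thd_report_wait_for(waiter_thd, holder_thd);
}
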
+ + Since transactions are only run in parallel on the slave if they ran without + lock conflicts on the master, normally no lock conflicts on the slave happen + during parallel replication. However, there are a couple of corner cases + where it can happen, like these secondary-index operations: + + T1: INSERT INTO t1 VALUES (7, NULL); + T2: DELETE FROM t1 WHERE b <= 3; + + T1: UPDATE t1 SET secondary=NULL WHERE primary=1 + T2: DELETE t1 WHERE secondary <= 3 + + The DELETE takes a gap lock that can block the INSERT/UPDATE, but the row + locks set by INSERT/UPDATE do not block the DELETE. Thus, the execution + order of the transactions determine whether a lock conflict occurs or + not. Thus a lock conflict can occur on the slave where it did not on the + master. + + If this function returns true, normal locking should be done as required by + the binlogging and transaction isolation level in effect. But if it returns + false, the correct order will be enforced anyway, and InnoDB/XtraDB can + avoid taking the gap lock, preventing the lock conflict. + + Calling this function is just an optimisation to avoid unnecessary + deadlocks. If it was not used, a gap lock would be set that could eventually + cause a deadlock; the deadlock would be caught by thd_report_wait_for() and + the transaction T2 killed and rolled back (and later re-tried). +*/ +extern "C" int +thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd) +{ + rpl_group_info *rgi, *other_rgi; + + DBUG_EXECUTE_IF("disable_thd_need_ordering_with", return 1;); + if (!thd || !other_thd) + return 1; + rgi= thd->rgi_slave; + other_rgi= other_thd->rgi_slave; + if (!rgi || !other_rgi) + return 1; + if (!rgi->is_parallel_exec) + return 1; + if (rgi->rli != other_rgi->rli) + return 1; + if (rgi->current_gtid.domain_id != other_rgi->current_gtid.domain_id) + return 1; + if (!rgi->commit_id || rgi->commit_id != other_rgi->commit_id) + return 1; + /* + Otherwise, these two threads are doing parallel replication within the same + replication domain. Their commit order is already fixed, so we do not need + gap locks or similar to otherwise enforce ordering (and in fact such locks + could lead to unnecessary deadlocks and transaction retry). + */ + return 0; +} + + +/* + If the storage engine detects a deadlock, and needs to choose a victim + transaction to roll back, it can call this function to ask the upper + server layer for which of two possible transactions is prefered to be + aborted and rolled back. + + In parallel replication, if two transactions are running in parallel and + one is fixed to commit before the other, then the one that commits later + will be prefered as the victim - chosing the early transaction as a victim + will not resolve the deadlock anyway, as the later transaction still needs + to wait for the earlier to commit. + + Otherwise, a transaction that uses only transactional tables, and can thus + be safely rolled back, will be prefered as a deadlock victim over a + transaction that also modified non-transactional (eg. MyISAM) tables. + + The return value is -1 if the first transaction is prefered as a deadlock + victim, 1 if the second transaction is prefered, or 0 for no preference (in + which case the storage engine can make the choice as it prefers). 
+*/ +extern "C" int +thd_deadlock_victim_preference(const MYSQL_THD thd1, const MYSQL_THD thd2) +{ + rpl_group_info *rgi1, *rgi2; + bool nontrans1, nontrans2; + + if (!thd1 || !thd2) + return 0; + + /* + If the transactions are participating in the same replication domain in + parallel replication, then request to select the one that will commit + later (in the fixed commit order from the master) as the deadlock victim. + */ + rgi1= thd1->rgi_slave; + rgi2= thd2->rgi_slave; + if (rgi1 && rgi2 && + rgi1->is_parallel_exec && + rgi1->rli == rgi2->rli && + rgi1->current_gtid.domain_id == rgi2->current_gtid.domain_id) + return rgi1->gtid_sub_id < rgi2->gtid_sub_id ? 1 : -1; + + /* + If one transaction has modified non-transactional tables (so that it + cannot be safely rolled back), and the other has not, then prefer to + select the purely transactional one as the victim. + */ + nontrans1= thd1->transaction.all.modified_non_trans_table; + nontrans2= thd2->transaction.all.modified_non_trans_table; + if (nontrans1 && !nontrans2) + return 1; + else if (!nontrans1 && nontrans2) + return -1; + + /* No preferences, let the storage engine decide. */ + return 0; +} + + extern "C" int thd_non_transactional_update(const MYSQL_THD thd) { return(thd->transaction.all.modified_non_trans_table); @@ -4370,9 +4583,18 @@ extern "C" bool thd_binlog_filter_ok(const MYSQL_THD thd) return binlog_filter->db_ok(thd->db); } +/* + This is similar to sqlcom_can_generate_row_events, with the expection + that we only return 1 if we are going to generate row events in a + transaction. + CREATE OR REPLACE is always safe to do as this will run in it's own + transaction. +*/ + extern "C" bool thd_sqlcom_can_generate_row_events(const MYSQL_THD thd) { - return sqlcom_can_generate_row_events(thd); + return (sqlcom_can_generate_row_events(thd) && thd->lex->sql_command != + SQLCOM_CREATE_TABLE); } @@ -5888,23 +6110,35 @@ show_query_type(THD::enum_binlog_query_type qtype) Constants required for the limit unsafe warnings suppression */ //seconds after which the limit unsafe warnings suppression will be activated -#define LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT 50 +#define LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT 5*60 //number of limit unsafe warnings after which the suppression will be activated -#define LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT 50 +#define LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT 10 -static ulonglong limit_unsafe_suppression_start_time= 0; -static bool unsafe_warning_suppression_is_activated= false; -static int limit_unsafe_warning_count= 0; +static ulonglong unsafe_suppression_start_time= 0; +static bool unsafe_warning_suppression_active[LEX::BINLOG_STMT_UNSAFE_COUNT]; +static ulong unsafe_warnings_count[LEX::BINLOG_STMT_UNSAFE_COUNT]; +static ulong total_unsafe_warnings_count; /** Auxiliary function to reset the limit unsafety warning suppression. + This is done without mutex protection, but this should be good + enough as it doesn't matter if we loose a couple of suppressed + messages or if this is called multiple times. 
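
A caller-side sketch for the other two hooks; apart from thd_need_ordering_with() and thd_deadlock_victim_preference() themselves, every name below is a hypothetical stand-in, not an actual InnoDB/XtraDB call site:

/* Hypothetical engine-side decision points, illustrating the intended use
   of the ordering and deadlock-victim hooks. */
static bool engine_needs_gap_lock(MYSQL_THD thd, MYSQL_THD other_thd)
{
  /* thd_need_ordering_with() returns 0 when the server already enforces the
     commit order of the two transactions (same parallel-replication domain
     and batch); in that case the gap lock can be skipped to avoid needless
     conflicts. */
  return thd_need_ordering_with(thd, other_thd) != 0;
}

static MYSQL_THD engine_pick_deadlock_victim(MYSQL_THD thd1, MYSQL_THD thd2)
{
  int pref= thd_deadlock_victim_preference(thd1, thd2);
  if (pref < 0)
    return thd1;   /* server prefers the first transaction as victim */
  if (pref > 0)
    return thd2;   /* server prefers the second transaction as victim */
  return thd2;     /* no preference: engine applies its own heuristic
                      (here it simply picks thd2) */
}
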
*/ -static void reset_binlog_unsafe_suppression() + +static void reset_binlog_unsafe_suppression(ulonglong now) { + uint i; DBUG_ENTER("reset_binlog_unsafe_suppression"); - unsafe_warning_suppression_is_activated= false; - limit_unsafe_warning_count= 0; - limit_unsafe_suppression_start_time= my_interval_timer()/10000000; + + unsafe_suppression_start_time= now; + total_unsafe_warnings_count= 0; + + for (i= 0 ; i < LEX::BINLOG_STMT_UNSAFE_COUNT ; i++) + { + unsafe_warnings_count[i]= 0; + unsafe_warning_suppression_active[i]= 0; + } DBUG_VOID_RETURN; } @@ -5922,95 +6156,94 @@ static void print_unsafe_warning_to_log(int unsafe_type, char* buf, } /** - Auxiliary function to check if the warning for limit unsafety should be - thrown or suppressed. Details of the implementation can be found in the - comments inline. + Auxiliary function to check if the warning for unsafe repliction statements + should be thrown or suppressed. + + Logic is: + - If we get more than LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT errors + of one type, that type of errors will be suppressed for + LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT. + - When the time limit has been reached, all suppression is reset. + + This means that if one gets many different types of errors, some of them + may be reset less than LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT. However at + least one error is disable for this time. + SYNOPSIS: @params - buf - buffer to hold the warning message text unsafe_type - The type of unsafety. - query - The actual query statement. - TODO: Remove this function and implement a general service for all warnings - that would prevent flooding the error log. + RETURN: + 0 0k to log + 1 Message suppressed */ -static void do_unsafe_limit_checkout(char* buf, int unsafe_type, char* query) + +static bool protect_against_unsafe_warning_flood(int unsafe_type) { - ulonglong now= 0; - DBUG_ENTER("do_unsafe_limit_checkout"); - DBUG_ASSERT(unsafe_type == LEX::BINLOG_STMT_UNSAFE_LIMIT); - limit_unsafe_warning_count++; + ulong count; + ulonglong now= my_interval_timer()/1000000000ULL; + DBUG_ENTER("protect_against_unsafe_warning_flood"); + + count= ++unsafe_warnings_count[unsafe_type]; + total_unsafe_warnings_count++; + /* INITIALIZING: If this is the first time this function is called with log warning enabled, the monitoring the unsafe warnings should start. */ - if (limit_unsafe_suppression_start_time == 0) + if (unsafe_suppression_start_time == 0) { - limit_unsafe_suppression_start_time= my_interval_timer()/10000000; - print_unsafe_warning_to_log(unsafe_type, buf, query); + reset_binlog_unsafe_suppression(now); + DBUG_RETURN(0); } - else + + /* + The following is true if we got too many errors or if the error was + already suppressed + */ + if (count >= LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT) { - if (!unsafe_warning_suppression_is_activated) - print_unsafe_warning_to_log(unsafe_type, buf, query); + ulonglong diff_time= (now - unsafe_suppression_start_time); - if (limit_unsafe_warning_count >= - LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT) + if (!unsafe_warning_suppression_active[unsafe_type]) { - now= my_interval_timer()/10000000; - if (!unsafe_warning_suppression_is_activated) + /* + ACTIVATION: + We got LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT warnings in + less than LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT we activate the + suppression. 
+ */ + if (diff_time <= LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT) { - /* - ACTIVATION: - We got LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT warnings in - less than LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT we activate the - suppression. - */ - if ((now-limit_unsafe_suppression_start_time) <= - LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT) - { - unsafe_warning_suppression_is_activated= true; - DBUG_PRINT("info",("A warning flood has been detected and the limit \ -unsafety warning suppression has been activated.")); - } - else - { - /* - there is no flooding till now, therefore we restart the monitoring - */ - limit_unsafe_suppression_start_time= my_interval_timer()/10000000; - limit_unsafe_warning_count= 0; - } + unsafe_warning_suppression_active[unsafe_type]= 1; + sql_print_information("Suppressing warnings of type '%s' for up to %d seconds because of flooding", + ER(LEX::binlog_stmt_unsafe_errcode[unsafe_type]), + LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT); } else { /* - Print the suppression note and the unsafe warning. - */ - sql_print_information("The following warning was suppressed %d times \ -during the last %d seconds in the error log", - limit_unsafe_warning_count, - (int) - (now-limit_unsafe_suppression_start_time)); - print_unsafe_warning_to_log(unsafe_type, buf, query); - /* - DEACTIVATION: We got LIMIT_UNSAFE_WARNING_ACTIVATION_THRESHOLD_COUNT - warnings in more than LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT, the - suppression should be deactivated. + There is no flooding till now, therefore we restart the monitoring */ - if ((now - limit_unsafe_suppression_start_time) > - LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT) - { - reset_binlog_unsafe_suppression(); - DBUG_PRINT("info",("The limit unsafety warning supression has been \ -deactivated")); - } + reset_binlog_unsafe_suppression(now); + } + } + else + { + /* This type of warnings was suppressed */ + if (diff_time > LIMIT_UNSAFE_WARNING_ACTIVATION_TIMEOUT) + { + ulong save_count= total_unsafe_warnings_count; + /* Print a suppression note and remove the suppression */ + reset_binlog_unsafe_suppression(now); + sql_print_information("Suppressed %lu unsafe warnings during " + "the last %d seconds", + save_count, (int) diff_time); } - limit_unsafe_warning_count= 0; } } - DBUG_VOID_RETURN; + DBUG_RETURN(unsafe_warning_suppression_active[unsafe_type]); } /** @@ -6022,6 +6255,7 @@ deactivated")); void THD::issue_unsafe_warnings() { char buf[MYSQL_ERRMSG_SIZE * 2]; + uint32 unsafe_type_flags; DBUG_ENTER("issue_unsafe_warnings"); /* Ensure that binlog_unsafe_warning_flags is big enough to hold all @@ -6029,8 +6263,10 @@ void THD::issue_unsafe_warnings() */ DBUG_ASSERT(LEX::BINLOG_STMT_UNSAFE_COUNT <= sizeof(binlog_unsafe_warning_flags) * CHAR_BIT); + + if (!(unsafe_type_flags= binlog_unsafe_warning_flags)) + DBUG_VOID_RETURN; // Nothing to do - uint32 unsafe_type_flags= binlog_unsafe_warning_flags; /* For each unsafe_type, check if the statement is unsafe in this way and issue a warning. 
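
A worked illustration of the suppression policy implemented above, assuming the new defaults (threshold of 10 warnings per type, 5*60-second window): if a statement class such as BINLOG_STMT_UNSAFE_LIMIT logs its 10th warning 40 seconds after the window started, that class is marked suppressed and a single "Suppressing warnings of type ... for up to 300 seconds because of flooding" note is written instead of further per-statement messages. If the 10th warning only arrives after more than 300 seconds, the counters are simply reset and normal logging continues. Once a suppressed class is hit again after the 300-second window has elapsed, all counters are reset and one "Suppressed N unsafe warnings during the last D seconds" summary is printed (N covering all warning types tallied since the window began), after which warnings are logged normally again.
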
@@ -6045,13 +6281,9 @@ void THD::issue_unsafe_warnings() ER_BINLOG_UNSAFE_STATEMENT, ER(ER_BINLOG_UNSAFE_STATEMENT), ER(LEX::binlog_stmt_unsafe_errcode[unsafe_type])); - if (global_system_variables.log_warnings) - { - if (unsafe_type == LEX::BINLOG_STMT_UNSAFE_LIMIT) - do_unsafe_limit_checkout( buf, unsafe_type, query()); - else //cases other than LIMIT unsafety - print_unsafe_warning_to_log(unsafe_type, buf, query()); - } + if (global_system_variables.log_warnings > 0 && + !protect_against_unsafe_warning_flood(unsafe_type)) + print_unsafe_warning_to_log(unsafe_type, buf, query()); } } DBUG_VOID_RETURN; @@ -6531,6 +6763,7 @@ wait_for_commit::unregister_wait_for_prior_commit2() this->waitee= NULL; } } + wakeup_error= 0; mysql_mutex_unlock(&LOCK_wait_commit); } diff --git a/sql/sql_class.h b/sql/sql_class.h index d7bbfc3799d..e515b99b1a5 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -524,6 +524,14 @@ typedef struct system_variables ulonglong sortbuff_size; ulonglong group_concat_max_len; ulonglong default_regex_flags; + + /** + Place holders to store Multi-source variables in sys_var.cc during + update and show of variables. + */ + ulonglong slave_skip_counter; + ulonglong max_relay_log_size; + ha_rows select_limit; ha_rows max_join_size; ha_rows expensive_subquery_limit; @@ -589,12 +597,6 @@ typedef struct system_variables */ uint32 gtid_domain_id; uint64 gtid_seq_no; - /** - Place holders to store Multi-source variables in sys_var.cc during - update and show of variables. - */ - ulong slave_skip_counter; - ulong max_relay_log_size; /** Default transaction access mode. READ ONLY (true) or READ WRITE (false). @@ -714,6 +716,7 @@ typedef struct system_status_var ulong filesort_range_count_; ulong filesort_rows_; ulong filesort_scan_count_; + ulong filesort_pq_sorts_; /* Prepared statements and binary protocol */ ulong com_stmt_prepare; ulong com_stmt_reprepare; @@ -768,6 +771,13 @@ typedef struct system_status_var #define last_system_status_var questions #define last_cleared_system_status_var memory_used +/* + Global status variables +*/ + +extern ulong feature_files_opened_with_delayed_keys; + + void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var); void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var, @@ -1371,7 +1381,8 @@ enum enum_thread_type SYSTEM_THREAD_SLAVE_SQL= 4, SYSTEM_THREAD_EVENT_SCHEDULER= 8, SYSTEM_THREAD_EVENT_WORKER= 16, - SYSTEM_THREAD_BINLOG_BACKGROUND= 32 + SYSTEM_THREAD_BINLOG_BACKGROUND= 32, + SYSTEM_THREAD_SLAVE_INIT= 64 }; inline char const * @@ -1386,6 +1397,7 @@ show_system_thread(enum_thread_type thread) RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_SQL); RETURN_NAME_AS_STRING(SYSTEM_THREAD_EVENT_SCHEDULER); RETURN_NAME_AS_STRING(SYSTEM_THREAD_EVENT_WORKER); + RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_INIT); default: sprintf(buf, "<UNKNOWN SYSTEM THREAD: %d>", thread); return buf; @@ -1753,6 +1765,8 @@ struct wait_for_commit { if (waitee) unregister_wait_for_prior_commit2(); + else + wakeup_error= 0; } /* Remove a waiter from the list in the waitee. Used to unregister a wait. @@ -2510,8 +2524,6 @@ public: /** Idle instrumentation state. */ PSI_idle_locker_state m_idle_state; #endif /* HAVE_PSI_IDLE_INTERFACE */ - /** True if the server code is IDLE for this connection. */ - bool m_server_idle; /* Id of current query. 
Statement can be reused to execute several queries diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 418c1db9b21..da2f7b156fe 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -937,7 +937,8 @@ multi_delete::initialize_tables(JOIN *join) walk= delete_tables; - for (JOIN_TAB *tab= first_linear_tab(join, WITH_CONST_TABLES); + for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS, + WITH_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS)) { diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index a910ed6290f..8e8dbfc71d4 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -465,6 +465,8 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived) } } + if (!derived->merged_for_insert) + dt_select->first_cond_optimization= FALSE; // consider it optimized exit_merge: if (arena) thd->restore_active_arena(arena, &backup); @@ -614,6 +616,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) SELECT_LEX_UNIT *unit= derived->get_unit(); DBUG_ENTER("mysql_derived_prepare"); bool res= FALSE; + DBUG_PRINT("enter", ("unit 0x%lx", (ulong) unit)); // Skip already prepared views/DT if (!unit || unit->prepared || @@ -623,9 +626,6 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) thd->lex->sql_command == SQLCOM_DELETE_MULTI)))) DBUG_RETURN(FALSE); - Query_arena *arena, backup; - arena= thd->activate_stmt_arena_if_needed(&backup); - SELECT_LEX *first_select= unit->first_select(); /* prevent name resolving out of derived table */ @@ -743,8 +743,6 @@ exit: if (derived->outer_join) table->maybe_null= 1; } - if (arena) - thd->restore_active_arena(arena, &backup); DBUG_RETURN(res); } diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 53ac095d1d0..576fd48016e 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -203,15 +203,13 @@ bool Explain_query::print_explain_str(THD *thd, String *out_str, bool is_analyz static void push_str(List<Item> *item_list, const char *str) { - item_list->push_back(new Item_string(str, - strlen(str), system_charset_info)); + item_list->push_back(new Item_string_sys(str)); } static void push_string(List<Item> *item_list, String *str) { - item_list->push_back(new Item_string(str->ptr(), str->length(), - system_charset_info)); + item_list->push_back(new Item_string_sys(str->ptr(), str->length())); } @@ -263,8 +261,7 @@ int Explain_union::print_explain(Explain_query *query, len+= lastop; table_name_buffer[len - 1]= '>'; // change ',' to '>' } - const CHARSET_INFO *cs= system_charset_info; - item_list.push_back(new Item_string(table_name_buffer, len, cs)); + item_list.push_back(new Item_string_sys(table_name_buffer, len)); } /* `partitions` column */ @@ -311,8 +308,7 @@ int Explain_union::print_explain(Explain_query *query, { extra_buf.append(STRING_WITH_LEN("Using filesort")); } - const CHARSET_INFO *cs= system_charset_info; - item_list.push_back(new Item_string(extra_buf.ptr(), extra_buf.length(), cs)); + item_list.push_back(new Item_string_sys(extra_buf.ptr(), extra_buf.length())); //output->unit.offset_limit_cnt= 0; if (output->send_data(item_list)) @@ -370,12 +366,10 @@ int Explain_select::print_explain(Explain_query *query, if (message) { List<Item> item_list; - const CHARSET_INFO *cs= system_charset_info; Item *item_null= new Item_null(); item_list.push_back(new Item_int((int32) select_id)); - item_list.push_back(new Item_string(select_type, - strlen(select_type), cs)); + item_list.push_back(new Item_string_sys(select_type)); for (uint i=0 ; i < 7; i++) 
item_list.push_back(item_null); if (explain_flags & DESCRIBE_PARTITIONS) @@ -392,7 +386,7 @@ int Explain_select::print_explain(Explain_query *query, item_list.push_back(item_null); } - item_list.push_back(new Item_string(message,strlen(message),cs)); + item_list.push_back(new Item_string_sys(message)); if (output->send_data(item_list)) return 1; @@ -622,7 +616,7 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai extra_buf.append(STRING_WITH_LEN("Using filesort")); } - item_list.push_back(new Item_string(extra_buf.ptr(), extra_buf.length(), cs)); + item_list.push_back(new Item_string_sys(extra_buf.ptr(), extra_buf.length())); if (output->send_data(item_list)) return 1; diff --git a/sql/sql_get_diagnostics.cc b/sql/sql_get_diagnostics.cc index be1e3589cc6..8b0d86aa7d1 100644 --- a/sql/sql_get_diagnostics.cc +++ b/sql/sql_get_diagnostics.cc @@ -267,9 +267,11 @@ Condition_information_item::make_utf8_string_item(THD *thd, const String *str) CHARSET_INFO *to_cs= &my_charset_utf8_general_ci; /* If a charset was not set, assume that no conversion is needed. */ CHARSET_INFO *from_cs= str->charset() ? str->charset() : to_cs; - Item_string *item= new Item_string(str->ptr(), str->length(), from_cs); + String tmp(str->ptr(), str->length(), from_cs); /* If necessary, convert the string (ignoring errors), then copy it over. */ - return item ? item->charset_converter(to_cs, false) : NULL; + uint conv_errors; + return new Item_string(&tmp, to_cs, &conv_errors, + DERIVATION_COERCIBLE, MY_REPERTOIRE_UNICODE30); } diff --git a/sql/sql_help.cc b/sql/sql_help.cc index 844810af0f4..8f458ea0b9f 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -626,7 +626,7 @@ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, uint mlen, { Item *cond= new Item_func_like(new Item_field(pfname), new Item_string(mask,mlen,pfname->charset()), - new Item_string("\\",1,&my_charset_latin1), + new Item_string_ascii("\\"), FALSE); if (thd->is_fatal_error) return 0; // OOM diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 2b68f7766ac..71a1983878f 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2009, 2013, Monty Program Ab. + Copyright (c) 2010, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -4148,15 +4148,14 @@ select_create::binlog_show_create_table(TABLE **tables, uint count) { /* Note 1: In RBR mode, we generate a CREATE TABLE statement for the - created table by calling store_create_info() (behaves as SHOW - CREATE TABLE). In the event of an error, nothing should be - written to the binary log, even if the table is non-transactional; - therefore we pretend that the generated CREATE TABLE statement is - for a transactional table. The event will then be put in the - transaction cache, and any subsequent events (e.g., table-map - events and binrow events) will also be put there. We can then use - ha_autocommit_or_rollback() to either throw away the entire - kaboodle of events, or write them to the binary log. + created table by calling show_create_table(). In the event of an error, + nothing should be written to the binary log, even if the table is + non-transactional; therefore we pretend that the generated CREATE TABLE + statement is for a transactional table. 
The event will then be put in the + transaction cache, and any subsequent events (e.g., table-map events and + binrow events) will also be put there. We can then use + ha_autocommit_or_rollback() to either throw away the entire kaboodle of + events, or write them to the binary log. We write the CREATE TABLE statement here and not in prepare() since there potentially are sub-selects or accesses to information @@ -4175,12 +4174,9 @@ select_create::binlog_show_create_table(TABLE **tables, uint count) tmp_table_list.table = *tables; query.length(0); // Have to zero it since constructor doesn't - result= store_create_info(thd, &tmp_table_list, &query, create_info, - /* show_database */ TRUE, - MY_TEST(create_info->org_options & - HA_LEX_CREATE_REPLACE) || - create_info->table_was_deleted); - DBUG_ASSERT(result == 0); /* store_create_info() always return 0 */ + result= show_create_table(thd, &tmp_table_list, &query, create_info, + WITH_DB_NAME); + DBUG_ASSERT(result == 0); /* show_create_table() always return 0 */ if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open()) { diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc index 7f97b70952e..cec0755c9b0 100644 --- a/sql/sql_join_cache.cc +++ b/sql/sql_join_cache.cc @@ -2094,7 +2094,7 @@ enum_nested_loop_state JOIN_CACHE::join_records(bool skip_last) goto finish; if (outer_join_first_inner) { - if (next_cache) + if (next_cache && join_tab != join_tab->last_inner) { /* Ensure that all matches for outer records from join buffer are to be diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 45045ff54a0..772aacdaacd 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. Copyright (c) 2009, 2014, Monty Program Ab. This program is free software; you can redistribute it and/or modify @@ -30,7 +30,7 @@ #include "sp.h" #include "sql_select.h" -static int lex_one_token(void *arg, THD *thd); +static int lex_one_token(YYSTYPE *yylval, THD *thd); /* We are using pointer to this variable for distinguishing between assignment @@ -969,15 +969,17 @@ bool consume_comment(Lex_input_stream *lip, int remaining_recursions_permitted) /* MYSQLlex remember the following states from the following MYSQLlex() + @param yylval [out] semantic value of the token being parsed (yylval) + @param thd THD + - MY_LEX_EOQ Found end of query - MY_LEX_OPERATOR_OR_IDENT Last state was an ident, text or number (which can't be followed by a signed number) */ -int MYSQLlex(void *arg, THD *thd) +int MYSQLlex(YYSTYPE *yylval, THD *thd) { Lex_input_stream *lip= & thd->m_parser_state->m_lip; - YYSTYPE *yylval=(YYSTYPE*) arg; int token; if (lip->lookahead_token >= 0) @@ -994,7 +996,7 @@ int MYSQLlex(void *arg, THD *thd) return token; } - token= lex_one_token(arg, thd); + token= lex_one_token(yylval, thd); switch(token) { case WITH: @@ -1005,7 +1007,7 @@ int MYSQLlex(void *arg, THD *thd) to transform the grammar into a LALR(1) grammar, which sql_yacc.yy can process. 
*/ - token= lex_one_token(arg, thd); + token= lex_one_token(yylval, thd); switch(token) { case CUBE_SYM: lip->m_digest_psi= MYSQL_ADD_TOKEN(lip->m_digest_psi, WITH_CUBE_SYM, @@ -1034,7 +1036,7 @@ int MYSQLlex(void *arg, THD *thd) return token; } -int lex_one_token(void *arg, THD *thd) +static int lex_one_token(YYSTYPE *yylval, THD *thd) { reg1 uchar c; bool comment_closed; @@ -1043,7 +1045,6 @@ int lex_one_token(void *arg, THD *thd) enum my_lex_states state; Lex_input_stream *lip= & thd->m_parser_state->m_lip; LEX *lex= thd->lex; - YYSTYPE *yylval=(YYSTYPE*) arg; CHARSET_INFO *const cs= thd->charset(); const uchar *const state_map= cs->state_map; const uchar *const ident_map= cs->ident_map; @@ -3302,7 +3303,7 @@ static void fix_prepare_info_in_table_list(THD *thd, TABLE_LIST *tbl) { for (; tbl; tbl= tbl->next_local) { - if (tbl->on_expr) + if (tbl->on_expr && !tbl->prep_on_expr) { thd->check_and_register_item_tree(&tbl->prep_on_expr, &tbl->on_expr); tbl->on_expr= tbl->on_expr->copy_andor_structure(thd); diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 5e9c7b9dc6a..0a1232e81d1 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1,5 +1,5 @@ -/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2010, 2013, Monty Program Ab. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. + Copyright (c) 2010, 2014, Monty Program Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -2897,7 +2897,7 @@ extern void lex_start(THD *thd); extern void lex_end(LEX *lex); void end_lex_with_single_table(THD *thd, TABLE *table, LEX *old_lex); int init_lex_with_single_table(THD *thd, TABLE *table, LEX *lex); -extern int MYSQLlex(void *arg, THD *thd); +extern int MYSQLlex(union YYSTYPE *yylval, THD *thd); extern void trim_whitespace(CHARSET_INFO *cs, LEX_STRING *str); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index d8906b2d578..4bc32df549d 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2008, 2014, Monty Program Ab + Copyright (c) 2008, 2014, SkySQL Ab. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -302,7 +302,7 @@ void init_update_queries(void) sql_command_flags[SQLCOM_CREATE_TABLE]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE | CF_AUTO_COMMIT_TRANS | CF_REPORT_PROGRESS | CF_CAN_GENERATE_ROW_EVENTS; - sql_command_flags[SQLCOM_CREATE_INDEX]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS; + sql_command_flags[SQLCOM_CREATE_INDEX]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS | CF_REPORT_PROGRESS; sql_command_flags[SQLCOM_ALTER_TABLE]= CF_CHANGES_DATA | CF_WRITE_LOGS_COMMAND | CF_AUTO_COMMIT_TRANS | CF_REPORT_PROGRESS | CF_INSERTS_DATA; @@ -981,9 +981,7 @@ bool do_command(THD *thd) */ DEBUG_SYNC(thd, "before_do_command_net_read"); - thd->m_server_idle= TRUE; - packet_length= my_net_read(net); - thd->m_server_idle= FALSE; + packet_length= my_net_read_packet(net, 1); #ifdef WITH_WSREP if (WSREP(thd)) { mysql_mutex_lock(&thd->LOCK_wsrep_thd); @@ -2946,6 +2944,9 @@ mysql_execute_command(THD *thd) goto error; mysql_mutex_lock(&LOCK_active_mi); + if (!master_info_index) + goto error; + mi= master_info_index->get_master_info(&lex_mi->connection_name, Sql_condition::WARN_LEVEL_NOTE); @@ -3195,7 +3196,11 @@ mysql_execute_command(THD *thd) goto end_with_restore_list; } + /* Copy temporarily the statement flags to thd for lock_table_names() */ + uint save_thd_create_info_options= thd->lex->create_info.options; + thd->lex->create_info.options|= create_info.options; res= open_and_lock_tables(thd, lex->query_tables, TRUE, 0); + thd->lex->create_info.options= save_thd_create_info_options; if (res) { /* Got error or warning. Set res to 1 if error */ @@ -3407,7 +3412,7 @@ end_with_restore_list: case SQLCOM_SLAVE_ALL_START: { mysql_mutex_lock(&LOCK_active_mi); - if (!master_info_index->start_all_slaves(thd)) + if (master_info_index && !master_info_index->start_all_slaves(thd)) my_ok(thd); mysql_mutex_unlock(&LOCK_active_mi); break; @@ -3423,7 +3428,7 @@ end_with_restore_list: goto error; } mysql_mutex_lock(&LOCK_active_mi); - if (!master_info_index->stop_all_slaves(thd)) + if (master_info_index && !master_info_index->stop_all_slaves(thd)) my_ok(thd); mysql_mutex_unlock(&LOCK_active_mi); break; @@ -4693,11 +4698,12 @@ end_with_restore_list: case SQLCOM_SHOW_GRANTS: { LEX_USER *grant_user= lex->grant_user; + Security_context *sctx= thd->security_ctx; if (!grant_user) goto error; - if (grant_user->user.str && - !strcmp(thd->security_ctx->priv_user, grant_user->user.str)) + if (grant_user->user.str && !strcmp(sctx->priv_user, grant_user->user.str) && + grant_user->host.str && !strcmp(sctx->priv_host, grant_user->host.str)) grant_user->user= current_user; if (grant_user->user.str == current_user.str || @@ -7916,7 +7922,7 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user, I_List_iterator<THD> it(threads); while ((tmp=it++)) { - if (tmp->get_command() == COM_DAEMON) + if (!tmp->security_ctx->user) continue; /* Check that hostname (if given) and user name matches. diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 9bc8147c75f..2b9ba2d00c4 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2005, 2013, Oracle and/or its affiliates. - Copyright (c) 2009, 2013, Monty Program Ab. +/* Copyright (c) 2005, 2014, Oracle and/or its affiliates. + Copyright (c) 2009, 2014, SkySQL Ab. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -3167,19 +3167,28 @@ uint32 get_partition_id_cols_list_for_endpoint(partition_info *part_info, uint num_columns= part_info->part_field_list.elements; uint list_index; uint min_list_index= 0; + int cmp; + /* Notice that max_list_index = last_index + 1 here! */ uint max_list_index= part_info->num_list_values; DBUG_ENTER("get_partition_id_cols_list_for_endpoint"); /* Find the matching partition (including taking endpoint into account). */ do { - /* Midpoint, adjusted down, so it can never be > last index. */ + /* Midpoint, adjusted down, so it can never be >= max_list_index. */ list_index= (max_list_index + min_list_index) >> 1; - if (cmp_rec_and_tuple_prune(list_col_array + list_index*num_columns, - nparts, left_endpoint, include_endpoint) > 0) + cmp= cmp_rec_and_tuple_prune(list_col_array + list_index*num_columns, + nparts, left_endpoint, include_endpoint); + if (cmp > 0) + { min_list_index= list_index + 1; + } else + { max_list_index= list_index; + if (cmp == 0) + break; + } } while (max_list_index > min_list_index); list_index= max_list_index; @@ -3196,12 +3205,10 @@ uint32 get_partition_id_cols_list_for_endpoint(partition_info *part_info, nparts, left_endpoint, include_endpoint))); - if (!left_endpoint) - { - /* Set the end after this list tuple if not already after the last. */ - if (list_index < part_info->num_parts) - list_index++; - } + /* Include the right endpoint if not already passed end of array. */ + if (!left_endpoint && include_endpoint && cmp == 0 && + list_index < part_info->num_list_values) + list_index++; DBUG_RETURN(list_index); } @@ -7573,15 +7580,13 @@ static int cmp_rec_and_tuple_prune(part_column_list_val *val, field= val->part_info->part_field_array + n_vals_in_rec; if (!(*field)) { - /* - Full match, if right endpoint and not including the endpoint, - (rec < part) return lesser. - */ - if (!is_left_endpoint && !include_endpoint) - return -4; + /* Full match. Only equal if including endpoint. */ + if (include_endpoint) + return 0; - /* Otherwise they are equal! */ - return 0; + if (is_left_endpoint) + return +4; /* Start of range, part_tuple < rec, return higher. */ + return -4; /* End of range, rec < part_tupe, return lesser. */ } /* The prefix is equal and there are more partition columns to compare. 
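Editor's note on the sql_partition.cc hunk above: it reworks the endpoint binary search in get_partition_id_cols_list_for_endpoint() so that cmp_rec_and_tuple_prune() reports a full match as 0 only when the endpoint is included, the search breaks early on an exact match, and the resulting index is stepped past a matched right endpoint only when that endpoint is actually included. Below is a minimal stand-alone sketch of that control flow, assuming a plain int array and a simplified comparator in place of the real part_column_list_val tuples and cmp_rec_and_tuple_prune(); it illustrates the search shape only, not the server code.

#include <cstdint>
#include <cstdio>

/*
  Stand-in for cmp_rec_and_tuple_prune(): returns > 0 when the list value
  at 'idx' sorts before the searched endpoint, 0 on a full match that should
  count as equal (endpoint included), and < 0 otherwise.  The real function
  compares multi-column tuples and returns +/-4 for excluded endpoints.
*/
static int cmp_list_value(const int *values, uint32_t idx, int endpoint,
                          bool left_endpoint, bool include_endpoint)
{
  if (values[idx] < endpoint) return 1;
  if (values[idx] > endpoint) return -1;
  if (include_endpoint) return 0;          /* full match, endpoint included */
  return left_endpoint ? 1 : -1;           /* excluded endpoint */
}

/*
  Mirrors the patched loop: binary search that stops early on cmp == 0,
  then includes the right endpoint only when it was matched and included.
  Assumes num_values >= 1, like the original do-while loop.
*/
static uint32_t find_endpoint_index(const int *values, uint32_t num_values,
                                    int endpoint, bool left_endpoint,
                                    bool include_endpoint)
{
  uint32_t min_idx= 0;
  uint32_t max_idx= num_values;            /* max_idx == last index + 1 */
  uint32_t idx;
  int cmp= -1;
  do
  {
    idx= (max_idx + min_idx) >> 1;         /* midpoint, adjusted down */
    cmp= cmp_list_value(values, idx, endpoint, left_endpoint,
                        include_endpoint);
    if (cmp > 0)
      min_idx= idx + 1;
    else
    {
      max_idx= idx;
      if (cmp == 0)
        break;                             /* exact match: stop searching */
    }
  } while (max_idx > min_idx);
  idx= max_idx;
  if (!left_endpoint && include_endpoint && cmp == 0 && idx < num_values)
    idx++;                                 /* step past the matched value */
  return idx;
}

int main()
{
  const int values[]= { 10, 20, 30, 40 };
  /* "col <= 20": prints 2, i.e. values 10 and 20 stay inside the range */
  printf("%u\n", find_endpoint_index(values, 4, 20, false, true));
  /* "col < 20": prints 1, i.e. only value 10 stays inside the range */
  printf("%u\n", find_endpoint_index(values, 4, 20, false, false));
  /* "col > 20": prints 2, i.e. scanning starts at values 30 and 40 */
  printf("%u\n", find_endpoint_index(values, 4, 20, true, false));
  return 0;
}

The early break on an exact match is what lets the patched code decide, after the loop, whether the right endpoint itself was hit; the pre-patch version could only guess by position, which is what the removed "Set the end after this list tuple" branch tried to do.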
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 9ae3d792744..4da4fcf21b0 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1025,7 +1025,7 @@ static st_plugin_int *plugin_insert_or_reuse(struct st_plugin_int *plugin) static bool plugin_add(MEM_ROOT *tmp_root, const LEX_STRING *name, LEX_STRING *dl, int report) { - struct st_plugin_int tmp; + struct st_plugin_int tmp, *maybe_dupe; struct st_maria_plugin *plugin; uint oks= 0, errs= 0, dupes= 0; DBUG_ENTER("plugin_add"); @@ -1055,8 +1055,14 @@ static bool plugin_add(MEM_ROOT *tmp_root, (const uchar *)tmp.name.str, tmp.name.length)) continue; // plugin name doesn't match - if (!name->str && plugin_find_internal(&tmp.name, MYSQL_ANY_PLUGIN)) + if (!name->str && + (maybe_dupe= plugin_find_internal(&tmp.name, MYSQL_ANY_PLUGIN))) { + if (plugin->name != maybe_dupe->plugin->name) + { + report_error(report, ER_UDF_EXISTS, plugin->name); + DBUG_RETURN(TRUE); + } dupes++; continue; // already installed } @@ -1572,7 +1578,7 @@ int plugin_init(int *argc, char **argv, int flags) if (plugin_initialize(&tmp_root, plugin_ptr, argc, argv, !is_myisam && (flags & PLUGIN_INIT_SKIP_INITIALIZATION))) { - if (mandatory) + if (plugin_ptr->load_option == PLUGIN_FORCE) goto err_unlock; plugin_ptr->state= PLUGIN_IS_DISABLED; } @@ -3313,7 +3319,7 @@ bool sys_var_pluginvar::session_update(THD *thd, set_var *var) mysql_mutex_unlock(&LOCK_global_system_variables); plugin_var->update(thd, plugin_var, tgt, src); - + return false; } @@ -3731,7 +3737,7 @@ static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp, if (opt->flags & PLUGIN_VAR_NOCMDOPT) continue; - optname= (char*) memdup_root(mem_root, v->key + 1, + optname= (char*) memdup_root(mem_root, v->key + 1, (optnamelen= v->name_len) + 1); } @@ -3993,7 +3999,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp, } DBUG_RETURN(0); - + err: if (opts) my_cleanup_options(opts); diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc index 24e5d053145..1754ffce220 100644 --- a/sql/sql_reload.cc +++ b/sql/sql_reload.cc @@ -175,18 +175,21 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, */ tmp_write_to_binlog= 0; mysql_mutex_lock(&LOCK_active_mi); - if (!(mi= (master_info_index-> - get_master_info(&connection_name, - Sql_condition::WARN_LEVEL_ERROR)))) + if (master_info_index) { - result= 1; - } - else - { - mysql_mutex_lock(&mi->data_lock); - if (rotate_relay_log(mi)) - *write_to_binlog= -1; - mysql_mutex_unlock(&mi->data_lock); + if (!(mi= (master_info_index-> + get_master_info(&connection_name, + Sql_condition::WARN_LEVEL_ERROR)))) + { + result= 1; + } + else + { + mysql_mutex_lock(&mi->data_lock); + if (rotate_relay_log(mi)) + *write_to_binlog= -1; + mysql_mutex_unlock(&mi->data_lock); + } } mysql_mutex_unlock(&LOCK_active_mi); #endif @@ -356,22 +359,24 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, Master_info *mi; tmp_write_to_binlog= 0; mysql_mutex_lock(&LOCK_active_mi); - - if (!(mi= (master_info_index-> - get_master_info(&lex_mi->connection_name, - Sql_condition::WARN_LEVEL_ERROR)))) + if (master_info_index) { - result= 1; - } - else if (reset_slave(thd, mi)) - { - /* NOTE: my_error() has been already called by reset_slave(). 
*/ - result= 1; - } - else if (mi->connection_name.length && thd->lex->reset_slave_info.all) - { - /* If not default connection and 'all' is used */ - master_info_index->remove_master_info(&mi->connection_name); + if (!(mi= (master_info_index-> + get_master_info(&lex_mi->connection_name, + Sql_condition::WARN_LEVEL_ERROR)))) + { + result= 1; + } + else if (reset_slave(thd, mi)) + { + /* NOTE: my_error() has been already called by reset_slave(). */ + result= 1; + } + else if (mi->connection_name.length && thd->lex->reset_slave_info.all) + { + /* If not default connection and 'all' is used */ + master_info_index->remove_master_info(&mi->connection_name); + } } mysql_mutex_unlock(&LOCK_active_mi); } diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index e91b3b0a2ed..d9c88983797 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. - Copyright (c) 2008, 2014, Monty Program Ab + Copyright (c) 2008, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -3074,6 +3074,7 @@ int reset_slave(THD *thd, Master_info* mi) mi->clear_error(); mi->rli.clear_error(); mi->rli.clear_until_condition(); + mi->rli.slave_skip_counter= 0; // close master_info_file, relay_log_info_file, set mi->inited=rli->inited=0 end_master_info(mi); @@ -3224,6 +3225,9 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added) LEX_MASTER_INFO* lex_mi= &thd->lex->mi; DBUG_ENTER("change_master"); + mysql_mutex_assert_owner(&LOCK_active_mi); + DBUG_ASSERT(master_info_index); + *master_info_added= false; /* We need to check if there is an empty master_host. Otherwise @@ -3521,6 +3525,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added) /* Clear the errors, for a clean start */ mi->rli.clear_error(); mi->rli.clear_until_condition(); + mi->rli.slave_skip_counter= 0; sql_print_information("'CHANGE MASTER TO executed'. " "Previous state master_host='%s', master_port='%u', master_log_file='%s', " @@ -3622,7 +3627,8 @@ bool mysql_show_binlog_events(THD* thd) else /* showing relay log contents */ { mysql_mutex_lock(&LOCK_active_mi); - if (!(mi= master_info_index-> + if (!master_info_index || + !(mi= master_info_index-> get_master_info(&thd->variables.default_master_connection, Sql_condition::WARN_LEVEL_ERROR))) { diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 436a2f6ce5d..ff93aa6d103 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1444,7 +1444,8 @@ TODO: make view to decide if it is possible to write to WHERE directly or make S Perform the optimization on fields evaluation mentioned above for all on expressions. */ - for (JOIN_TAB *tab= first_linear_tab(this, WITHOUT_CONST_TABLES); tab; + JOIN_TAB *tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { if (*tab->on_expr_ref) @@ -1467,7 +1468,7 @@ TODO: make view to decide if it is possible to write to WHERE directly or make S Perform the optimization on fields evaliation mentioned above for all used ref items. 
*/ - for (JOIN_TAB *tab= first_linear_tab(this, WITHOUT_CONST_TABLES); tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { uint key_copy_index=0; @@ -2107,7 +2108,8 @@ bool JOIN::setup_subquery_caches() if (conds) conds= conds->transform(&Item::expr_cache_insert_transformer, (uchar*) thd); - for (JOIN_TAB *tab= first_linear_tab(this, WITHOUT_CONST_TABLES); + JOIN_TAB *tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { if (tab->select_cond) @@ -2269,7 +2271,8 @@ JOIN::reinit() /* need to reset ref access state (see join_read_key) */ if (join_tab) { - for (JOIN_TAB *tab= first_linear_tab(this, WITH_CONST_TABLES); tab; + JOIN_TAB *tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { tab->ref.key_err= TRUE; @@ -2531,7 +2534,7 @@ void JOIN::exec_inner() Item *cur_const_item; while ((cur_const_item= const_item_it++)) { - cur_const_item->val_str(&cur_const_item->str_value); + cur_const_item->val_str(); // This caches val_str() to Item::str_value if (thd->is_error()) { error= thd->is_error(); @@ -3137,8 +3140,9 @@ JOIN::destroy() { if (join_tab != tmp_join->join_tab) { - for (JOIN_TAB *tab= first_linear_tab(this, WITH_CONST_TABLES); tab; - tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) + JOIN_TAB *tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES); + tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { tab->cleanup(); } @@ -8211,14 +8215,24 @@ JOIN_TAB *next_top_level_tab(JOIN *join, JOIN_TAB *tab) } -JOIN_TAB *first_linear_tab(JOIN *join, enum enum_with_const_tables const_tbls) +JOIN_TAB *first_linear_tab(JOIN *join, + enum enum_with_bush_roots include_bush_roots, + enum enum_with_const_tables const_tbls) { JOIN_TAB *first= join->join_tab; if (const_tbls == WITHOUT_CONST_TABLES) first+= join->const_tables; - if (first < join->join_tab + join->top_join_tab_count) - return first; - return NULL; /* All tables were const tables */ + + if (first >= join->join_tab + join->top_join_tab_count) + return NULL; /* All are const tables */ + + if (first->bush_children && include_bush_roots == WITHOUT_BUSH_ROOTS) + { + /* This JOIN_TAB is a SJM nest; Start from first table in nest */ + return first->bush_children->start; + } + + return first; } @@ -9084,9 +9098,10 @@ inline void add_cond_and_fix(THD *thd, Item **e1, Item *e2) static void add_not_null_conds(JOIN *join) { + JOIN_TAB *tab; DBUG_ENTER("add_not_null_conds"); - for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_CONST_TABLES); + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -9257,7 +9272,7 @@ make_outerjoin_info(JOIN *join) tab->table->pos_in_table_list being set. 
*/ JOIN_TAB *tab; - for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES); + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -9269,7 +9284,7 @@ make_outerjoin_info(JOIN *join) } } - for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_CONST_TABLES); tab; + for (JOIN_TAB *tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { TABLE *table= tab->table; @@ -9901,6 +9916,25 @@ uint get_next_field_for_derived_key(uchar *arg) } +static +uint get_next_field_for_derived_key_simple(uchar *arg) +{ + KEYUSE *keyuse= *(KEYUSE **) arg; + if (!keyuse) + return (uint) (-1); + TABLE *table= keyuse->table; + uint key= keyuse->key; + uint fldno= keyuse->keypart; + for ( ; + keyuse->table == table && keyuse->key == key && keyuse->keypart == fldno; + keyuse++) + ; + if (keyuse->key != key) + keyuse= 0; + *((KEYUSE **) arg)= keyuse; + return fldno; +} + static bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys) { @@ -9931,12 +9965,28 @@ bool generate_derived_keys_for_table(KEYUSE *keyuse, uint count, uint keys) } else { - if (table->add_tmp_key(table->s->keys, parts, - get_next_field_for_derived_key, - (uchar *) &first_keyuse, - FALSE)) - return TRUE; - table->reginfo.join_tab->keys.set_bit(table->s->keys); + KEYUSE *save_first_keyuse= first_keyuse; + if (table->check_tmp_key(table->s->keys, parts, + get_next_field_for_derived_key_simple, + (uchar *) &first_keyuse)) + + { + first_keyuse= save_first_keyuse; + if (table->add_tmp_key(table->s->keys, parts, + get_next_field_for_derived_key, + (uchar *) &first_keyuse, + FALSE)) + return TRUE; + table->reginfo.join_tab->keys.set_bit(table->s->keys); + } + else + { + /* Mark keyuses for this key to be excluded */ + for (KEYUSE *curr=save_first_keyuse; curr < first_keyuse; curr++) + { + curr->key= MAX_KEY; + } + } first_keyuse= keyuse; key_count++; parts= 0; @@ -10023,7 +10073,7 @@ bool generate_derived_keys(DYNAMIC_ARRAY *keyuse_array) void JOIN::drop_unused_derived_keys() { JOIN_TAB *tab; - for (tab= first_linear_tab(this, WITHOUT_CONST_TABLES); + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { @@ -10711,7 +10761,7 @@ void check_join_cache_usage_for_tables(JOIN *join, ulonglong options, JOIN_TAB *tab; JOIN_TAB *prev_tab; - for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES); + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -10719,7 +10769,7 @@ void check_join_cache_usage_for_tables(JOIN *join, ulonglong options, } uint idx= join->const_tables; - for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES); + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -10893,7 +10943,8 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) tab->partial_join_cardinality= 1; JOIN_TAB *prev_tab= NULL; - for (tab= first_linear_tab(join, WITHOUT_CONST_TABLES), i= join->const_tables; + i= join->const_tables; + for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; prev_tab=tab, tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -10918,7 +10969,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) check_join_cache_usage_for_tables(join, options, no_jbuf_after); JOIN_TAB *first_tab; - for (tab= 
first_tab= first_linear_tab(join, WITHOUT_CONST_TABLES); + for (tab= first_tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -11612,7 +11663,8 @@ void JOIN::cleanup(bool full) } if (full) { - JOIN_TAB *sort_tab= first_linear_tab(this, WITHOUT_CONST_TABLES); + JOIN_TAB *sort_tab= first_linear_tab(this, WITH_BUSH_ROOTS, + WITHOUT_CONST_TABLES); if (pre_sort_join_tab) { if (sort_tab && sort_tab->select == pre_sort_join_tab->select) @@ -11659,7 +11711,7 @@ void JOIN::cleanup(bool full) } else { - for (tab= first_linear_tab(this, WITH_CONST_TABLES); tab; + for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { if (tab->table) @@ -11821,7 +11873,9 @@ only_eq_ref_tables(JOIN *join,ORDER *order,table_map tables) static void update_depend_map(JOIN *join) { - for (JOIN_TAB *join_tab= first_linear_tab(join, WITH_CONST_TABLES); join_tab; + JOIN_TAB *join_tab; + for (join_tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITH_CONST_TABLES); + join_tab; join_tab= next_linear_tab(join, join_tab, WITH_BUSH_ROOTS)) { TABLE_REF *ref= &join_tab->ref; @@ -21121,7 +21175,7 @@ cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref) ref_pointer_array and all_fields are updated. - @param[in] thd Pointer to current thread structure + @param[in] thd Pointer to current thread structure @param[in,out] ref_pointer_array All select, group and order by fields @param[in] tables List of tables to search in (usually FROM clause) @@ -21163,11 +21217,11 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, order_item->full_name(), thd->where); return TRUE; } - order->item= ref_pointer_array + count - 1; + thd->change_item_tree((Item**)&order->item, (Item*)(ref_pointer_array + count - 1)); order->in_field_list= 1; order->counter= count; order->counter_used= 1; - return FALSE; + return FALSE; } /* Lookup the current GROUP/ORDER field in the SELECT clause. */ select_item= find_item_in_list(order_item, fields, &counter, @@ -21235,7 +21289,8 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, warning so the user knows that the field from the FROM clause overshadows the column reference from the SELECT list. 
*/ - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), ((Item_ident*) order_item)->field_name, current_thd->where); @@ -23005,13 +23060,11 @@ int print_explain_message_line(select_result_sink *result, ha_rows *rows, const char *message) { - const CHARSET_INFO *cs= system_charset_info; Item *item_null= new Item_null(); List<Item> item_list; item_list.push_back(new Item_int((int32) select_number)); - item_list.push_back(new Item_string(select_type, - strlen(select_type), cs)); + item_list.push_back(new Item_string_sys(select_type)); /* `table` */ item_list.push_back(item_null); @@ -23046,7 +23099,7 @@ int print_explain_message_line(select_result_sink *result, /* `Extra` */ if (message) - item_list.push_back(new Item_string(message,strlen(message),cs)); + item_list.push_back(new Item_string_sys(message)); else item_list.push_back(item_null); @@ -23107,45 +23160,39 @@ int print_explain_row(select_result_sink *result, double r_filtered, const char *extra) { - const CHARSET_INFO *cs= system_charset_info; Item *item_null= new Item_null(); List<Item> item_list; Item *item; item_list.push_back(new Item_int((int32) select_number)); - item_list.push_back(new Item_string(select_type, - strlen(select_type), cs)); - item_list.push_back(new Item_string(table_name, - strlen(table_name), cs)); + item_list.push_back(new Item_string_sys(select_type)); + item_list.push_back(new Item_string_sys(table_name)); if (options & DESCRIBE_PARTITIONS) { if (partitions) { - item_list.push_back(new Item_string(partitions, - strlen(partitions), cs)); + item_list.push_back(new Item_string_sys(partitions)); } else item_list.push_back(item_null); } const char *jtype_str= join_type_str[jtype]; - item_list.push_back(new Item_string(jtype_str, - strlen(jtype_str), cs)); + item_list.push_back(new Item_string_sys(jtype_str)); - item= possible_keys? new Item_string(possible_keys, strlen(possible_keys), - cs) : item_null; + item= possible_keys? new Item_string_sys(possible_keys) : item_null; item_list.push_back(item); /* 'index */ - item= index ? new Item_string(index, strlen(index), cs) : item_null; + item= index ? new Item_string_sys(index) : item_null; item_list.push_back(item); /* 'key_len */ - item= key_len ? new Item_string(key_len, strlen(key_len), cs) : item_null; + item= key_len ? new Item_string_sys(key_len) : item_null; item_list.push_back(item); /* 'ref' */ - item= ref ? new Item_string(ref, strlen(ref), cs) : item_null; + item= ref ? 
new Item_string_sys(ref) : item_null; item_list.push_back(item); /* 'rows' */ @@ -23180,7 +23227,7 @@ int print_explain_row(select_result_sink *result, /* 'Extra' */ if (extra) - item_list.push_back(new Item_string(extra, strlen(extra), cs)); + item_list.push_back(new Item_string_sys(extra)); else item_list.push_back(item_null); @@ -23193,7 +23240,6 @@ int print_explain_row(select_result_sink *result, int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly, SELECT_LEX *select_lex, uint8 explain_flags) { - const CHARSET_INFO *cs= system_charset_info; Item *item_null= new Item_null(); List<Item> item_list; if (on_the_fly) @@ -23210,9 +23256,7 @@ int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly, /* id */ item_list.push_back(new Item_null); /* select_type */ - item_list.push_back(new Item_string(select_lex->type, - strlen(select_lex->type), - cs)); + item_list.push_back(new Item_string_sys(select_lex->type)); /* table */ { SELECT_LEX *sl= select_lex->master_unit()->first_select(); @@ -23234,15 +23278,14 @@ int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly, len+= lastop; table_name_buffer[len - 1]= '>'; // change ',' to '>' } - item_list.push_back(new Item_string(table_name_buffer, len, cs)); + item_list.push_back(new Item_string_sys(table_name_buffer, len)); } /* partitions */ if (explain_flags & DESCRIBE_PARTITIONS) item_list.push_back(item_null); /* type */ - item_list.push_back(new Item_string(join_type_str[JT_ALL], - strlen(join_type_str[JT_ALL]), - cs)); + item_list.push_back(new Item_string_sys(join_type_str[JT_ALL])); + /* possible_keys */ item_list.push_back(item_null); /* key*/ @@ -23258,10 +23301,9 @@ int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly, item_list.push_back(item_null); /* extra */ if (select_lex->master_unit()->global_parameters->order_list.first) - item_list.push_back(new Item_string("Using filesort", - 14, cs)); + item_list.push_back(new Item_string_sys("Using filesort", 14)); else - item_list.push_back(new Item_string("", 0, cs)); + item_list.push_back(new Item_string_sys("", 0)); if (result->send_data(item_list)) return 1; diff --git a/sql/sql_select.h b/sql/sql_select.h index 63fd6a6d99f..ee953a351f9 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1499,7 +1499,9 @@ private: enum enum_with_bush_roots { WITH_BUSH_ROOTS, WITHOUT_BUSH_ROOTS}; enum enum_with_const_tables { WITH_CONST_TABLES, WITHOUT_CONST_TABLES}; -JOIN_TAB *first_linear_tab(JOIN *join, enum enum_with_const_tables const_tbls); +JOIN_TAB *first_linear_tab(JOIN *join, + enum enum_with_bush_roots include_bush_roots, + enum enum_with_const_tables const_tbls); JOIN_TAB *next_linear_tab(JOIN* join, JOIN_TAB* tab, enum enum_with_bush_roots include_bush_roots); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index fcca91c456b..d1c88e35b7a 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -116,6 +116,8 @@ static void get_cs_converted_string_value(THD *thd, bool use_hex); #endif +static int show_create_view(THD *thd, TABLE_LIST *table, String *buff); + static void append_algorithm(TABLE_LIST *table, String *buff); bool get_lookup_field_values(THD *, COND *, TABLE_LIST *, LOOKUP_FIELD_VALUES *); @@ -1025,9 +1027,8 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) buffer.set_charset(table_list->view_creation_ctx->get_client_cs()); if ((table_list->view ? 
- view_store_create_info(thd, table_list, &buffer) : - store_create_info(thd, table_list, &buffer, NULL, - FALSE /* show_database */, FALSE))) + show_create_view(thd, table_list, &buffer) : + show_create_table(thd, table_list, &buffer, NULL, WITHOUT_DB_NAME))) goto exit; if (table_list->view) @@ -1283,9 +1284,22 @@ append_identifier(THD *thd, String *packet, const char *name, uint length) it's a keyword */ + /* + Special code for swe7. It encodes the letter "E WITH ACUTE" on + the position 0x60, where backtick normally resides. + In swe7 we cannot append 0x60 using system_charset_info, + because it cannot be converted to swe7 and will be replaced to + question mark '?'. Use &my_charset_bin to avoid this. + It will prevent conversion and will append the backtick as is. + */ + CHARSET_INFO *quote_charset= q == 0x60 && + (packet->charset()->state & MY_CS_NONASCII) && + packet->charset()->mbmaxlen == 1 ? + &my_charset_bin : system_charset_info; + (void) packet->reserve(length*2 + 2); quote_char= (char) q; - if (packet->append("e_char, 1, system_charset_info)) + if (packet->append("e_char, 1, quote_charset)) return true; for (name_end= name+length ; name < name_end ; name+= length) @@ -1302,12 +1316,12 @@ append_identifier(THD *thd, String *packet, const char *name, uint length) if (!length) length= 1; if (length == 1 && chr == (uchar) quote_char && - packet->append("e_char, 1, system_charset_info)) + packet->append("e_char, 1, quote_charset)) return true; if (packet->append(name, length, system_charset_info)) return true; } - return packet->append("e_char, 1, system_charset_info); + return packet->append("e_char, 1, quote_charset); } @@ -1481,13 +1495,34 @@ static bool get_field_default_value(THD *thd, Field *field, String *def_value, @param thd thread handler @param packet string to append @param opt list of options + @param check_options only print known options + @param rules list of known options */ static void append_create_options(THD *thd, String *packet, - engine_option_value *opt) + engine_option_value *opt, + bool check_options, + ha_create_table_option *rules) { + bool in_comment= false; for(; opt; opt= opt->next) { + if (check_options) + { + if (is_engine_option_known(opt, rules)) + { + if (in_comment) + packet->append(STRING_WITH_LEN(" */")); + in_comment= false; + } + else + { + if (!in_comment) + packet->append(STRING_WITH_LEN(" /*")); + in_comment= true; + } + } + DBUG_ASSERT(opt->value.str); packet->append(' '); append_identifier(thd, packet, opt->name.str, opt->name.length); @@ -1497,13 +1532,15 @@ static void append_create_options(THD *thd, String *packet, else packet->append(opt->value.str, opt->value.length); } + if (in_comment) + packet->append(STRING_WITH_LEN(" */")); } /* Build a CREATE TABLE statement for a table. SYNOPSIS - store_create_info() + show_create_table() thd The thread table_list A list containing one table to write statement for. @@ -1513,8 +1550,7 @@ static void append_create_options(THD *thd, String *packet, to tailor the format of the statement. Can be NULL, in which case only SQL_MODE is considered when building the statement. 
- show_database Add database name to table name - create_or_replace Use CREATE OR REPLACE syntax + with_db_name Add database name to table name NOTE Currently always return 0, but might return error code in the @@ -1524,9 +1560,9 @@ static void append_create_options(THD *thd, String *packet, 0 OK */ -int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, - HA_CREATE_INFO *create_info_arg, bool show_database, - bool create_or_replace) +int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, + HA_CREATE_INFO *create_info_arg, + enum_with_db_name with_db_name) { List<Item> field_list; char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], def_value_buf[MAX_FIELD_WIDTH]; @@ -1540,27 +1576,35 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, handler *file= table->file; TABLE_SHARE *share= table->s; HA_CREATE_INFO create_info; -#ifdef WITH_PARTITION_STORAGE_ENGINE - bool show_table_options= FALSE; -#endif /* WITH_PARTITION_STORAGE_ENGINE */ - bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL | - MODE_ORACLE | - MODE_MSSQL | - MODE_DB2 | - MODE_MAXDB | - MODE_ANSI)) != 0; - bool limited_mysql_mode= (thd->variables.sql_mode & (MODE_NO_FIELD_OPTIONS | - MODE_MYSQL323 | - MODE_MYSQL40)) != 0; + sql_mode_t sql_mode= thd->variables.sql_mode; + bool foreign_db_mode= sql_mode & (MODE_POSTGRESQL | MODE_ORACLE | + MODE_MSSQL | MODE_DB2 | + MODE_MAXDB | MODE_ANSI); + bool limited_mysql_mode= sql_mode & (MODE_NO_FIELD_OPTIONS | MODE_MYSQL323 | + MODE_MYSQL40); + bool show_table_options= !(sql_mode & MODE_NO_TABLE_OPTIONS) && + !foreign_db_mode; + bool check_options= !(sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS) && + !create_info_arg; + handlerton *hton; my_bitmap_map *old_map; int error= 0; - DBUG_ENTER("store_create_info"); + DBUG_ENTER("show_create_table"); DBUG_PRINT("enter",("table: %s", table->s->table_name.str)); +#ifdef WITH_PARTITION_STORAGE_ENGINE + if (table->part_info) + hton= table->part_info->default_engine_type; + else +#endif + hton= file->ht; + restore_record(table, s->default_values); // Get empty record packet->append(STRING_WITH_LEN("CREATE ")); - if (create_or_replace) + if (create_info_arg && + (create_info_arg->org_options & HA_LEX_CREATE_REPLACE || + create_info_arg->table_was_deleted)) packet->append(STRING_WITH_LEN("OR REPLACE ")); if (share->tmp_table) packet->append(STRING_WITH_LEN("TEMPORARY ")); @@ -1587,7 +1631,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, avoid having to update gazillions of tests and result files, but it also saves a few bytes of the binary log. */ - if (show_database) + if (with_db_name == WITH_DB_NAME) { const LEX_STRING *const db= table_list->schema_table ? 
&INFORMATION_SCHEMA_NAME : &table->s->db; @@ -1626,8 +1670,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, field->sql_type(type); packet->append(type.ptr(), type.length(), system_charset_info); - if (field->has_charset() && - !(thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))) + if (field->has_charset() && !(sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))) { if (field->charset() != share->table_charset) { @@ -1684,7 +1727,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, if (field->unireg_check == Field::NEXT_NUMBER && - !(thd->variables.sql_mode & MODE_NO_FIELD_OPTIONS)) + !(sql_mode & MODE_NO_FIELD_OPTIONS)) packet->append(STRING_WITH_LEN(" AUTO_INCREMENT")); if (field->comment.length) @@ -1692,7 +1735,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, packet->append(STRING_WITH_LEN(" COMMENT ")); append_unescaped(packet, field->comment.str, field->comment.length); } - append_create_options(thd, packet, field->option_list); + append_create_options(thd, packet, field->option_list, check_options, + hton->field_options); } key_info= table->key_info; @@ -1759,7 +1803,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, append_identifier(thd, packet, parser_name->str, parser_name->length); packet->append(STRING_WITH_LEN(" */ ")); } - append_create_options(thd, packet, key_info->option_list); + append_create_options(thd, packet, key_info->option_list, check_options, + hton->index_options); } /* @@ -1774,12 +1819,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, } packet->append(STRING_WITH_LEN("\n)")); - if (!(thd->variables.sql_mode & MODE_NO_TABLE_OPTIONS) && !foreign_db_mode) + if (show_table_options) { -#ifdef WITH_PARTITION_STORAGE_ENGINE - show_table_options= TRUE; -#endif /* WITH_PARTITION_STORAGE_ENGINE */ - /* IF check_create_info THEN add ENGINE only if it was used when creating the table @@ -1787,19 +1828,11 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, if (!create_info_arg || (create_info_arg->used_fields & HA_CREATE_USED_ENGINE)) { - if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) + if (sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) packet->append(STRING_WITH_LEN(" TYPE=")); else packet->append(STRING_WITH_LEN(" ENGINE=")); -#ifdef WITH_PARTITION_STORAGE_ENGINE - if (table->part_info) - packet->append(ha_resolve_storage_engine_name( - table->part_info->default_engine_type)); - else - packet->append(file->table_type()); -#else - packet->append(file->table_type()); -#endif + packet->append(hton_name(hton)); } /* @@ -1821,9 +1854,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, packet->append(buff, (uint) (end - buff)); } - if (share->table_charset && - !(thd->variables.sql_mode & MODE_MYSQL323) && - !(thd->variables.sql_mode & MODE_MYSQL40)) + if (share->table_charset && !(sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))) { /* IF check_create_info @@ -1924,7 +1955,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, packet->append(STRING_WITH_LEN(" CONNECTION=")); append_unescaped(packet, share->connect_string.str, share->connect_string.length); } - append_create_options(thd, packet, share->option_list); + append_create_options(thd, packet, share->option_list, check_options, + hton->table_options); append_directory(thd, packet, "DATA", create_info.data_file_name); append_directory(thd, packet, "INDEX", create_info.index_file_name); } @@ -2076,8 +2108,7 @@ void 
append_definer(THD *thd, String *buffer, const LEX_STRING *definer_user, } -int -view_store_create_info(THD *thd, TABLE_LIST *table, String *buff) +static int show_create_view(THD *thd, TABLE_LIST *table, String *buff) { my_bool compact_view_name= TRUE; my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL | @@ -2222,77 +2253,77 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_VOID_RETURN; + if (thd->killed) + DBUG_VOID_RETURN; + mysql_mutex_lock(&LOCK_thread_count); // For unlink from list - if (!thd->killed) + I_List_iterator<THD> it(threads); + THD *tmp; + while ((tmp=it++)) { - I_List_iterator<THD> it(threads); - THD *tmp; - while ((tmp=it++)) + Security_context *tmp_sctx= tmp->security_ctx; + struct st_my_thread_var *mysys_var; + if ((tmp->vio_ok() || tmp->system_thread) && + (!user || (tmp_sctx->user && !strcmp(tmp_sctx->user, user)))) { - Security_context *tmp_sctx= tmp->security_ctx; - struct st_my_thread_var *mysys_var; - if ((tmp->vio_ok() || tmp->system_thread) && - (!user || (tmp_sctx->user && !strcmp(tmp_sctx->user, user)))) + thread_info *thd_info= new thread_info; + + thd_info->thread_id=tmp->thread_id; + thd_info->user= thd->strdup(tmp_sctx->user ? tmp_sctx->user : + (tmp->system_thread ? + "system user" : "unauthenticated user")); + if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) && + thd->security_ctx->host_or_ip[0]) { - thread_info *thd_info= new thread_info; - - thd_info->thread_id=tmp->thread_id; - thd_info->user= thd->strdup(tmp_sctx->user ? tmp_sctx->user : - (tmp->system_thread ? - "system user" : "unauthenticated user")); - if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) && - thd->security_ctx->host_or_ip[0]) - { - if ((thd_info->host= (char*) thd->alloc(LIST_PROCESS_HOST_LEN+1))) - my_snprintf((char *) thd_info->host, LIST_PROCESS_HOST_LEN, - "%s:%u", tmp_sctx->host_or_ip, tmp->peer_port); - } - else - thd_info->host= thd->strdup(tmp_sctx->host_or_ip[0] ? - tmp_sctx->host_or_ip : - tmp_sctx->host ? tmp_sctx->host : ""); - thd_info->command=(int) tmp->get_command(); - mysql_mutex_lock(&tmp->LOCK_thd_data); - if ((thd_info->db= tmp->db)) // Safe test - thd_info->db= thd->strdup(thd_info->db); - if ((mysys_var= tmp->mysys_var)) - mysql_mutex_lock(&mysys_var->mutex); - thd_info->proc_info= (char*) (tmp->killed >= KILL_QUERY ? - "Killed" : 0); - thd_info->state_info= thread_state_info(tmp); - if (mysys_var) - mysql_mutex_unlock(&mysys_var->mutex); - - /* Lock THD mutex that protects its data when looking at it. */ - if (tmp->query()) - { - uint length= MY_MIN(max_query_length, tmp->query_length()); - char *q= thd->strmake(tmp->query(),length); - /* Safety: in case strmake failed, we set length to 0. */ - thd_info->query_string= - CSET_STRING(q, q ? length : 0, tmp->query_charset()); - } + if ((thd_info->host= (char*) thd->alloc(LIST_PROCESS_HOST_LEN+1))) + my_snprintf((char *) thd_info->host, LIST_PROCESS_HOST_LEN, + "%s:%u", tmp_sctx->host_or_ip, tmp->peer_port); + } + else + thd_info->host= thd->strdup(tmp_sctx->host_or_ip[0] ? + tmp_sctx->host_or_ip : + tmp_sctx->host ? tmp_sctx->host : ""); + thd_info->command=(int) tmp->get_command(); + mysql_mutex_lock(&tmp->LOCK_thd_data); + if ((thd_info->db= tmp->db)) // Safe test + thd_info->db= thd->strdup(thd_info->db); + if ((mysys_var= tmp->mysys_var)) + mysql_mutex_lock(&mysys_var->mutex); + thd_info->proc_info= (char*) (tmp->killed >= KILL_QUERY ? 
+ "Killed" : 0); + thd_info->state_info= thread_state_info(tmp); + if (mysys_var) + mysql_mutex_unlock(&mysys_var->mutex); - /* - Progress report. We need to do this under a lock to ensure that all - is from the same stage. - */ - if (tmp->progress.max_counter) - { - uint max_stage= MY_MAX(tmp->progress.max_stage, 1); - thd_info->progress= (((tmp->progress.stage / (double) max_stage) + - ((tmp->progress.counter / - (double) tmp->progress.max_counter) / - (double) max_stage)) * - 100.0); - set_if_smaller(thd_info->progress, 100); - } - else - thd_info->progress= 0.0; - thd_info->start_time= tmp->start_time; - mysql_mutex_unlock(&tmp->LOCK_thd_data); - thread_infos.append(thd_info); + /* Lock THD mutex that protects its data when looking at it. */ + if (tmp->query()) + { + uint length= MY_MIN(max_query_length, tmp->query_length()); + char *q= thd->strmake(tmp->query(),length); + /* Safety: in case strmake failed, we set length to 0. */ + thd_info->query_string= + CSET_STRING(q, q ? length : 0, tmp->query_charset()); } + + /* + Progress report. We need to do this under a lock to ensure that all + is from the same stage. + */ + if (tmp->progress.max_counter) + { + uint max_stage= MY_MAX(tmp->progress.max_stage, 1); + thd_info->progress= (((tmp->progress.stage / (double) max_stage) + + ((tmp->progress.counter / + (double) tmp->progress.max_counter) / + (double) max_stage)) * + 100.0); + set_if_smaller(thd_info->progress, 100); + } + else + thd_info->progress= 0.0; + thd_info->start_time= tmp->start_time; + mysql_mutex_unlock(&tmp->LOCK_thd_data); + thread_infos.append(thd_info); } } mysql_mutex_unlock(&LOCK_thread_count); @@ -2778,7 +2809,7 @@ int add_status_vars(SHOW_VAR *list) { int res= 0; if (status_vars_inited) - mysql_mutex_lock(&LOCK_status); + mysql_mutex_lock(&LOCK_show_status); if (!all_status_vars.buffer && // array is not allocated yet - do it now my_init_dynamic_array(&all_status_vars, sizeof(SHOW_VAR), 200, 20, MYF(0))) { @@ -2793,7 +2824,7 @@ int add_status_vars(SHOW_VAR *list) sort_dynamic(&all_status_vars, show_var_cmp); err: if (status_vars_inited) - mysql_mutex_unlock(&LOCK_status); + mysql_mutex_unlock(&LOCK_show_status); return res; } @@ -2855,7 +2886,7 @@ void remove_status_vars(SHOW_VAR *list) { if (status_vars_inited) { - mysql_mutex_lock(&LOCK_status); + mysql_mutex_lock(&LOCK_show_status); SHOW_VAR *all= dynamic_element(&all_status_vars, 0, SHOW_VAR *); for (; list->name; list++) @@ -2876,7 +2907,7 @@ void remove_status_vars(SHOW_VAR *list) } } shrink_var_array(&all_status_vars); - mysql_mutex_unlock(&LOCK_status); + mysql_mutex_unlock(&LOCK_show_status); } else { @@ -4769,7 +4800,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, str.qs_append(STRING_WITH_LEN(" transactional=")); str.qs_append(ha_choice_values[(uint) share->transactional]); } - append_create_options(thd, &str, share->option_list); + append_create_options(thd, &str, share->option_list, false, 0); if (str.length()) table->field[19]->store(str.ptr()+1, str.length()-1, cs); @@ -6903,7 +6934,7 @@ int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond) bool upper_case_names= lex->sql_command != SQLCOM_SHOW_VARIABLES; bool sorted_vars= lex->sql_command == SQLCOM_SHOW_VARIABLES; - if (lex->option_type == OPT_GLOBAL || + if ((sorted_vars && lex->option_type == OPT_GLOBAL) || schema_table_idx == SCH_GLOBAL_VARIABLES) scope= OPT_GLOBAL; @@ -6954,14 +6985,20 @@ int fill_status(THD *thd, TABLE_LIST *tables, COND *cond) if (partial_cond) partial_cond->val_int(); - 
mysql_mutex_lock(&LOCK_status); if (scope == OPT_GLOBAL) + { + /* We only hold LOCK_status for summary status vars */ + mysql_mutex_lock(&LOCK_status); calc_sum_of_all_status(&tmp); + mysql_mutex_unlock(&LOCK_status); + } + + mysql_mutex_lock(&LOCK_show_status); res= show_status_array(thd, wild, (SHOW_VAR *)all_status_vars.buffer, scope, tmp1, "", tables->table, upper_case_names, partial_cond); - mysql_mutex_unlock(&LOCK_status); + mysql_mutex_unlock(&LOCK_show_status); DBUG_RETURN(res); } @@ -7675,7 +7712,8 @@ bool optimize_schema_tables_reads(JOIN *join) bool result= 0; DBUG_ENTER("optimize_schema_tables_reads"); - for (JOIN_TAB *tab= first_linear_tab(join, WITH_CONST_TABLES); + JOIN_TAB *tab; + for (tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS, WITH_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { @@ -7743,8 +7781,9 @@ bool get_schema_tables_result(JOIN *join, Warnings_only_error_handler err_handler; thd->push_internal_handler(&err_handler); old_proc_info= thd_proc_info(thd, "Filling schema table"); - - for (JOIN_TAB *tab= first_linear_tab(join, WITH_CONST_TABLES); + + JOIN_TAB *tab; + for (tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS, WITH_CONST_TABLES); tab; tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS)) { diff --git a/sql/sql_show.h b/sql/sql_show.h index a759c8d94f5..bad2b41c52c 100644 --- a/sql/sql_show.h +++ b/sql/sql_show.h @@ -74,10 +74,10 @@ typedef struct system_status_var STATUS_VAR; #define IS_FILES_STATUS 36 #define IS_FILES_EXTRA 37 -int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, - HA_CREATE_INFO *create_info_arg, bool show_database, - bool create_or_replace); -int view_store_create_info(THD *thd, TABLE_LIST *table, String *buff); +typedef enum { WITHOUT_DB_NAME, WITH_DB_NAME } enum_with_db_name; +int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, + HA_CREATE_INFO *create_info_arg, + enum_with_db_name with_db_name); int copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table); diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index 67e7a9c304b..9acd3d98322 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -184,7 +184,7 @@ private: public: inline void init(THD *thd, Field * table_field); - inline void add(ha_rows rowno); + inline bool add(ha_rows rowno); inline void finish(ha_rows rows); inline void cleanup(); }; @@ -1550,6 +1550,7 @@ public: uint key_parts= table->actual_n_key_parts(key_info); empty= TRUE; prefixes= 0; + LINT_INIT(calc_state); is_single_comp_pk= FALSE; uint pk= table->s->primary_key; @@ -2218,9 +2219,10 @@ void Column_statistics_collected::init(THD *thd, Field *table_field) */ inline -void Column_statistics_collected::add(ha_rows rowno) +bool Column_statistics_collected::add(ha_rows rowno) { + bool err= 0; if (column->is_null()) nulls++; else @@ -2231,8 +2233,9 @@ void Column_statistics_collected::add(ha_rows rowno) if (max_value && column->update_max(max_value, rowno == nulls)) set_not_null(COLUMN_STAT_MAX_VALUE); if (count_distinct) - count_distinct->add(); + err= count_distinct->add(); } + return err; } @@ -2486,8 +2489,11 @@ int collect_statistics_for_table(THD *thd, TABLE *table) table_field= *field_ptr; if (!bitmap_is_set(table->read_set, table_field->field_index)) continue; - table_field->collected_stats->add(rows); + if ((rc= table_field->collected_stats->add(rows))) + break; } + if (rc) + break; rows++; } file->ha_rnd_end(); @@ -2517,7 +2523,7 @@ int collect_statistics_for_table(THD *thd, TABLE *table) else 
table_field->collected_stats->cleanup(); } -bitmap_clear_all(table->write_set); + bitmap_clear_all(table->write_set); if (!rc) { diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h index 331e3559203..c399951b828 100644 --- a/sql/sql_statistics.h +++ b/sql/sql_statistics.h @@ -147,7 +147,7 @@ private: case SINGLE_PREC_HB: return (uint) (((uint8 *) values)[i]); case DOUBLE_PREC_HB: - return (uint) (((uint16 *) values)[i]); + return (uint) uint2korr(values + i * 2); } return 0; } @@ -214,7 +214,7 @@ public: ((uint8 *) values)[i]= (uint8) (val * prec_factor()); return; case DOUBLE_PREC_HB: - ((uint16 *) values)[i]= (uint16) (val * prec_factor()); + int2store(values + i * 2, val * prec_factor()); return; } } @@ -226,7 +226,7 @@ public: ((uint8 *) values)[i]= ((uint8 *) values)[i-1]; return; case DOUBLE_PREC_HB: - ((uint16 *) values)[i]= ((uint16 *) values)[i-1]; + int2store(values + i * 2, uint2korr(values + i * 2 - 2)); return; } } diff --git a/sql/sql_string.cc b/sql/sql_string.cc index f8348cfb30e..a7bfa6c1455 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -580,7 +580,7 @@ bool String::append_with_prefill(const char *s,uint32 arg_length, return FALSE; } -uint32 String::numchars() +uint32 String::numchars() const { return str_charset->cset->numchars(str_charset, Ptr, Ptr+str_length); } @@ -1022,8 +1022,15 @@ well_formed_copy_nchars(CHARSET_INFO *to_cs, wc= '?'; } else - break; // Not enough characters - + { + if ((uchar *) from >= from_end) + break; // End of line + // Incomplete byte sequence + if (!*well_formed_error_pos) + *well_formed_error_pos= from; + from++; + wc= '?'; + } outp: if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0) to+= cnvres; @@ -1074,7 +1081,7 @@ bool String::append_for_single_quote(const char *st, uint len) return 0; } -void String::print(String *str) +void String::print(String *str) const { str->append_for_single_quote(Ptr, str_length); } diff --git a/sql/sql_string.h b/sql/sql_string.h index 0b7e949392d..f6f0344e2f1 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -353,6 +353,10 @@ public: bool set_or_copy_aligned(const char *s, uint32 arg_length, CHARSET_INFO *cs); bool copy(const char*s,uint32 arg_length, CHARSET_INFO *csfrom, CHARSET_INFO *csto, uint *errors); + bool copy(const String *str, CHARSET_INFO *tocs, uint *errors) + { + return copy(str->ptr(), str->length(), str->charset(), tocs, errors); + } void move(String &s) { free(); @@ -409,7 +413,7 @@ public: friend int stringcmp(const String *a,const String *b); friend String *copy_if_not_alloced(String *a,String *b,uint32 arg_length); friend class Field; - uint32 numchars(); + uint32 numchars() const; int charpos(longlong i,uint32 offset=0); int reserve(uint32 space_needed) @@ -500,7 +504,7 @@ public: str_length+= arg_length; return FALSE; } - void print(String *print); + void print(String *print) const; bool append_for_single_quote(const char *st, uint len); bool append_for_single_quote(const String *s) @@ -519,6 +523,12 @@ public: { return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length); } + uint well_formed_length() const + { + int dummy_error; + return charset()->cset->well_formed_len(charset(), ptr(), ptr() + length(), + length(), &dummy_error); + } bool is_ascii() const { if (length() == 0) @@ -532,6 +542,15 @@ public: } return TRUE; } + bool bin_eq(const String *other) const + { + return length() == other->length() && + !memcmp(ptr(), other->ptr(), length()); + } + bool eq(const String *other, CHARSET_INFO *cs) const + { + return !sortcmp(this, other, cs); + 
} }; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 68c25438f0c..b991215d30a 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2014, Oracle and/or its affiliates. - Copyright (c) 2010, 2014, Monty Program Ab. + Copyright (c) 2010, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -75,6 +75,7 @@ static bool prepare_blob_field(THD *thd, Create_field *sql_field); static bool check_engine(THD *, const char *, const char *, HA_CREATE_INFO *); static int mysql_prepare_create_table(THD *, HA_CREATE_INFO *, Alter_info *, uint *, handler *, KEY **, uint *, int); +static uint blob_length_by_type(enum_field_types type); /** @brief Helper function for explain_filename @@ -3791,7 +3792,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, CHARSET_INFO *ft_key_charset=0; // for FULLTEXT for (uint column_nr=0 ; (column=cols++) ; column_nr++) { - uint length; Key_part_spec *dup_column; it.rewind(); @@ -3869,7 +3869,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, } if (f_is_geom(sql_field->pack_flag) && sql_field->geom_type == Field::GEOM_POINT) - column->length= 25; + column->length= MAX_LEN_GEOM_POINT_FIELD; if (!column->length) { my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str); @@ -3935,30 +3935,31 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, key_part_info->fieldnr= field; key_part_info->offset= (uint16) sql_field->offset; key_part_info->key_type=sql_field->pack_flag; - length= sql_field->key_length; + uint key_part_length= sql_field->key_length; if (column->length) { if (f_is_blob(sql_field->pack_flag)) { - if ((length=column->length) > max_key_length || - length > file->max_key_part_length()) + key_part_length= MY_MIN(column->length, + blob_length_by_type(sql_field->sql_type) + * sql_field->charset->mbmaxlen); + if (key_part_length > max_key_length || + key_part_length > file->max_key_part_length()) { - length=MY_MIN(max_key_length, file->max_key_part_length()); + key_part_length= MY_MIN(max_key_length, file->max_key_part_length()); if (key->type == Key::MULTIPLE) { /* not a critical problem */ - char warn_buff[MYSQL_ERRMSG_SIZE]; - my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY), - length); - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, - ER_TOO_LONG_KEY, warn_buff); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_TOO_LONG_KEY, ER(ER_TOO_LONG_KEY), + key_part_length); /* Align key length to multibyte char boundary */ - length-= length % sql_field->charset->mbmaxlen; + key_part_length-= key_part_length % sql_field->charset->mbmaxlen; } else { - my_error(ER_TOO_LONG_KEY,MYF(0),length); + my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length); DBUG_RETURN(TRUE); } } @@ -3966,9 +3967,9 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, // Catch invalid use of partial keys else if (!f_is_geom(sql_field->pack_flag) && // is the key partial? - column->length != length && + column->length != key_part_length && // is prefix length bigger than field length? - (column->length > length || + (column->length > key_part_length || // can the field have a partial key? 
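/*
  The mysql_prepare_create_table() hunk above clamps a requested key
  prefix (key_part_length) to the engine's maximum key part length and,
  when truncation is acceptable (a non-unique key, warned about rather
  than rejected), aligns the result down to a whole number of characters.
  A small sketch of that clamp with hypothetical names:
*/
#include <algorithm>
#include <cstdint>

// Returns the usable prefix length; *truncated is set when the request
// had to be shortened (the caller would then emit ER_TOO_LONG_KEY as a
// warning or as an error, depending on the key type).
static uint32_t clamp_key_prefix(uint32_t requested, uint32_t max_part_len,
                                 uint32_t mbmaxlen, bool *truncated)
{
  uint32_t len = std::min(requested, max_part_len);
  *truncated = (len != requested);
  if (*truncated)
    len -= len % mbmaxlen;   // never split a multi-byte character
  return len;
}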
!Field::type_can_have_key_part (sql_field->sql_type) || // a packed field can't be used in a partial key @@ -3977,44 +3978,43 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, ((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) && // and is this a 'unique' key? (key_info->flags & HA_NOSAME)))) - { + { my_message(ER_WRONG_SUB_KEY, ER(ER_WRONG_SUB_KEY), MYF(0)); DBUG_RETURN(TRUE); } else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS)) - length=column->length; + key_part_length= column->length; } - else if (length == 0 && (sql_field->flags & NOT_NULL_FLAG)) + else if (key_part_length == 0 && (sql_field->flags & NOT_NULL_FLAG)) { my_error(ER_WRONG_KEY_COLUMN, MYF(0), file->table_type(), column->field_name.str); DBUG_RETURN(TRUE); } - if (length > file->max_key_part_length() && key->type != Key::FULLTEXT) + if (key_part_length > file->max_key_part_length() && + key->type != Key::FULLTEXT) { - length= file->max_key_part_length(); + key_part_length= file->max_key_part_length(); if (key->type == Key::MULTIPLE) { /* not a critical problem */ - char warn_buff[MYSQL_ERRMSG_SIZE]; - my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY), - length); - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, - ER_TOO_LONG_KEY, warn_buff); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_TOO_LONG_KEY, ER(ER_TOO_LONG_KEY), + key_part_length); /* Align key length to multibyte char boundary */ - length-= length % sql_field->charset->mbmaxlen; + key_part_length-= key_part_length % sql_field->charset->mbmaxlen; } else { - my_error(ER_TOO_LONG_KEY,MYF(0),length); + my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length); DBUG_RETURN(TRUE); } } - key_part_info->length=(uint16) length; + key_part_info->length= (uint16) key_part_length; /* Use packed keys for long strings on the first column */ if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) && !((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) && - (length >= KEY_DEFAULT_PACK_LENGTH && + (key_part_length >= KEY_DEFAULT_PACK_LENGTH && (sql_field->sql_type == MYSQL_TYPE_STRING || sql_field->sql_type == MYSQL_TYPE_VARCHAR || sql_field->pack_flag & FIELDFLAG_BLOB))) @@ -4026,10 +4026,10 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, key_info->flags|= HA_PACK_KEY; } /* Check if the key segment is partial, set the key flag accordingly */ - if (length != sql_field->key_length) + if (key_part_length != sql_field->key_length) key_info->flags|= HA_KEY_HAS_PART_KEY_SEG; - key_length+=length; + key_length+= key_part_length; key_part_info++; /* Create the key name based on the first column (if not given) */ @@ -4351,9 +4351,6 @@ handler *mysql_create_frm_image(THD *thd, DBUG_RETURN(NULL); } - if (check_engine(thd, db, table_name, create_info)) - DBUG_RETURN(NULL); - set_table_default_charset(thd, create_info, (char*) db); db_options= create_info->table_options; @@ -4759,6 +4756,9 @@ int create_table_impl(THD *thd, THD_STAGE_INFO(thd, stage_creating_table); + if (check_engine(thd, orig_db, orig_table_name, create_info)) + goto err; + if (create_table_mode == C_ASSISTED_DISCOVERY) { /* check that it's used correctly */ @@ -4950,7 +4950,7 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table, const char *db= create_table->db; const char *table_name= create_table->table_name; bool is_trans= FALSE; - bool result= 0; + bool result; int create_table_mode; TABLE_LIST *pos_in_locked_tables= 0; MDL_ticket *mdl_ticket= 0; @@ -4958,8 +4958,16 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table, 
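/*
  The mysql_create_table() and mysql_create_like_table() hunks below
  temporarily OR the statement's CREATE options into
  thd->lex->create_info.options so that lock_table_names() sees them,
  then restore the saved value right after open_and_lock_tables() /
  open_tables().  The patch does this with an explicit save/restore; a
  sketch of the same pattern as a small RAII guard (hypothetical type,
  purely illustrative):
*/
#include <cstdint>

class ScopedFlagOverlay
{
  uint32_t &target_;
  uint32_t saved_;
public:
  // OR extra flags into `target` for the lifetime of the guard.
  ScopedFlagOverlay(uint32_t &target, uint32_t extra)
    : target_(target), saved_(target) { target_ |= extra; }
  ~ScopedFlagOverlay() { target_ = saved_; }   // restore on scope exit
};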
DBUG_ASSERT(create_table == thd->lex->query_tables); + /* Copy temporarily the statement flags to thd for lock_table_names() */ + uint save_thd_create_info_options= thd->lex->create_info.options; + thd->lex->create_info.options|= create_info->options; + /* Open or obtain an exclusive metadata lock on table being created */ - if (open_and_lock_tables(thd, create_table, FALSE, 0)) + result= open_and_lock_tables(thd, create_table, FALSE, 0); + + thd->lex->create_info.options= save_thd_create_info_options; + + if (result) { /* is_error() may be 0 if table existed and we generated a warning */ DBUG_RETURN(thd->is_error()); @@ -5000,7 +5008,10 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table, */ thd->locked_tables_list.add_back_last_deleted_lock(pos_in_locked_tables); if (thd->locked_tables_list.reopen_tables(thd)) + { thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); + result= 1; + } else { TABLE *table= pos_in_locked_tables->table; @@ -5260,8 +5271,16 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, Thus by holding both these locks we ensure that our statement is properly isolated from all concurrent operations which matter. */ - if (open_tables(thd, &thd->lex->query_tables, ¬_used, 0)) + + /* Copy temporarily the statement flags to thd for lock_table_names() */ + uint save_thd_create_info_options= thd->lex->create_info.options; + thd->lex->create_info.options|= create_info->options; + res= open_tables(thd, &thd->lex->query_tables, ¬_used, 0); + thd->lex->create_info.options= save_thd_create_info_options; + + if (res) { + /* is_error() may be 0 if table existed and we generated a warning */ res= thd->is_error(); goto err; } @@ -5344,7 +5363,10 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, */ thd->locked_tables_list.add_back_last_deleted_lock(pos_in_locked_tables); if (thd->locked_tables_list.reopen_tables(thd)) + { thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); + res= 1; // We got an error + } else { /* @@ -5419,7 +5441,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, table->open_strategy= TABLE_LIST::OPEN_NORMAL; /* - In order for store_create_info() to work we need to open + In order for show_create_table() to work we need to open destination table if it is not already open (i.e. if it has not existed before). We don't need acquire metadata lock in order to do this as we already hold exclusive @@ -5443,13 +5465,9 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, if (!table->view) { int result __attribute__((unused))= - store_create_info(thd, table, &query, - create_info, FALSE /* show_database */, - MY_TEST(create_info->org_options & - HA_LEX_CREATE_REPLACE) || - create_info->table_was_deleted); + show_create_table(thd, table, &query, create_info, WITHOUT_DB_NAME); - DBUG_ASSERT(result == 0); // store_create_info() always return 0 + DBUG_ASSERT(result == 0); // show_create_table() always return 0 do_logging= FALSE; if (write_bin_log(thd, TRUE, query.ptr(), query.length())) { diff --git a/sql/sql_table.h b/sql/sql_table.h index 6a7fddb96ab..c3e903aa505 100644 --- a/sql/sql_table.h +++ b/sql/sql_table.h @@ -117,6 +117,9 @@ enum enum_explain_filename_mode EXPLAIN_PARTITIONS_AS_COMMENT }; +/* Maximum length of GEOM_POINT Field */ +#define MAX_LEN_GEOM_POINT_FIELD 25 + /* depends on errmsg.txt Database `db`, Table `t` ... 
*/ #define EXPLAIN_FILENAME_MAX_EXTRA_LENGTH 63 diff --git a/sql/sql_test.cc b/sql/sql_test.cc index ae16a281277..60e9b2cc54c 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -468,8 +468,7 @@ static void display_table_locks(void) DYNAMIC_ARRAY saved_table_locks; (void) my_init_dynamic_array(&saved_table_locks,sizeof(TABLE_LOCK_INFO), - tc_records() + 20, 50, - MYF(MY_THREAD_SPECIFIC)); + tc_records() + 20, 50, MYF(0)); mysql_mutex_lock(&THR_LOCK_lock); for (list= thr_lock_thread_list; list; list= list_rest(list)) { @@ -576,7 +575,6 @@ void mysql_print_status() /* Print key cache status */ puts("\nKey caches:"); process_key_caches(print_key_cache_status, 0); - mysql_mutex_lock(&LOCK_status); printf("\nhandler status:\n\ read_key: %10lu\n\ read_next: %10lu\n\ @@ -592,7 +590,6 @@ update: %10lu\n", tmp.ha_write_count, tmp.ha_delete_count, tmp.ha_update_count); - mysql_mutex_unlock(&LOCK_status); printf("\nTable status:\n\ Opened tables: %10lu\n\ Open tables: %10lu\n\ diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 9d068e464f5..fe8bb7a6620 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -1,4 +1,5 @@ -/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. +/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. + Copyright (c) 2010, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/sql/sql_update.cc b/sql/sql_update.cc index aa290c91569..fa5b6968795 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1446,11 +1446,15 @@ int mysql_multi_update_prepare(THD *thd) another table instance used by this statement which is going to be write-locked (for example, trigger to be invoked might try to update this table). + Last argument routine_modifies_data for read_lock_type_for_table() + is ignored, as prelocking placeholder will never be set here. */ + DBUG_ASSERT(tl->prelocking_placeholder == false); + thr_lock_type lock_type= read_lock_type_for_table(thd, lex, tl, true); if (using_lock_tables) - tl->lock_type= read_lock_type_for_table(thd, lex, tl); + tl->lock_type= lock_type; else - tl->set_lock_type(thd, read_lock_type_for_table(thd, lex, tl)); + tl->set_lock_type(thd, lock_type); tl->updating= 0; } } diff --git a/sql/sql_view.cc b/sql/sql_view.cc index a18193c6eb6..07169f299d7 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -400,9 +400,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, TABLE_LIST *tables= lex->query_tables; TABLE_LIST *tbl; SELECT_LEX *select_lex= &lex->select_lex; -#ifndef NO_EMBEDDED_ACCESS_CHECKS SELECT_LEX *sl; -#endif SELECT_LEX_UNIT *unit= &lex->unit; bool res= FALSE; DBUG_ENTER("mysql_create_view"); @@ -547,7 +545,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, } /* Check if the auto generated column names are conforming. 
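/*
  The mysql_create_view() hunk just below runs make_valid_column_names()
  over every SELECT of the statement (following select_lex->next_select()),
  not only the first one, so auto-generated column names in each UNION
  branch are checked.  A sketch of that traversal over such a singly
  linked list (hypothetical minimal types):
*/
struct SelectNode
{
  const char *items;           // stand-in for the select's item list
  SelectNode *next;            // next SELECT in the UNION, or nullptr
  SelectNode *next_select() const { return next; }
};

template <typename Fixup>
void for_each_select(SelectNode *first, Fixup fix)
{
  for (SelectNode *sl = first; sl; sl = sl->next_select())
    fix(sl);                   // e.g. validate/repair its column names
}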
*/ - make_valid_column_names(select_lex->item_list); + for (sl= select_lex; sl; sl= sl->next_select()) + make_valid_column_names(sl->item_list); if (check_duplicate_names(select_lex->item_list, 1)) { @@ -624,7 +623,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, if (!res) tdc_remove_table(thd, TDC_RT_REMOVE_ALL, view->db, view->table_name, false); - if (mysql_bin_log.is_open()) + if (!res && mysql_bin_log.is_open()) { String buff; const LEX_STRING command[3]= diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index e7fcdfbe596..41852e36b9b 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -77,7 +77,7 @@ int yylex(void *yylval, void *yythd); ulong val= *(F); \ if (my_yyoverflow((B), (D), &val)) \ { \ - yyerror(current_thd, (char*) (A)); \ + yyerror(thd, (char*) (A)); \ return 2; \ } \ else \ @@ -1606,7 +1606,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type <lex_str> IDENT IDENT_QUOTED TEXT_STRING DECIMAL_NUM FLOAT_NUM NUM LONG_NUM - HEX_NUM HEX_STRING hex_num_or_string + HEX_NUM HEX_STRING LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text IDENT_sys TEXT_STRING_sys TEXT_STRING_literal NCHAR_STRING opt_component key_cache_name @@ -1625,7 +1625,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); wild_and_where %type <string> - text_string opt_gconcat_separator + text_string hex_or_bin_String opt_gconcat_separator %type <num> type type_with_opt_collate int_type real_type order_dir lock_option @@ -6278,7 +6278,8 @@ spatial_type: | GEOMETRYCOLLECTION { $$= Field::GEOM_GEOMETRYCOLLECTION; } | POINT_SYM { - Lex->length= (char*)"25"; + Lex->length= const_cast<char*>(STRINGIFY_ARG + (MAX_LEN_GEOM_POINT_FIELD)); $$= Field::GEOM_POINT; } | MULTIPOINT { $$= Field::GEOM_MULTIPOINT; } @@ -6498,11 +6499,6 @@ now_or_signed_literal: { $$=$1; } ; -hex_num_or_string: - HEX_NUM {} - | HEX_STRING {} - ; - charset: CHAR_SYM SET {} | CHARSET {} @@ -9183,7 +9179,6 @@ simple_expr: } | '{' ident expr '}' { - Item_string *item; $$= NULL; /* If "expr" is reasonably short pure ASCII string literal, @@ -9193,31 +9188,13 @@ simple_expr: SELECT {t'10:20:30'}; SELECT {ts'2001-01-01 10:20:30'}; */ - if ($3->type() == Item::STRING_ITEM && - (item= (Item_string *) $3) && - item->collation.repertoire == MY_REPERTOIRE_ASCII && - item->str_value.length() < MAX_DATE_STRING_REP_LENGTH * 4) - { - enum_field_types type= MYSQL_TYPE_STRING; - LEX_STRING *ls= &$2; - if (ls->length == 1) - { - if (ls->str[0] == 'd') /* {d'2001-01-01'} */ - type= MYSQL_TYPE_DATE; - else if (ls->str[0] == 't') /* {t'10:20:30'} */ - type= MYSQL_TYPE_TIME; - } - else if (ls->length == 2) /* {ts'2001-01-01 10:20:30'} */ - { - if (ls->str[0] == 't' && ls->str[1] == 's') - type= MYSQL_TYPE_DATETIME; - } + if ($3->type() == Item::STRING_ITEM) + { + Item_string *item= (Item_string *) $3; + enum_field_types type= item->odbc_temporal_literal_type(&$2); if (type != MYSQL_TYPE_STRING) { - $$= create_temporal_literal(thd, - item->str_value.ptr(), - item->str_value.length(), - item->str_value.charset(), + $$= create_temporal_literal(thd, item->val_str(NULL), type, false); } } @@ -11136,8 +11113,8 @@ opt_escape: { Lex->escape_used= FALSE; $$= ((thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) ? 
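/*
  The simple_expr hunk above folds the open-coded handling of ODBC brace
  literals ({d'...'}, {t'...'}, {ts'...'}) into one call that maps the
  introducer to a temporal type.  A standalone sketch of that mapping
  (illustrative names; the patch itself calls
  Item_string::odbc_temporal_literal_type()):
*/
#include <cstddef>

enum temporal_kind { KIND_STRING, KIND_DATE, KIND_TIME, KIND_DATETIME };

static temporal_kind odbc_introducer_kind(const char *s, size_t len)
{
  if (len == 1 && s[0] == 'd')  return KIND_DATE;      // {d'2001-01-01'}
  if (len == 1 && s[0] == 't')  return KIND_TIME;      // {t'10:20:30'}
  if (len == 2 && s[0] == 't' && s[1] == 's')
    return KIND_DATETIME;                              // {ts'2001-01-01 10:20:30'}
  return KIND_STRING;          // anything else: keep the literal as a string
}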
- new (thd->mem_root) Item_string("", 0, &my_charset_latin1) : - new (thd->mem_root) Item_string("\\", 1, &my_charset_latin1)); + new (thd->mem_root) Item_string_ascii("", 0) : + new (thd->mem_root) Item_string_ascii("\\", 1)); if ($$ == NULL) MYSQL_YYABORT; } @@ -13253,14 +13230,10 @@ text_literal: } | UNDERSCORE_CHARSET TEXT_STRING { - Item_string *str= new (thd->mem_root) Item_string($2.str, + $$= new (thd->mem_root) Item_string_with_introducer($2.str, $2.length, $1); - if (str == NULL) + if ($$ == NULL) MYSQL_YYABORT; - str->set_repertoire_from_value(); - str->set_cs_specified(TRUE); - - $$= str; } | text_literal TEXT_STRING_literal { @@ -13289,7 +13262,12 @@ text_string: if ($$ == NULL) MYSQL_YYABORT; } - | HEX_NUM + | hex_or_bin_String { $$= $1; } + ; + + +hex_or_bin_String: + HEX_NUM { Item *tmp= new (thd->mem_root) Item_hex_hybrid($1.str, $1.length); if (tmp == NULL) @@ -13396,60 +13374,12 @@ literal: if ($$ == NULL) MYSQL_YYABORT; } - | UNDERSCORE_CHARSET hex_num_or_string - { - Item *tmp= new (thd->mem_root) Item_hex_string($2.str, $2.length); - if (tmp == NULL) - MYSQL_YYABORT; - /* - it is OK only emulate fix_fieds, because we need only - value of constant - */ - tmp->quick_fix_field(); - String *str= tmp->val_str((String*) 0); - - Item_string *item_str; - item_str= new (thd->mem_root) - Item_string(NULL, /* name will be set in select_item */ - str ? str->ptr() : "", - str ? str->length() : 0, - $1); - if (!item_str || - !item_str->check_well_formed_result(&item_str->str_value, TRUE)) - { - MYSQL_YYABORT; - } - - item_str->set_repertoire_from_value(); - item_str->set_cs_specified(TRUE); - - $$= item_str; - } - | UNDERSCORE_CHARSET BIN_NUM + | UNDERSCORE_CHARSET hex_or_bin_String { - Item *tmp= new (thd->mem_root) Item_bin_string($2.str, $2.length); - if (tmp == NULL) + Item_string_with_introducer *item_str; + item_str= new (thd->mem_root) Item_string_with_introducer($2, $1); + if (!item_str || !item_str->check_well_formed_result(true)) MYSQL_YYABORT; - /* - it is OK only emulate fix_fieds, because we need only - value of constant - */ - tmp->quick_fix_field(); - String *str= tmp->val_str((String*) 0); - - Item_string *item_str; - item_str= new (thd->mem_root) - Item_string(NULL, /* name will be set in select_item */ - str ? str->ptr() : "", - str ? str->length() : 0, - $1); - if (!item_str || - !item_str->check_well_formed_result(&item_str->str_value, TRUE)) - { - MYSQL_YYABORT; - } - - item_str->set_cs_specified(TRUE); $$= item_str; } @@ -14873,19 +14803,19 @@ set_expr_or_default: | DEFAULT { $$=0; } | ON { - $$=new (thd->mem_root) Item_string("ON", 2, system_charset_info); + $$=new (thd->mem_root) Item_string_sys("ON", 2); if ($$ == NULL) MYSQL_YYABORT; } | ALL { - $$=new (thd->mem_root) Item_string("ALL", 3, system_charset_info); + $$=new (thd->mem_root) Item_string_sys("ALL", 3); if ($$ == NULL) MYSQL_YYABORT; } | BINARY { - $$=new (thd->mem_root) Item_string("binary", 6, system_charset_info); + $$=new (thd->mem_root) Item_string_sys("binary", 6); if ($$ == NULL) MYSQL_YYABORT; } diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index c8b589e0fd6..6252e89b199 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2002, 2013, Oracle and/or its affiliates. +/* Copyright (c) 2002, 2014, Oracle and/or its affiliates. Copyright (c) 2012, 2014, SkySQL Ab. 
This program is free software; you can redistribute it and/or modify @@ -1074,6 +1074,17 @@ static Sys_var_keycache Sys_key_cache_age_threshold( BLOCK_SIZE(100), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(change_keycache_param)); +static Sys_var_keycache Sys_key_cache_file_hash_size( + "key_cache_file_hash_size", + "Number of hash buckets for open and changed files. If you have a lot of MyISAM " + "files open you should increase this for faster flush of changes. A good " + "value is probably 1/10 of number of possible open MyISAM files.", + KEYCACHE_VAR(changed_blocks_hash_size), + CMD_LINE(REQUIRED_ARG, OPT_KEY_CACHE_CHANGED_BLOCKS_HASH_SIZE), + VALID_RANGE(128, 16384), DEFAULT(512), + BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), + ON_UPDATE(resize_keycache)); + static Sys_var_mybool Sys_large_files_support( "large_files_support", "Whether mysqld was compiled with options for large file support", @@ -3231,9 +3242,10 @@ static Sys_var_ulonglong Sys_tmp_table_size( static Sys_var_mybool Sys_timed_mutexes( "timed_mutexes", - "Specify whether to time mutexes (only InnoDB mutexes are currently " - "supported)", - GLOBAL_VAR(timed_mutexes), CMD_LINE(OPT_ARG), DEFAULT(0)); + "Specify whether to time mutexes. Deprecated, has no effect.", + GLOBAL_VAR(timed_mutexes), CMD_LINE(OPT_ARG), DEFAULT(0), + NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL), ON_UPDATE(NULL), + DEPRECATED("")); static char *server_version_ptr; static Sys_var_charptr Sys_version( @@ -4241,11 +4253,11 @@ static Sys_var_uint Sys_slave_net_timeout( Return 0 + warning if it doesn't exist */ -uint Sys_var_multi_source_ulong:: -get_master_info_uint_value(THD *thd, ptrdiff_t offset) +ulonglong Sys_var_multi_source_ulonglong:: +get_master_info_ulonglong_value(THD *thd, ptrdiff_t offset) { Master_info *mi; - uint res= 0; // Default value + ulonglong res= 0; // Default value mysql_mutex_unlock(&LOCK_global_system_variables); mysql_mutex_lock(&LOCK_active_mi); mi= master_info_index-> @@ -4254,7 +4266,7 @@ get_master_info_uint_value(THD *thd, ptrdiff_t offset) if (mi) { mysql_mutex_lock(&mi->rli.data_lock); - res= *((uint*) (((uchar*) mi) + master_info_offset)); + res= *((ulonglong*) (((uchar*) mi) + master_info_offset)); mysql_mutex_unlock(&mi->rli.data_lock); } mysql_mutex_unlock(&LOCK_active_mi); @@ -4266,7 +4278,7 @@ get_master_info_uint_value(THD *thd, ptrdiff_t offset) bool update_multi_source_variable(sys_var *self_var, THD *thd, enum_var_type type) { - Sys_var_multi_source_ulong *self= (Sys_var_multi_source_ulong*) self_var; + Sys_var_multi_source_ulonglong *self= (Sys_var_multi_source_ulonglong*) self_var; bool result= true; Master_info *mi; @@ -4292,11 +4304,6 @@ bool update_multi_source_variable(sys_var *self_var, THD *thd, static bool update_slave_skip_counter(sys_var *self, THD *thd, Master_info *mi) { - if (mi->using_gtid != Master_info::USE_GTID_NO) - { - my_error(ER_SLAVE_SKIP_NOT_IN_GTID, MYF(0)); - return true; - } if (mi->rli.slave_running) { my_error(ER_SLAVE_MUST_STOP, MYF(0), mi->connection_name.length, @@ -4308,16 +4315,12 @@ static bool update_slave_skip_counter(sys_var *self, THD *thd, Master_info *mi) return false; } - -static Sys_var_multi_source_ulong -Sys_slave_skip_counter("sql_slave_skip_counter", - "Skip the next N events from the master log", - SESSION_VAR(slave_skip_counter), - NO_CMD_LINE, - my_offsetof(Master_info, rli.slave_skip_counter), - VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1), - ON_UPDATE(update_slave_skip_counter)); - +static Sys_var_multi_source_ulonglong 
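/*
  get_master_info_ulonglong_value() above fetches a ulonglong that lives
  at a known byte offset inside Master_info (the offset is recorded when
  the system variable is declared).  A standalone sketch of reading a
  field through such an offset; the struct is hypothetical, and memcpy
  keeps the access well-defined whatever the alignment:
*/
#include <cstddef>
#include <cstring>
#include <cstdint>

struct MasterInfoLike
{
  uint64_t skip_counter;
  uint64_t max_relay_log_size;
};

static uint64_t read_u64_at(const void *base, ptrdiff_t offset)
{
  uint64_t v;
  std::memcpy(&v, static_cast<const unsigned char *>(base) + offset, sizeof v);
  return v;
}

/* Usage: read_u64_at(mi, offsetof(MasterInfoLike, skip_counter)); */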
Sys_slave_skip_counter( + "sql_slave_skip_counter", "Skip the next N events from the master log", + SESSION_VAR(slave_skip_counter), NO_CMD_LINE, + MASTER_INFO_VAR(rli.slave_skip_counter), + VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1), + ON_UPDATE(update_slave_skip_counter)); static bool update_max_relay_log_size(sys_var *self, THD *thd, Master_info *mi) { @@ -4326,17 +4329,14 @@ static bool update_max_relay_log_size(sys_var *self, THD *thd, Master_info *mi) return false; } -static Sys_var_multi_source_ulong -Sys_max_relay_log_size( "max_relay_log_size", - "relay log will be rotated automatically when the " - "size exceeds this value. If 0 at startup, it's " - "set to max_binlog_size", - SESSION_VAR(max_relay_log_size), - CMD_LINE(REQUIRED_ARG), - my_offsetof(Master_info, rli.max_relay_log_size), - VALID_RANGE(0, 1024L*1024*1024), DEFAULT(0), - BLOCK_SIZE(IO_SIZE), - ON_UPDATE(update_max_relay_log_size)); +static Sys_var_multi_source_ulonglong Sys_max_relay_log_size( + "max_relay_log_size", + "relay log will be rotated automatically when the size exceeds this " + "value. If 0 at startup, it's set to max_binlog_size", + SESSION_VAR(max_relay_log_size), CMD_LINE(REQUIRED_ARG), + MASTER_INFO_VAR(rli.max_relay_log_size), + VALID_RANGE(0, 1024L*1024*1024), DEFAULT(0), BLOCK_SIZE(IO_SIZE), + ON_UPDATE(update_max_relay_log_size)); static Sys_var_charptr Sys_slave_skip_errors( "slave_skip_errors", "Tells the slave thread to continue " diff --git a/sql/sys_vars.h b/sql/sys_vars.h index fa997416cbd..da93b765d0f 100644 --- a/sql/sys_vars.h +++ b/sql/sys_vars.h @@ -1985,7 +1985,8 @@ public: like sql_slave_skip_counter are GLOBAL. */ -class Sys_var_multi_source_ulong; +#define MASTER_INFO_VAR(X) my_offsetof(Master_info, X), sizeof(((Master_info *)0x10)->X) +class Sys_var_multi_source_ulonglong; class Master_info; typedef bool (*on_multi_source_update_function)(sys_var *self, THD *thd, @@ -1994,31 +1995,27 @@ bool update_multi_source_variable(sys_var *self, THD *thd, enum_var_type type); -class Sys_var_multi_source_ulong :public Sys_var_ulong +class Sys_var_multi_source_ulonglong :public Sys_var_ulonglong { ptrdiff_t master_info_offset; on_multi_source_update_function update_multi_source_variable_func; public: - Sys_var_multi_source_ulong(const char *name_arg, + Sys_var_multi_source_ulonglong(const char *name_arg, const char *comment, int flag_args, ptrdiff_t off, size_t size, CMD_LINE getopt, ptrdiff_t master_info_offset_arg, - uint min_val, uint max_val, uint def_val, - uint block_size, + size_t master_info_arg_size, + ulonglong min_val, ulonglong max_val, + ulonglong def_val, uint block_size, on_multi_source_update_function on_update_func) - :Sys_var_ulong(name_arg, comment, flag_args, off, size, - getopt, min_val, max_val, def_val, block_size, - 0, VARIABLE_NOT_IN_BINLOG, 0, update_multi_source_variable), + :Sys_var_ulonglong(name_arg, comment, flag_args, off, size, + getopt, min_val, max_val, def_val, block_size, + 0, VARIABLE_NOT_IN_BINLOG, 0, update_multi_source_variable), master_info_offset(master_info_offset_arg), update_multi_source_variable_func(on_update_func) { - } - bool session_update(THD *thd, set_var *var) - { - session_var(thd, uint)= (uint) (var->save_result.ulonglong_value); - /* Value should be moved to multi_master in on_update_func */ - return false; + SYSVAR_ASSERT(master_info_arg_size == size); } bool global_update(THD *thd, set_var *var) { @@ -2031,9 +2028,9 @@ public: } uchar *session_value_ptr(THD *thd, const LEX_STRING *base) { - uint *tmp, res; - tmp= (uint*) 
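/*
  MASTER_INFO_VAR(X) above expands to an (offset, size) pair for field X
  of Master_info, and the Sys_var_multi_source_ulonglong constructor
  asserts that this size matches the session variable's storage, so a
  ulong/ulonglong mismatch is caught at startup rather than silently
  truncating.  A sketch of the same pattern with hypothetical names:
*/
#include <cstddef>
#include <cstdint>
#include <cassert>

struct Settings { uint64_t skip_counter; };

// Expands to two arguments: the field's offset and its size.
// (0x10 instead of 0 mirrors the original macro and avoids
// null-dereference warnings inside the unevaluated sizeof.)
#define FIELD_VAR(T, f) offsetof(T, f), sizeof(((T *)0x10)->f)

static void declare_var(size_t offset, size_t field_size, size_t var_size)
{
  (void)offset;
  assert(field_size == var_size);  // e.g. a 4-byte var over an 8-byte field
}

/* Usage: declare_var(FIELD_VAR(Settings, skip_counter), sizeof(uint64_t)); */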
(((uchar*)&(thd->variables)) + offset); - res= get_master_info_uint_value(thd, master_info_offset); + ulonglong *tmp, res; + tmp= (ulonglong*) (((uchar*)&(thd->variables)) + offset); + res= get_master_info_ulonglong_value(thd, master_info_offset); *tmp= res; return (uchar*) tmp; } @@ -2041,7 +2038,7 @@ public: { return session_value_ptr(thd, base); } - uint get_master_info_uint_value(THD *thd, ptrdiff_t offset); + ulonglong get_master_info_ulonglong_value(THD *thd, ptrdiff_t offset); bool update_variable(THD *thd, Master_info *mi) { return update_multi_source_variable_func(this, thd, mi); diff --git a/sql/table.cc b/sql/table.cc index 4f642cadaa2..6ac45445136 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1774,13 +1774,25 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, key_part= keyinfo->key_part; for (i=0 ; i < keyinfo->user_defined_key_parts ;i++) { - uint fieldnr= key_part[i].fieldnr; - if (!fieldnr || - share->field[fieldnr-1]->null_ptr || - share->field[fieldnr-1]->key_length() != - key_part[i].length) + DBUG_ASSERT(key_part[i].fieldnr > 0); + // Table field corresponding to the i'th key part. + Field *table_field= share->field[key_part[i].fieldnr - 1]; + + /* + If the key column is of NOT NULL BLOB type, then it + will definitly have key prefix. And if key part prefix size + is equal to the BLOB column max size, then we can promote + it to primary key. + */ + if (!table_field->real_maybe_null() && + table_field->type() == MYSQL_TYPE_BLOB && + table_field->field_length == key_part[i].length) + continue; + + if (table_field->real_maybe_null() || + table_field->key_length() != key_part[i].length) { - primary_key=MAX_KEY; // Can't be used + primary_key= MAX_KEY; // Can't be used break; } } @@ -4210,7 +4222,8 @@ bool TABLE_LIST::create_field_translation(THD *thd) while ((item= it++)) { - transl[field_count].name= item->name; + DBUG_ASSERT(item->name && item->name[0]); + transl[field_count].name= thd->strdup(item->name); transl[field_count++].item= item; } field_translation= transl; @@ -6104,6 +6117,52 @@ void TABLE::create_key_part_by_field(KEY *keyinfo, /** @brief + Check validity of a possible key for the derived table + + @param key the number of the key + @param key_parts number of components of the key + @param next_field_no the call-back function that returns the number of + the field used as the next component of the key + @param arg the argument for the above function + + @details + The function checks whether a possible key satisfies the constraints + imposed on the keys of any temporary table. + + @return TRUE if the key is valid + @return FALSE otherwise +*/ + +bool TABLE::check_tmp_key(uint key, uint key_parts, + uint (*next_field_no) (uchar *), uchar *arg) +{ + Field **reg_field; + uint i; + uint key_len= 0; + + for (i= 0; i < key_parts; i++) + { + uint fld_idx= next_field_no(arg); + reg_field= field + fld_idx; + uint fld_store_len= (uint16) (*reg_field)->key_length(); + if ((*reg_field)->real_maybe_null()) + fld_store_len+= HA_KEY_NULL_LENGTH; + if ((*reg_field)->type() == MYSQL_TYPE_BLOB || + (*reg_field)->real_type() == MYSQL_TYPE_VARCHAR || + (*reg_field)->type() == MYSQL_TYPE_GEOMETRY) + fld_store_len+= HA_KEY_BLOB_LENGTH; + key_len+= fld_store_len; + } + /* + We use MI_MAX_KEY_LENGTH (myisam's default) below because it is + smaller than MAX_KEY_LENGTH (heap's default) and it's unknown whether + myisam or heap will be used for the temporary table. 
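/*
  TABLE::check_tmp_key() above adds up the storage length of each key
  component: the field's key length, one extra byte when the field is
  nullable, and a length prefix for BLOB/VARCHAR/GEOMETRY parts, then
  rejects the key if the total exceeds the smaller engine limit.  A
  standalone sketch of that accounting (struct and constants are
  illustrative; 1 and 2 mirror the HA_KEY_NULL_LENGTH /
  HA_KEY_BLOB_LENGTH additions above):
*/
#include <cstdint>
#include <vector>

struct KeyPartDesc
{
  uint32_t key_length;      // bytes the component itself needs
  bool     nullable;        // needs a NULL indicator byte
  bool     variable_length; // BLOB/VARCHAR/GEOMETRY: needs a length prefix
};

static bool fits_in_tmp_key(const std::vector<KeyPartDesc> &parts,
                            uint32_t max_key_len /* e.g. MI_MAX_KEY_LENGTH */)
{
  uint32_t total = 0;
  for (const KeyPartDesc &p : parts)
  {
    uint32_t len = p.key_length;
    if (p.nullable)        len += 1;
    if (p.variable_length) len += 2;
    total += len;
  }
  return total <= max_key_len;
}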
+ */ + return key_len <= MI_MAX_KEY_LENGTH; +} + +/** + @brief Add one key to a temporary table @param key the number of the key @@ -6133,6 +6192,7 @@ bool TABLE::add_tmp_key(uint key, uint key_parts, KEY* keyinfo; Field **reg_field; uint i; + bool key_start= TRUE; KEY_PART_INFO* key_part_info= (KEY_PART_INFO*) alloc_root(&mem_root, sizeof(KEY_PART_INFO)*key_parts); diff --git a/sql/table.h b/sql/table.h index eca35d6c52c..69462539a20 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1336,6 +1336,8 @@ public: { return !db_stat || m_needs_reopen; } bool alloc_keys(uint key_count); + bool check_tmp_key(uint key, uint key_parts, + uint (*next_field_no) (uchar *), uchar *arg); bool add_tmp_key(uint key, uint key_parts, uint (*next_field_no) (uchar *), uchar *arg, bool unique); diff --git a/sql/table_cache.cc b/sql/table_cache.cc index 8b768240b4f..097f37d26d8 100644 --- a/sql/table_cache.cc +++ b/sql/table_cache.cc @@ -267,7 +267,7 @@ void tc_add_table(THD *thd, TABLE *table) TABLE_SHARE *purge_share= 0; TABLE_SHARE *share; TABLE *entry; - ulonglong purge_time; + ulonglong UNINIT_VAR(purge_time); TDC_iterator tdc_it; tdc_it.init(); diff --git a/sql/unireg.cc b/sql/unireg.cc index 3eb7a8ce5eb..e02420d9468 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -211,7 +211,13 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table, filepos= frm.length; frm.length+= FRM_FORMINFO_SIZE; // forminfo frm.length+= packed_fields_length(create_fields); - + + if (frm.length > FRM_MAX_SIZE) + { + my_error(ER_TABLE_DEFINITION_TOO_BIG, MYF(0), table); + DBUG_RETURN(frm); + } + frm_ptr= (uchar*) my_malloc(frm.length, MYF(MY_WME | MY_ZEROFILL | MY_THREAD_SPECIFIC)); if (!frm_ptr) diff --git a/sql/unireg.h b/sql/unireg.h index 9b40b7b0779..5f133da674f 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -203,7 +203,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table, #define FRM_HEADER_SIZE 64 #define FRM_FORMINFO_SIZE 288 -#define FRM_MAX_SIZE (256*1024) +#define FRM_MAX_SIZE (512*1024) static inline bool is_binary_frm_header(uchar *head) { diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 64b7d6c8721..81b1e182282 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -2391,7 +2391,7 @@ bool wsrep_create_like_table(THD* thd, TABLE_LIST* table, String query(buf, sizeof(buf), system_charset_info); query.length(0); // Have to zero it since constructor doesn't - (void) store_create_info(thd, &tbl, &query, NULL, TRUE, FALSE); + (void) show_create_table(thd, &tbl, &query, NULL, WITH_DB_NAME); WSREP_DEBUG("TMP TABLE: %s", query.ptr()); thd->wsrep_TOI_pre_query= query.ptr(); diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index 41956e5f398..0252091ef80 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -19,20 +19,20 @@ SET(CONNECT_PLUGIN_DYNAMIC "connect") SET(CONNECT_SOURCES ha_connect.cc connect.cc user_connect.cc mycat.cc fmdlex.c osutil.c plugutil.c rcmsg.c rcmsg.h -csort.cpp maputil.cpp plgdbutl.cpp -colblk.cpp reldef.cpp tabcol.cpp table.cpp -filamap.cpp filamdbf.cpp filamfix.cpp filamtxt.cpp filamvct.cpp -tabdos.cpp tabfix.cpp tabfmt.cpp tabmul.cpp tabsys.cpp tabvct.cpp +array.cpp blkfil.cpp colblk.cpp csort.cpp +filamap.cpp filamdbf.cpp filamfix.cpp filamtxt.cpp filamvct.cpp filamzip.cpp +filter.cpp maputil.cpp myutil.cpp plgdbutl.cpp reldef.cpp tabcol.cpp +tabdos.cpp tabfix.cpp tabfmt.cpp table.cpp tabmul.cpp taboccur.cpp +tabpivot.cpp tabsys.cpp tabtbl.cpp tabutil.cpp tabvct.cpp tabxcl.cpp valblk.cpp value.cpp xindex.cpp 
xobject.cpp -filamzip.cpp tabtbl.cpp myutil.cpp -tabutil.cpp tabxcl.cpp taboccur.cpp tabpivot.cpp -block.h catalog.h checklvl.h colblk.h connect.h csort.h engmsg.h -filamap.h filamdbf.h filamfix.h filamtxt.h filamvct.h filamzip.h -global.h ha_connect.h inihandl.h maputil.h msgid.h mycat.h myutil.h os.h -osutil.h plgcnx.h plgdbsem.h preparse.h reldef.h resource.h tabcol.h -tabdos.h tabfix.h tabfmt.h tabmul.h tabsys.h tabtbl.h tabvct.h -user_connect.h valblk.h value.h xindex.h xobject.h xtable.h -tabutil.h tabxcl.h taboccur.h tabpivot.h) + +array.h blkfil.h block.h catalog.h checklvl.h colblk.h connect.h csort.h +engmsg.h filamap.h filamdbf.h filamfix.h filamtxt.h filamvct.h filamzip.h +filter.h global.h ha_connect.h inihandl.h maputil.h msgid.h mycat.h myutil.h +os.h osutil.h plgcnx.h plgdbsem.h preparse.h reldef.h resource.h tabcol.h +tabdos.h tabfix.h tabfmt.h tabmul.h taboccur.h tabpivot.h tabsys.h +tabtbl.h tabutil.h tabvct.h tabxcl.h user_connect.h valblk.h value.h +xindex.h xobject.h xtable.h) # # Definitions that are shared for all OSes diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp new file mode 100644 index 00000000000..9815fbb6be6 --- /dev/null +++ b/storage/connect/array.cpp @@ -0,0 +1,1173 @@ +/************* Array C++ Functions Source Code File (.CPP) *************/ +/* Name: ARRAY.CPP Version 2.3 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ +/* */ +/* This file contains the XOBJECT derived class ARRAY functions. */ +/* ARRAY is used for elaborate type of processing, such as sorting */ +/* and dichotomic search (Find). This new version does not use sub */ +/* classes anymore for the different types but relies entirely on the */ +/* functionalities provided by the VALUE and VALBLK classes. */ +/* Currently the only supported types are STRING, SHORT, int, DATE, */ +/* TOKEN, DOUBLE, and Compressed Strings. */ +/***********************************************************************/ + +/***********************************************************************/ +/* Include relevant MariaDB header file. */ +/***********************************************************************/ +#include "my_global.h" +#include "sql_class.h" +//#include "sql_time.h" + +#if defined(WIN32) +//#include <windows.h> +#else // !WIN32 +#include <string.h> +#include <sys/types.h> +#include <sys/stat.h> +#endif // !WIN32 + +/***********************************************************************/ +/* Include required application header files */ +/* global.h is header containing all global Plug declarations. */ +/* plgdbsem.h is header containing the DB applic. declarations. */ +/* xobject.h is header containing XOBJECT derived classes declares. */ +/***********************************************************************/ +#include "global.h" +#include "plgdbsem.h" +#include "xtable.h" +#include "array.h" +//#include "select.h" +//#include "query.h" +//#include "token.h" + +/***********************************************************************/ +/* Macro definitions. */ +/***********************************************************************/ +#if defined(_DEBUG) +#define ASSERT(B) assert(B); +#else +#define ASSERT(B) +#endif + +/***********************************************************************/ +/* Static variables. */ +/***********************************************************************/ +extern "C" int trace; + +/***********************************************************************/ +/* DB static external variables. 
*/ +/***********************************************************************/ +extern MBLOCK Nmblk; /* Used to initialize MBLOCK's */ + +/***********************************************************************/ +/* External functions. */ +/***********************************************************************/ +BYTE OpBmp(PGLOBAL g, OPVAL opc); +void EncodeValue(int *lp, char *strp, int n); +PARRAY MakeValueArray(PGLOBAL g, PPARM pp); // avoid gcc warning + +/***********************************************************************/ +/* MakeValueArray: Makes a value array from a value list. */ +/***********************************************************************/ +PARRAY MakeValueArray(PGLOBAL g, PPARM pp) + { + int n, valtyp = 0; + size_t len = 0; + PARRAY par; + PPARM parmp; + + if (!pp) + return NULL; + + /*********************************************************************/ + /* New version with values coming in a list. */ + /*********************************************************************/ + if ((valtyp = pp->Type) != TYPE_STRING) + len = 1; + + if (trace) + htrc("valtyp=%d len=%d\n", valtyp, len); + + /*********************************************************************/ + /* Firstly check the list and count the number of values in it. */ + /*********************************************************************/ + for (n = 0, parmp = pp; parmp; n++, parmp = parmp->Next) + if (parmp->Type != valtyp) { + sprintf(g->Message, MSG(BAD_PARAM_TYPE), "MakeValueArray", parmp->Type); + return NULL; + } else if (valtyp == TYPE_STRING) + len = MY_MAX(len, strlen((char*)parmp->Value)); + + /*********************************************************************/ + /* Make an array object with one block of the proper size. */ + /*********************************************************************/ + par = new(g) ARRAY(g, valtyp, n, (int)len); + + if (par->GetResultType() == TYPE_ERROR) + return NULL; // Memory allocation error in ARRAY + + /*********************************************************************/ + /* All is right now, fill the array block. */ + /*********************************************************************/ + for (parmp = pp; parmp; parmp = parmp->Next) + switch (valtyp) { + case TYPE_STRING: + par->AddValue(g, (PSZ)parmp->Value); + break; + case TYPE_SHORT: + par->AddValue(g, *(short*)parmp->Value); + break; + case TYPE_INT: + par->AddValue(g, *(int*)parmp->Value); + break; + case TYPE_DOUBLE: + par->AddValue(g, *(double*)parmp->Value); + break; + case TYPE_PCHAR: + par->AddValue(g, parmp->Value); + break; + case TYPE_VOID: + // Integer stored inside pp->Value + par->AddValue(g, (int)parmp->Value); + break; + } // endswitch valtyp + + /*********************************************************************/ + /* Send back resulting array. */ + /*********************************************************************/ + return par; + } // end of MakeValueArray + +/* -------------------------- Class ARRAY ---------------------------- */ + +/***********************************************************************/ +/* ARRAY public constructor. 
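/*
  MakeValueArray() above builds the array in two passes over the
  parameter list: the first pass checks that every element has the same
  type and records the longest string, the second pass fills the freshly
  allocated block.  A standalone sketch of that shape (hypothetical
  minimal types, strings only):
*/
#include <algorithm>
#include <cstring>
#include <string>
#include <vector>

struct Parm { const char *value; Parm *next; };

// Returns false if the list is empty; otherwise fills `out`.
static bool make_string_array(const Parm *list, std::vector<std::string> &out)
{
  if (!list)
    return false;

  size_t count = 0, max_len = 0;                // pass 1: size the result
  for (const Parm *p = list; p; p = p->next, ++count)
    max_len = std::max(max_len, std::strlen(p->value));

  out.clear();
  out.reserve(count);                           // one allocation up front
  for (const Parm *p = list; p; p = p->next)    // pass 2: copy the values
    out.emplace_back(p->value);

  (void)max_len;  // the real code uses it to size fixed-width slots
  return true;
}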
*/ +/***********************************************************************/ +ARRAY::ARRAY(PGLOBAL g, int type, int size, int length, int prec) + : CSORT(FALSE) + { + Nval = 0; + Ndif = 0; + Bot = 0; + Top = 0; + Size = size; + Type = type; + Xsize = -1; + Len = 1; + + switch (type) { + case TYPE_STRING: + Len = length; + case TYPE_SHORT: + case TYPE_INT: + case TYPE_DOUBLE: + case TYPE_PCHAR: + Type = type; + break; + case TYPE_VOID: + Type = TYPE_INT; + break; +#if 0 + case TYPE_TOKEN: + break; + case TYPE_LIST: + Len = 0; + prec = length; + break; +#endif // 0 + default: // This is illegal an causes an ill formed array building + sprintf(g->Message, MSG(BAD_ARRAY_TYPE), type); + Type = TYPE_ERROR; + return; + } // endswitch type + + Valblk = new(g) MBVALS; + + if (!(Vblp = Valblk->Allocate(g, Type, Len, prec, Size))) + Type = TYPE_ERROR; + else if (!Valblk->GetMemp() && Type != TYPE_LIST) + // The error message was built by PlgDBalloc + Type = TYPE_ERROR; + else if (type != TYPE_PCHAR) + Value = AllocateValue(g, type, Len, prec, NULL); + + Constant = TRUE; + } // end of ARRAY constructor + +#if 0 +/***********************************************************************/ +/* ARRAY public constructor from a QUERY. */ +/***********************************************************************/ +ARRAY::ARRAY(PGLOBAL g, PQUERY qryp) : CSORT(FALSE) + { + Type = qryp->GetColType(0); + Nval = qryp->GetNblin(); + Ndif = 0; + Bot = 0; + Top = 0; + Size = Nval; + Xsize = -1; + Len = qryp->GetColLength(0); + X = Inf = Sup = 0; + Correlated = FALSE; + + switch (Type) { + case TYPE_STRING: + case TYPE_SHORT: + case TYPE_INT: + case TYPE_DATE: + case TYPE_DOUBLE: +// case TYPE_TOKEN: +// case TYPE_LIST: +// Valblk = qryp->GetCol(0)->Result; +// Vblp = qryp->GetColBlk(0); +// Value = qryp->GetColValue(0); +// break; + default: // This is illegal an causes an ill formed array building + sprintf(g->Message, MSG(BAD_ARRAY_TYPE), Type); + Type = TYPE_ERROR; + } // endswitch type + + if (!Valblk || (!Valblk->GetMemp() && Type != TYPE_LIST)) + // The error message was built by ??? + Type = TYPE_ERROR; + + Constant = TRUE; + } // end of ARRAY constructor + +/***********************************************************************/ +/* ARRAY constructor from a TYPE_LIST subarray. */ +/***********************************************************************/ +ARRAY::ARRAY(PGLOBAL g, PARRAY par, int k) : CSORT(FALSE) + { + int prec; + LSTBLK *lp; + + if (par->Type != TYPE_LIST) { + Type = TYPE_ERROR; + return; + } // endif Type + + lp = (LSTBLK*)par->Vblp; + + Nval = par->Nval; + Ndif = 0; + Bot = 0; + Top = 0; + Size = par->Size; + Xsize = -1; + + Valblk = lp->Mbvk[k]; + Vblp = Valblk->Vblk; + Type = Vblp->GetType(); + Len = (Type == TYPE_STRING) ? Vblp->GetVlen() : 0; + prec = (Type == TYPE_FLOAT) ? 2 : 0; + Value = AllocateValue(g, Type, Len, prec, NULL); + Constant = TRUE; + } // end of ARRAY constructor + +/***********************************************************************/ +/* Empty: reset the array for a new use (correlated queries). */ +/* Note: this is temporary as correlated queries will not use arrays */ +/* anymore with future optimized algorithms. */ +/***********************************************************************/ +void ARRAY::Empty(void) + { + assert(Correlated); + Nval = Ndif = 0; + Bot = Top = X = Inf = Sup = 0; + } // end of Empty +#endif // 0 + +/***********************************************************************/ +/* Add a string element to an array. 
*/ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, PSZ strp) + { + if (Type != TYPE_STRING) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "CHAR"); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding string(%d): '%s'\n", Nval, strp); + +//Value->SetValue_psz(strp); +//Vblp->SetValue(valp, Nval++); + Vblp->SetValue(strp, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add a char pointer element to an array. */ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, void *p) + { + if (Type != TYPE_PCHAR) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "PCHAR"); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding pointer(%d): %p\n", Nval, p); + + Vblp->SetValue((PSZ)p, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add a short integer element to an array. */ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, short n) + { + if (Type != TYPE_SHORT) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "SHORT"); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding SHORT(%d): %hd\n", Nval, n); + +//Value->SetValue(n); +//Vblp->SetValue(valp, Nval++); + Vblp->SetValue(n, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add an integer element to an array. */ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, int n) + { + if (Type != TYPE_INT) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "INTEGER"); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding int(%d): %d\n", Nval, n); + +//Value->SetValue(n); +//Vblp->SetValue(valp, Nval++); + Vblp->SetValue(n, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add a double float element to an array. */ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, double d) + { + if (Type != TYPE_DOUBLE) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "DOUBLE"); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding float(%d): %lf\n", Nval, d); + + Value->SetValue(d); + Vblp->SetValue(Value, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add the value of a XOBJECT block to an array. */ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, PXOB xp) + { + if (Type != xp->GetResultType()) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), + GetTypeName(xp->GetResultType()), GetTypeName(Type)); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding (%d) from xp=%p\n", Nval, xp); + +//AddValue(xp->GetValue()); + Vblp->SetValue(xp->GetValue(), Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Add a value to an array. 
*/ +/***********************************************************************/ +bool ARRAY::AddValue(PGLOBAL g, PVAL vp) + { + if (Type != vp->GetType()) { + sprintf(g->Message, MSG(ADD_BAD_TYPE), + GetTypeName(vp->GetType()), GetTypeName(Type)); + return TRUE; + } // endif Type + + if (trace) + htrc(" adding (%d) from vp=%p\n", Nval, vp); + + Vblp->SetValue(vp, Nval++); + return FALSE; + } // end of AddValue + +/***********************************************************************/ +/* Retrieve the nth value of the array. */ +/***********************************************************************/ +void ARRAY::GetNthValue(PVAL valp, int n) + { + valp->SetValue_pvblk(Vblp, n); + } // end of GetNthValue + +#if 0 +/***********************************************************************/ +/* Retrieve the nth subvalue of a list array. */ +/***********************************************************************/ +bool ARRAY::GetSubValue(PGLOBAL g, PVAL valp, int *kp) + { + PVBLK vblp; + + if (Type != TYPE_LIST) { + sprintf(g->Message, MSG(NO_SUB_VAL), Type); + return TRUE; + } // endif Type + + vblp = ((LSTBLK*)Vblp)->Mbvk[kp[0]]->Vblk; + valp->SetValue_pvblk(vblp, kp[1]); + return FALSE; + } // end of GetSubValue +#endif // 0 + +/***********************************************************************/ +/* Return the nth value of an integer array. */ +/***********************************************************************/ +int ARRAY::GetIntValue(int n) + { + assert (Type == TYPE_INT); + return Vblp->GetIntValue(n); + } // end of GetIntValue + +/***********************************************************************/ +/* Return the nth value of a STRING array. */ +/***********************************************************************/ +char *ARRAY::GetStringValue(int n) + { + assert (Type == TYPE_STRING || Type == TYPE_PCHAR); + return Vblp->GetCharValue(n); + } // end of GetStringValue + +/***********************************************************************/ +/* Find whether a value is in an array. */ +/* Provide a conversion limited to the Value limitation. */ +/***********************************************************************/ +bool ARRAY::Find(PVAL valp) + { + register int n; + PVAL vp; + + if (Type != valp->GetType()) { + Value->SetValue_pval(valp); + vp = Value; + } else + vp = valp; + + Inf = Bot, Sup = Top; + + while (Sup - Inf > 1) { + X = (Inf + Sup) >> 1; + n = Vblp->CompVal(vp, X); + + if (n < 0) + Sup = X; + else if (n > 0) + Inf = X; + else + return TRUE; + + } // endwhile + + return FALSE; + } // end of Find + +/***********************************************************************/ +/* ARRAY: Compare routine for a list of values. */ +/***********************************************************************/ +BYTE ARRAY::Vcompare(PVAL vp, int n) + { + Value->SetValue_pvblk(Vblp, n); + return vp->TestValue(Value); + } // end of Vcompare + +/***********************************************************************/ +/* Test a filter condition on an array depending on operator and mod. */ +/* Modificator values are 1: ANY (or SOME) and 2: ALL. */ +/***********************************************************************/ +bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm) + { + int i; + PVAL vp; + BYTE bt = OpBmp(g, opc); + int top = Nval - 1; + + if (top < 0) // Array is empty + // Return TRUE for ALL because it means that there are no item that + // does not verify the condition, which is true indeed. 
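/*
  ARRAY::Find() above is a dichotomic search over the sorted block: it
  narrows an (Inf, Sup) bracket until the value is hit (X is its index)
  or the bracket collapses, in which case Inf/Sup delimit where the
  value would have to sit.  A standalone sketch of that bracketed search
  over plain ints (hypothetical signature):
*/
#include <vector>

struct FindResult { bool found; int pos; int inf; int sup; };

static FindResult bracketed_find(const std::vector<int> &sorted, int value,
                                 int bot, int top)
{
  int inf = bot, sup = top;           // exclusive bracket, like Bot/Top
  while (sup - inf > 1)
  {
    int x = (inf + sup) / 2;
    if (value < sorted[x])      sup = x;
    else if (value > sorted[x]) inf = x;
    else                        return { true, x, inf, sup };
  }
  return { false, -1, inf, sup };     // not present; bracket shows the gap
}

/* Usage: bracketed_find(vals, key, -1, (int)vals.size()); */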
+ // Return FALSE for ANY because TRUE means that there is at least + // one item that verifies the condition, which is false. + return opm == 2; + + if (valp) { + if (Type != valp->GetType()) { + Value->SetValue_pval(valp); + vp = Value; + } else + vp = valp; + + } else if (opc != OP_EXIST) { + sprintf(g->Message, MSG(MISSING_ARG), opc); + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + } else // OP_EXIST + return Nval > 0; + + if (opc == OP_IN || (opc == OP_EQ && opm == 1)) + return Find(vp); + else if (opc == OP_NE && opm == 2) + return !Find(vp); + else if (opc == OP_EQ && opm == 2) + return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : FALSE; + else if (opc == OP_NE && opm == 1) + return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : TRUE; + + if (Type != TYPE_LIST) { + if (opc == OP_GT || opc == OP_GE) + return !(Vcompare(vp, (opm == 1) ? 0 : top) & bt); + else + return !(Vcompare(vp, (opm == 2) ? 0 : top) & bt); + + } // endif Type + + // Case of TYPE_LIST + if (opm == 2) { + for (i = 0; i < Nval; i++) + if (Vcompare(vp, i) & bt) + return FALSE; + + return TRUE; + } else { // opm == 1 + for (i = 0; i < Nval; i++) + if (!(Vcompare(vp, i) & bt)) + return TRUE; + + return FALSE; + } // endif opm + + } // end of FilTest + +/***********************************************************************/ +/* Test whether this array can be converted to TYPE_SHORT. */ +/* Must be called after the array is sorted. */ +/***********************************************************************/ +bool ARRAY::CanBeShort(void) + { + int* To_Val = (int*)Valblk->GetMemp(); + + if (Type != TYPE_INT || !Ndif) + return FALSE; + + // Because the array is sorted, this is true if all the array + // int values are in the range of SHORT values + return (To_Val[0] >= -32768 && To_Val[Nval-1] < 32768); + } // end of CanBeShort + +/***********************************************************************/ +/* Convert an array to new numeric type k. */ +/* Note: conversion is always made in ascending order from STRING to */ +/* short to int to double so no precision is lost in the conversion. */ +/* One exception is converting from int to short compatible arrays. */ +/***********************************************************************/ +int ARRAY::Convert(PGLOBAL g, int k, PVAL vp) + { + int i; + bool b = FALSE; + PMBV ovblk = Valblk; + PVBLK ovblp = Vblp; + + Type = k; // k is the new type + Valblk = new(g) MBVALS; + + switch (Type) { + case TYPE_DOUBLE: + case TYPE_SHORT: + case TYPE_INT: + case TYPE_DATE: + Len = 1; + break; + default: + sprintf(g->Message, MSG(BAD_CONV_TYPE), Type); + return TYPE_ERROR; + } // endswitch k + + Size = Nval; + Nval = 0; + Vblp = Valblk->Allocate(g, Type, Len, 0, Size); + + if (!Valblk->GetMemp()) + // The error message was built by PlgDBalloc + return TYPE_ERROR; + else + Value = AllocateValue(g, Type, Len, 0, NULL); + + /*********************************************************************/ + /* Converting STRING to DATE can be done according to date format. */ + /*********************************************************************/ + if (Type == TYPE_DATE && ovblp->GetType() == TYPE_STRING && vp) + if (((DTVAL*)Value)->SetFormat(g, vp)) + return TYPE_ERROR; + else + b = TRUE; // Sort the new array on date internal values + + /*********************************************************************/ + /* Do the actual conversion. 
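/*
  ARRAY::FilTest() above evaluates a comparison against the whole array
  under ANY (opm == 1) or ALL (opm == 2) semantics: an empty array makes
  ALL true and ANY false, and the IN/EQ-ANY cases reduce to the sorted
  Find().  A small sketch of the ANY/ALL fold (hypothetical helper over
  plain ints):
*/
#include <functional>
#include <vector>

enum quantifier { ANY = 1, ALL = 2 };

static bool quantified_match(const std::vector<int> &values, quantifier q,
                             const std::function<bool(int)> &pred)
{
  if (values.empty())
    return q == ALL;                 // vacuously true for ALL, false for ANY
  for (int v : values)
  {
    bool ok = pred(v);
    if (q == ANY && ok)  return true;    // one match is enough
    if (q == ALL && !ok) return false;   // one failure sinks ALL
  }
  return q == ALL;
}

/* Usage: quantified_match(vals, ALL, [](int v){ return v > 0; }); */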
*/ + /*********************************************************************/ + for (i = 0; i < Size; i++) { + Value->SetValue_pvblk(ovblp, i); + + if (AddValue(g, Value)) + return TYPE_ERROR; + + } // endfor i + + /*********************************************************************/ + /* For sorted arrays, get the initial find values. */ + /*********************************************************************/ + if (b) + Sort(g); + + ovblk->Free(); + return Type; + } // end of Convert + +/***********************************************************************/ +/* ARRAY Save: save value at i (used while rordering). */ +/***********************************************************************/ +void ARRAY::Save(int i) + { + Value->SetValue_pvblk(Vblp, i); + } // end of Save + +/***********************************************************************/ +/* ARRAY Restore: restore value to j (used while rordering). */ +/***********************************************************************/ +void ARRAY::Restore(int j) + { + Vblp->SetValue(Value, j); + } // end of Restore + +/***********************************************************************/ +/* ARRAY Move: move value from k to j (used while rordering). */ +/***********************************************************************/ +void ARRAY::Move(int j, int k) + { + Vblp->Move(k, j); // VALBLK does the opposite !!! + } // end of Move + +/***********************************************************************/ +/* ARRAY: Compare routine for one LIST value (ascending only). */ +/***********************************************************************/ +int ARRAY::Qcompare(int *i1, int *i2) + { + return Vblp->CompVal(*i1, *i2); + } // end of Qcompare + +/***********************************************************************/ +/* Mainly meant to set the character arrays case sensitiveness. */ +/***********************************************************************/ +void ARRAY::SetPrecision(PGLOBAL g, int p) + { + if (Vblp == NULL) { + strcpy(g->Message, MSG(PREC_VBLP_NULL)); + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + } // endif Vblp + + bool was = Vblp->IsCi(); + + if (was && !p) { + strcpy(g->Message, MSG(BAD_SET_CASE)); + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + } // endif Vblp + + if (was || !p) + return; + else + Vblp->SetPrec(p); + + if (!was && Type == TYPE_STRING) + // Must be resorted to eliminate duplicate strings + if (Sort(g)) + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + + } // end of SetPrecision + +/***********************************************************************/ +/* Sort and eliminate distinct values from an array. */ +/* Note: this is done by making a sorted index on distinct values. */ +/* Returns FALSE if Ok or TRUE in case of error. 
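/*
  ARRAY::Sort(), whose body follows, sorts an index array and then makes
  the storage itself physically sorted by walking the permutation
  cycles: the element at the cycle start is saved, each hole is filled
  by moving the value it points at, and the saved element is restored
  when the cycle closes (Save/Move/Restore above play those roles).  A
  standalone sketch of that in-place reorder (hypothetical helper):
*/
#include <cstddef>
#include <vector>

// Rearranges data so that new data[i] == old data[perm[i]], using O(1)
// extra space; perm is consumed as the visited-marker.
static void apply_permutation(std::vector<int> &data, std::vector<size_t> &perm)
{
  const size_t n = data.size();
  for (size_t i = 0; i < n; ++i)
  {
    if (perm[i] == i || perm[i] == n)    // already placed or already moved
      continue;
    int saved = data[i];                 // Save(i)
    size_t j = i;
    while (true)
    {
      size_t k = perm[j];
      perm[j] = n;                       // mark slot as settled
      if (k == i) { data[j] = saved; break; }  // Restore(j): cycle closed
      data[j] = data[k];                 // Move(j, k)
      j = k;
    }
  }
}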
*/ +/***********************************************************************/ +bool ARRAY::Sort(PGLOBAL g) + { + int i, j, k; + + // This is to avoid multiply allocating for correlated subqueries + if (Nval > Xsize) { + if (Xsize >= 0) { + // Was already allocated + PlgDBfree(Index); + PlgDBfree(Offset); + } // endif Xsize + + // Prepare non conservative sort with offet values + Index.Size = Nval * sizeof(int); + + if (!PlgDBalloc(g, NULL, Index)) + goto error; + + Offset.Size = (Nval + 1) * sizeof(int); + + if (!PlgDBalloc(g, NULL, Offset)) + goto error; + + Xsize = Nval; + } // endif Nval + + // Call the sort program, it returns the number of distinct values + Ndif = Qsort(g, Nval); + + if (Ndif < 0) + goto error; + + // Use the sort index to reorder the data in storage so it will + // be physically sorted and Index can be removed. + for (i = 0; i < Nval; i++) { + if (Pex[i] == i || Pex[i] == Nval) + // Already placed or already moved + continue; + + Save(i); + + for (j = i;; j = k) { + k = Pex[j]; + Pex[j] = Nval; // Mark position as set + + if (k == i) { + Restore(j); + break; // end of loop + } else + Move(j, k); + + } // endfor j + + } // endfor i + + // Reduce the size of the To_Val array if Ndif < Nval + if (Ndif < Nval) { + for (i = 1; i < Ndif; i++) + if (i != Pof[i]) + break; + + for (; i < Ndif; i++) + Move(i, Pof[i]); + + Nval = Ndif; + } // endif ndif + +//if (!Correlated) { + if (Size > Nval) { + Size = Nval; + Valblk->ReAllocate(g, Size); + } // endif Size + + // Index and Offset are not used anymore + PlgDBfree(Index); + PlgDBfree(Offset); + Xsize = -1; +// } // endif Correlated + + Bot = -1; // For non optimized search + Top = Ndif; // Find searches the whole array. + return FALSE; + + error: + Nval = Ndif = 0; + Valblk->Free(); + PlgDBfree(Index); + PlgDBfree(Offset); + return TRUE; + } // end of Sort + +/***********************************************************************/ +/* Sort and return the sort index. */ +/* Note: This is meant if the array contains unique values. */ +/* Returns Index.Memp if Ok or NULL in case of error. */ +/***********************************************************************/ +void *ARRAY::GetSortIndex(PGLOBAL g) + { + // Prepare non conservative sort with offet values + Index.Size = Nval * sizeof(int); + + if (!PlgDBalloc(g, NULL, Index)) + goto error; + + Offset.Size = (Nval + 1) * sizeof(int); + + if (!PlgDBalloc(g, NULL, Offset)) + goto error; + + // Call the sort program, it returns the number of distinct values + Ndif = Qsort(g, Nval); + + if (Ndif < 0) + goto error; + + if (Ndif < Nval) + goto error; + + PlgDBfree(Offset); + return Index.Memp; + + error: + Nval = Ndif = 0; + Valblk->Free(); + PlgDBfree(Index); + PlgDBfree(Offset); + return NULL; + } // end of GetSortIndex + +/***********************************************************************/ +/* Block filter testing for IN operator on Column/Array operands. */ +/* Here we call Find that returns TRUE if the value is in the array */ +/* with X equal to the index of the found value in the array, or */ +/* FALSE if the value is not in the array with Inf and Sup being the */ +/* indexes of the array values that are immediately below and over */ +/* the not found value. This enables to restrict the array to the */ +/* values that are between the min and max block values and to return */ +/* the indication of whether the Find will be always true, always not */ +/* true or other. 
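   The returned value uses the same fuzzy codes as the block filters of
   blkfil.cpp: -2 condition always false for the rest of the file, -1 false
   for this block, 0 possibly true for some values of the block, 1 true for
   the whole block, 2 always true for the rest of the file.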
*/ +/***********************************************************************/ +int ARRAY::BlockTest(PGLOBAL g, int opc, int opm, + void *minp, void *maxp, bool s) + { + bool bin, bax, pin, pax, veq, all = (opm == 2); + + if (Ndif == 0) // Array is empty + // Return TRUE for ALL because it means that there are no item that + // does not verify the condition, which is true indeed. + // Return FALSE for ANY because TRUE means that there is at least + // one item that verifies the condition, which is false. + return (all) ? 2 : -2; + else if (opc == OP_EQ && all && Ndif > 1) + return -2; + else if (opc == OP_NE && !all && Ndif > 1) + return 2; +// else if (Ndif == 1) +// all = FALSE; + + // veq is true when all values in the block are equal + switch (Type) { + case TYPE_STRING: veq = (Vblp->IsCi()) + ? !stricmp((char*)minp, (char*)maxp) + : !strcmp((char*)minp, (char*)maxp); break; + case TYPE_SHORT: veq = *(short*)minp == *(short*)maxp; break; + case TYPE_INT: veq = *(int*)minp == *(int*)maxp; break; + case TYPE_DOUBLE: veq = *(double*)minp == *(double*)maxp; break; + default: veq = FALSE; // Error ? + } // endswitch type + + if (!s) + Bot = -1; + + Top = Ndif; // Reset Top at top of list + Value->SetBinValue(maxp); + Top = (bax = Find(Value)) ? X + 1 : Sup; + + if (bax) { + if (opc == OP_EQ) + return (veq) ? 1 : 0; + else if (opc == OP_NE) + return (veq) ? -1 : 0; + + if (X == 0) switch (opc) { + // Max value is equal to min list value + case OP_LE: return 1; break; + case OP_LT: return (veq) ? -1 : 0; break; + case OP_GE: return (veq) ? 1 : 0; break; + case OP_GT: return -1; break; + } // endswitch opc + + pax = (opc == OP_GE) ? (X < Ndif - 1) : TRUE; + } else if (Inf == Bot) { + // Max value is smaller than min list value + return (opc == OP_LT || opc == OP_LE || opc == OP_NE) ? 1 : -1; + } else + pax = (Sup < Ndif); // True if max value is inside the list value + + if (!veq) { + Value->SetBinValue(minp); + bin = Find(Value); + } else + bin = bax; + + Bot = (bin) ? X - 1 : Inf; + + if (bin) { + if (opc == OP_EQ || opc == OP_NE) + return 0; + + if (X == Ndif - 1) switch (opc) { + case OP_GE: return (s) ? 2 : 1; break; + case OP_GT: return (veq) ? -1 : 0; break; + case OP_LE: return (veq) ? 1 : 0; break; + case OP_LT: return (s) ? -2 : -1; break; + } // endswitch opc + + pin = (opc == OP_LE) ? (X > 0) : TRUE; + } else if (Sup == Ndif) { + // Min value is greater than max list value + if (opc == OP_GT || opc == OP_GE || opc == OP_NE) + return (s) ? 2 : 1; + else + return (s) ? -2 : -1; + + } else + pin = (Inf >= 0); // True if min value is inside the list value + + if (Top - Bot <= 1) { + // No list item between min and max value +#if defined(_DEBUG) + assert (!bin && !bax); +#endif + switch (opc) { + case OP_EQ: return -1; break; + case OP_NE: return 1; break; + default: return (all) ? -1 : 1; break; + } // endswitch opc + + } // endif + +#if defined(_DEBUG) + assert (Ndif > 1); // if Ndif = 1 we should have returned already +#endif + + // At this point, if there are no logical errors in the algorithm, + // the only possible overlaps between the array and the block are: + // Array: +-------+ +-------+ +-------+ +-----+ + // Block: +-----+ +---+ +------+ +--------+ + // TRUE: pax pin pax pin + if (all) switch (opc) { + case OP_GT: + case OP_GE: return (pax) ? -1 : 0; break; + case OP_LT: + case OP_LE: return (pin) ? 
-1 : 0; break; + } // endswitch opc + + return 0; + } // end of BlockTest + +/***********************************************************************/ +/* MakeArrayList: Makes a value list from an SQL IN array (in work). */ +/***********************************************************************/ +PSZ ARRAY::MakeArrayList(PGLOBAL g) + { + char *p, *tp; + int i; + size_t z, len = 2; + + if (Type == TYPE_LIST) + return "(?" "?" "?)"; // To be implemented + + z = MY_MAX(24, GetTypeSize(Type, Len) + 4); + tp = (char*)PlugSubAlloc(g, NULL, z); + + for (i = 0; i < Nval; i++) { + Value->SetValue_pvblk(Vblp, i); + Value->Print(g, tp, z); + len += strlen(tp); + } // enfor i + + if (trace) + htrc("Arraylist: len=%d\n", len); + + p = (char *)PlugSubAlloc(g, NULL, len); + strcpy(p, "("); + + for (i = 0; i < Nval;) { + Value->SetValue_pvblk(Vblp, i); + Value->Print(g, tp, z); + strcat(p, tp); + strcat(p, (++i == Nval) ? ")" : ","); + } // enfor i + + if (trace) + htrc("Arraylist: newlen=%d\n", strlen(p)); + + return p; + } // end of MakeArrayList + +/***********************************************************************/ +/* Make file output of ARRAY contents. */ +/***********************************************************************/ +void ARRAY::Print(PGLOBAL g, FILE *f, uint n) + { + char m[64]; + int lim = MY_MIN(Nval,10); + + memset(m, ' ', n); // Make margin string + m[n] = '\0'; + fprintf(f, "%sARRAY: type=%d\n", m, Type); + memset(m, ' ', n + 2); // Make margin string + m[n] = '\0'; + + if (Type != TYPE_LIST) { + fprintf(f, "%sblock=%p numval=%d\n", m, Valblk->GetMemp(), Nval); + + if (Vblp) + for (int i = 0; i < lim; i++) { + Value->SetValue_pvblk(Vblp, i); + Value->Print(g, f, n+4); + } // endfor i + + } else + fprintf(f, "%sVALLST: numval=%d\n", m, Nval); + + } // end of Print + +/***********************************************************************/ +/* Make string output of ARRAY contents. */ +/***********************************************************************/ +void ARRAY::Print(PGLOBAL g, char *ps, uint z) + { + if (z < 16) + return; + + sprintf(ps, "ARRAY: type=%d\n", Type); + // More to be implemented later + } // end of Print + +/* -------------------------- Class MULAR ---------------------------- */ + +/***********************************************************************/ +/* MULAR public constructor. */ +/***********************************************************************/ +MULAR::MULAR(PGLOBAL g, int n) : CSORT(FALSE) + { + Narray = n; + Pars = (PARRAY*)PlugSubAlloc(g, NULL, n * sizeof(PARRAY)); + } // end of MULAR constructor + +/***********************************************************************/ +/* MULAR: Compare routine multiple arrays. */ +/***********************************************************************/ +int MULAR::Qcompare(int *i1, int *i2) + { + register int i, n = 0; + + for (i = 0; i < Narray; i++) + if ((n = Pars[i]->Qcompare(i1, i2))) + break; + + return n; + } // end of Qcompare + +/***********************************************************************/ +/* Sort and eliminate distinct values from multiple arrays. */ +/* Note: this is done by making a sorted index on distinct values. */ +/* Returns FALSE if Ok or TRUE in case of error. 
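   All member arrays must have the same number of values; a single sort
   index is built using Qcompare, which chains the comparisons of every
   array, and the resulting permutation and reduction are applied to all
   arrays in lockstep so the correspondence between column values is kept.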
*/ +/***********************************************************************/ +bool MULAR::Sort(PGLOBAL g) + { + int i, j, k, n, nval, ndif; + + // All arrays must have the same number of values + nval = Pars[0]->Nval; + + for (n = 1; n < Narray; n++) + if (Pars[n]->Nval != nval) { + strcpy(g->Message, MSG(BAD_ARRAY_VAL)); + return TRUE; + } // endif nval + + // Prepare non conservative sort with offet values + Index.Size = nval * sizeof(int); + + if (!PlgDBalloc(g, NULL, Index)) + goto error; + + Offset.Size = (nval + 1) * sizeof(int); + + if (!PlgDBalloc(g, NULL, Offset)) + goto error; + + // Call the sort program, it returns the number of distinct values + ndif = Qsort(g, nval); + + if (ndif < 0) + goto error; + + // Use the sort index to reorder the data in storage so it will + // be physically sorted and Index can be removed. + for (i = 0; i < nval; i++) { + if (Pex[i] == i || Pex[i] == nval) + // Already placed or already moved + continue; + + for (n = 0; n < Narray; n++) + Pars[n]->Save(i); + + for (j = i;; j = k) { + k = Pex[j]; + Pex[j] = nval; // Mark position as set + + if (k == i) { + for (n = 0; n < Narray; n++) + Pars[n]->Restore(j); + + break; // end of loop + } else + for (n = 0; n < Narray; n++) + Pars[n]->Move(j, k); + + } // endfor j + + } // endfor i + + // Reduce the size of the To_Val array if ndif < nval + if (ndif < nval) { + for (i = 1; i < ndif; i++) + if (i != Pof[i]) + break; + + for (; i < ndif; i++) + for (n = 0; n < Narray; n++) + Pars[n]->Move(i, Pof[i]); + + for (n = 0; n < Narray; n++) { + Pars[n]->Nval = ndif; + Pars[n]->Size = ndif; + Pars[n]->Valblk->ReAllocate(g, ndif); + } // endfor n + + } // endif ndif + + // Index and Offset are not used anymore + PlgDBfree(Index); + PlgDBfree(Offset); + + for (n = 0; n < Narray; n++) { + Pars[n]->Bot = -1; // For non optimized search + Pars[n]->Top = ndif; // Find searches the whole array. + } // endfor n + + return FALSE; + + error: + PlgDBfree(Index); + PlgDBfree(Offset); + return TRUE; + } // end of Sort diff --git a/storage/connect/array.h b/storage/connect/array.h new file mode 100644 index 00000000000..4a818414e9c --- /dev/null +++ b/storage/connect/array.h @@ -0,0 +1,130 @@ +/**************** Array H Declares Source Code File (.H) ***************/ +/* Name: ARRAY.H Version 3.1 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ +/* */ +/* This file contains the ARRAY and VALBASE derived classes declares. */ +/***********************************************************************/ +#ifndef __ARRAY_H +#define __ARRAY_H + + +/***********************************************************************/ +/* Include required application header files */ +/***********************************************************************/ +#include "xobject.h" +#include "valblk.h" +#include "csort.h" + +typedef class ARRAY *PARRAY; + +/***********************************************************************/ +/* Definition of class ARRAY with all its method functions. */ +/* Note: This is not a general array class that could be defined as */ +/* a template class, but rather a specific object containing a list */ +/* of values to be processed by the filter IN operator. */ +/* In addition it must act as a metaclass by being able to give back */ +/* the type of values it contains. */ +/* It must also be able to convert itself from some type to another. 
*/ +/***********************************************************************/ +class DllExport ARRAY : public XOBJECT, public CSORT { // Array descblock + friend class MULAR; +//friend class VALLST; +//friend class SFROW; + public: + // Constructors + ARRAY(PGLOBAL g, int type, int size, int len = 1, int prec = 0); +//ARRAY(PGLOBAL g, PQUERY qryp); +//ARRAY(PGLOBAL g, PARRAY par, int k); + + // Implementation + virtual int GetType(void) {return TYPE_ARRAY;} + virtual int GetResultType(void) {return Type;} + virtual int GetLength(void) {return Len;} + virtual int GetLengthEx(void) {return Len;} + virtual int GetScale() {return 0;} + int GetNval(void) {return Nval;} + int GetSize(void) {return Size;} +// PVAL GetValp(void) {return Valp;} + void SetType(int atype) {Type = atype;} +// void SetCorrel(bool b) {Correlated = b;} + + // Methods + virtual void Reset(void) {Bot = -1;} + virtual int Qcompare(int *, int *); + virtual bool Compare(PXOB) {assert(FALSE); return FALSE;} + virtual bool SetFormat(PGLOBAL, FORMAT&) {assert(FALSE); return FALSE;} +//virtual int CheckSpcCol(PTDB, int) {return 0;} + virtual void Print(PGLOBAL g, FILE *f, uint n); + virtual void Print(PGLOBAL g, char *ps, uint z); +// void Empty(void); + void SetPrecision(PGLOBAL g, int p); + bool AddValue(PGLOBAL g, PSZ sp); + bool AddValue(PGLOBAL g, void *p); + bool AddValue(PGLOBAL g, short n); + bool AddValue(PGLOBAL g, int n); + bool AddValue(PGLOBAL g, double f); + bool AddValue(PGLOBAL g, PXOB xp); + bool AddValue(PGLOBAL g, PVAL vp); + void GetNthValue(PVAL valp, int n); + int GetIntValue(int n); + char *GetStringValue(int n); + BYTE Vcompare(PVAL vp, int n); + void Save(int); + void Restore(int); + void Move(int, int); + bool Sort(PGLOBAL g); + void *GetSortIndex(PGLOBAL g); + bool Find(PVAL valp); + bool FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm); + int Convert(PGLOBAL g, int k, PVAL vp = NULL); + int BlockTest(PGLOBAL g, int opc, int opm, + void *minp, void *maxp, bool s); + PSZ MakeArrayList(PGLOBAL g); + bool CanBeShort(void); + bool GetSubValue(PGLOBAL g, PVAL valp, int *kp); + + protected: + // Members + PMBV Valblk; // To the MBVALS class + PVBLK Vblp; // To Valblock of the data array +//PVAL Valp; // The value used for Save and Restore is Value + int Size; // Size of value array + int Nval; // Total number of items in array + int Ndif; // Total number of distinct items in array + int Xsize; // Size of Index (used for correlated arrays) + int Type; // Type of individual values in the array + int Len; // Length of character string + int Bot; // Bottom of research index + int Top; // Top of research index + int X, Inf, Sup; // Used for block optimization +//bool Correlated; // -----------> Temporary + }; // end of class ARRAY + +/***********************************************************************/ +/* Definition of class MULAR with all its method functions. */ +/* This class is used when constructing the arrays of constants used */ +/* for indexing. Its only purpose is to provide a way to sort, reduce */ +/* and reorder the arrays of multicolumn indexes as one block. Indeed */ +/* sorting the arrays independantly would break the correspondance of */ +/* column values. 
*/ +/***********************************************************************/ +class MULAR : public CSORT, public BLOCK { // No need to be an XOBJECT + public: + // Constructor + MULAR(PGLOBAL g, int n); + + // Implementation + void SetPars(PARRAY par, int i) {Pars[i] = par;} + + // Methods + virtual int Qcompare(int *i1, int *i2); // Sort compare routine + bool Sort(PGLOBAL g); + + protected: + // Members + int Narray; // The number of sub-arrays + PARRAY *Pars; // To the block of real arrays + }; // end of class ARRAY + +#endif // __ARRAY_H diff --git a/storage/connect/blkfil.cpp b/storage/connect/blkfil.cpp new file mode 100644 index 00000000000..c1099261cef --- /dev/null +++ b/storage/connect/blkfil.cpp @@ -0,0 +1,1080 @@ +/************* BlkFil C++ Program Source Code File (.CPP) **************/ +/* PROGRAM NAME: BLKFIL */ +/* ------------- */ +/* Version 2.5 */ +/* */ +/* COPYRIGHT: */ +/* ---------- */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2014 */ +/* */ +/* WHAT THIS PROGRAM DOES: */ +/* ----------------------- */ +/* This program is the implementation of block indexing classes. */ +/* */ +/***********************************************************************/ + +/***********************************************************************/ +/* Include relevant MariaDB header file. */ +/***********************************************************************/ +#include "my_global.h" +#include "sql_class.h" +//#include "sql_time.h" + +#if defined(WIN32) +//#include <windows.h> +#else // !WIN32 +#include <string.h> +#include <sys/types.h> +#include <sys/stat.h> +#endif // !WIN32 + +/***********************************************************************/ +/* Include application header files: */ +/***********************************************************************/ +#include "global.h" // global declarations +#include "plgdbsem.h" // DB application declarations +#include "xindex.h" // Key Index class declarations +#include "filamtxt.h" // File access method dcls +#include "tabdos.h" // TDBDOS and DOSCOL class dcls +#include "array.h" // ARRAY classes dcls +#include "blkfil.h" // Block Filter classes dcls + +/***********************************************************************/ +/* Static variables. */ +/***********************************************************************/ +extern "C" int trace; + +/* ------------------------ Class BLOCKFILTER ------------------------ */ + +/***********************************************************************/ +/* BLOCKFILTER constructor. */ +/***********************************************************************/ +BLOCKFILTER::BLOCKFILTER(PTDBDOS tdbp, int op) + { + Tdbp = tdbp; + Correl = FALSE; + Opc = op; + Opm = 0; + Result = 0; + } // end of BLOCKFILTER constructor + +/***********************************************************************/ +/* Make file output of BLOCKFILTER contents. */ +/***********************************************************************/ +void BLOCKFILTER::Print(PGLOBAL g, FILE *f, uint n) + { + char m[64]; + + memset(m, ' ', n); // Make margin string + m[n] = '\0'; + + fprintf(f, "%sBLOCKFILTER: at %p opc=%d opm=%d result=%d\n", + m, this, Opc, Opm, Result); + } // end of Print + +/***********************************************************************/ +/* Make string output of BLOCKFILTER contents. 
*/ +/***********************************************************************/ +void BLOCKFILTER::Print(PGLOBAL g, char *ps, uint z) + { + strncat(ps, "BlockFilter(s)", z); + } // end of Print + + +/* ---------------------- Class BLKFILLOG ---------------------------- */ + +/***********************************************************************/ +/* BLKFILLOG constructor. */ +/***********************************************************************/ +BLKFILLOG::BLKFILLOG(PTDBDOS tdbp, int op, PBF *bfp, int n) + : BLOCKFILTER(tdbp, op) + { + N = n; + Fil = bfp; + + for (int i = 0; i < N; i++) + if (Fil[i]) + Correl |= Fil[i]->Correl; + + } // end of BLKFILLOG constructor + +/***********************************************************************/ +/* Reset: this function is used only to check the existence of a */ +/* BLKFILIN block and have it reset its Bot value for sorted columns. */ +/***********************************************************************/ +void BLKFILLOG::Reset(PGLOBAL g) + { + for (int i = 0; i < N; i++) + if (Fil[i]) + Fil[i]->Reset(g); + + } // end of Reset + +/***********************************************************************/ +/* This function is used for block filter evaluation. We use here a */ +/* fuzzy logic between the values returned by evaluation blocks: */ +/* -2: the condition will be always false for the rest of the file. */ +/* -1: the condition will be false for the whole group. */ +/* 0: the condition may be true for some of the group values. */ +/* 1: the condition will be true for the whole group. */ +/* 2: the condition will be always true for the rest of the file. */ +/***********************************************************************/ +int BLKFILLOG::BlockEval(PGLOBAL g) + { + int i, rc; + + for (i = 0; i < N; i++) { + // 0: Means some block filter value may be True + rc = (Fil[i]) ? Fil[i]->BlockEval(g) : 0; + + if (!i) + Result = (Opc == OP_NOT) ? -rc : rc; + else switch (Opc) { + case OP_AND: + Result = MY_MIN(Result, rc); + break; + case OP_OR: + Result = MY_MAX(Result, rc); + break; + default: + // Should never happen + Result = 0; + return Result; + } // endswitch Opc + + } // endfor i + + return Result; + } // end of BlockEval + +/* ---------------------- Class BLKFILARI----------------------------- */ + +/***********************************************************************/ +/* BLKFILARI constructor. */ +/***********************************************************************/ +BLKFILARI::BLKFILARI(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp) + : BLOCKFILTER(tdbp, op) + { + Colp = (PDOSCOL)xp[0]; + + if (xp[1]->GetType() == TYPE_COLBLK) { + Cpx = (PCOL)xp[1]; // Subquery pseudo constant column + Correl = TRUE; + } else + Cpx = NULL; + + Sorted = Colp->IsSorted() > 0; + + // Don't remember why this was changed. Anyway it is no good for + // correlated subqueries because the Value must reflect changes + if (Cpx) + Valp = xp[1]->GetValue(); + else + Valp = AllocateValue(g, xp[1]->GetValue()); + + } // end of BLKFILARI constructor + +/***********************************************************************/ +/* Reset: re-eval the constant value in the case of pseudo constant */ +/* column use in a correlated subquery. 
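   MakeValueBitmap is a no-op at this level; the XDB2 subclasses BLKFILAR2
   and BLKFILMR2 override it so that their value bitmaps are rebuilt
   whenever the correlated constant value changes.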
*/ +/***********************************************************************/ +void BLKFILARI::Reset(PGLOBAL g) + { + if (Cpx) { + Cpx->Reset(); + Cpx->Eval(g); + MakeValueBitmap(); // Does nothing for class BLKFILARI + } // endif Cpx + + } // end of Reset + +/***********************************************************************/ +/* Evaluate block filter for arithmetic operators. */ +/***********************************************************************/ +int BLKFILARI::BlockEval(PGLOBAL g) + { + int mincmp, maxcmp, n; + +#if defined(_DEBUG) + assert (Colp->IsClustered()); +#endif + + n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + mincmp = Colp->GetMin()->CompVal(Valp, n); + maxcmp = Colp->GetMax()->CompVal(Valp, n); + + switch (Opc) { + case OP_EQ: + case OP_NE: + if (mincmp < 0) // Means minval > Val + Result = (Sorted) ? -2 : -1; + else if (maxcmp > 0) // Means maxval < Val + Result = -1; + else if (!mincmp && !maxcmp) // minval = maxval = val + Result = 1; + else + Result = 0; + + break; + case OP_GT: + case OP_LE: + if (mincmp < 0) // minval > Val + Result = (Sorted) ? 2 : 1; + else if (maxcmp < 0) // maxval > Val + Result = 0; + else // maxval <= Val + Result = -1; + + break; + case OP_GE: + case OP_LT: + if (mincmp <= 0) // minval >= Val + Result = (Sorted) ? 2 : 1; + else if (maxcmp <= 0) // Maxval >= Val + Result = 0; + else // Maxval < Val + Result = -1; + + break; + } // endswitch Opc + + switch (Opc) { + case OP_NE: + case OP_LE: + case OP_LT: + Result = -Result; + break; + } // endswitch Opc + + if (trace) + htrc("BlockEval: op=%d n=%d rc=%d\n", Opc, n, Result); + + return Result; + } // end of BlockEval + +/* ---------------------- Class BLKFILAR2----------------------------- */ + +/***********************************************************************/ +/* BLKFILAR2 constructor. */ +/***********************************************************************/ +BLKFILAR2::BLKFILAR2(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp) + : BLKFILARI(g, tdbp, op, xp) + { + MakeValueBitmap(); + } // end of BLKFILAR2 constructor + +/***********************************************************************/ +/* MakeValueBitmap: Set the constant value bit map. It can be void */ +/* if the constant value is not in the column distinct values list. */ +/***********************************************************************/ +void BLKFILAR2::MakeValueBitmap(void) + { + int i; // ndv = Colp->GetNdv(); + bool found = FALSE; + PVBLK dval = Colp->GetDval(); + + assert(dval); + + /*********************************************************************/ + /* Here we cannot use Find because we must get the index */ + /* of where to put the value if it is not found in the array. */ + /* This is needed by operators other than OP_EQ or OP_NE. */ + /*********************************************************************/ + found = dval->Locate(Valp, i); + + /*********************************************************************/ + /* Set the constant value bitmap. The bitmaps are really matching */ + /* the OP_EQ, OP_LE, and OP_LT operator but are also used for the */ + /* other operators for which the Result will be inverted. */ + /* The reason the bitmaps are not directly complemented for them is */ + /* to be able to test easily the cases of sorted columns with Bxp, */ + /* and the case of a void bitmap, which happens if the constant */ + /* value is not in the column distinct values list. 
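   For instance, if the constant matches the third distinct value (i = 2),
   Bmp is set to 0x04 and Bxp to 0x03, or 0x07 for operators other than
   OP_LT and OP_GE since the found value is then included; for operators
   other than OP_EQ and OP_NE, Bmp is finally replaced by Bxp.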
*/ + /*********************************************************************/ + if (found) { + Bmp = 1 << i; // Bit of the found value + Bxp = Bmp - 1; // All smaller values + + if (Opc != OP_LT && Opc != OP_GE) + Bxp |= Bmp; // Found value must be included + + } else { + Bmp = 0; + Bxp = (1 << i) - 1; + } // endif found + + if (!(Opc == OP_EQ || Opc == OP_NE)) + Bmp = Bxp; + + } // end of MakeValueBitmap + +/***********************************************************************/ +/* Evaluate XDB2 block filter for arithmetic operators. */ +/***********************************************************************/ +int BLKFILAR2::BlockEval(PGLOBAL g) + { +#if defined(_DEBUG) + assert (Colp->IsClustered()); +#endif + + int n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + uint bkmp = *(uint*)Colp->GetBmap()->GetValPtr(n); + uint bres = Bmp & bkmp; + + // Set result as if Opc were OP_EQ, OP_LT, or OP_LE + if (!bres) { + if (!Bmp) + Result = -2; // No good block in the table file + else if (!Sorted) + Result = -1; // No good values in this block + else // Sorted column, test for no more good blocks in file + Result = (Bxp & bkmp) ? -1 : -2; + + } else + // Test whether all block values are good or only some ones + Result = (bres == bkmp) ? 1 : 0; + + // For OP_NE, OP_GE, and OP_GT the result must be inverted. + switch (Opc) { + case OP_NE: + case OP_GE: + case OP_GT: + Result = -Result; + break; + } // endswitch Opc + + if (trace) + htrc("BlockEval2: op=%d n=%d rc=%d\n", Opc, n, Result); + + return Result; + } // end of BlockEval + +/* ---------------------- Class BLKFILMR2----------------------------- */ + +/***********************************************************************/ +/* BLKFILMR2 constructor. */ +/***********************************************************************/ +BLKFILMR2::BLKFILMR2(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp) + : BLKFILARI(g, tdbp, op, xp) + { + Nbm = Colp->GetNbm(); + Bmp = (uint*)PlugSubAlloc(g, NULL, Nbm * sizeof(uint)); + Bxp = (uint*)PlugSubAlloc(g, NULL, Nbm * sizeof(uint)); + MakeValueBitmap(); + } // end of BLKFILMR2 constructor + +/***********************************************************************/ +/* MakeValueBitmap: Set the constant value bit map. It can be void */ +/* if the constant value is not in the column distinct values list. */ +/***********************************************************************/ +void BLKFILMR2::MakeValueBitmap(void) + { + int i; // ndv = Colp->GetNdv(); + bool found = FALSE, noteq = !(Opc == OP_EQ || Opc == OP_NE); + PVBLK dval = Colp->GetDval(); + + assert(dval); + + for (i = 0; i < Nbm; i++) + Bmp[i] = Bxp[i] = 0; + + /*********************************************************************/ + /* Here we cannot use Find because we must get the index */ + /* of where to put the value if it is not found in the array. */ + /* This is needed by operators other than OP_EQ or OP_NE. */ + /*********************************************************************/ + found = dval->Locate(Valp, i); + + /*********************************************************************/ + /* For bitmaps larger than a ULONG, we must know where Bmp and Bxp */ + /* are positioned in the ULONG bit map block array. */ + /*********************************************************************/ + N = i / MAXBMP; + i %= MAXBMP; + + /*********************************************************************/ + /* Set the constant value bitmaps. 
The bitmaps are really matching */ + /* the OP_EQ, OP_LE, and OP_LT operator but are also used for the */ + /* other operators for which the Result will be inverted. */ + /* The reason the bitmaps are not directly complemented for them is */ + /* to be able to easily test the cases of sorted columns with Bxp, */ + /* and the case of a void bitmap, which happens if the constant */ + /* value is not in the column distinct values list. */ + /*********************************************************************/ + if (found) { + Bmp[N] = 1 << i; + Bxp[N] = Bmp[N] - 1; + + if (Opc != OP_LT && Opc != OP_GE) + Bxp[N] |= Bmp[N]; // Found value must be included + + } else + Bxp[N] = (1 << i) - 1; + + if (noteq) + Bmp[N] = Bxp[N]; + + Void = !Bmp[N]; // There are no good values in the file + + for (i = 0; i < N; i++) { + Bxp[i] = ~0; + + if (noteq) + Bmp[i] = Bxp[i]; + + Void = Void && !Bmp[i]; + } // endfor i + + if (!Bmp[N] && !Bxp[N]) + N--; + + } // end of MakeValueBitmap + +/***********************************************************************/ +/* Evaluate XDB2 block filter for arithmetic operators. */ +/***********************************************************************/ +int BLKFILMR2::BlockEval(PGLOBAL g) + { +#if defined(_DEBUG) + assert (Colp->IsClustered()); +#endif + + int i, n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + bool fnd = FALSE, all = TRUE, gt = TRUE; + uint bres; + uint *bkmp = (uint*)Colp->GetBmap()->GetValPtr(n * Nbm); + + // Set result as if Opc were OP_EQ, OP_LT, or OP_LE + for (i = 0; i < Nbm; i++) + if (i <= N) { + if ((bres = Bmp[i] & bkmp[i])) + fnd = TRUE; // Some good value(s) found in the block + + if (bres != bkmp[i]) + all = FALSE; // Not all block values are good + + if (Bxp[i] & bkmp[i]) + gt = FALSE; // Not all block values are > good value(s) + + } else if (bkmp[i]) { + all = FALSE; + break; + } // endif's + + if (!fnd) { + if (Void || (gt && Sorted)) + Result = -2; // No (more) good block in file + else + Result = -1; // No good values in this block + + } else + Result = (all) ? 1 : 0; // All block values are good + + // For OP_NE, OP_GE, and OP_GT the result must be inverted. + switch (Opc) { + case OP_NE: + case OP_GE: + case OP_GT: + Result = -Result; + break; + } // endswitch Opc + + if (trace) + htrc("BlockEval2: op=%d n=%d rc=%d\n", Opc, n, Result); + + return Result; + } // end of BlockEval + +/***********************************************************************/ +/* BLKSPCARI constructor. */ +/***********************************************************************/ +BLKSPCARI::BLKSPCARI(PTDBDOS tdbp, int op, PXOB *xp, int bsize) + : BLOCKFILTER(tdbp, op) + { + if (xp[1]->GetType() == TYPE_COLBLK) { + Cpx = (PCOL)xp[1]; // Subquery pseudo constant column + Correl = TRUE; + } else + Cpx = NULL; + + Valp = xp[1]->GetValue(); + Val = (int)xp[1]->GetValue()->GetIntValue(); + Bsize = bsize; + } // end of BLKFILARI constructor + +/***********************************************************************/ +/* Reset: re-eval the constant value in the case of pseudo constant */ +/* column use in a correlated subquery. 
*/ +/***********************************************************************/ +void BLKSPCARI::Reset(PGLOBAL g) + { + if (Cpx) { + Cpx->Reset(); + Cpx->Eval(g); + Val = (int)Valp->GetIntValue(); + } // endif Cpx + + } // end of Reset + +/***********************************************************************/ +/* Evaluate block filter for arithmetic operators (ROWID) */ +/***********************************************************************/ +int BLKSPCARI::BlockEval(PGLOBAL g) + { + int mincmp, maxcmp, n, m; + + n = Tdbp->GetCurBlk(); + m = n * Bsize + 1; // Minimum Rowid value for this block + mincmp = (Val > m) ? 1 : (Val < m) ? (-1) : 0; + m = (n + 1) * Bsize; // Maximum Rowid value for this block + maxcmp = (Val > m) ? 1 : (Val < m) ? (-1) : 0; + + switch (Opc) { + case OP_EQ: + case OP_NE: + if (mincmp < 0) // Means minval > Val + Result = -2; // Always sorted + else if (maxcmp > 0) // Means maxval < Val + Result = -1; + else if (!mincmp && !maxcmp) // minval = maxval = val + Result = 1; + else + Result = 0; + + break; + case OP_GT: + case OP_LE: + if (mincmp < 0) // minval > Val + Result = 2; // Always sorted + else if (maxcmp < 0) // maxval > Val + Result = 0; + else // maxval <= Val + Result = -1; + + break; + case OP_GE: + case OP_LT: + if (mincmp <= 0) // minval >= Val + Result = 2; // Always sorted + else if (maxcmp <= 0) // Maxval >= Val + Result = 0; + else // Maxval < Val + Result = -1; + + break; + } // endswitch Opc + + switch (Opc) { + case OP_NE: + case OP_LE: + case OP_LT: + Result = -Result; + break; + } // endswitch Opc + + if (trace) + htrc("BlockEval: op=%d n=%d rc=%d\n", Opc, n, Result); + + return Result; + } // end of BlockEval + +/* ------------------------ Class BLKFILIN --------------------------- */ + +/***********************************************************************/ +/* BLKFILIN constructor. */ +/***********************************************************************/ +BLKFILIN::BLKFILIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp) + : BLOCKFILTER(tdbp, op) + { + if (op == OP_IN) { + Opc = OP_EQ; + Opm = 1; + } else { + Opc = op; + Opm = opm; + } // endif op + + Colp = (PDOSCOL)xp[0]; + Arap = (PARRAY)xp[1]; + Type = Arap->GetResultType(); + + if (Colp->GetResultType() != Type) { + sprintf(g->Message, "BLKFILIN: %s", MSG(VALTYPE_NOMATCH)); + longjmp(g->jumper[g->jump_level], 99); + } else if (Colp->GetValue()->IsCi()) + Arap->SetPrecision(g, 1); // Case insensitive + + Sorted = Colp->IsSorted() > 0; + } // end of BLKFILIN constructor + +/***********************************************************************/ +/* Reset: have the sorted array reset its Bot value to -1 (bottom). */ +/***********************************************************************/ +void BLKFILIN::Reset(PGLOBAL g) + { + Arap->Reset(); +// MakeValueBitmap(); // Does nothing for class BLKFILIN + } // end of Reset + +/***********************************************************************/ +/* Evaluate block filter for a IN operator on a constant array. */ +/* Note: here we need to use the GetValPtrEx function to get a zero */ +/* ended string in case of string argument. This is because the ARRAY */ +/* can have a different width than the char column. 
*/ +/***********************************************************************/ +int BLKFILIN::BlockEval(PGLOBAL g) + { + int n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + void *minp = Colp->GetMin()->GetValPtrEx(n); + void *maxp = Colp->GetMax()->GetValPtrEx(n); + + Result = Arap->BlockTest(g, Opc, Opm, minp, maxp, Sorted); + return Result; + } // end of BlockEval + +/* ------------------------ Class BLKFILIN2 -------------------------- */ + +/***********************************************************************/ +/* BLKFILIN2 constructor. */ +/* New version that takes care of all operators and modificators. */ +/* It is also ready to handle the case of correlated sub-selects. */ +/***********************************************************************/ +BLKFILIN2::BLKFILIN2(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp) + : BLKFILIN(g, tdbp, op, opm, xp) + { + Nbm = Colp->GetNbm(); + Valp = AllocateValue(g, Colp->GetValue()); + Invert = (Opc == OP_NE || Opc == OP_GE || Opc ==OP_GT); + Bmp = (uint*)PlugSubAlloc(g, NULL, Nbm * sizeof(uint)); + Bxp = (uint*)PlugSubAlloc(g, NULL, Nbm * sizeof(uint)); + MakeValueBitmap(); + } // end of BLKFILIN2 constructor + +/***********************************************************************/ +/* MakeValueBitmap: Set the constant values bit map. It can be void */ +/* if the constant values are not in the column distinct values list. */ +/* The bitmaps are prepared for the EQ, LE, and LT operators and */ +/* takes care of the ALL and ANY modificators. If the operators are */ +/* NE, GE, or GT the modificator is inverted and the result will be. */ +/***********************************************************************/ +void BLKFILIN2::MakeValueBitmap(void) + { + int i, k, n, ndv = Colp->GetNdv(); + bool found, noteq = !(Opc == OP_EQ || Opc == OP_NE); + bool all = (!Invert) ? (Opm == 2) : (Opm != 2); + uint btp; + PVBLK dval = Colp->GetDval(); + + N = -1; + + // Take care of special cases + if (!(n = Arap->GetNval())) { + // Return TRUE for ALL because it means that there are no item that + // does not verify the condition, which is true indeed. + // Return FALSE for ANY because TRUE means that there is at least + // one item that verifies the condition, which is false. + Result = (Opm == 2) ? 2 : -2; + return; + } else if (!noteq && all && n > 1) { + // An item cannot be equal to all different values + // or an item is always unequal to any different values + Result = (Opc == OP_EQ) ? -2 : 2; + return; + } // endif's + + for (i = 0; i < Nbm; i++) + Bmp[i] = Bxp[i] = 0; + + for (k = 0; k < n; k++) { + Arap->GetNthValue(Valp, k); + found = dval->Locate(Valp, i); + N = i / MAXBMP; + btp = 1 << (i % MAXBMP); + + if (found) + Bmp[N] |= btp; + + // For LT and LE if ALL the condition applies to the smallest item + // if ANY it applies to the largest item. In the case of EQ we come + // here only if ANY or if n == 1, so it does applies to the largest. + if ((!k && all) || (k == n - 1 && !all)) { + Bxp[N] = btp - 1; + + if (found && Opc != OP_LT && Opc != OP_GE) + Bxp[N] |= btp; // Found value must be included + + } // endif k, opm + + } // endfor k + + if (noteq) + Bmp[N] = Bxp[N]; + + Void = !Bmp[N]; // There are no good values in the file + + for (i = 0; i < N; i++) { + Bxp[i] = ~0; + + if (noteq) { + Bmp[i] = Bxp[i]; + Void = FALSE; + } // endif noteq + + } // endfor i + + if (!Bmp[N] && !Bxp[N]) { + if (--N < 0) + // All array values are smaller than block values + Result = (Invert) ? 
2 : -2; + + } else if (N == Nbm - 1 && (signed)Bmp[N] == (1 << (ndv % MAXBMP)) - 1) { + // Condition will be always TRUE or FALSE for the whole file + Result = (Invert) ? -2 : 2; + N = -1; + } // endif's + + } // end of MakeValueBitmap + +/***********************************************************************/ +/* Evaluate block filter for set operators on a constant array. */ +/* Note: here we need to use the GetValPtrEx function to get a zero */ +/* ended string in case of string argument. This is because the ARRAY */ +/* can have a different width than the char column. */ +/***********************************************************************/ +int BLKFILIN2::BlockEval(PGLOBAL g) + { + if (N < 0) + return Result; // Was set in MakeValueBitmap + + int i, n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + bool fnd = FALSE, all = TRUE, gt = TRUE; + uint bres; + uint *bkmp = (uint*)Colp->GetBmap()->GetValPtr(n * Nbm); + + // Set result as if Opc were OP_EQ, OP_LT, or OP_LE + // The difference between ALL or ANY was handled in MakeValueBitmap + for (i = 0; i < Nbm; i++) + if (i <= N) { + if ((bres = Bmp[i] & bkmp[i])) + fnd = TRUE; + + if (bres != bkmp[i]) + all = FALSE; + + if (Bxp[i] & bkmp[i]) + gt = FALSE; + + } else if (bkmp[i]) { + all = FALSE; + break; + } // endif's + + if (!fnd) { + if (Void || (Sorted && gt)) + Result = -2; // No more good block in file + else + Result = -1; // No good values in this block + + } else if (all) + Result = 1; // All block values are good + else + Result = 0; // Block contains some good values + + // For OP_NE, OP_GE, and OP_GT the result must be inverted. + switch (Opc) { + case OP_NE: + case OP_GE: + case OP_GT: + Result = -Result; + break; + } // endswitch Opc + + return Result; + } // end of BlockEval + +#if 0 +/***********************************************************************/ +/* BLKFILIN2 constructor. */ +/***********************************************************************/ +BLKFILIN2::BLKFILIN2(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp) + : BLKFILIN(g, tdbp, op, opm, xp) + { + // Currently, bitmap matching is only implemented for the IN operator + if (!(Bitmap = (op == OP_IN || (op == OP_EQ && opm != 2)))) { + Nbm = Colp->GetNbm(); + N = 0; + return; // Revert to standard minmax method + } // endif minmax + + int i, n; + ULONG btp; + PVAL valp = AllocateValue(g, Colp->GetValue()); + PVBLK dval = Colp->GetDval(); + + Nbm = Colp->GetNbm(); + N = -1; + Bmp = (PULONG)PlugSubAlloc(g, NULL, Nbm * sizeof(ULONG)); + Bxp = (PULONG)PlugSubAlloc(g, NULL, Nbm * sizeof(ULONG)); + + for (i = 0; i < Nbm; i++) + Bmp[i] = Bxp[i] = 0; + + for (n = 0; n < Arap->GetNval(); n++) { + Arap->GetNthValue(valp, n); + + if ((i = dval->Find(valp)) >= 0) + Bmp[i / MAXBMP] |= 1 << (i % MAXBMP); + + } // endfor n + + for (i = Nbm - 1; i >= 0; i--) + if (Bmp[i]) { + for (btp = Bmp[i]; btp; btp >>= 1) + Bxp[i] |= btp; + + for (N = i--; i >= 0; i--) + Bxp[i] = ~0; + + break; + } // endif Bmp + + } // end of BLKFILIN2 constructor + +/***********************************************************************/ +/* Evaluate block filter for a IN operator on a constant array. */ +/* Note: here we need to use the GetValPtrEx function to get a zero */ +/* ended string in case of string argument. This is because the ARRAY */ +/* can have a different width than the char column. 
*/ +/***********************************************************************/ +int BLKFILIN2::BlockEval(PGLOBAL g) + { + if (N < 0) + return -2; // IN list contains no good values + + int i, n = ((PTDBDOS)Colp->GetTo_Tdb())->GetCurBlk(); + bool fnd = FALSE, all = TRUE, gt = TRUE; + ULONG bres; + PULONG bkmp = (PULONG)Colp->GetBmap()->GetValPtr(n * Nbm); + + if (Bitmap) { + // For IN operator use the bitmap method + for (i = 0; i < Nbm; i++) + if (i <= N) { + if ((bres = Bmp[i] & bkmp[i])) + fnd = TRUE; + + if (bres != bkmp[i]) + all = FALSE; + + if (Bxp[i] & bkmp[i]) + gt = FALSE; + + } else if (bkmp[i]) { + all = FALSE; + break; + } // endif's + + if (!fnd) { + if (Sorted && gt) + Result = -2; // No more good block in file + else + Result = -1; // No good values in this block + + } else if (all) + Result = 1; // All block values are good + else + Result = 0; // Block contains some good values + + } else { + // For other than IN operators, revert to standard minmax method + int n = 0, ndv = Colp->GetNdv(); + void *minp = NULL; + void *maxp = NULL; + ULONG btp; + PVBLK dval = Colp->GetDval(); + + for (i = 0; i < Nbm; i++) + for (btp = 1; btp && n < ndv; btp <<= 1, n++) + if (btp & bkmp[i]) { + if (!minp) + minp = dval->GetValPtrEx(n); + + maxp = dval->GetValPtrEx(n); + } // endif btp + + Result = Arap->BlockTest(g, Opc, Opm, minp, maxp, Colp->IsSorted()); + } // endif Bitmap + + return Result; + } // end of BlockEval +#endif // 0 + +/* ------------------------ Class BLKSPCIN --------------------------- */ + +/***********************************************************************/ +/* BLKSPCIN constructor. */ +/***********************************************************************/ +BLKSPCIN::BLKSPCIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, + PXOB *xp, int bsize) + : BLOCKFILTER(tdbp, op) + { + if (op == OP_IN) { + Opc = OP_EQ; + Opm = 1; + } else + Opm = opm; + + Arap = (PARRAY)xp[1]; +#if defined(_DEBUG) + assert (Opm); + assert (Arap->GetResultType() == TYPE_INT); +#endif + Bsize = bsize; + } // end of BLKSPCIN constructor + +/***********************************************************************/ +/* Reset: have the sorted array reset its Bot value to -1 (bottom). */ +/***********************************************************************/ +void BLKSPCIN::Reset(PGLOBAL g) + { + Arap->Reset(); + } // end of Reset + +/***********************************************************************/ +/* Evaluate block filter for a IN operator on a constant array. */ +/***********************************************************************/ +int BLKSPCIN::BlockEval(PGLOBAL g) + { + int n = Tdbp->GetCurBlk(); + int minrow = n * Bsize + 1; // Minimum Rowid value for this block + int maxrow = (n + 1) * Bsize; // Maximum Rowid value for this block + + Result = Arap->BlockTest(g, Opc, Opm, &minrow, &maxrow, TRUE); + return Result; + } // end of BlockEval + +/* ------------------------------------------------------------------- */ + +#if 0 +/***********************************************************************/ +/* Implementation of the BLOCKINDEX class. */ +/***********************************************************************/ +BLOCKINDEX::BLOCKINDEX(PBX nx, PDOSCOL cp, PKXBASE kp) + { + Next = nx; + Tdbp = (cp) ? (PTDBDOS)cp->GetTo_Tdb() : NULL; + Colp = cp; + Kxp = kp; + Type = (cp) ? cp->GetResultType() : TYPE_ERROR; + Sorted = (cp) ? 
cp->IsSorted() > 0 : FALSE; + Result = 0; + } // end of BLOCKINDEX constructor + +/***********************************************************************/ +/* Reset Bot and Top values of optimized Kindex blocks. */ +/***********************************************************************/ +void BLOCKINDEX::Reset(void) + { + if (Next) + Next->Reset(); + + Kxp->Reset(); + } // end of Reset + +/***********************************************************************/ +/* Evaluate block indexing test. */ +/***********************************************************************/ +int BLOCKINDEX::BlockEval(PGLOBAL g) + { +#if defined(_DEBUG) + assert (Tdbp && Colp); +#endif + int n = Tdbp->GetCurBlk(); + void *minp = Colp->GetMin()->GetValPtr(n); + void *maxp = Colp->GetMax()->GetValPtr(n); + + Result = Kxp->BlockTest(g, minp, maxp, Type, Sorted); + return Result; + } // end of BlockEval + +/***********************************************************************/ +/* Make file output of BLOCKINDEX contents. */ +/***********************************************************************/ +void BLOCKINDEX::Print(PGLOBAL g, FILE *f, UINT n) + { + char m[64]; + + memset(m, ' ', n); // Make margin string + m[n] = '\0'; + + fprintf(f, "%sBLOCKINDEX: at %p next=%p col=%s kxp=%p result=%d\n", + m, this, Next, (Colp) ? Colp->GetName() : "Rowid", Kxp, Result); + + if (Next) + Next->Print(g, f, n); + + } // end of Print + +/***********************************************************************/ +/* Make string output of BLOCKINDEX contents. */ +/***********************************************************************/ +void BLOCKINDEX::Print(PGLOBAL g, char *ps, UINT z) + { + strncat(ps, "BlockIndex(es)", z); + } // end of Print + +/* ------------------------------------------------------------------- */ + +/***********************************************************************/ +/* Implementation of the BLOCKINDX2 class. */ +/***********************************************************************/ +BLOCKINDX2::BLOCKINDX2(PBX nx, PDOSCOL cp, PKXBASE kp) + : BLOCKINDEX(nx, cp, kp) + { + Nbm = Colp->GetNbm(); + Dval = Colp->GetDval(); + Bmap = Colp->GetBmap(); +#if defined(_DEBUG) + assert(Dval && Bmap); +#endif + } // end of BLOCKINDX2 constructor + +/***********************************************************************/ +/* Evaluate block indexing test. */ +/***********************************************************************/ +int BLOCKINDX2::BlockEval(PGLOBAL g) + { + int n = Tdbp->GetCurBlk(); + PUINT bmp = (PUINT)Bmap->GetValPtr(n * Nbm); + + Result = Kxp->BlockTst2(g, Dval, bmp, Nbm, Type, Sorted); + return Result; + } // end of BlockEval + +/* ------------------------------------------------------------------- */ + +/***********************************************************************/ +/* Implementation of the BLKSPCINDX class. */ +/***********************************************************************/ +BLKSPCINDX::BLKSPCINDX(PBX nx, PTDBDOS tp, PKXBASE kp, int bsize) + : BLOCKINDEX(nx, NULL, kp) + { + Tdbp = tp; + Bsize = bsize; + Type = TYPE_INT; + Sorted = TRUE; + } // end of BLKSPCINDX constructor + +/***********************************************************************/ +/* Evaluate block indexing test. 
*/ +/***********************************************************************/ +int BLKSPCINDX::BlockEval(PGLOBAL g) + { + int n = Tdbp->GetCurBlk(); + int minrow = n * Bsize + 1; // Minimum Rowid value for this block + int maxrow = (n + 1) * Bsize; // Maximum Rowid value for this block + + Result = Kxp->BlockTest(g, &minrow, &maxrow, TYPE_INT, TRUE); + return Result; + } // end of BlockEval +#endif // 0 diff --git a/storage/connect/blkfil.h b/storage/connect/blkfil.h new file mode 100644 index 00000000000..00b00139042 --- /dev/null +++ b/storage/connect/blkfil.h @@ -0,0 +1,295 @@ +/*************** BlkFil H Declares Source Code File (.H) ***************/ +/* Name: BLKFIL.H Version 2.1 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2010 */ +/* */ +/* This file contains the block optimization related classes declares */ +/***********************************************************************/ +#ifndef __BLKFIL__ +#define __BLKFIL__ + +typedef class BLOCKFILTER *PBF; +typedef class BLOCKINDEX *PBX; + +/***********************************************************************/ +/* Definition of class BLOCKFILTER. */ +/***********************************************************************/ +class DllExport BLOCKFILTER : public BLOCK { /* Block Filter */ + friend class BLKFILLOG; + public: + // Constructors + BLOCKFILTER(PTDBDOS tdbp, int op); + + // Implementation + int GetResult(void) {return Result;} + bool Correlated(void) {return Correl;} + + // Methods + virtual void Reset(PGLOBAL) = 0; + virtual int BlockEval(PGLOBAL) = 0; + virtual void Print(PGLOBAL g, FILE *f, uint n); + virtual void Print(PGLOBAL g, char *ps, uint z); + + protected: + BLOCKFILTER(void) {} // Standard constructor not to be used + + // Members + PTDBDOS Tdbp; // Owner TDB + bool Correl; // TRUE for correlated subqueries + int Opc; // Comparison operator + int Opm; // Operator modificator + int Result; // Result from evaluation + }; // end of class BLOCKFILTER + +/***********************************************************************/ +/* Definition of class BLKFILLOG (with Op=OP_AND,OP_OR, or OP_NOT) */ +/***********************************************************************/ +class DllExport BLKFILLOG : public BLOCKFILTER { /* Logical Op Block Filter */ + public: + // Constructors + BLKFILLOG(PTDBDOS tdbp, int op, PBF *bfp, int n); + + // Methods + virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + + protected: + BLKFILLOG(void) {} // Standard constructor not to be used + + // Members + PBF *Fil; // Points to Block filter args + int N; + }; // end of class BLKFILLOG + +/***********************************************************************/ +/* Definition of class BLKFILARI (with Op=OP_EQ,NE,GT,GE,LT, or LE) */ +/***********************************************************************/ +class DllExport BLKFILARI : public BLOCKFILTER { /* Arithm. 
Op Block Filter */ + public: + // Constructors + BLKFILARI(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp); + + // Methods + virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + virtual void MakeValueBitmap(void) {} + + protected: + BLKFILARI(void) {} // Standard constructor not to be used + + // Members + PDOSCOL Colp; // Points to column argument + PCOL Cpx; // Point to subquery "constant" column + PVAL Valp; // Points to constant argument Value + bool Sorted; // True if the column is sorted + }; // end of class BLKFILARI + +/***********************************************************************/ +/* Definition of class BLKFILAR2 (with Op=OP_EQ,NE,GT,GE,LT, or LE) */ +/***********************************************************************/ +class DllExport BLKFILAR2 : public BLKFILARI { /* Arithm. Op Block Filter */ + public: + // Constructors + BLKFILAR2(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp); + + // Methods + virtual int BlockEval(PGLOBAL g); + virtual void MakeValueBitmap(void); + + protected: + BLKFILAR2(void) {} // Standard constructor not to be used + + // Members + uint Bmp; // The value bitmap used to test blocks + uint Bxp; // Bitmap used when Opc = OP_EQ + }; // end of class BLKFILAR2 + +/***********************************************************************/ +/* Definition of class BLKFILAR2 (with Op=OP_EQ,NE,GT,GE,LT, or LE) */ +/* To be used when the bitmap is an array of ULONG bitmaps; */ +/***********************************************************************/ +class DllExport BLKFILMR2 : public BLKFILARI { /* Arithm. Op Block Filter */ + public: + // Constructors + BLKFILMR2(PGLOBAL g, PTDBDOS tdbp, int op, PXOB *xp); + + // Methods + virtual int BlockEval(PGLOBAL g); + virtual void MakeValueBitmap(void); + + protected: + BLKFILMR2(void) {} // Standard constructor not to be used + + // Members + int Nbm; // The number of ULONG bitmaps + int N; // The position of the leftmost ULONG + bool Void; // True if all file blocks can be skipped + uint *Bmp; // The values bitmaps used to test blocks + uint *Bxp; // Bit of values <= max value + }; // end of class BLKFILMR2 + +/***********************************************************************/ +/* Definition of class BLKSPCARI (with Op=OP_EQ,NE,GT,GE,LT, or LE) */ +/***********************************************************************/ +class DllExport BLKSPCARI : public BLOCKFILTER { /* Arithm. Op Block Filter */ + public: + // Constructors + BLKSPCARI(PTDBDOS tdbp, int op, PXOB *xp, int bsize); + + // Methods + virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + + protected: + BLKSPCARI(void) {} // Standard constructor not to be used + + // Members + PCOL Cpx; // Point to subquery "constant" column + PVAL Valp; // Points to constant argument Value + int Val; // Constant argument Value + int Bsize; // Table block size + }; // end of class BLKSPCARI + +/***********************************************************************/ +/* Definition of class BLKFILIN (with Op=OP_IN) */ +/***********************************************************************/ +class DllExport BLKFILIN : public BLOCKFILTER { // With array arguments. 
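+  // BLKFILIN is built from a column operand and an ARRAY operand; OP_IN is
+  // normalized to OP_EQ with Opm = 1 by the constructor, and BlockEval
+  // simply passes the current block min/max values to ARRAY::BlockTest.
+  // Illustrative construction only (g, tdbp, colp and arrayp are
+  // hypothetical variables of the proper types):
+  //   PXOB xp[2] = {colp, arrayp};
+  //   PBF  bfp = new(g) BLKFILIN(g, tdbp, OP_IN, 1, xp);
+  //   int  rc  = bfp->BlockEval(g);   // fuzzy code in the -2..2 range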
+ public: + // Constructors + BLKFILIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp); + + // Methods + virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + virtual void MakeValueBitmap(void) {} + + protected: + // Member + PDOSCOL Colp; // Points to column argument + PARRAY Arap; // Points to array argument + bool Sorted; // True if the column is sorted + int Type; // Type of array elements + }; // end of class BLKFILIN + +/***********************************************************************/ +/* Definition of class BLKFILIN2 (with Op=OP_IN) */ +/***********************************************************************/ +class DllExport BLKFILIN2 : public BLKFILIN { // With array arguments. + public: + // Constructors + BLKFILIN2(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp); + + // Methods +//virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + virtual void MakeValueBitmap(void); + + protected: + // Member + int Nbm; // The number of ULONG bitmaps + int N; // The position of the leftmost ULONG +//bool Bitmap; // True for IN operator (temporary) + bool Void; // True if all file blocks can be skipped + bool Invert; // True when Result must be inverted + uint *Bmp; // The values bitmaps used to test blocks + uint *Bxp; // Bit of values <= max value + PVAL Valp; // Used while building the bitmaps + }; // end of class BLKFILIN2 + +/***********************************************************************/ +/* Definition of class BLKSPCIN (with Op=OP_IN) Special column */ +/***********************************************************************/ +class DllExport BLKSPCIN : public BLOCKFILTER { // With array arguments. + public: + // Constructors + BLKSPCIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp, int bsize); + + // Methods + virtual void Reset(PGLOBAL g); + virtual int BlockEval(PGLOBAL g); + + protected: + // Member + PARRAY Arap; // Points to array argument + int Bsize; // Table block size + }; // end of class BLKSPCIN + +// ---------------- Class used in block indexing testing ---------------- + +#if 0 +/***********************************************************************/ +/* Definition of class BLOCKINDEX. */ +/* Used to test the indexing to joined tables when the foreign key is */ +/* a clustered or sorted column. If the table is joined to several */ +/* tables, blocks will be chained together. */ +/***********************************************************************/ +class DllExport BLOCKINDEX : public BLOCK { /* Indexing Test Block */ + public: + // Constructors + BLOCKINDEX(PBX nx, PDOSCOL cp, PKXBASE kp); + + // Implementation + PBX GetNext(void) {return Next;} + + // Methods + void Reset(void); + virtual int BlockEval(PGLOBAL); + virtual void Print(PGLOBAL g, FILE *f, UINT n); + virtual void Print(PGLOBAL g, char *ps, UINT z); + + protected: + BLOCKINDEX(void) {} // Standard constructor not to be used + + // Members + PBX Next; // To next Index Block + PTDBDOS Tdbp; // To table description block + PDOSCOL Colp; // Clustered foreign key + PKXBASE Kxp; // To Kindex of joined table + bool Sorted; // TRUE if column is sorted + int Type; // Col/Index type + int Result; // Result from evaluation + }; // end of class BLOCKINDEX + +/***********************************************************************/ +/* Definition of class BLOCKINDX2. 
(XDB2) */ +/***********************************************************************/ +class DllExport BLOCKINDX2 : public BLOCKINDEX { /* Indexing Test Block */ + public: + // Constructors + BLOCKINDX2(PBX nx, PDOSCOL cp, PKXBASE kp); + + // Methods + virtual int BlockEval(PGLOBAL); + + protected: + BLOCKINDX2(void) {} // Standard constructor not to be used + + // Members + int Nbm; // The number of ULONG bitmaps + PVBLK Dval; // Array of column distinct values + PVBLK Bmap; // Array of block bitmap values + }; // end of class BLOCKINDX2 + +/***********************************************************************/ +/* Definition of class BLKSPCINDX. */ +/* Used to test the indexing to joined tables when the foreign key is */ +/* the ROWID special column. If the table is joined to several */ +/* tables, blocks will be chained together. */ +/***********************************************************************/ +class DllExport BLKSPCINDX : public BLOCKINDEX { /* Indexing Test Block */ + public: + // Constructors + BLKSPCINDX(PBX nx, PTDBDOS tp, PKXBASE kp, int bsize); + + // Methods + virtual int BlockEval(PGLOBAL); + + protected: + BLKSPCINDX(void) {} // Standard constructor not to be used + + // Members + int Bsize; // Table block size + }; // end of class BLOCKINDEX +#endif // 0 + +#endif // __BLKFIL__ diff --git a/storage/connect/catalog.h b/storage/connect/catalog.h index 6e6cf86fc87..5baab294737 100644 --- a/storage/connect/catalog.h +++ b/storage/connect/catalog.h @@ -1,7 +1,7 @@ /*************** Catalog H Declares Source Code File (.H) **************/ -/* Name: CATALOG.H Version 3.2 */ +/* Name: CATALOG.H Version 3.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2000-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2000-2014 */ /* */ /* This file contains the CATALOG PlugDB classes definitions. */ /***********************************************************************/ @@ -43,6 +43,8 @@ typedef struct _colinfo { int Key; int Precision; int Scale; + int Opt; + int Freq; char *Remark; char *Datefmt; char *Fieldfmt; @@ -66,11 +68,11 @@ class DllExport CATALOG { bool GetDefHuge(void) {return DefHuge;} void SetDefHuge(bool b) {DefHuge = b;} char *GetCbuf(void) {return Cbuf;} - char *GetDataPath(void) {return (char*)DataPath;} +//char *GetDataPath(void) {return (char*)DataPath;} // Methods virtual void Reset(void) {} - virtual void SetDataPath(PGLOBAL g, const char *path) {} +//virtual void SetDataPath(PGLOBAL g, const char *path) {} virtual bool CheckName(PGLOBAL g, char *name) {return true;} virtual bool ClearName(PGLOBAL g, PSZ name) {return true;} virtual PRELDEF MakeOneTableDesc(PGLOBAL g, LPCSTR name, LPCSTR am) {return NULL;} @@ -82,7 +84,7 @@ class DllExport CATALOG { virtual bool TestCond(PGLOBAL g, const char *name, const char *type) {return true;} virtual bool DropTable(PGLOBAL g, PSZ name, bool erase) {return true;} - virtual PTDB GetTable(PGLOBAL g, PTABLE tablep, + virtual PTDB GetTable(PGLOBAL g, PTABLE tablep, MODE mode = MODE_READ, LPCSTR type = NULL) {return NULL;} virtual void TableNames(PGLOBAL g, char *buffer, int maxbuf, int info[]) {} @@ -104,7 +106,7 @@ class DllExport CATALOG { int Cblen; /* Length of suballoc. 
buffer */ CURTAB Ctb; /* Used to enumerate tables */ bool DefHuge; /* true: tables default to huge */ - LPCSTR DataPath; /* Is the Path of DB data dir */ +//LPCSTR DataPath; /* Is the Path of DB data dir */ }; // end of class CATALOG #endif // __CATALOG__H diff --git a/storage/connect/checklvl.h b/storage/connect/checklvl.h index 5505534678d..d1e37afbc93 100644 --- a/storage/connect/checklvl.h +++ b/storage/connect/checklvl.h @@ -34,9 +34,10 @@ enum XMOD {XMOD_EXECUTE = 0, /* DOS execution mode */ /***********************************************************************/ /* Following definitions indicate the use of a temporay file. */ /***********************************************************************/ -enum USETEMP {TMP_AUTO = 0, /* Best choice */ - TMP_NO = 1, /* Never */ +enum USETEMP {TMP_NO = 0, /* Never */ + TMP_AUTO = 1, /* Best choice */ TMP_YES = 2, /* Always */ - TMP_FORCE = 3}; /* Forced for MAP tables */ + TMP_FORCE = 3, /* Forced for MAP tables */ + TMP_TEST = 4}; /* Testing value */ #endif // _CHKLVL_DEFINED_ diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp index ffa29bb9821..81ab1ad7245 100644 --- a/storage/connect/colblk.cpp +++ b/storage/connect/colblk.cpp @@ -1,376 +1,419 @@ -/************* Colblk C++ Functions Source Code File (.CPP) ************/ -/* Name: COLBLK.CPP Version 2.0 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2013 */ -/* */ -/* This file contains the COLBLK class functions. */ -/***********************************************************************/ - -/***********************************************************************/ -/* Include relevant MariaDB header file. */ -/***********************************************************************/ -#include "my_global.h" - -/***********************************************************************/ -/* Include required application header files */ -/* global.h is header containing all global Plug declarations. */ -/* plgdbsem.h is header containing the DB applic. declarations. */ -/***********************************************************************/ -#include "global.h" -#include "plgdbsem.h" -#include "tabcol.h" -#include "colblk.h" -#include "xindex.h" -#include "xtable.h" - -extern "C" int trace; - -/***********************************************************************/ -/* COLBLK protected constructor. */ -/***********************************************************************/ -COLBLK::COLBLK(PCOLDEF cdp, PTDB tdbp, int i) - { - Next = NULL; - Index = i; -//Number = 0; - ColUse = 0; - - if ((Cdp = cdp)) { - Name = cdp->Name; - Format = cdp->F; - Long = cdp->Long; - Precision = cdp->Precision; - Buf_Type = cdp->Buf_Type; - ColUse |= cdp->Flags; // Used by CONNECT - Nullable = !!(cdp->Flags & U_NULLS); - Unsigned = !!(cdp->Flags & U_UNSIGNED); - } else { - Name = NULL; - memset(&Format, 0, sizeof(FORMAT)); - Long = 0; - Precision = 0; - Buf_Type = TYPE_ERROR; - Nullable = false; - Unsigned = false; - } // endif cdp - - To_Tdb = tdbp; - Status = BUF_NO; -//Value = NULL; done in XOBJECT constructor - To_Kcol = NULL; - } // end of COLBLK constructor - -/***********************************************************************/ -/* COLBLK constructor used for copying columns. */ -/* tdbp is the pointer to the new table descriptor. 
*/ -/***********************************************************************/ -COLBLK::COLBLK(PCOL col1, PTDB tdbp) - { - PCOL colp; - - // Copy the old column block to the new one - *this = *col1; - Next = NULL; -//To_Orig = col1; - To_Tdb = tdbp; - - if (trace > 1) - htrc(" copying COLBLK %s from %p to %p\n", Name, col1, this); - - if (tdbp) - // Attach the new column to the table block - if (!tdbp->GetColumns()) - tdbp->SetColumns(this); - else { - for (colp = tdbp->GetColumns(); colp->Next; colp = colp->Next) ; - - colp->Next = this; - } // endelse - - } // end of COLBLK copy constructor - -/***********************************************************************/ -/* Reset the column descriptor to non evaluated yet. */ -/***********************************************************************/ -void COLBLK::Reset(void) - { - Status &= ~BUF_READ; - } // end of Reset - -/***********************************************************************/ -/* Compare: compares itself to an (expression) object and returns */ -/* true if it is equivalent. */ -/***********************************************************************/ -bool COLBLK::Compare(PXOB xp) - { - return (this == xp); - } // end of Compare - -/***********************************************************************/ -/* SetFormat: function used to set SELECT output format. */ -/***********************************************************************/ -bool COLBLK::SetFormat(PGLOBAL g, FORMAT& fmt) - { - fmt = Format; - - if (trace > 1) - htrc("COLBLK: %p format=%c(%d,%d)\n", - this, *fmt.Type, fmt.Length, fmt.Prec); - - return false; - } // end of SetFormat - -/***********************************************************************/ -/* Eval: get the column value from the last read record or from a */ -/* matching Index column if there is one. */ -/***********************************************************************/ -bool COLBLK::Eval(PGLOBAL g) - { - if (trace > 1) - htrc("Col Eval: %s status=%.4X\n", Name, Status); - - if (!GetStatus(BUF_READ)) { -// if (To_Tdb->IsNull()) -// Value->Reset(); - if (To_Kcol) - To_Kcol->FillValue(Value); - else - ReadColumn(g); - - AddStatus(BUF_READ); - } // endif - - return false; - } // end of Eval - -/***********************************************************************/ -/* InitValue: prepare a column block for read operation. */ -/* Now we use Format.Length for the len parameter to avoid strings */ -/* to be truncated when converting from string to coded string. */ -/* Added in version 1.5 is the arguments GetScale() and Domain */ -/* in calling AllocateValue. Domain is used for TYPE_DATE only. */ -/***********************************************************************/ -bool COLBLK::InitValue(PGLOBAL g) - { - if (Value) - return false; // Already done - - // Allocate a Value object - if (!(Value = AllocateValue(g, Buf_Type, Precision, - GetScale(), Unsigned, GetDomain()))) - return true; - - AddStatus(BUF_READY); - Value->SetNullable(Nullable); - - if (trace > 1) - htrc(" colp=%p type=%d value=%p coluse=%.4X status=%.4X\n", - this, Buf_Type, Value, ColUse, Status); - - return false; - } // end of InitValue - -/***********************************************************************/ -/* SetBuffer: prepare a column block for write operation. 
*/ -/***********************************************************************/ -bool COLBLK::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) - { - sprintf(g->Message, MSG(UNDEFINED_AM), "SetBuffer"); - return true; - } // end of SetBuffer - -/***********************************************************************/ -/* GetLength: returns an evaluation of the column string length. */ -/***********************************************************************/ -int COLBLK::GetLengthEx(void) - { - return Long; - } // end of GetLengthEx - -/***********************************************************************/ -/* ReadColumn: what this routine does is to access the last line */ -/* read from the corresponding table, extract from it the field */ -/* corresponding to this column and convert it to buffer type. */ -/***********************************************************************/ -void COLBLK::ReadColumn(PGLOBAL g) - { - sprintf(g->Message, MSG(UNDEFINED_AM), "ReadColumn"); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of ReadColumn - -/***********************************************************************/ -/* WriteColumn: what this routine does is to access the last line */ -/* read from the corresponding table, and rewrite the field */ -/* corresponding to this column from the column buffer and type. */ -/***********************************************************************/ -void COLBLK::WriteColumn(PGLOBAL g) - { - sprintf(g->Message, MSG(UNDEFINED_AM), "WriteColumn"); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of WriteColumn - -/***********************************************************************/ -/* Make file output of a column descriptor block. */ -/***********************************************************************/ -void COLBLK::Print(PGLOBAL g, FILE *f, uint n) - { - char m[64]; - int i; - PCOL colp; - - memset(m, ' ', n); // Make margin string - m[n] = '\0'; - - for (colp = To_Tdb->GetColumns(), i = 1; colp; colp = colp->Next, i++) - if (colp == this) - break; - - fprintf(f, "%sR%dC%d type=%d F=%.2s(%d,%d)", m, To_Tdb->GetTdb_No(), - i, GetAmType(), Format.Type, Format.Length, Format.Prec); - fprintf(f, - " coluse=%04X status=%04X buftyp=%d value=%p name=%s\n", - ColUse, Status, Buf_Type, Value, Name); - } // end of Print - -/***********************************************************************/ -/* Make string output of a column descriptor block. */ -/***********************************************************************/ -void COLBLK::Print(PGLOBAL g, char *ps, uint z) - { - sprintf(ps, "R%d.%s", To_Tdb->GetTdb_No(), Name); - } // end of Print - - -/***********************************************************************/ -/* SPCBLK constructor. */ -/***********************************************************************/ -SPCBLK::SPCBLK(PCOLUMN cp) - : COLBLK((PCOLDEF)NULL, cp->GetTo_Table()->GetTo_Tdb(), 0) - { - Name = (char*)cp->GetName(); - Precision = Long = 0; - Buf_Type = TYPE_ERROR; - } // end of SPCBLK constructor - -/***********************************************************************/ -/* WriteColumn: what this routine does is to access the last line */ -/* read from the corresponding table, and rewrite the field */ -/* corresponding to this column from the column buffer and type. 
*/ -/***********************************************************************/ -void SPCBLK::WriteColumn(PGLOBAL g) - { - sprintf(g->Message, MSG(SPCOL_READONLY), Name); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of WriteColumn - -/***********************************************************************/ -/* RIDBLK constructor for the ROWID special column. */ -/***********************************************************************/ -RIDBLK::RIDBLK(PCOLUMN cp, bool rnm) : SPCBLK(cp) - { - Precision = Long = 10; - Buf_Type = TYPE_INT; - Rnm = rnm; - *Format.Type = 'N'; - Format.Length = 10; - } // end of RIDBLK constructor - -/***********************************************************************/ -/* ReadColumn: what this routine does is to return the ordinal */ -/* number of the current row in the table (if Rnm is true) or in the */ -/* current file (if Rnm is false) the same except for multiple tables.*/ -/***********************************************************************/ -void RIDBLK::ReadColumn(PGLOBAL g) - { - Value->SetValue(To_Tdb->RowNumber(g, Rnm)); - } // end of ReadColumn - -/***********************************************************************/ -/* FIDBLK constructor for the FILEID special column. */ -/***********************************************************************/ -FIDBLK::FIDBLK(PCOLUMN cp) : SPCBLK(cp) - { -//Is_Key = 2; for when the MUL table indexed reading will be implemented. - Precision = Long = _MAX_PATH; - Buf_Type = TYPE_STRING; - *Format.Type = 'C'; - Format.Length = Long; -#if defined(WIN32) - Format.Prec = 1; // Case insensitive -#endif // WIN32 - Constant = (!((PTDBASE)To_Tdb)->GetDef()->GetMultiple() && - To_Tdb->GetAmType() != TYPE_AM_PLG && - To_Tdb->GetAmType() != TYPE_AM_PLM); - Fn = NULL; - } // end of FIDBLK constructor - -/***********************************************************************/ -/* ReadColumn: what this routine does is to return the current */ -/* file ID of the table (can change for Multiple tables). */ -/***********************************************************************/ -void FIDBLK::ReadColumn(PGLOBAL g) - { - if (Fn != ((PTDBASE)To_Tdb)->GetFile(g)) { - char filename[_MAX_PATH]; - - Fn = ((PTDBASE)To_Tdb)->GetFile(g); - PlugSetPath(filename, Fn, ((PTDBASE)To_Tdb)->GetPath()); - Value->SetValue_psz(filename); - } // endif Fn - - } // end of ReadColumn - -/***********************************************************************/ -/* TIDBLK constructor for the TABID special column. */ -/***********************************************************************/ -TIDBLK::TIDBLK(PCOLUMN cp) : SPCBLK(cp) - { -//Is_Key = 2; for when the MUL table indexed reading will be implemented. - Precision = Long = 64; - Buf_Type = TYPE_STRING; - *Format.Type = 'C'; - Format.Length = Long; - Format.Prec = 1; // Case insensitive - Constant = (To_Tdb->GetAmType() != TYPE_AM_TBL); - Tname = NULL; - } // end of TIDBLK constructor - -/***********************************************************************/ -/* ReadColumn: what this routine does is to return the table ID. */ -/***********************************************************************/ -void TIDBLK::ReadColumn(PGLOBAL g) - { - if (Tname == NULL) { - Tname = (char*)To_Tdb->GetName(); - Value->SetValue_psz(Tname); - } // endif Tname - - } // end of ReadColumn - -/***********************************************************************/ -/* SIDBLK constructor for the SERVID special column. 
*/ -/***********************************************************************/ -SIDBLK::SIDBLK(PCOLUMN cp) : SPCBLK(cp) - { -//Is_Key = 2; for when the MUL table indexed reading will be implemented. - Precision = Long = 64; - Buf_Type = TYPE_STRING; - *Format.Type = 'C'; - Format.Length = Long; - Format.Prec = 1; // Case insensitive - Constant = (To_Tdb->GetAmType() != TYPE_AM_TBL); - Sname = NULL; - } // end of TIDBLK constructor - -/***********************************************************************/ -/* ReadColumn: what this routine does is to return the server ID. */ -/***********************************************************************/ -void SIDBLK::ReadColumn(PGLOBAL g) - { -//if (Sname == NULL) { - Sname = (char*)To_Tdb->GetServer(); - Value->SetValue_psz(Sname); -// } // endif Sname - - } // end of ReadColumn - +/************* Colblk C++ Functions Source Code File (.CPP) ************/
+/* Name: COLBLK.CPP Version 2.1 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */
+/* */
+/* This file contains the COLBLK class functions. */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant MariaDB header file. */
+/***********************************************************************/
+#include "my_global.h"
+
+/***********************************************************************/
+/* Include required application header files */
+/* global.h is header containing all global Plug declarations. */
+/* plgdbsem.h is header containing the DB applic. declarations. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "tabcol.h"
+#include "colblk.h"
+#include "xindex.h"
+#include "xtable.h"
+
+extern "C" int trace;
+
+/***********************************************************************/
+/* COLBLK protected constructor. */
+/***********************************************************************/
+COLBLK::COLBLK(PCOLDEF cdp, PTDB tdbp, int i)
+ {
+ Next = NULL;
+ Index = i;
+//Number = 0;
+ ColUse = 0;
+
+ if ((Cdp = cdp)) {
+ Name = cdp->Name;
+ Format = cdp->F;
+ Opt = cdp->Opt;
+ Long = cdp->Long;
+ Precision = cdp->Precision;
+ Freq = cdp->Freq;
+ Buf_Type = cdp->Buf_Type;
+ ColUse |= cdp->Flags; // Used by CONNECT
+ Nullable = !!(cdp->Flags & U_NULLS);
+ Unsigned = !!(cdp->Flags & U_UNSIGNED);
+ } else {
+ Name = NULL;
+ memset(&Format, 0, sizeof(FORMAT));
+ Opt = 0;
+ Long = 0;
+ Precision = 0;
+ Freq = 0;
+ Buf_Type = TYPE_ERROR;
+ Nullable = false;
+ Unsigned = false;
+ } // endif cdp
+
+ To_Tdb = tdbp;
+ Status = BUF_NO;
+//Value = NULL; done in XOBJECT constructor
+ To_Kcol = NULL;
+ } // end of COLBLK constructor
+
+/***********************************************************************/
+/* COLBLK constructor used for copying columns. */
+/* tdbp is the pointer to the new table descriptor. */
+/***********************************************************************/
+COLBLK::COLBLK(PCOL col1, PTDB tdbp)
+ {
+ PCOL colp;
+
+ // Copy the old column block to the new one
+ *this = *col1;
+ Next = NULL;
+//To_Orig = col1;
+ To_Tdb = tdbp;
+
+ if (trace > 1)
+ htrc(" copying COLBLK %s from %p to %p\n", Name, col1, this);
+
+ if (tdbp)
+ // Attach the new column to the table block
+ if (!tdbp->GetColumns())
+ tdbp->SetColumns(this);
+ else {
+ for (colp = tdbp->GetColumns(); colp->Next; colp = colp->Next) ;
+
+ colp->Next = this;
+ } // endelse
+
+ } // end of COLBLK copy constructor
+
+/***********************************************************************/
+/* Reset the column descriptor to not yet evaluated.                  */
+/***********************************************************************/
+void COLBLK::Reset(void)
+ {
+ Status &= ~BUF_READ;
+ } // end of Reset
+
+/***********************************************************************/
+/* Compare: compares itself to an (expression) object and returns */
+/* true if it is equivalent. */
+/***********************************************************************/
+bool COLBLK::Compare(PXOB xp)
+ {
+ return (this == xp);
+ } // end of Compare
+
+/***********************************************************************/
+/* SetFormat: function used to set SELECT output format. */
+/***********************************************************************/
+bool COLBLK::SetFormat(PGLOBAL g, FORMAT& fmt)
+ {
+ fmt = Format;
+
+ if (trace > 1)
+ htrc("COLBLK: %p format=%c(%d,%d)\n",
+ this, *fmt.Type, fmt.Length, fmt.Prec);
+
+ return false;
+ } // end of SetFormat
+
+/***********************************************************************/
+/* Eval: get the column value from the last read record or from a */
+/* matching Index column if there is one. */
+/***********************************************************************/
+bool COLBLK::Eval(PGLOBAL g)
+ {
+ if (trace > 1)
+ htrc("Col Eval: %s status=%.4X\n", Name, Status);
+
+ if (!GetStatus(BUF_READ)) {
+// if (To_Tdb->IsNull())
+// Value->Reset();
+ if (To_Kcol)
+ To_Kcol->FillValue(Value);
+ else
+ ReadColumn(g);
+
+ AddStatus(BUF_READ);
+ } // endif
+
+ return false;
+ } // end of Eval
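Eval is guarded by the BUF_READ status bit, so a column is materialized at most once per record; when an index key column is attached (To_Kcol) the value is filled from the key instead of re-reading the line. The sketch below is only an illustration of the per-record Reset/Eval protocol built from the methods shown above; the helper name and loop are assumptions, not part of this patch.

    // Illustrative helper, not in the patch: evaluate every column of the
    // current record using the Reset/Eval protocol documented above.
    static bool EvalRowColumns(PGLOBAL g, PTDB tdbp)
      {
      for (PCOL colp = tdbp->GetColumns(); colp; colp = colp->GetNext()) {
        colp->Reset();                 // Clear BUF_READ for the new record

        if (colp->Eval(g))             // Fill Value from the index or the file
          return true;                 // Error text is in g->Message

        } // endfor colp

      return false;
      } // end of EvalRowColumns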
+
+/***********************************************************************/
+/* InitValue: prepare a column block for read operation. */
+/* Format.Length is now used for the len parameter so that strings    */
+/* are not truncated when converting from string to coded string.     */
+/* Added in version 1.5 are the GetScale() and Domain arguments       */
+/* passed to AllocateValue. Domain is used for TYPE_DATE only.        */
+/***********************************************************************/
+bool COLBLK::InitValue(PGLOBAL g)
+ {
+ if (Value)
+ return false; // Already done
+
+ // Allocate a Value object
+ if (!(Value = AllocateValue(g, Buf_Type, Precision,
+ GetScale(), Unsigned, GetDomain())))
+ return true;
+
+ AddStatus(BUF_READY);
+ Value->SetNullable(Nullable);
+
+ if (trace > 1)
+ htrc(" colp=%p type=%d value=%p coluse=%.4X status=%.4X\n",
+ this, Buf_Type, Value, ColUse, Status);
+
+ return false;
+ } // end of InitValue
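InitValue is an allocate-once call: it builds the Value buffer from Buf_Type, Precision, GetScale(), Unsigned and GetDomain(), then flags the column BUF_READY. A minimal sketch of how open code typically prepares the used columns before the first read, assuming only the accessors visible in this file (the helper name is hypothetical):

    // Illustrative helper, not in the patch: allocate a Value for every
    // non-virtual column before reading starts.
    static bool InitTableColumns(PGLOBAL g, PTDB tdbp)
      {
      for (PCOL colp = tdbp->GetColumns(); colp; colp = colp->GetNext())
        if (!colp->GetColUse(U_VIRTUAL) && colp->InitValue(g))
          return true;                 // Allocation failed, g->Message is set

      return false;
      } // end of InitTableColumns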
+
+/***********************************************************************/
+/* SetBuffer: prepare a column block for write operation. */
+/***********************************************************************/
+bool COLBLK::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
+ {
+ sprintf(g->Message, MSG(UNDEFINED_AM), "SetBuffer");
+ return true;
+ } // end of SetBuffer
+
+/***********************************************************************/
+/* GetLengthEx: returns an evaluation of the column string length.    */
+/***********************************************************************/
+int COLBLK::GetLengthEx(void)
+ {
+ return Long;
+ } // end of GetLengthEx
+
+/***********************************************************************/
+/* ReadColumn: what this routine does is to access the last line */
+/* read from the corresponding table, extract from it the field */
+/* corresponding to this column and convert it to buffer type. */
+/***********************************************************************/
+void COLBLK::ReadColumn(PGLOBAL g)
+ {
+ sprintf(g->Message, MSG(UNDEFINED_AM), "ReadColumn");
+ longjmp(g->jumper[g->jump_level], TYPE_COLBLK);
+ } // end of ReadColumn
+
+/***********************************************************************/
+/* WriteColumn: what this routine does is to access the last line */
+/* read from the corresponding table, and rewrite the field */
+/* corresponding to this column from the column buffer and type. */
+/***********************************************************************/
+void COLBLK::WriteColumn(PGLOBAL g)
+ {
+ sprintf(g->Message, MSG(UNDEFINED_AM), "WriteColumn");
+ longjmp(g->jumper[g->jump_level], TYPE_COLBLK);
+ } // end of WriteColumn
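ReadColumn and WriteColumn report fatal errors by formatting g->Message and long-jumping to the active g->jumper slot rather than returning a code. The fragment below is a condensed, illustrative version of the setjmp guard the callers in connect.cc place around such calls; the exact error wording and surrounding logic are trimmed or assumed.

    // Illustrative guard, condensed from the callers: protect code that may
    // longjmp out of ReadColumn/WriteColumn.
    if (g->jump_level == MAX_JUMP) {
      strcpy(g->Message, "Too many nested jump levels");   // wording assumed
      return RC_FX;
      } // endif jump_level

    if (setjmp(g->jumper[++g->jump_level]) != 0) {
      g->jump_level--;                 // A column method long-jumped here
      return RC_FX;                    // g->Message was set before the jump
      } // endif setjmp

    // ... calls that may raise, e.g. colp->Eval(g) ...

    g->jump_level--;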
+
+/***********************************************************************/
+/* Make file output of a column descriptor block. */
+/***********************************************************************/
+void COLBLK::Print(PGLOBAL g, FILE *f, uint n)
+ {
+ char m[64];
+ int i;
+ PCOL colp;
+
+ memset(m, ' ', n); // Make margin string
+ m[n] = '\0';
+
+ for (colp = To_Tdb->GetColumns(), i = 1; colp; colp = colp->Next, i++)
+ if (colp == this)
+ break;
+
+ fprintf(f, "%sR%dC%d type=%d F=%.2s(%d,%d)", m, To_Tdb->GetTdb_No(),
+ i, GetAmType(), Format.Type, Format.Length, Format.Prec);
+ fprintf(f,
+ " coluse=%04X status=%04X buftyp=%d value=%p name=%s\n",
+ ColUse, Status, Buf_Type, Value, Name);
+ } // end of Print
+
+/***********************************************************************/
+/* Make string output of a column descriptor block. */
+/***********************************************************************/
+void COLBLK::Print(PGLOBAL g, char *ps, uint z)
+ {
+ sprintf(ps, "R%d.%s", To_Tdb->GetTdb_No(), Name);
+ } // end of Print
+
+
+/***********************************************************************/
+/* SPCBLK constructor. */
+/***********************************************************************/
+SPCBLK::SPCBLK(PCOLUMN cp)
+ : COLBLK((PCOLDEF)NULL, cp->GetTo_Table()->GetTo_Tdb(), 0)
+ {
+ Name = (char*)cp->GetName();
+ Precision = Long = 0;
+ Buf_Type = TYPE_ERROR;
+ } // end of SPCBLK constructor
+
+/***********************************************************************/
+/* WriteColumn: what this routine does is to access the last line */
+/* read from the corresponding table, and rewrite the field */
+/* corresponding to this column from the column buffer and type. */
+/***********************************************************************/
+void SPCBLK::WriteColumn(PGLOBAL g)
+ {
+ sprintf(g->Message, MSG(SPCOL_READONLY), Name);
+ longjmp(g->jumper[g->jump_level], TYPE_COLBLK);
+ } // end of WriteColumn
+
+/***********************************************************************/
+/* RIDBLK constructor for the ROWID special column. */
+/***********************************************************************/
+RIDBLK::RIDBLK(PCOLUMN cp, bool rnm) : SPCBLK(cp)
+ {
+ Precision = Long = 10;
+ Buf_Type = TYPE_INT;
+ Rnm = rnm;
+ *Format.Type = 'N';
+ Format.Length = 10;
+ } // end of RIDBLK constructor
+
+/***********************************************************************/
+/* ReadColumn: returns the ordinal number of the current row in the   */
+/* table (if Rnm is true) or in the current file (if Rnm is false);   */
+/* the two differ only for multiple tables.                           */
+/***********************************************************************/
+void RIDBLK::ReadColumn(PGLOBAL g)
+ {
+ Value->SetValue(To_Tdb->RowNumber(g, Rnm));
+ } // end of ReadColumn
+
+/***********************************************************************/
+/* FIDBLK constructor for the FILEID special column. */
+/***********************************************************************/
+FIDBLK::FIDBLK(PCOLUMN cp, OPVAL op) : SPCBLK(cp), Op(op)
+ {
+//Is_Key = 2; for when the MUL table indexed reading will be implemented.
+ Precision = Long = _MAX_PATH;
+ Buf_Type = TYPE_STRING;
+ *Format.Type = 'C';
+ Format.Length = Long;
+#if defined(WIN32)
+ Format.Prec = 1; // Case insensitive
+#endif // WIN32
+ Constant = (!((PTDBASE)To_Tdb)->GetDef()->GetMultiple() &&
+ To_Tdb->GetAmType() != TYPE_AM_PLG &&
+ To_Tdb->GetAmType() != TYPE_AM_PLM);
+ Fn = NULL;
+ } // end of FIDBLK constructor
+
+/***********************************************************************/
+/* ReadColumn: what this routine does is to return the current */
+/* file ID of the table (can change for Multiple tables). */
+/***********************************************************************/
+void FIDBLK::ReadColumn(PGLOBAL g)
+ {
+ if (Fn != ((PTDBASE)To_Tdb)->GetFile(g)) {
+ char filename[_MAX_PATH];
+
+ Fn = ((PTDBASE)To_Tdb)->GetFile(g);
+ PlugSetPath(filename, Fn, ((PTDBASE)To_Tdb)->GetPath());
+
+ if (Op != OP_XX) {
+ char buff[_MAX_PATH];
+
+ Value->SetValue_psz(ExtractFromPath(g, buff, filename, Op));
+ } else
+ Value->SetValue_psz(filename);
+
+ } // endif Fn
+
+ } // end of ReadColumn
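With the new Op argument, FIDBLK can return just one part of the file path through ExtractFromPath, while OP_XX keeps the old full-path behaviour. The helper below is not CONNECT's ExtractFromPath; it only illustrates, in plain C++ with simplified separator handling, the kind of split such a part operator selects. All names in it are assumptions.

    // Illustrative only: return the directory, name or type part of a path.
    #include <cstring>

    static const char *PathPart(char *buff, const char *path, char what)
      {
      const char *slash = strrchr(path, '/');       // '\\' not handled here
      const char *base  = slash ? slash + 1 : path;
      const char *dot   = strrchr(base, '.');
      size_t      n;

      switch (what) {
        case 'D':                                // Directory part
          n = base - path;
          memcpy(buff, path, n);
          buff[n] = '\0';
          break;
        case 'N':                                // Base name without extension
          n = dot ? (size_t)(dot - base) : strlen(base);
          memcpy(buff, base, n);
          buff[n] = '\0';
          break;
        case 'T':                                // Type (extension)
          strcpy(buff, dot ? dot + 1 : "");
          break;
        default:                                 // Whole path
          strcpy(buff, path);
        } // endswitch what

      return buff;
      } // end of PathPart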
+
+/***********************************************************************/
+/* TIDBLK constructor for the TABID special column. */
+/***********************************************************************/
+TIDBLK::TIDBLK(PCOLUMN cp) : SPCBLK(cp)
+ {
+//Is_Key = 2; for when the MUL table indexed reading will be implemented.
+ Precision = Long = 64;
+ Buf_Type = TYPE_STRING;
+ *Format.Type = 'C';
+ Format.Length = Long;
+ Format.Prec = 1; // Case insensitive
+ Constant = (To_Tdb->GetAmType() != TYPE_AM_TBL);
+ Tname = NULL;
+ } // end of TIDBLK constructor
+
+/***********************************************************************/
+/* ReadColumn: what this routine does is to return the table ID. */
+/***********************************************************************/
+void TIDBLK::ReadColumn(PGLOBAL g)
+ {
+ if (Tname == NULL) {
+ Tname = (char*)To_Tdb->GetName();
+ Value->SetValue_psz(Tname);
+ } // endif Tname
+
+ } // end of ReadColumn
+
+/***********************************************************************/
+/* PRTBLK constructor for the PARTID special column. */
+/***********************************************************************/
+PRTBLK::PRTBLK(PCOLUMN cp) : SPCBLK(cp)
+ {
+//Is_Key = 2; for when the MUL table indexed reading will be implemented.
+ Precision = Long = 64;
+ Buf_Type = TYPE_STRING;
+ *Format.Type = 'C';
+ Format.Length = Long;
+ Format.Prec = 1; // Case insensitive
+ Constant = true; // TODO: check whether this is true indeed
+ Pname = NULL;
+ } // end of PRTBLK constructor
+
+/***********************************************************************/
+/* ReadColumn: what this routine does is to return the partition ID. */
+/***********************************************************************/
+void PRTBLK::ReadColumn(PGLOBAL g)
+ {
+ if (Pname == NULL) {
+ char *p;
+ PTDBASE tdbp = (PTDBASE)To_Tdb;
+
+ Pname = tdbp->GetDef()->GetStringCatInfo(g, "partname", "?");
+
+ p = strrchr(Pname, '#');
+ Value->SetValue_psz((p) ? p + 1 : Pname);
+ } // endif Pname
+
+ } // end of ReadColumn
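The partition ID is read from the "partname" catalog option and everything up to the last '#' is stripped, so a MariaDB-style partition tag yields only the partition name. A tiny illustration of that stripping (the sample value is hypothetical):

    // Illustrative only: effect of the strrchr('#') stripping above.
    const char *pname  = "t1#P#p2";              // assumed partname value
    const char *p      = strrchr(pname, '#');
    const char *partid = p ? p + 1 : pname;      // -> "p2"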
+
+/***********************************************************************/
+/* SIDBLK constructor for the SERVID special column. */
+/***********************************************************************/
+SIDBLK::SIDBLK(PCOLUMN cp) : SPCBLK(cp)
+ {
+//Is_Key = 2; for when the MUL table indexed reading will be implemented.
+ Precision = Long = 64;
+ Buf_Type = TYPE_STRING;
+ *Format.Type = 'C';
+ Format.Length = Long;
+ Format.Prec = 1; // Case insensitive
+ Constant = (To_Tdb->GetAmType() != TYPE_AM_TBL);
+ Sname = NULL;
+  } // end of SIDBLK constructor
+
+/***********************************************************************/
+/* ReadColumn: what this routine does is to return the server ID. */
+/***********************************************************************/
+void SIDBLK::ReadColumn(PGLOBAL g)
+ {
+//if (Sname == NULL) {
+ Sname = (char*)To_Tdb->GetServer();
+ Value->SetValue_psz(Sname);
+// } // endif Sname
+
+ } // end of ReadColumn
+
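These SPCBLK subclasses are the run-time descriptors behind CONNECT's special columns such as ROWID, FILEID, TABID, PARTID and SERVID. The actual dispatch lives in ha_connect and the table classes, not in this file; the sketch below only suggests, with assumed names and mappings, how a name-to-descriptor switch could look (strcasecmp, the ROWNUM mapping and the helper itself are assumptions).

    // Illustrative only: a possible special-column dispatch; not the code
    // used by ha_connect.
    SPCBLK *MakeSpecialColumn(PGLOBAL g, PCOLUMN cp, const char *special)
      {
      if (!strcasecmp(special, "ROWID"))  return new(g) RIDBLK(cp, false);
      if (!strcasecmp(special, "ROWNUM")) return new(g) RIDBLK(cp, true);
      if (!strcasecmp(special, "FILEID")) return new(g) FIDBLK(cp, OP_XX);
      if (!strcasecmp(special, "TABID"))  return new(g) TIDBLK(cp);
      if (!strcasecmp(special, "PARTID")) return new(g) PRTBLK(cp);
      if (!strcasecmp(special, "SERVID")) return new(g) SIDBLK(cp);
      return NULL;
      } // end of MakeSpecialColumn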
diff --git a/storage/connect/colblk.h b/storage/connect/colblk.h index a340ee4450a..5e8dc77ff69 100644 --- a/storage/connect/colblk.h +++ b/storage/connect/colblk.h @@ -36,10 +36,13 @@ class DllExport COLBLK : public XOBJECT { virtual int GetAmType() {return TYPE_AM_ERROR;} virtual void SetOk(void) {Status |= BUF_EMPTY;} virtual PTDB GetTo_Tdb(void) {return To_Tdb;} + virtual int GetClustered(void) {return 0;} + virtual int IsClustered(void) {return FALSE;} PCOL GetNext(void) {return Next;} PSZ GetName(void) {return Name;} int GetIndex(void) {return Index;} ushort GetColUse(void) {return ColUse;} + int GetOpt(void) {return Opt;} ushort GetColUse(ushort u) {return (ColUse & u);} ushort GetStatus(void) {return Status;} ushort GetStatus(ushort u) {return (Status & u);} @@ -48,17 +51,18 @@ class DllExport COLBLK : public XOBJECT { void AddColUse(ushort u) {ColUse |= u;} void AddStatus(ushort u) {Status |= u;} void SetNext(PCOL cp) {Next = cp;} + PXCOL GetKcol(void) {return To_Kcol;} void SetKcol(PXCOL kcp) {To_Kcol = kcp;} PCOLDEF GetCdp(void) {return Cdp;} PSZ GetDomain(void) {return (Cdp) ? Cdp->Decode : NULL;} PSZ GetDesc(void) {return (Cdp) ? Cdp->Desc : NULL;} PSZ GetFmt(void) {return (Cdp) ? Cdp->Fmt : NULL;} bool IsUnsigned(void) {return Unsigned;} - bool IsNullable(void) {return Nullable;} bool IsVirtual(void) {return Cdp->IsVirtual();} + bool IsNullable(void) {return Nullable;} void SetNullable(bool b) {Nullable = b;} - - // Methods + + // Methods virtual void Reset(void); virtual bool Compare(PXOB xp); virtual bool SetFormat(PGLOBAL, FORMAT&); @@ -70,6 +74,7 @@ class DllExport COLBLK : public XOBJECT { virtual void WriteColumn(PGLOBAL g); virtual void Print(PGLOBAL g, FILE *, uint); virtual void Print(PGLOBAL g, char *, uint); + virtual bool VarSize(void) {return false;} bool InitValue(PGLOBAL g); protected: @@ -82,9 +87,11 @@ class DllExport COLBLK : public XOBJECT { bool Nullable; // True if nullable bool Unsigned; // True if unsigned int Index; // Column number in table + int Opt; // Cluster/sort information int Buf_Type; // Data type int Long; // Internal length in table int Precision; // Column length (as for ODBC) + int Freq; // Evaluated ceiling of distinct values FORMAT Format; // Output format ushort ColUse; // Column usage ushort Status; // Column read status @@ -100,7 +107,7 @@ class DllExport SPCBLK : public COLBLK { // Implementation virtual int GetAmType(void) = 0; - virtual bool GetRnm(void) {return false;} + virtual bool GetRnm(void) {return false;} // Methods virtual bool IsSpecial(void) {return true;} @@ -122,7 +129,7 @@ class DllExport RIDBLK : public SPCBLK { // Implementation virtual int GetAmType(void) {return TYPE_AM_ROWID;} - virtual bool GetRnm(void) {return Rnm;} + virtual bool GetRnm(void) {return Rnm;} // Methods virtual void ReadColumn(PGLOBAL g); @@ -137,7 +144,7 @@ class DllExport RIDBLK : public SPCBLK { class DllExport FIDBLK : public SPCBLK { public: // Constructor - FIDBLK(PCOLUMN cp); + FIDBLK(PCOLUMN cp, OPVAL op); // Implementation virtual int GetAmType(void) {return TYPE_AM_FILID;} @@ -147,7 +154,8 @@ class DllExport FIDBLK : public SPCBLK { virtual void ReadColumn(PGLOBAL g); protected: - PSZ Fn; // The current To_File of the table + PSZ Fn; // The current To_File of the table + OPVAL Op; // The file part operator }; // end of class FIDBLK /***********************************************************************/ @@ -174,6 +182,29 @@ class DllExport TIDBLK : public SPCBLK { }; // end of class TIDBLK 
/***********************************************************************/ +/* Class PRTBLK: PARTID special column descriptor. */ +/***********************************************************************/ +class DllExport PRTBLK : public SPCBLK { + public: + // Constructor + PRTBLK(PCOLUMN cp); + + // Implementation + virtual int GetAmType(void) {return TYPE_AM_PRTID;} + + // Methods + virtual void Reset(void) {} // This is a pseudo constant column + virtual void ReadColumn(PGLOBAL g); + + protected: + // Default constructor not to be used + PRTBLK(void) {} + + // Members + PSZ Pname; // The current partition name + }; // end of class PRTBLK + +/***********************************************************************/ /* Class SIDBLK: SERVID special column descriptor. */ /***********************************************************************/ class DllExport SIDBLK : public SPCBLK { diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 2d8aeb8b5f4..381e437f9ec 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -57,7 +57,7 @@ extern "C" int trace; /* Routines called internally by semantic routines. */ /***********************************************************************/ void CntEndDB(PGLOBAL); -RCODE EvalColumns(PGLOBAL g, PTDB tdbp); +RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool mrr= false); /***********************************************************************/ /* MySQL routines called externally by semantic routines. */ @@ -122,9 +122,12 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname) (dbuserp->Catalog) ? ((MYCAT*)dbuserp->Catalog)->GetHandler() : NULL, handler); + // Set the database path for this table + handler->SetDataPath(g, pathname); + if (dbuserp->Catalog) { // ((MYCAT *)dbuserp->Catalog)->SetHandler(handler); done later - ((MYCAT *)dbuserp->Catalog)->SetDataPath(g, pathname); +// ((MYCAT *)dbuserp->Catalog)->SetDataPath(g, pathname); return false; // Nothing else to do } // endif Catalog @@ -141,8 +144,8 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname) if (!(dbuserp->Catalog= new MYCAT(handler))) return true; - ((MYCAT *)dbuserp->Catalog)->SetDataPath(g, pathname); - dbuserp->UseTemp= TMP_YES; // Must use temporary file +//((MYCAT *)dbuserp->Catalog)->SetDataPath(g, pathname); +//dbuserp->UseTemp= TMP_AUTO; /*********************************************************************/ /* All is correct. */ @@ -167,7 +170,13 @@ bool CntInfo(PGLOBAL g, PTDB tp, PXF info) if (tdbp) { b= tdbp->GetFtype() != RECFM_NAF; info->data_file_length= (b) ? (ulonglong)tdbp->GetFileLength(g) : 0; - info->records= (unsigned)tdbp->GetMaxSize(g); + + if (!b || info->data_file_length) + info->records= (unsigned)tdbp->Cardinality(g); +// info->records= (unsigned)tdbp->GetMaxSize(g); + else + info->records= 0; + // info->mean_rec_length= tdbp->GetLrecl(); info->mean_rec_length= 0; info->data_file_name= (b) ? 
tdbp->GetFile(g) : NULL; @@ -343,12 +352,12 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, //tdbp->SetMode(mode); - if (del && ((PTDBASE)tdbp)->GetFtype() != RECFM_NAF) { + if (del/* && ((PTDBASE)tdbp)->GetFtype() != RECFM_NAF*/) { // To avoid erasing the table when doing a partial delete // make a fake Next - PDOSDEF ddp= new(g) DOSDEF; - PTDB tp= new(g) TDBDOS(ddp, NULL); - tdbp->SetNext(tp); +// PDOSDEF ddp= new(g) DOSDEF; +// PTDB tp= new(g) TDBDOS(ddp, NULL); + tdbp->SetNext((PTDB)1); dup->Check &= ~CHK_DELETE; } // endif del @@ -387,7 +396,7 @@ bool CntRewindTable(PGLOBAL g, PTDB tdbp) /***********************************************************************/ /* Evaluate all columns after a record is read. */ /***********************************************************************/ -RCODE EvalColumns(PGLOBAL g, PTDB tdbp) +RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool mrr) { RCODE rc= RC_OK; PCOL colp; @@ -415,7 +424,7 @@ RCODE EvalColumns(PGLOBAL g, PTDB tdbp) colp->Reset(); // Virtual columns are computed by MariaDB - if (!colp->GetColUse(U_VIRTUAL)) + if (!colp->GetColUse(U_VIRTUAL) && (!mrr || colp->GetKcol())) if (colp->Eval(g)) rc= RC_FX; @@ -439,8 +448,8 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp) // Reading sequencially an indexed table. This happens after the // handler function records_in_range was called and MySQL decides // to quit using the index (!!!) Drop the index. - for (PCOL colp= tdbp->GetColumns(); colp; colp= colp->GetNext()) - colp->SetKcol(NULL); +// for (PCOL colp= tdbp->GetColumns(); colp; colp= colp->GetNext()) +// colp->SetKcol(NULL); ((PTDBASE)tdbp)->ResetKindex(g, NULL); } // endif index @@ -456,7 +465,12 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp) goto err; } // endif rc - while ((rc= (RCODE)tdbp->ReadDB(g)) == RC_NF) ; + do { + if ((rc= (RCODE)tdbp->ReadDB(g)) == RC_OK) + if (!ApplyFilter(g, tdbp->GetFilter())) + rc= RC_NF; + + } while (rc == RC_NF); err: g->jump_level--; @@ -468,7 +482,7 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp) /***********************************************************************/ RCODE CntWriteRow(PGLOBAL g, PTDB tdbp) { - RCODE rc; + RCODE rc; PCOL colp; PTDBASE tp= (PTDBASE)tdbp; @@ -492,11 +506,12 @@ RCODE CntWriteRow(PGLOBAL g, PTDB tdbp) if (!colp->GetColUse(U_VIRTUAL)) colp->WriteColumn(g); -// if (tdbp->GetMode() == MODE_INSERT) -// tbxp->SetModified(true); - - // Return result code from write operation - rc= (RCODE)tdbp->WriteDB(g); + if (tp->IsIndexed()) + // Index values must be sorted before updating + rc= (RCODE)((PTDBDOS)tp)->GetTxfp()->StoreValues(g, true); + else + // Return result code from write operation + rc= (RCODE)tdbp->WriteDB(g); err: g->jump_level--; @@ -506,7 +521,7 @@ RCODE CntWriteRow(PGLOBAL g, PTDB tdbp) /***********************************************************************/ /* UpdateRow: Update a row into a table. 
*/ /***********************************************************************/ -RCODE CntUpdateRow(PGLOBAL g, PTDB tdbp) +RCODE CntUpdateRow(PGLOBAL g, PTDB tdbp) { if (!tdbp || tdbp->GetMode() != MODE_UPDATE) return RC_FX; @@ -520,38 +535,70 @@ RCODE CntUpdateRow(PGLOBAL g, PTDB tdbp) /***********************************************************************/ RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all) { - RCODE rc; + RCODE rc; + PTDBASE tp= (PTDBASE)tdbp; if (!tdbp || tdbp->GetMode() != MODE_DELETE) return RC_FX; else if (tdbp->IsReadOnly()) return RC_NF; - if (((PTDBASE)tdbp)->GetDef()->Indexable() && all) - ((PTDBDOS)tdbp)->Cardinal= 0; - - // Return result code from delete operation - // Note: if all, this call will be done when closing the table - rc= (RCODE)tdbp->DeleteDB(g, (all) ? RC_FX : RC_OK); + if (all) { + if (((PTDBASE)tdbp)->GetDef()->Indexable()) + ((PTDBDOS)tdbp)->Cardinal= 0; + + // Note: if all, this call will be done when closing the table + rc= (RCODE)tdbp->DeleteDB(g, RC_FX); +//} else if (tp->GetKindex() && !tp->GetKindex()->IsSorted() && +// tp->Txfp->GetAmType() != TYPE_AM_DBF) { + } else if(tp->IsIndexed()) { + // Index values must be sorted before updating + rc= (RCODE)((PTDBDOS)tp)->GetTxfp()->StoreValues(g, false); + } else // Return result code from delete operation + rc= (RCODE)tdbp->DeleteDB(g, RC_OK); + return rc; } // end of CntDeleteRow /***********************************************************************/ /* CLOSETAB: Close a table. */ /***********************************************************************/ -int CntCloseTable(PGLOBAL g, PTDB tdbp) +int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort) { int rc= RC_OK; - TDBDOX *tbxp= NULL; + TDBASE *tbxp= (PTDBASE)tdbp; - if (!tdbp || tdbp->GetUse() != USE_OPEN) + if (!tdbp) return rc; // Nothing to do + else if (tdbp->GetUse() != USE_OPEN) { + if (tdbp->GetAmType() == TYPE_AM_XML) + tdbp->CloseDB(g); // Opened by GetMaxSize + + return rc; + } // endif !USE_OPEN if (trace) - printf("CntCloseTable: tdbp=%p mode=%d\n", tdbp, tdbp->GetMode()); + printf("CntCloseTable: tdbp=%p mode=%d nox=%d abort=%d\n", + tdbp, tdbp->GetMode(), nox, abort); + + if (tdbp->GetMode() == MODE_DELETE && tdbp->GetUse() == USE_OPEN) { + if (tbxp->IsIndexed()) + rc= ((PTDBDOS)tdbp)->GetTxfp()->DeleteSortedRows(g); - if (tdbp->GetMode() == MODE_DELETE && tdbp->GetUse() == USE_OPEN) - rc= tdbp->DeleteDB(g, RC_EF); // Specific A.M. delete routine + if (!rc) + rc= tdbp->DeleteDB(g, RC_EF); // Specific A.M. delete routine + + } else if (tbxp->GetMode() == MODE_UPDATE && tbxp->IsIndexed()) + rc= ((PTDBDOX)tdbp)->Txfp->UpdateSortedRows(g); + + switch(rc) { + case RC_FX: + abort= true; + break; + case RC_INFO: + PushWarning(g, tbxp); + break; + } // endswitch rc // Prepare error return if (g->jump_level == MAX_JUMP) { @@ -561,14 +608,16 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp) } // endif if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { + rc= RC_FX; g->jump_level--; goto err; } // endif // This will close the table file(s) and also finalize write // operations such as Insert, Update, or Delete. 
+ tdbp->SetAbort(abort); tdbp->CloseDB(g); - + tdbp->SetAbort(false); g->jump_level--; if (trace > 1) @@ -577,17 +626,17 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp) //if (!((PTDBDOX)tdbp)->GetModified()) // return 0; - if (tdbp->GetMode() == MODE_READ || tdbp->GetMode() == MODE_ANY) + if (nox || tdbp->GetMode() == MODE_READ || tdbp->GetMode() == MODE_ANY) return 0; if (trace > 1) - printf("About to reset indexes\n"); + printf("About to reset opt\n"); // Make all the eventual indexes tbxp= (TDBDOX*)tdbp; tbxp->ResetKindex(g, NULL); - tbxp->To_Key_Col= NULL; - rc= tbxp->ResetTableOpt(g, ((PTDBASE)tdbp)->GetDef()->Indexable() == 1); + tbxp->SetKey_Col(NULL); + rc= tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1); err: if (trace > 1) @@ -601,17 +650,10 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp) /* This is the condition(s) for doing indexing. */ /* Note: FIX table are not reset here to Nrec= 1. */ /***********************************************************************/ -int CntIndexInit(PGLOBAL g, PTDB ptdb, int id) +int CntIndexInit(PGLOBAL g, PTDB ptdb, int id, bool sorted) { - int k; - PCOL colp; - PVAL valp; - PKXBASE xp; - PXLOAD pxp; PIXDEF xdp; - XKPDEF *kdp; PTDBDOX tdbp; - PCOLDEF cdp; DOXDEF *dfp; if (!ptdb) @@ -650,63 +692,27 @@ int CntIndexInit(PGLOBAL g, PTDB ptdb, int id) return 0; } // endif xdp - // Allocate the key columns definition block - tdbp->Knum= xdp->GetNparts(); - tdbp->To_Key_Col= (PCOL*)PlugSubAlloc(g, NULL, tdbp->Knum * sizeof(PCOL)); - - // Get the key column description list - for (k= 0, kdp= (XKPDEF*)xdp->GetToKeyParts(); kdp; kdp= (XKPDEF*)kdp->Next) - if (!(colp= tdbp->ColDB(g, kdp->Name, 0)) || colp->InitValue(g)) { - sprintf(g->Message, "Wrong column %s", kdp->Name); - return 0; - } else - tdbp->To_Key_Col[k++]= colp; - -#if defined(_DEBUG) - if (k != tdbp->Knum) { - sprintf(g->Message, "Key part number mismatch for %s", - xdp->GetName()); - return 0; - } // endif k -#endif // _DEBUG - - // Allocate the pseudo constants that will contain the key values - tdbp->To_Link= (PXOB*)PlugSubAlloc(g, NULL, tdbp->Knum * sizeof(PXOB)); - - for (k= 0, kdp= (XKPDEF*)xdp->GetToKeyParts(); - kdp; k++, kdp= (XKPDEF*)kdp->Next) { - cdp= tdbp->Key(k)->GetCdp(); - valp= AllocateValue(g, cdp->GetType(), cdp->GetLength()); - tdbp->To_Link[k]= new(g) CONSTANT(valp); - } // endfor k - - // Make the index on xdp - if (!xdp->IsAuto()) { - if (dfp->Huge) - pxp= new(g) XHUGE; - else - pxp= new(g) XFILE; - - if (tdbp->Knum == 1) // Single index - xp= new(g) XINDXS(tdbp, xdp, pxp, tdbp->To_Key_Col, tdbp->To_Link); - else // Multi-Column index - xp= new(g) XINDEX(tdbp, xdp, pxp, tdbp->To_Key_Col, tdbp->To_Link); - - } else // Column contains same values as ROWID - xp= new(g) XXROW(tdbp); - - if (xp->Init(g)) +#if 0 + if (xdp->IsDynamic()) { + // This is a dynamically created index (KINDEX) + // It should not be created now, if called by index range + tdbp->SetXdp(xdp); + return (xdp->IsUnique()) ? 1 : 2; + } // endif dynamic +#endif // 0 + + // Static indexes must be initialized now for records_in_range + if (tdbp->InitialyzeIndex(g, xdp, sorted)) return 0; - tdbp->To_Kindex= xp; - return (xp->IsMul()) ? 2 : 1; + return (tdbp->To_Kindex->IsMul()) ? 2 : 1; } // end of CntIndexInit /***********************************************************************/ /* IndexRead: fetch a record having the index value. 
*/ /***********************************************************************/ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op, - const void *key, int len) + const void *key, int len, bool mrr) { char *kp= (char*)key; int n, x; @@ -737,18 +743,29 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op, // Set reference values and index operator if (!tdbp->To_Link || !tdbp->To_Kindex) { - sprintf(g->Message, "Index not initialized for table %s", tdbp->Name); - return RC_FX; - } else - xbp= (XXBASE*)tdbp->To_Kindex; +// if (!tdbp->To_Xdp) { + sprintf(g->Message, "Index not initialized for table %s", tdbp->Name); + return RC_FX; +#if 0 + } // endif !To_Xdp + // Now it's time to make the dynamic index + if (tdbp->InitialyzeIndex(g, NULL, false)) { + sprintf(g->Message, "Fail to make dynamic index %s", + tdbp->To_Xdp->GetName()); + return RC_FX; + } // endif MakeDynamicIndex +#endif // 0 + } // endif !To_Kindex + + xbp= (XXBASE*)tdbp->To_Kindex; if (key) { for (n= 0; n < tdbp->Knum; n++) { colp= (PCOL)tdbp->To_Key_Col[n]; - + if (colp->GetColUse(U_NULLS)) kp++; // Skip null byte - + valp= tdbp->To_Link[n]->GetValue(); if (!valp->IsTypeNum()) { @@ -774,7 +791,7 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op, valp->SetBinValue((void*)kp); kp+= valp->GetClen(); - + if (len == kp - (char*)key) { n++; break; @@ -793,7 +810,7 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op, rnd: if ((rc= (RCODE)ptdb->ReadDB(g)) == RC_OK) - rc= EvalColumns(g, ptdb); + rc= EvalColumns(g, ptdb, mrr); return rc; } // end of CntIndexRead @@ -828,28 +845,32 @@ int CntIndexRange(PGLOBAL g, PTDB ptdb, const uchar* *key, uint *len, } else tdbp= (PTDBDOX)ptdb; - if (!tdbp->To_Link || !tdbp->To_Kindex) { - sprintf(g->Message, "Index not initialized for table %s", tdbp->Name); - DBUG_PRINT("Range", ("%s", g->Message)); - return -1; + if (!tdbp->To_Kindex || !tdbp->To_Link) { + if (!tdbp->To_Xdp) { + sprintf(g->Message, "Index not initialized for table %s", tdbp->Name); + DBUG_PRINT("Range", ("%s", g->Message)); + return -1; + } else // Dynamic index + return tdbp->To_Xdp->GetMaxSame(); // TODO a better estimate + } else xbp= (XXBASE*)tdbp->To_Kindex; for (b= false, i= 0; i < 2; i++) { p= kp= key[i]; - + if (kp) { for (n= 0; n < tdbp->Knum; n++) { if (kmap[i] & (key_part_map)(1 << n)) { if (b == true) // Cannot do indexing with missing intermediate key - return -1; + return -1; colp= (PCOL)tdbp->To_Key_Col[n]; - + if (colp->GetColUse(U_NULLS)) p++; // Skip null byte ??? 
- + valp= tdbp->To_Link[n]->GetValue(); if (!valp->IsTypeNum()) { @@ -862,7 +883,7 @@ int CntIndexRange(PGLOBAL g, PTDB ptdb, const uchar* *key, uint *len, if (rcb) { if (tdbp->RowNumber(g)) - sprintf(g->Message, + sprintf(g->Message, "Out of range value for column %s at row %d", colp->GetName(), tdbp->RowNumber(g)); else @@ -881,7 +902,7 @@ int CntIndexRange(PGLOBAL g, PTDB ptdb, const uchar* *key, uint *len, } // endif trace p+= valp->GetClen(); - + if (len[i] == (unsigned)(p - kp)) { n++; break; diff --git a/storage/connect/connect.h b/storage/connect/connect.h index 4c9cee46daf..fd8b7e9442f 100644 --- a/storage/connect/connect.h +++ b/storage/connect/connect.h @@ -33,10 +33,10 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname); PTDB CntGetTDB(PGLOBAL g, const char *name, MODE xmod, PHC); bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE, char *, char *, bool, PHC); bool CntRewindTable(PGLOBAL g, PTDB tdbp); -int CntCloseTable(PGLOBAL g, PTDB tdbp); -int CntIndexInit(PGLOBAL g, PTDB tdbp, int id); +int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort); +int CntIndexInit(PGLOBAL g, PTDB tdbp, int id, bool sorted); RCODE CntReadNext(PGLOBAL g, PTDB tdbp); -RCODE CntIndexRead(PGLOBAL g, PTDB, OPVAL op, const void *k, int n); +RCODE CntIndexRead(PGLOBAL g, PTDB, OPVAL op, const void *k, int n, bool mrr); RCODE CntWriteRow(PGLOBAL g, PTDB tdbp); RCODE CntUpdateRow(PGLOBAL g, PTDB tdbp); RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all); @@ -50,7 +50,7 @@ PGLOBAL CntExit(PGLOBAL g); /* These classes purpose is chiefly to access protected items! */ /***********************************************************************/ class DOXDEF: public DOSDEF { - friend int CntIndexInit(PGLOBAL, PTDB, int); + friend int CntIndexInit(PGLOBAL, PTDB, int, bool); }; // end of class DOXDEF /***********************************************************************/ @@ -58,9 +58,9 @@ class DOXDEF: public DOSDEF { /***********************************************************************/ class TDBDOX: public TDBDOS { friend int MakeIndex(PGLOBAL, PTDB, PIXDEF); - friend int CntCloseTable(PGLOBAL, PTDB); - friend int CntIndexInit(PGLOBAL, PTDB, int); - friend RCODE CntIndexRead(PGLOBAL, PTDB, OPVAL, const void*, int); + friend int CntCloseTable(PGLOBAL, PTDB, bool, bool); + friend int CntIndexInit(PGLOBAL, PTDB, int, bool); + friend RCODE CntIndexRead(PGLOBAL, PTDB, OPVAL, const void*, int, bool); friend RCODE CntDeleteRow(PGLOBAL, PTDB, bool); friend int CntIndexRange(PGLOBAL, PTDB, const uchar**, uint*, bool*, key_part_map*); @@ -70,7 +70,7 @@ class TDBDOX: public TDBDOS { class XKPDEF: public KPARTDEF { friend class TDBDOX; friend class ha_connect; - friend int CntIndexInit(PGLOBAL, PTDB, int); + friend int CntIndexInit(PGLOBAL, PTDB, int, bool); public: XKPDEF(const char *name, int n) : KPARTDEF((PSZ)name, n) {} }; // end of class XKPDEF diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp index 1723ee4ac27..c0ca40f4c01 100644 --- a/storage/connect/filamap.cpp +++ b/storage/connect/filamap.cpp @@ -1,11 +1,11 @@ /*********** File AM Map C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMAP */ /* ------------- */ -/* Version 1.4 */ +/* Version 1.6 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -89,7 +89,7 @@ int MAPFAM::GetFileLength(PGLOBAL g) { int len; - len = (To_Fb) ? 
To_Fb->Length : TXTFAM::GetFileLength(g); + len = (To_Fb) ? To_Fb->Length : TXTFAM::GetFileLength(g); if (trace) htrc("Mapped file length=%d\n", len); @@ -129,7 +129,7 @@ bool MAPFAM::OpenTableFile(PGLOBAL g) && fp->Count && fp->Mode == mode) break; - if (trace) + if (trace) htrc("Mapping file, fp=%p\n", fp); } else @@ -286,6 +286,16 @@ bool MAPFAM::RecordPos(PGLOBAL g) } // end of RecordPos /***********************************************************************/ +/* Initialize Fpos and Mempos for indexed DELETE. */ +/***********************************************************************/ +int MAPFAM::InitDelete(PGLOBAL g, int fpos, int spos) + { + Fpos = Memory + fpos; + Mempos = Memory + spos; + return RC_OK; + } // end of InitDelete + +/***********************************************************************/ /* Skip one record in file. */ /***********************************************************************/ int MAPFAM::SkipRecord(PGLOBAL g, bool header) @@ -322,8 +332,27 @@ int MAPFAM::ReadBuffer(PGLOBAL g) /*******************************************************************/ /* Record file position in case of UPDATE or DELETE. */ /*******************************************************************/ - Fpos = Mempos; - CurBlk = (int)Rows++; + int rc; + + next: + Fpos = Mempos; + CurBlk = (int)Rows++; + + /*******************************************************************/ + /* Check whether optimization on ROWID */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + // Skip this record + if ((rc = SkipRecord(g, false)) != RC_OK) + return rc; + + goto next; + } // endswitch rc + } else Placed = false; @@ -380,13 +409,13 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) } // endif irc - if (Tpos == Spos) + if (Tpos == Spos) { /*******************************************************************/ - /* First line to delete. Move of eventual preceding lines is */ + /* First line to delete. Move of eventual preceeding lines is */ /* not required here, just setting of future Spos and Tpos. */ /*******************************************************************/ - Tpos = Fpos; // Spos is set below - else if ((n = Fpos - Spos) > 0) { + Tpos = Spos = Fpos; + } else if ((n = Fpos - Spos) > 0) { /*******************************************************************/ /* Non consecutive line to delete. Move intermediate lines. */ /*******************************************************************/ @@ -396,7 +425,7 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) if (trace) htrc("move %d bytes\n", n); - } // endif n + } // endif n if (irc == RC_OK) { Spos = Mempos; // New start position @@ -415,40 +444,46 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) CloseMemMap(fp->Memory, (size_t)fp->Length); fp->Count = 0; // Avoid doing it twice - /*******************************************************************/ - /* Remove extra records. */ - /*******************************************************************/ - n = Tpos - Memory; + if (!Abort) { + /*****************************************************************/ + /* Remove extra records. 
*/ + /*****************************************************************/ + n = Tpos - Memory; #if defined(WIN32) - DWORD drc = SetFilePointer(fp->Handle, n, NULL, FILE_BEGIN); + DWORD drc = SetFilePointer(fp->Handle, n, NULL, FILE_BEGIN); - if (drc == 0xFFFFFFFF) { - sprintf(g->Message, MSG(FUNCTION_ERROR), - "SetFilePointer", GetLastError()); - CloseHandle(fp->Handle); - return RC_FX; - } // endif + if (drc == 0xFFFFFFFF) { + sprintf(g->Message, MSG(FUNCTION_ERROR), + "SetFilePointer", GetLastError()); + CloseHandle(fp->Handle); + return RC_FX; + } // endif - if (trace) - htrc("done, Tpos=%p newsize=%d drc=%d\n", Tpos, n, drc); + if (trace) + htrc("done, Tpos=%p newsize=%d drc=%d\n", Tpos, n, drc); - if (!SetEndOfFile(fp->Handle)) { - sprintf(g->Message, MSG(FUNCTION_ERROR), - "SetEndOfFile", GetLastError()); - CloseHandle(fp->Handle); - return RC_FX; - } // endif + if (!SetEndOfFile(fp->Handle)) { + sprintf(g->Message, MSG(FUNCTION_ERROR), + "SetEndOfFile", GetLastError()); + CloseHandle(fp->Handle); + return RC_FX; + } // endif - CloseHandle(fp->Handle); #else // UNIX - if (ftruncate(fp->Handle, (off_t)n)) { - sprintf(g->Message, MSG(TRUNCATE_ERROR), strerror(errno)); - close(fp->Handle); - return RC_FX; - } // endif + if (ftruncate(fp->Handle, (off_t)n)) { + sprintf(g->Message, MSG(TRUNCATE_ERROR), strerror(errno)); + close(fp->Handle); + return RC_FX; + } // endif + +#endif // UNIX + } // endif Abort - close(fp->Handle); +#if defined(WIN32) + CloseHandle(fp->Handle); +#else // UNIX + close(fp->Handle); #endif // UNIX } // endif irc @@ -458,10 +493,10 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) /***********************************************************************/ /* Table file close routine for MAP access method. */ /***********************************************************************/ -void MAPFAM::CloseTableFile(PGLOBAL g) +void MAPFAM::CloseTableFile(PGLOBAL g, bool abort) { PlugCloseFile(g, To_Fb); - To_Fb = NULL; // To get correct file size in Cardinality + To_Fb = NULL; // To get correct file size in Cardinality if (trace) htrc("MAP Close: closing %s count=%d\n", @@ -488,7 +523,7 @@ MBKFAM::MBKFAM(PDOSDEF tdp) : MAPFAM(tdp) Block = tdp->GetBlock(); Last = tdp->GetLast(); Nrec = tdp->GetElemt(); - BlkPos = NULL; + BlkPos = tdp->GetTo_Pos(); CurNum = Nrec; } // end of MBKFAM standard constructor @@ -508,9 +543,7 @@ void MBKFAM::Reset(void) /***********************************************************************/ int MBKFAM::Cardinality(PGLOBAL g) { - // Should not be called in this version - return (g) ? -1 : 0; -//return (g) ? (int)((Block - 1) * Nrec + Last) : 1; + return (g) ? (int)((Block - 1) * Nrec + Last) : 1; } // end of Cardinality /***********************************************************************/ @@ -534,8 +567,49 @@ int MBKFAM::GetRowID(void) /***********************************************************************/ int MBKFAM::ReadBuffer(PGLOBAL g) { - strcpy(g->Message, "This AM cannot be used in this version"); - return RC_FX; + int len; + + /*********************************************************************/ + /* Sequential block reading when Placed is not true. */ + /*********************************************************************/ + if (Placed) { + Placed = false; + } else if (Mempos >= Top) { // Are we at the end of the memory + return RC_EF; + } else if (++CurNum < Nrec) { + Fpos = Mempos; + } else { + /*******************************************************************/ + /* New block. 
*/ + /*******************************************************************/ + CurNum = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*******************************************************************/ + /* Before reading a new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc + + Fpos = Mempos = Memory + BlkPos[CurBlk]; + } // endif's + + // Immediately calculate next position (Used by DeleteDB) + while (*Mempos++ != '\n') ; // What about Unix ??? + + // Set caller line buffer + len = (Mempos - Fpos) - Ending; + memcpy(Tdbp->GetLine(), Fpos, len); + Tdbp->GetLine()[len] = '\0'; + return RC_OK; } // end of ReadBuffer /***********************************************************************/ @@ -607,6 +681,16 @@ bool MPXFAM::SetPos(PGLOBAL g, int pos) } // end of SetPos /***********************************************************************/ +/* Initialize CurBlk, CurNum, Mempos and Fpos for indexed DELETE. */ +/***********************************************************************/ +int MPXFAM::InitDelete(PGLOBAL g, int fpos, int spos) + { + Fpos = Memory + Headlen + fpos * Lrecl; + Mempos = Fpos + Lrecl; + return RC_OK; + } // end of InitDelete + +/***********************************************************************/ /* ReadBuffer: Read one line for a mapped Fix file. */ /***********************************************************************/ int MPXFAM::ReadBuffer(PGLOBAL g) @@ -618,17 +702,29 @@ int MPXFAM::ReadBuffer(PGLOBAL g) Placed = false; } else if (Mempos >= Top) { // Are we at the end of the memory return RC_EF; - } else if (++CurNum < Nrec) { + } else if (++CurNum < Nrec) { Fpos = Mempos; - } else { + } else { /*******************************************************************/ /* New block. */ /*******************************************************************/ - CurNum = 0; + CurNum = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*******************************************************************/ + /* Before reading a new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc - if (++CurBlk >= Block) - return RC_EF; - Fpos = Mempos = Headlen + Memory + CurBlk * Blksize; } // endif's diff --git a/storage/connect/filamap.h b/storage/connect/filamap.h index 7f71b90a18f..1d85fa36155 100644 --- a/storage/connect/filamap.h +++ b/storage/connect/filamap.h @@ -1,7 +1,7 @@ /*************** FilAMap H Declares Source Code File (.H) **************/ -/* Name: FILAMAP.H Version 1.2 */ +/* Name: FILAMAP.H Version 1.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* This file contains the MAP file access method classes declares. */ /***********************************************************************/ @@ -33,23 +33,26 @@ class DllExport MAPFAM : public TXTFAM { virtual void Reset(void); virtual int GetFileLength(PGLOBAL g); virtual int Cardinality(PGLOBAL g) {return (g) ? 
-1 : 0;} + virtual int MaxBlkSize(PGLOBAL g, int s) {return s;} virtual int GetRowID(void); virtual bool RecordPos(PGLOBAL g); - virtual bool SetPos(PGLOBAL g, int recpos); + virtual bool SetPos(PGLOBAL g, int recpos); virtual int SkipRecord(PGLOBAL g, bool header); virtual bool OpenTableFile(PGLOBAL g); virtual bool DeferReading(void) {return false;} virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual int DeleteRecords(PGLOBAL g, int irc); + virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); protected: + virtual int InitDelete(PGLOBAL g, int fpos, int spos); + // Members char *Memory; // Pointer on file mapping view. char *Mempos; // Position of next data to read - char *Fpos; // Position of last read record + char *Fpos; // Position of last read record char *Tpos; // Target Position for delete move char *Spos; // Start position for delete move char *Top; // Mark end of file mapping view @@ -71,6 +74,8 @@ class DllExport MBKFAM : public MAPFAM { // Methods virtual void Reset(void); virtual int Cardinality(PGLOBAL g); + virtual int MaxBlkSize(PGLOBAL g, int s) + {return TXTFAM::MaxBlkSize(g, s);} virtual int GetRowID(void); virtual int SkipRecord(PGLOBAL g, bool header); virtual int ReadBuffer(PGLOBAL g); @@ -96,12 +101,17 @@ class DllExport MPXFAM : public MBKFAM { // Methods virtual int Cardinality(PGLOBAL g) {return TXTFAM::Cardinality(g);} - virtual bool SetPos(PGLOBAL g, int recpos); + virtual int MaxBlkSize(PGLOBAL g, int s) + {return TXTFAM::MaxBlkSize(g, s);} + virtual bool SetPos(PGLOBAL g, int recpos); + virtual int GetNextPos(void) {return (int)Fpos + Nrec;} virtual bool DeferReading(void) {return false;} virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); protected: + virtual int InitDelete(PGLOBAL g, int fpos, int spos); + // No additional members }; // end of class MPXFAM diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index 7ca98eeff55..a214ab8acf2 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -1,11 +1,11 @@ /*********** File AM Dbf C++ Program Source Code File (.CPP) ****************/ /* PROGRAM NAME: FILAMDBF */ /* ------------- */ -/* Version 1.6 */ +/* Version 1.7 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -176,7 +176,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PSZ fn, DBFHEADER *buf) /* DBFColumns: constructs the result blocks containing the description */ /* of all the columns of a DBF file that will be retrieved by #GetData. 
*/ /****************************************************************************/ -PQRYRES DBFColumns(PGLOBAL g, const char *fn, BOOL info) +PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info) { int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_SHORT}; @@ -186,7 +186,7 @@ PQRYRES DBFColumns(PGLOBAL g, const char *fn, BOOL info) char buf[2], filename[_MAX_PATH]; int ncol = sizeof(buftyp) / sizeof(int); int rc, type, len, field, fields; - BOOL bad; + bool bad; DBFHEADER mainhead; DESCRIPTOR thisfield; FILE *infile = NULL; @@ -205,7 +205,7 @@ PQRYRES DBFColumns(PGLOBAL g, const char *fn, BOOL info) /************************************************************************/ /* Open the input file. */ /************************************************************************/ - PlugSetPath(filename, fn, PlgGetDataPath(g)); + PlugSetPath(filename, fn, dp); if (!(infile= global_fopen(g, MSGID_CANNOT_OPEN, filename, "rb"))) return NULL; @@ -668,12 +668,9 @@ void DBFFAM::ResetBuffer(PGLOBAL g) /*********************************************************************/ /* If access is random, performances can be much better when the */ /* reads are done on only one row, except for small tables that can */ - /* be entirely read in one block. If the index is just used as a */ - /* bitmap filter, as for Update or delete, reading will be */ - /* sequential and we better keep block reading. */ + /* be entirely read in one block. */ /*********************************************************************/ - if (Tdbp->GetKindex() && Tdbp->GetMode() == MODE_READ && - ReadBlks != 1) { + if (Tdbp->GetKindex() && ReadBlks != 1) { Nrec = 1; // Better for random access Rbuf = 0; Blksize = Lrecl; @@ -753,6 +750,36 @@ bool DBFFAM::CopyHeader(PGLOBAL g) return rc; } // end of CopyHeader +#if 0 // Not useful when UseTemp is false. +/***********************************************************************/ +/* Mark the line to delete with '*' (soft delete). */ +/* NOTE: this is not ready for UseTemp. */ +/***********************************************************************/ +int DBFFAM::InitDelete(PGLOBAL g, int fpos, int spos) + { + int rc = RC_FX; + size_t lrecl = (size_t)Lrecl; + + if (Nrec != 1) + strcpy(g->Message, "Cannot delete in block mode"); + else if (fseek(Stream, Headlen + fpos * Lrecl, SEEK_SET)) + sprintf(g->Message, MSG(FSETPOS_ERROR), 0); + else if (fread(To_Buf, 1, lrecl, Stream) != lrecl) + sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(errno)); + else + *To_Buf = '*'; + + if (fseek(Stream, Headlen + fpos * Lrecl, SEEK_SET)) + sprintf(g->Message, MSG(FSETPOS_ERROR), 0); + else if (fwrite(To_Buf, 1, lrecl, Stream) != lrecl) + sprintf(g->Message, MSG(FWRITE_ERROR), strerror(errno)); + else + rc = RC_NF; // Ok, Nothing else to do + + return rc; + } // end of InitDelete +#endif // 0 + /***********************************************************************/ /* Data Base delete line routine for DBF access methods. */ /* Deleted lines are just flagged in the first buffer character. */ @@ -791,11 +818,13 @@ void DBFFAM::Rewind(void) /***********************************************************************/ /* Table file close routine for DBF access method. 
*/ /***********************************************************************/ -void DBFFAM::CloseTableFile(PGLOBAL g) +void DBFFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = RC_OK, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + // Closing is True if last Write was in error if (mode == MODE_INSERT && CurNum && !Closing) { // Some more inserted lines remain to be written @@ -806,21 +835,21 @@ void DBFFAM::CloseTableFile(PGLOBAL g) if (Modif && !Closing) { // Last updated block remains to be written Closing = true; - wrc = ReadBuffer(g); + wrc = WriteModifiedBlock(g); } // endif Modif if (UseTemp && T_Stream && wrc == RC_OK) { - // Copy any remaining lines - bool b; - - Fpos = Tdbp->Cardinality(g); - - if ((rc = MoveIntermediateLines(g, &b)) == RC_OK) { - // Delete the old file and rename the new temp file. - RenameTempFile(g); - goto fin; - } // endif rc + if (!Abort) { + // Copy any remaining lines + bool b; + + Fpos = Tdbp->Cardinality(g); + Abort = MoveIntermediateLines(g, &b) != RC_OK; + } // endif Abort + // Delete the old file and rename the new temp file. + RenameTempFile(g); + goto fin; } // endif UseTemp } // endif's mode diff --git a/storage/connect/filamdbf.h b/storage/connect/filamdbf.h index b85b9fc47fe..da84d7685a8 100644 --- a/storage/connect/filamdbf.h +++ b/storage/connect/filamdbf.h @@ -1,7 +1,7 @@ /***************** FilAmDbf H Declares Source Code File (.H) ****************/ -/* Name: filamdbf.h Version 1.3 */ +/* Name: filamdbf.h Version 1.4 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* This file contains the DBF file access method classes declares. */ /****************************************************************************/ @@ -19,7 +19,7 @@ typedef class DBMFAM *PDBMFAM; /****************************************************************************/ /* Functions used externally. */ /****************************************************************************/ -PQRYRES DBFColumns(PGLOBAL g, const char *fn, BOOL info); +PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info); /****************************************************************************/ /* This is the base class for dBASE file access methods. 
*/ @@ -40,8 +40,8 @@ class DllExport DBFBASE { // Members int Records; /* records in the file */ bool Accept; /* true if bad lines are accepted */ - int Nerr; /* Number of bad records */ - int Maxerr; /* Maximum number of bad records */ + int Nerr; /* Number of bad records */ + int Maxerr; /* Maximum number of bad records */ int ReadMode; /* 1: ALL 2: DEL 0: NOT DEL */ }; // end of class DBFBASE @@ -67,11 +67,12 @@ class DllExport DBFFAM : public FIXFAM, public DBFBASE { virtual void ResetBuffer(PGLOBAL g); virtual int ReadBuffer(PGLOBAL g); virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); protected: virtual bool CopyHeader(PGLOBAL g); +//virtual int InitDelete(PGLOBAL g, int fpos, int spos); // Members }; // end of class DBFFAM diff --git a/storage/connect/filamfix.cpp b/storage/connect/filamfix.cpp index 9338ae322db..1fa72d52746 100644 --- a/storage/connect/filamfix.cpp +++ b/storage/connect/filamfix.cpp @@ -1,7 +1,7 @@ /*********** File AM Fix C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMFIX */ /* ------------- */ -/* Version 1.5 */ +/* Version 1.6 */ /* */ /* COPYRIGHT: */ /* ---------- */ @@ -80,6 +80,37 @@ FIXFAM::FIXFAM(PFIXFAM txfp) : BLKFAM(txfp) } // end of FIXFAM copy constructor /***********************************************************************/ +/* SetPos: Replace the table at the specified position. */ +/***********************************************************************/ +bool FIXFAM::SetPos(PGLOBAL g, int pos) + { + if (pos < 0) { + strcpy(g->Message, MSG(INV_REC_POS)); + return true; + } // endif recpos + + CurBlk = pos / Nrec; + CurNum = pos % Nrec; +#if defined(_DEBUG) + num_eq[(CurBlk == OldBlk) ? 1 : 0]++; +#endif + + // Indicate the table position was externally set + Placed = true; + return false; + } // end of SetPos + +/***********************************************************************/ +/* Initialize CurBlk and CurNum for indexed DELETE. */ +/***********************************************************************/ +int FIXFAM::InitDelete(PGLOBAL g, int fpos, int spos) + { + CurBlk = fpos / Nrec; + CurNum = fpos % Nrec; + return RC_OK; + } // end of InitDelete + +/***********************************************************************/ /* Allocate the block buffer for the table. */ /***********************************************************************/ bool FIXFAM::AllocateBuffer(PGLOBAL g) @@ -128,12 +159,9 @@ void FIXFAM::ResetBuffer(PGLOBAL g) /*********************************************************************/ /* If access is random, performances can be much better when the */ /* reads are done on only one row, except for small tables that can */ - /* be entirely read in one block. If the index is just used as a */ - /* bitmap filter as for Update or Delete reading will be sequential */ - /* and we better keep block reading. */ + /* be entirely read in one block. */ /*********************************************************************/ - if (Tdbp->GetMode() == MODE_READ && ReadBlks != 1 && !Padded && - Tdbp->GetKindex() /*&& Tdbp->GetKindex()->IsRandom()*/) { + if (Tdbp->GetKindex() && ReadBlks != 1 && !Padded) { Nrec = 1; // Better for random access Rbuf = 0; Blksize = Lrecl; @@ -144,79 +172,93 @@ void FIXFAM::ResetBuffer(PGLOBAL g) } // end of ResetBuffer /***********************************************************************/ +/* WriteModifiedBlock: Used when updating. 
*/ +/***********************************************************************/ +int FIXFAM::WriteModifiedBlock(PGLOBAL g) + { + /*********************************************************************/ + /* The old block was modified in Update mode. */ + /* In Update mode we simply rewrite the old block on itself. */ + /*********************************************************************/ + int rc = RC_OK; + bool moved = false; + + // Using temp copy any intermediate lines. + if (UseTemp && MoveIntermediateLines(g, &moved)) + rc = RC_FX; + + // Fpos is last position, Headlen is DBF file header length + else if (!moved && fseek(Stream, Headlen + Fpos * Lrecl, SEEK_SET)) { + sprintf(g->Message, MSG(FSETPOS_ERROR), 0); + rc = RC_FX; + } else if (fwrite(To_Buf, Lrecl, Rbuf, T_Stream) != (size_t)Rbuf) { + sprintf(g->Message, MSG(FWRITE_ERROR), strerror(errno)); + rc = RC_FX; + } else + Spos = Fpos + Nrec; // + Rbuf ??? + + if (Closing || rc != RC_OK) { // Error or called from CloseDB + Closing = true; // To tell CloseDB about error + return rc; + } // endif Closing + + // NOTE: Next line was added to avoid a very strange fread bug. + // When the fseek is not executed (even the file has the good + // pointer position) the next read can happen anywhere in the file. + OldBlk = CurBlk; // This will force fseek to be executed + Modif = 0; + return rc; + } // end of WriteModifiedBlock + +/***********************************************************************/ /* ReadBuffer: Read one line for a FIX file. */ /***********************************************************************/ int FIXFAM::ReadBuffer(PGLOBAL g) { int n, rc = RC_OK; - if (!Closing) { + /*********************************************************************/ + /* Sequential reading when Placed is not true. */ + /*********************************************************************/ + if (Placed) { + Tdbp->SetLine(To_Buf + CurNum * Lrecl); + Placed = false; + } else if (++CurNum < Rbuf) { + Tdbp->IncLine(Lrecl); // Used by DOSCOL functions + return RC_OK; + } else if (Rbuf < Nrec && CurBlk != -1) { + return RC_EF; + } else { /*******************************************************************/ - /* Sequential reading when Placed is not true. */ + /* New block. */ /*******************************************************************/ - if (Placed) { - Tdbp->SetLine(To_Buf + CurNum * Lrecl); - Placed = false; - } else if (++CurNum < Rbuf) { - Tdbp->IncLine(Lrecl); // Used by DOSCOL functions - return RC_OK; - } else if (Rbuf < Nrec && CurBlk != -1) { - return RC_EF; - } else { - /*****************************************************************/ - /* New block. */ - /*****************************************************************/ - CurNum = 0; - Tdbp->SetLine(To_Buf); - - if (++CurBlk >= Block) - return RC_EF; - - } // endif's - - if (OldBlk == CurBlk) { - IsRead = true; // Was read indeed - return RC_OK; // Block is already there - } // endif OldBlk + CurNum = 0; + Tdbp->SetLine(To_Buf); - } // endif !Closing + next: + if (++CurBlk >= Block) + return RC_EF; - if (Modif) { /*******************************************************************/ - /* The old block was modified in Update mode. */ - /* In Update mode we simply rewrite the old block on itself. */ + /* Before reading a new block, check whether block indexing */ + /* can be done, as well as for join as for local filtering. 
*/ /*******************************************************************/ - bool moved = false; + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc + } // endif's - if (UseTemp) // Copy any intermediate lines. - if (MoveIntermediateLines(g, &moved)) - rc = RC_FX; + if (OldBlk == CurBlk) { + IsRead = true; // Was read indeed + return RC_OK; // Block is already there + } // endif OldBlk - if (rc == RC_OK) { - // Fpos is last position, Headlen is DBF file header length - if (!moved && fseek(Stream, Headlen + Fpos * Lrecl, SEEK_SET)) { - sprintf(g->Message, MSG(FSETPOS_ERROR), 0); - rc = RC_FX; - } else if (fwrite(To_Buf, Lrecl, Rbuf, T_Stream) != (size_t)Rbuf) { - sprintf(g->Message, MSG(FWRITE_ERROR), strerror(errno)); - rc = RC_FX; - } // endif fwrite - - Spos = Fpos + Nrec; // + Rbuf ??? - } // endif rc - - if (Closing || rc != RC_OK) { // Error or called from CloseDB - Closing = true; // To tell CloseDB about error - return rc; - } // endif Closing - - // NOTE: Next line was added to avoid a very strange fread bug. - // When the fseek is not executed (even the file has the good - // pointer position) the next read can happen anywhere in the file. - OldBlk = CurBlk; // This will force fseek to be executed - Modif = 0; -// Spos = Fpos + Nrec; done above - } // endif Mode + // Write modified block in mode UPDATE + if (Modif && (rc = WriteModifiedBlock(g)) != RC_OK) + return rc; // This could be done only for new block. However note that FPOS // is used as block position when updating and as line position @@ -234,8 +276,6 @@ int FIXFAM::ReadBuffer(PGLOBAL g) if (trace > 1) htrc("File position is now %d\n", ftell(Stream)); -//long tell = ftell(Stream); not used - if (Padded) n = fread(To_Buf, (size_t)Blksize, 1, Stream); else @@ -304,19 +344,23 @@ int FIXFAM::WriteBuffer(PGLOBAL g) } else { // Mode == MODE_UPDATE // T_Stream is the temporary stream or the table file stream itself - if (!T_Stream) - { - if (UseTemp /*&& Tdbp->GetMode() == MODE_UPDATE*/) { + if (!T_Stream) { + if (UseTemp) { if (OpenTempFile(g)) return RC_FX; - - if (CopyHeader(g)) // For DBF tables + else if (CopyHeader(g)) // For DBF tables return RC_FX; } else T_Stream = Stream; - } - Modif++; // Modified line in Update mode + + } // endif T_Stream + + if (Nrec > 1) + Modif++; // Modified line in blocked mode + else if (WriteModifiedBlock(g)) // Indexed update + return RC_FX; + } // endif Mode return RC_OK; @@ -516,36 +560,37 @@ bool FIXFAM::MoveIntermediateLines(PGLOBAL g, bool *b) /***********************************************************************/ /* Table file close routine for FIX access method. */ /***********************************************************************/ -void FIXFAM::CloseTableFile(PGLOBAL g) +void FIXFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = RC_OK, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + // Closing is True if last Write was in error if (mode == MODE_INSERT && CurNum && !Closing) { // Some more inserted lines remain to be written Rbuf = CurNum--; -// Closing = true; wrc = WriteBuffer(g); } else if (mode == MODE_UPDATE) { if (Modif && !Closing) { // Last updated block remains to be written - Closing = true; - wrc = ReadBuffer(g); + Closing = true; // ??? 
+ wrc = WriteModifiedBlock(g); } // endif Modif if (UseTemp && T_Stream && wrc == RC_OK) { - // Copy any remaining lines - bool b; - - Fpos = Tdbp->Cardinality(g); - - if ((rc = MoveIntermediateLines(g, &b)) == RC_OK) { - // Delete the old file and rename the new temp file. - RenameTempFile(g); - goto fin; - } // endif rc - + if (!Abort) { + // Copy any remaining lines + bool b; + + Fpos = Tdbp->Cardinality(g); + Abort = MoveIntermediateLines(g, &b) != RC_OK; + } // endif Abort + + // Delete the old file and rename the new temp file. + RenameTempFile(g); + goto fin; } // endif UseTemp } // endif's mode @@ -602,7 +647,9 @@ bool BGXFAM::BigSeek(PGLOBAL g, HANDLE h, BIGINT pos, int org) } // endif #else // !WIN32 if (lseek64(h, pos, org) < 0) { - sprintf(g->Message, MSG(ERROR_IN_LSK), errno); +// sprintf(g->Message, MSG(ERROR_IN_LSK), errno); + sprintf(g->Message, "lseek64: %s", strerror(errno)); + printf("%s\n", g->Message); return true; } // endif #endif // !WIN32 @@ -804,7 +851,7 @@ bool BGXFAM::OpenTableFile(PGLOBAL g) #else // UNIX int rc = 0; int oflag = O_LARGEFILE; // Enable file size > 2G - mode_t tmode = 0; + mode_t tmode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; /*********************************************************************/ /* Create the file object according to access mode */ @@ -829,7 +876,7 @@ bool BGXFAM::OpenTableFile(PGLOBAL g) break; case MODE_INSERT: oflag |= (O_WRONLY | O_CREAT | O_APPEND); - tmode = S_IREAD | S_IWRITE; + // tmode = S_IREAD | S_IWRITE; break; default: sprintf(g->Message, MSG(BAD_OPEN_MODE), mode); @@ -988,73 +1035,92 @@ int BGXFAM::Cardinality(PGLOBAL g) } // end of Cardinality /***********************************************************************/ +/* WriteModifiedBlock: Used when updating. */ +/***********************************************************************/ +int BGXFAM::WriteModifiedBlock(PGLOBAL g) + { + /*********************************************************************/ + /* The old block was modified in Update mode. */ + /* In Update mode we simply rewrite the old block on itself. */ + /*********************************************************************/ + int rc = RC_OK; + bool moved = false; + + if (UseTemp) // Copy any intermediate lines. + if (MoveIntermediateLines(g, &moved)) + rc = RC_FX; + + if (rc == RC_OK) { + // Set file position to OldBlk position (Fpos) + if (!moved && BigSeek(g, Hfile, (BIGINT)Fpos * (BIGINT)Lrecl)) + rc = RC_FX; + else if (BigWrite(g, Tfile, To_Buf, Lrecl * Rbuf)) + rc = RC_FX; + + Spos = Fpos + Nrec; // + Rbuf ??? + } // endif rc + + if (Closing || rc != RC_OK) // Error or called from CloseDB + return rc; + + // NOTE: Next line was added to avoid a very strange fread bug. + // When the fseek is not executed (even the file has the good + // pointer position) the next read can happen anywhere in the file. + OldBlk = CurBlk; // This will force fseek to be executed + Modif = 0; + return rc; + } // end of WriteModifiedBlock + +/***********************************************************************/ /* ReadBuffer: Read Nrec lines for a big fixed/binary file. */ /***********************************************************************/ int BGXFAM::ReadBuffer(PGLOBAL g) { int nbr, rc = RC_OK; - if (!Closing) { + /*********************************************************************/ + /* Sequential reading when Placed is not true. 
*/ + /*********************************************************************/ + if (Placed) { + Tdbp->SetLine(To_Buf + CurNum * Lrecl); + Placed = false; + } else if (++CurNum < Rbuf) { + Tdbp->IncLine(Lrecl); // Used by DOSCOL functions + return RC_OK; + } else if (Rbuf < Nrec && CurBlk != -1) { + return RC_EF; + } else { /*******************************************************************/ - /* Sequential reading when Placed is not true. */ + /* New block. */ /*******************************************************************/ - if (Placed) { - Tdbp->SetLine(To_Buf + CurNum * Lrecl); - Placed = false; - } else if (++CurNum < Rbuf) { - Tdbp->IncLine(Lrecl); // Used by DOSCOL functions - return RC_OK; - } else if (Rbuf < Nrec && CurBlk != -1) { + CurNum = 0; + Tdbp->SetLine(To_Buf); + + next: + if (++CurBlk >= Block) return RC_EF; - } else { - /*****************************************************************/ - /* New block. */ - /*****************************************************************/ - CurNum = 0; - Tdbp->SetLine(To_Buf); - if (++CurBlk >= Block) + /*******************************************************************/ + /* Before reading a new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: return RC_EF; + case RC_NF: + goto next; + } // endswitch rc - } // endif's + } // endif's - if (OldBlk == CurBlk) { - IsRead = true; // Was read indeed - return RC_OK; // Block is already there - } // endif OldBlk + if (OldBlk == CurBlk) { + IsRead = true; // Was read indeed + return RC_OK; // Block is already there + } // endif OldBlk - } // endif !Closing - - if (Modif) { - /*******************************************************************/ - /* The old block was modified in Update mode. */ - /* In Update mode we simply rewrite the old block on itself. */ - /*******************************************************************/ - bool moved = false; - - if (UseTemp) // Copy any intermediate lines. - if (MoveIntermediateLines(g, &moved)) - rc = RC_FX; - - if (rc == RC_OK) { - // Set file position to OldBlk position (Fpos) - if (!moved && BigSeek(g, Hfile, (BIGINT)Fpos * (BIGINT)Lrecl)) - rc = RC_FX; - else if (BigWrite(g, Tfile, To_Buf, Lrecl * Rbuf)) - rc = RC_FX; - - Spos = Fpos + Nrec; // + Rbuf ??? - } // endif rc - - if (Closing || rc != RC_OK) // Error or called from CloseDB - return rc; - - // NOTE: Next line was added to avoid a very strange fread bug. - // When the fseek is not executed (even the file has the good - // pointer position) the next read can happen anywhere in the file. 
- OldBlk = CurBlk; // This will force fseek to be executed - Modif = 0; - } // endif Mode + // Write modified block in mode UPDATE + if (Modif && (rc = WriteModifiedBlock(g)) != RC_OK) + return rc; Fpos = CurBlk * Nrec; @@ -1116,16 +1182,21 @@ int BGXFAM::WriteBuffer(PGLOBAL g) } else { // Mode == MODE_UPDATE // Tfile is the temporary file or the table file handle itself - if (Tfile == INVALID_HANDLE_VALUE) - { + if (Tfile == INVALID_HANDLE_VALUE) { if (UseTemp /*&& Tdbp->GetMode() == MODE_UPDATE*/) { if (OpenTempFile(g)) return RC_FX; } else Tfile = Hfile; - } - Modif++; // Modified line in Update mode + + } // endif Tfile + + if (Nrec > 1) + Modif++; // Modified line in blocked mode + else if (WriteModifiedBlock(g)) // Indexed update + return RC_FX; + } // endif Mode return RC_OK; @@ -1210,14 +1281,10 @@ int BGXFAM::DeleteRecords(PGLOBAL g, int irc) if (trace > 1) htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos); - } else { + } else if (irc != RC_OK) { /*******************************************************************/ /* Last call after EOF has been reached. */ /*******************************************************************/ - char filename[_MAX_PATH]; - - PlugSetPath(filename, To_File, Tdbp->GetPath()); - if (UseTemp) { /*****************************************************************/ /* Ok, now delete old file and rename new temp file. */ @@ -1352,11 +1419,13 @@ bool BGXFAM::MoveIntermediateLines(PGLOBAL g, bool *b) /***********************************************************************/ /* Data Base close routine for BIGFIX access method. */ /***********************************************************************/ -void BGXFAM::CloseTableFile(PGLOBAL g) +void BGXFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = RC_OK, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + // Closing is True if last Write was in error if (mode == MODE_INSERT && CurNum && !Closing) { // Some more inserted lines remain to be written @@ -1366,21 +1435,21 @@ void BGXFAM::CloseTableFile(PGLOBAL g) if (Modif && !Closing) { // Last updated block remains to be written Closing = true; - wrc = ReadBuffer(g); + wrc = WriteModifiedBlock(g); } // endif Modif if (UseTemp && Tfile && wrc == RC_OK) { - // Copy any remaining lines - bool b; - - Fpos = Tdbp->Cardinality(g); - - if ((rc = MoveIntermediateLines(g, &b)) == RC_OK) { - // Delete the old file and rename the new temp file. - RenameTempFile(g); - goto fin; - } // endif rc - + if (!Abort) { + // Copy any remaining lines + bool b; + + Fpos = Tdbp->Cardinality(g); + Abort = MoveIntermediateLines(g, &b) != RC_OK; + } // endif Abort + + // Delete the old file and rename the new temp file. + RenameTempFile(g); + goto fin; } // endif UseTemp } // endif's mode diff --git a/storage/connect/filamfix.h b/storage/connect/filamfix.h index 6f9e6ef3b45..a99a36af232 100644 --- a/storage/connect/filamfix.h +++ b/storage/connect/filamfix.h @@ -1,7 +1,7 @@ /************** FilAMFix H Declares Source Code File (.H) **************/ -/* Name: FILAMFIX.H Version 1.2 */ +/* Name: FILAMFIX.H Version 1.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2005 - 2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2005 - 2014 */ /* */ /* This file contains the FIX file access method classes declares. 
*/ /***********************************************************************/ @@ -25,22 +25,28 @@ class DllExport FIXFAM : public BLKFAM { FIXFAM(PFIXFAM txfp); // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_FIX;} - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) FIXFAM(this);} + virtual AMT GetAmType(void) {return TYPE_AM_FIX;} + virtual PTXF Duplicate(PGLOBAL g) + {return (PTXF)new(g) FIXFAM(this);} // Methods virtual int Cardinality(PGLOBAL g) {return TXTFAM::Cardinality(g);} + virtual int MaxBlkSize(PGLOBAL g, int s) + {return TXTFAM::MaxBlkSize(g, s);} + virtual bool SetPos(PGLOBAL g, int recpos); + virtual int GetNextPos(void) {return Fpos + 1;} virtual bool AllocateBuffer(PGLOBAL g); virtual void ResetBuffer(PGLOBAL g); + virtual int WriteModifiedBlock(PGLOBAL g); virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual void CloseTableFile(PGLOBAL g, bool abort); protected: virtual bool CopyHeader(PGLOBAL g) {return false;} virtual bool MoveIntermediateLines(PGLOBAL g, bool *b); + virtual int InitDelete(PGLOBAL g, int fpos, int spos); // No additional members }; // end of class FIXFAM @@ -58,25 +64,26 @@ class BGXFAM : public FIXFAM { BGXFAM(PBGXFAM txfp); // Implementation - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) BGXFAM(this);} + virtual PTXF Duplicate(PGLOBAL g) + {return (PTXF)new(g) BGXFAM(this);} // Methods - virtual int Cardinality(PGLOBAL g); - virtual bool OpenTableFile(PGLOBAL g); - virtual int ReadBuffer(PGLOBAL g); - virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); - virtual void Rewind(void); + virtual int Cardinality(PGLOBAL g); + virtual bool OpenTableFile(PGLOBAL g); + virtual int WriteModifiedBlock(PGLOBAL g); + virtual int ReadBuffer(PGLOBAL g); + virtual int WriteBuffer(PGLOBAL g); + virtual int DeleteRecords(PGLOBAL g, int irc); + virtual void CloseTableFile(PGLOBAL g, bool abort); + virtual void Rewind(void); protected: - bool BigSeek(PGLOBAL g, HANDLE h, BIGINT pos - , int org = FILE_BEGIN); - int BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req); - bool BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req); virtual bool OpenTempFile(PGLOBAL g); virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); + int BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req); + bool BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req); + bool BigSeek(PGLOBAL g, HANDLE h, BIGINT pos + , int org = FILE_BEGIN); // Members HANDLE Hfile; // Handle(descriptor) to big file diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index e4e9130dc86..dfd5a6638cf 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -1,11 +1,11 @@ /*********** File AM Txt C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMTXT */ /* ------------- */ -/* Version 1.4 */ +/* Version 1.6 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -58,6 +58,11 @@ extern int num_read, num_there, num_eq[2]; // Statistics extern "C" int trace; +/***********************************************************************/ +/* Routine called externally by TXTFAM SortedRows functions. 
*/ +/***********************************************************************/ +PARRAY MakeValueArray(PGLOBAL g, PPARM pp); + /* --------------------------- Class TXTFAM -------------------------- */ /***********************************************************************/ @@ -75,6 +80,12 @@ TXTFAM::TXTFAM(PDOSDEF tdp) To_Buf = NULL; DelBuf = NULL; BlkPos = NULL; + To_Pos = NULL; + To_Sos = NULL; + To_Upd = NULL; + Posar = NULL; + Sosar = NULL; + Updar = NULL; BlkLen = 0; Buflen = 0; Dbflen = 0; @@ -91,10 +102,12 @@ TXTFAM::TXTFAM(PDOSDEF tdp) Rbuf = 0; Modif = 0; Blksize = 0; + Fpos = Spos = Tpos = 0; Padded = false; Eof = tdp->Eof; Ending = tdp->Ending; - CrLf = (char*)(Ending == 2 ? "\r\n" : "\n"); + Abort = false; + CrLf = (char*)(Ending == 1 ? "\n" : "\r\n"); } // end of TXTFAM standard constructor TXTFAM::TXTFAM(PTXF txfp) @@ -109,6 +122,12 @@ TXTFAM::TXTFAM(PTXF txfp) To_Buf = txfp->To_Buf; DelBuf = txfp->DelBuf; BlkPos = txfp->BlkPos; + To_Pos = txfp->To_Pos; + To_Sos = txfp->To_Sos; + To_Upd = txfp->To_Upd; + Posar = txfp->Posar; + Sosar = txfp->Sosar; + Updar = txfp->Updar; BlkLen = txfp->BlkLen; Buflen = txfp->Buflen; Dbflen = txfp->Dbflen; @@ -125,9 +144,14 @@ TXTFAM::TXTFAM(PTXF txfp) Rbuf = txfp->Rbuf; Modif = txfp->Modif; Blksize = txfp->Blksize; + Fpos = txfp->Fpos; + Spos = txfp->Spos; + Tpos = txfp->Tpos; Padded = txfp->Padded; Eof = txfp->Eof; Ending = txfp->Ending; + Abort = txfp->Abort; + CrLf = txfp->CrLf; } // end of TXTFAM copy constructor /***********************************************************************/ @@ -151,9 +175,9 @@ void TXTFAM::Reset(void) /***********************************************************************/ int TXTFAM::GetFileLength(PGLOBAL g) { - char filename[_MAX_PATH]; - int h; - int len; + char filename[_MAX_PATH]; + int h; + int len; PlugSetPath(filename, To_File, Tdbp->GetPath()); h= global_open(g, MSGID_OPEN_MODE_STRERROR, filename, _O_RDONLY); @@ -165,13 +189,13 @@ int TXTFAM::GetFileLength(PGLOBAL g) if (errno != ENOENT) { if (trace) htrc("%s\n", g->Message); + len = -1; - } - else - { + } else { len = 0; // File does not exist yet g->Message[0]= '\0'; - } + } // endif errno + } else { if ((len = _filelength(h)) < 0) sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", filename); @@ -214,7 +238,7 @@ int TXTFAM::Cardinality(PGLOBAL g) } // endif Padded if (trace) - htrc(" Computed max_K=%d Filen=%d lrecl=%d\n", + htrc(" Computed max_K=%d Filen=%d lrecl=%d\n", card, len, Lrecl); } else @@ -228,6 +252,199 @@ int TXTFAM::Cardinality(PGLOBAL g) } // end of Cardinality +/***********************************************************************/ +/* Use BlockTest to reduce the table estimated size. */ +/* Note: This function is meant only for fixed length files but is */ +/* placed here to be available to FIXFAM and MPXFAM classes. */ +/***********************************************************************/ +int TXTFAM::MaxBlkSize(PGLOBAL g, int s) + { + int rc = RC_OK, savcur = CurBlk, blm1 = Block - 1; + int size, last = s - blm1 * Nrec; + + // Roughly estimate the table size as the sum of blocks + // that can contain good rows + for (size = 0, CurBlk = 0; CurBlk < Block; CurBlk++) + if ((rc = Tdbp->TestBlock(g)) == RC_OK) + size += (CurBlk == blm1) ? last : Nrec; + else if (rc == RC_EF) + break; + + CurBlk = savcur; + return size; + } // end of MaxBlkSize + +/***********************************************************************/ +/* AddListValue: Used when doing indexed update or delete. 
*/ +/***********************************************************************/ +bool TXTFAM::AddListValue(PGLOBAL g, int type, void *val, PPARM *top) + { + PPARM pp = (PPARM)PlugSubAlloc(g, NULL, sizeof(PARM)); + + switch (type) { +// case TYPE_INT: +// pp->Value = PlugSubAlloc(g, NULL, sizeof(int)); +// *((int*)pp->Value) = *((int*)val); +// break; + case TYPE_VOID: + pp->Value = (void*)(intptr)*(int*)val; + break; +// case TYPE_STRING: +// pp->Value = PlugSubAlloc(g, NULL, strlen((char*)val) + 1); +// strcpy((char*)pp->Value, (char*)val); +// break; + case TYPE_PCHAR: + pp->Value = val; + break; + default: + return true; + } // endswitch type + + pp->Type = type; + pp->Domain = 0; + pp->Next = *top; + *top = pp; + return false; + } // end of AddListValue + +/***********************************************************************/ +/* Store needed values for indexed UPDATE or DELETE. */ +/***********************************************************************/ +int TXTFAM::StoreValues(PGLOBAL g, bool upd) +{ + int pos = GetPos(); + bool rc = AddListValue(g, TYPE_VOID, &pos, &To_Pos); + + if (!rc) { + pos = GetNextPos(); + rc = AddListValue(g, TYPE_VOID, &pos, &To_Sos); + } // endif rc + + if (upd && !rc) { + char *buf; + + if (Tdbp->PrepareWriting(g)) + return RC_FX; + + buf = (char*)PlugSubAlloc(g, NULL, strlen(Tdbp->GetLine()) + 1); + strcpy(buf, Tdbp->GetLine()); + rc = AddListValue(g, TYPE_PCHAR, buf, &To_Upd); + } // endif upd + + return rc ? RC_FX : RC_OK; +} // end of StoreValues + +/***********************************************************************/ +/* UpdateSortedRows. When updating using indexing, the issue is that */ +/* record are not necessarily updated in sequential order. */ +/* Moving intermediate lines cannot be done while making them because */ +/* this can cause extra wrong records to be included in the new file. */ +/* What we do here is to reorder the updated records and do all the */ +/* updates ordered by record position. */ +/***********************************************************************/ +int TXTFAM::UpdateSortedRows(PGLOBAL g) + { + int *ix, i; + + /*********************************************************************/ + /* Get the stored update values and sort them. */ + /*********************************************************************/ + if (!(Posar = MakeValueArray(g, To_Pos))) { +// strcpy(g->Message, "Position array is null"); +// return RC_INFO; + return RC_OK; // Nothing to do + } else if (!(Sosar = MakeValueArray(g, To_Sos))) { + strcpy(g->Message, "Start position array is null"); + goto err; + } else if (!(Updar = MakeValueArray(g, To_Upd))) { + strcpy(g->Message, "Updated line array is null"); + goto err; + } else if (!(ix = (int*)Posar->GetSortIndex(g))) { + strcpy(g->Message, "Error getting array sort index"); + goto err; + } // endif's + + Rewind(); + + for (i = 0; i < Posar->GetNval(); i++) { + SetPos(g, Sosar->GetIntValue(ix[i])); + Fpos = Posar->GetIntValue(ix[i]); + strcpy(Tdbp->To_Line, Updar->GetStringValue(ix[i])); + + // Now write the updated line. + if (WriteBuffer(g)) + goto err; + + } // endfor i + + return RC_OK; + +err: + if (trace) + htrc("%s\n", g->Message); + + return RC_FX; + } // end of UpdateSortedRows + +/***********************************************************************/ +/* DeleteSortedRows. When deleting using indexing, the issue is that */ +/* record are not necessarily deleted in sequential order. 
Moving */ +/* intermediate lines cannot be done while deleing them because */ +/* this can cause extra wrong records to be included in the new file. */ +/* What we do here is to reorder the deleted record and delete from */ +/* the file from the ordered deleted records. */ +/***********************************************************************/ +int TXTFAM::DeleteSortedRows(PGLOBAL g) + { + int *ix, i, irc; + + /*********************************************************************/ + /* Get the stored delete values and sort them. */ + /*********************************************************************/ + if (!(Posar = MakeValueArray(g, To_Pos))) { +// strcpy(g->Message, "Position array is null"); +// return RC_INFO; + return RC_OK; // Nothing to do + } else if (!(Sosar = MakeValueArray(g, To_Sos))) { + strcpy(g->Message, "Start position array is null"); + goto err; + } else if (!(ix = (int*)Posar->GetSortIndex(g))) { + strcpy(g->Message, "Error getting array sort index"); + goto err; + } // endif's + + Tpos = Spos = 0; + + for (i = 0; i < Posar->GetNval(); i++) { + if ((irc = InitDelete(g, Posar->GetIntValue(ix[i]), + Sosar->GetIntValue(ix[i])) == RC_FX)) + goto err; + + // Now delete the sorted rows + if (DeleteRecords(g, irc)) + goto err; + + } // endfor i + + return RC_OK; + +err: + if (trace) + htrc("%s\n", g->Message); + + return RC_FX; + } // end of DeleteSortedRows + +/***********************************************************************/ +/* The purpose of this function is to deal with access methods that */ +/* are not coherent regarding the use of SetPos and GetPos. */ +/***********************************************************************/ +int TXTFAM::InitDelete(PGLOBAL g, int fpos, int spos) + { + strcpy(g->Message, "InitDelete should not be used by this table type"); + return RC_FX; + } // end of InitDelete + /* --------------------------- Class DOSFAM -------------------------- */ /***********************************************************************/ @@ -238,7 +455,6 @@ DOSFAM::DOSFAM(PDOSDEF tdp) : TXTFAM(tdp) To_Fbt = NULL; Stream = NULL; T_Stream = NULL; - Fpos = Spos = Tpos = 0; UseTemp = false; Bin = false; } // end of DOSFAM standard constructor @@ -248,13 +464,21 @@ DOSFAM::DOSFAM(PDOSFAM tdfp) : TXTFAM(tdfp) To_Fbt = tdfp->To_Fbt; Stream = tdfp->Stream; T_Stream = tdfp->T_Stream; - Fpos = tdfp->Fpos; - Spos = tdfp->Spos; - Tpos = tdfp->Tpos; UseTemp = tdfp->UseTemp; Bin = tdfp->Bin; } // end of DOSFAM copy constructor +DOSFAM::DOSFAM(PBLKFAM tdfp, PDOSDEF tdp) : TXTFAM(tdp) + { + Tdbp = tdfp->Tdbp; + To_Fb = tdfp->To_Fb; + To_Fbt = tdfp->To_Fbt; + Stream = tdfp->Stream; + T_Stream = tdfp->T_Stream; + UseTemp = tdfp->UseTemp; + Bin = tdfp->Bin; + } // end of DOSFAM constructor from BLKFAM + /***********************************************************************/ /* Reset: reset position values at the beginning of file. */ /***********************************************************************/ @@ -295,6 +519,15 @@ int DOSFAM::Cardinality(PGLOBAL g) } // end of Cardinality /***********************************************************************/ +/* Use BlockTest to reduce the table estimated size. */ +/* Note: This function is not really implemented yet. */ +/***********************************************************************/ +int DOSFAM::MaxBlkSize(PGLOBAL g, int s) + { + return s; + } // end of MaxBlkSize + +/***********************************************************************/ /* OpenTableFile: Open a DOS/UNIX table file using C standard I/Os. 
*/ /***********************************************************************/ bool DOSFAM::OpenTableFile(PGLOBAL g) @@ -304,8 +537,8 @@ bool DOSFAM::OpenTableFile(PGLOBAL g) MODE mode = Tdbp->Mode; PDBUSER dbuserp = PlgGetUser(g); - // This is required when using Unix files under Windows - Bin = (Ending == 1); + // This is required when using Unix files under Windows and vice versa + Bin = (Blocked || Ending != CRLF); switch (mode) { case MODE_READ: @@ -347,7 +580,7 @@ bool DOSFAM::OpenTableFile(PGLOBAL g) } // endswitch Mode // For blocked I/O or for moving lines, open the table in binary - strcat(opmode, (Blocked || Bin) ? "b" : "t"); + strcat(opmode, (Bin) ? "b" : "t"); // Now open the file stream PlugSetPath(filename, To_File, Tdbp->GetPath()); @@ -381,7 +614,7 @@ bool DOSFAM::AllocateBuffer(PGLOBAL g) MODE mode = Tdbp->Mode; // Lrecl does not include line ending - Buflen = Lrecl + Ending + ((Bin) ? 1 : 0); + Buflen = Lrecl + Ending + ((Bin) ? 1 : 0) + 1; // Sergei if (trace) htrc("SubAllocating a buffer of %d bytes\n", Buflen); @@ -458,6 +691,21 @@ bool DOSFAM::RecordPos(PGLOBAL g) } // end of RecordPos /***********************************************************************/ +/* Initialize Fpos and the current position for indexed DELETE. */ +/***********************************************************************/ +int DOSFAM::InitDelete(PGLOBAL g, int fpos, int spos) + { + Fpos = fpos; + + if (fseek(Stream, spos, SEEK_SET)) { + sprintf(g->Message, MSG(FSETPOS_ERROR), Fpos); + return RC_FX; + } // endif + + return RC_OK; + } // end of InitDelete + +/***********************************************************************/ /* Skip one record in file. */ /***********************************************************************/ int DOSFAM::SkipRecord(PGLOBAL g, bool header) @@ -509,19 +757,35 @@ int DOSFAM::ReadBuffer(PGLOBAL g) if (trace > 1) htrc("ReadBuffer: Tdbp=%p To_Line=%p Placed=%d\n", - Tdbp, Tdbp->To_Line, Placed); + Tdbp, Tdbp->To_Line, Placed); if (!Placed) { /*******************************************************************/ /* Record file position in case of UPDATE or DELETE. */ /*******************************************************************/ + next: if (RecordPos(g)) return RC_FX; CurBlk = (int)Rows++; - if (trace > 1) - htrc("ReadBuffer: CurBlk=%d\n", CurBlk); + if (trace > 1) + htrc("ReadBuffer: CurBlk=%d\n", CurBlk); + + /********************************************************************/ + /* Check whether optimization on ROWID */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + // Skip this record + if ((rc = SkipRecord(g, FALSE)) != RC_OK) + return rc; + + goto next; + } // endswitch rc } else Placed = false; @@ -594,12 +858,11 @@ int DOSFAM::ReadBuffer(PGLOBAL g) /***********************************************************************/ int DOSFAM::WriteBuffer(PGLOBAL g) { - char *crlf = "\n"; - int curpos = 0; + int curpos = 0; bool moved = true; // T_Stream is the temporary stream or the table file stream itself - if (!T_Stream) + if (!T_Stream) { if (UseTemp && Tdbp->Mode == MODE_UPDATE) { if (OpenTempFile(g)) return RC_FX; @@ -607,6 +870,8 @@ int DOSFAM::WriteBuffer(PGLOBAL g) } else T_Stream = Stream; + } // endif T_Stream + if (Tdbp->Mode == MODE_UPDATE) { /*******************************************************************/ /* Here we simply rewrite a record on itself. 
There are two cases */ @@ -622,14 +887,14 @@ int DOSFAM::WriteBuffer(PGLOBAL g) if (UseTemp) { /*****************************************************************/ - /* We are using a temporary file. Before writing the updated */ - /* record, we must eventually copy all the intermediate records */ - /* that have not been updated. */ + /* We are using a temporary file. */ + /* Before writing the updated record, we must eventually copy */ + /* all the intermediate records that have not been updated. */ /*****************************************************************/ if (MoveIntermediateLines(g, &moved)) return RC_FX; - Spos = curpos; // New start position + Spos = curpos; // New start position } else // Update is directly written back into the file, // with this (fast) method, record size cannot change. @@ -641,13 +906,9 @@ int DOSFAM::WriteBuffer(PGLOBAL g) } // endif mode /*********************************************************************/ - /* Prepare the write buffer. */ + /* Prepare the write the updated line. */ /*********************************************************************/ -#if defined(WIN32) - if (Bin) - crlf = "\r\n"; -#endif // WIN32 - strcat(strcpy(To_Buf, Tdbp->To_Line), crlf); + strcat(strcpy(To_Buf, Tdbp->To_Line), (Bin) ? CrLf : "\n"); /*********************************************************************/ /* Now start the writing process. */ @@ -675,7 +936,7 @@ int DOSFAM::WriteBuffer(PGLOBAL g) int DOSFAM::DeleteRecords(PGLOBAL g, int irc) { bool moved; - int curpos = ftell(Stream); + int curpos = ftell(Stream); /*********************************************************************/ /* There is an alternative here: */ @@ -684,8 +945,7 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) /* the temporary file renamed to the original file name. */ /* 2 - directly move the not deleted lines inside the original */ /* file, and at the end erase all trailing records. */ - /* This will be experimented, but method 1 must be used for Unix as */ - /* the function needed to erase trailing records is not available. */ + /* This will be experimented. */ /*********************************************************************/ if (trace) htrc( @@ -750,7 +1010,7 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) /* Last call after EOF has been reached. */ /* The UseTemp case is treated in CloseTableFile. */ /*******************************************************************/ - if (!UseTemp) { + if (!UseTemp & !Abort) { /*****************************************************************/ /* Because the chsize functionality is only accessible with a */ /* system call we must close the file and reopen it with the */ @@ -875,41 +1135,47 @@ bool DOSFAM::MoveIntermediateLines(PGLOBAL g, bool *b) /***********************************************************************/ /* Delete the old file and rename the new temp file. */ +/* If aborting just delete the new temp file. */ +/* If indexed, make the temp file from the arrays. */ /***********************************************************************/ int DOSFAM::RenameTempFile(PGLOBAL g) { char *tempname, filetemp[_MAX_PATH], filename[_MAX_PATH]; - int rc; + int rc = RC_OK; - if (!To_Fbt) + if (To_Fbt) + tempname = (char*)To_Fbt->Fname; + else return RC_INFO; // Nothing to do ??? // This loop is necessary because, in case of join, // To_File can have been open several times. 
for (PFBLOCK fb = PlgGetUser(g)->Openlist; fb; fb = fb->Next) - if (fb == To_Fb || fb == To_Fbt) + if (fb == To_Fb || (fb == To_Fbt)) rc = PlugCloseFile(g, fb); + + if (!Abort) { + PlugSetPath(filename, To_File, Tdbp->GetPath()); + strcat(PlugRemoveType(filetemp, filename), ".ttt"); + remove(filetemp); // May still be there from previous error + + if (rename(filename, filetemp)) { // Save file for security + sprintf(g->Message, MSG(RENAME_ERROR), + filename, filetemp, strerror(errno)); + longjmp(g->jumper[g->jump_level], 51); + } else if (rename(tempname, filename)) { + sprintf(g->Message, MSG(RENAME_ERROR), + tempname, filename, strerror(errno)); + rc = rename(filetemp, filename); // Restore saved file + longjmp(g->jumper[g->jump_level], 52); + } else if (remove(filetemp)) { + sprintf(g->Message, MSG(REMOVE_ERROR), + filetemp, strerror(errno)); + rc = RC_INFO; // Acceptable + } // endif's - tempname = (char*)To_Fbt->Fname; - PlugSetPath(filename, To_File, Tdbp->GetPath()); - strcat(PlugRemoveType(filetemp, filename), ".ttt"); - remove(filetemp); // May still be there from previous error - - if (rename(filename, filetemp)) { // Save file for security - sprintf(g->Message, MSG(RENAME_ERROR), - filename, filetemp, strerror(errno)); - rc = RC_FX; - } else if (rename(tempname, filename)) { - sprintf(g->Message, MSG(RENAME_ERROR), - tempname, filename, strerror(errno)); - rc = rename(filetemp, filename); // Restore saved file - rc = RC_FX; - } else if (remove(filetemp)) { - sprintf(g->Message, MSG(REMOVE_ERROR), - filetemp, strerror(errno)); - rc = RC_INFO; // Acceptable } else - rc = RC_OK; + remove(tempname); return rc; } // end of RenameTempFile @@ -917,22 +1183,24 @@ int DOSFAM::RenameTempFile(PGLOBAL g) /***********************************************************************/ /* Table file close routine for DOS access method. */ /***********************************************************************/ -void DOSFAM::CloseTableFile(PGLOBAL g) +void DOSFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc; + Abort = abort; + if (UseTemp && T_Stream) { - if (Tdbp->Mode == MODE_UPDATE) { + if (Tdbp->Mode == MODE_UPDATE && !Abort) { // Copy eventually remaining lines bool b; fseek(Stream, 0, SEEK_END); Fpos = ftell(Stream); - rc = MoveIntermediateLines(g, &b); - } // endif Mode + Abort = MoveIntermediateLines(g, &b) != RC_OK; + } // endif Abort // Delete the old file and rename the new temp file. - RenameTempFile(g); // Also close all files + rc = RenameTempFile(g); // Also close all files } else { rc = PlugCloseFile(g, To_Fb); @@ -942,6 +1210,7 @@ void DOSFAM::CloseTableFile(PGLOBAL g) } // endif UseTemp Stream = NULL; // So we can know whether table is open + T_Stream = NULL; } // end of CloseTableFile /***********************************************************************/ @@ -968,7 +1237,7 @@ BLKFAM::BLKFAM(PDOSDEF tdp) : DOSFAM(tdp) Last = tdp->GetLast(); Nrec = tdp->GetElemt(); Closing = false; - BlkPos = NULL; + BlkPos = tdp->GetTo_Pos(); CurLine = NULL; NxtLine = NULL; OutBuf = NULL; @@ -998,12 +1267,30 @@ void BLKFAM::Reset(void) /***********************************************************************/ int BLKFAM::Cardinality(PGLOBAL g) { - // Should not be called in this version - return (g) ? -1 : 0; -//return (g) ? (int)((Block - 1) * Nrec + Last) : 1; + return (g) ? ((Block > 0) ? (int)((Block - 1) * Nrec + Last) : 0) : 1; } // end of Cardinality /***********************************************************************/ +/* Use BlockTest to reduce the table estimated size. 
*/ +/***********************************************************************/ +int BLKFAM::MaxBlkSize(PGLOBAL g, int s) + { + int rc = RC_OK, savcur = CurBlk; + int size; + + // Roughly estimate the table size as the sum of blocks + // that can contain good rows + for (size = 0, CurBlk = 0; CurBlk < Block; CurBlk++) + if ((rc = Tdbp->TestBlock(g)) == RC_OK) + size += (CurBlk == Block - 1) ? Last : Nrec; + else if (rc == RC_EF) + break; + + CurBlk = savcur; + return size; + } // end of MaxBlkSize + +/***********************************************************************/ /* Allocate the line buffer. For mode Delete or when a temp file is */ /* used another big buffer has to be allocated because is it used */ /* to move or update the lines into the (temp) file. */ @@ -1059,20 +1346,8 @@ int BLKFAM::GetNextPos(void) /***********************************************************************/ bool BLKFAM::SetPos(PGLOBAL g, int pos) { - if (pos < 0) { - strcpy(g->Message, MSG(INV_REC_POS)); - return true; - } // endif recpos - - CurBlk = pos / Nrec; - CurNum = pos % Nrec; -#if defined(_DEBUG) - num_eq[(CurBlk == OldBlk) ? 1 : 0]++; -#endif - - // Indicate the table position was externally set - Placed = true; - return false; + strcpy(g->Message, "Blocked variable tables cannot be used indexed"); + return true; } // end of SetPos /***********************************************************************/ @@ -1108,8 +1383,109 @@ int BLKFAM::SkipRecord(PGLOBAL g, bool header) /***********************************************************************/ int BLKFAM::ReadBuffer(PGLOBAL g) { - strcpy(g->Message, "This AM cannot be used in this version"); - return RC_FX; + int i, n, rc = RC_OK; + + /*********************************************************************/ + /* Sequential reading when Placed is not true. */ + /*********************************************************************/ + if (Placed) { + Placed = false; + } else if (++CurNum < Rbuf) { + CurLine = NxtLine; + + // Get the position of the next line in the buffer + while (*NxtLine++ != '\n') ; + + // Set caller line buffer + n = NxtLine - CurLine - Ending; + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + goto fin; + } else if (Rbuf < Nrec && CurBlk != -1) { + return RC_EF; + } else { + /*******************************************************************/ + /* New block. */ + /*******************************************************************/ + CurNum = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*******************************************************************/ + /* Before reading a new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc + + } // endif's + + if (OldBlk == CurBlk) + goto ok; // Block is already there + + // fseek is required only in non sequential reading + if (CurBlk != OldBlk + 1) + if (fseek(Stream, BlkPos[CurBlk], SEEK_SET)) { + sprintf(g->Message, MSG(FSETPOS_ERROR), BlkPos[CurBlk]); + return RC_FX; + } // endif fseek + + // Calculate the length of block to read + BlkLen = BlkPos[CurBlk + 1] - BlkPos[CurBlk]; + + if (trace) + htrc("File position is now %d\n", ftell(Stream)); + + // Read the entire next block + n = fread(To_Buf, 1, (size_t)BlkLen, Stream); + + if (n == BlkLen) { +// ReadBlks++; + num_read++; + Rbuf = (CurBlk == Block - 1) ? 
Last : Nrec; + + ok: + rc = RC_OK; + + // Get the position of the current line + for (i = 0, CurLine = To_Buf; i < CurNum; i++) + while (*CurLine++ != '\n') ; // What about Unix ??? + + // Now get the position of the next line + for (NxtLine = CurLine; *NxtLine++ != '\n';) ; + + // Set caller line buffer + n = NxtLine - CurLine - Ending; + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + } else if (feof(Stream)) { + rc = RC_EF; + } else { +#if defined(UNIX) + sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(errno)); +#else + sprintf(g->Message, MSG(READ_ERROR), To_File, _strerror(NULL)); +#endif + + if (trace) + htrc("%s\n", g->Message); + + return RC_FX; + } // endelse + + OldBlk = CurBlk; // Last block actually read + IsRead = true; // Is read indeed + + fin: + // Store the current record file position for Delete and Update + Fpos = BlkPos[CurBlk] + CurLine - To_Buf; + return rc; } // end of ReadBuffer /***********************************************************************/ @@ -1150,7 +1526,7 @@ int BLKFAM::WriteBuffer(PGLOBAL g) /*******************************************************************/ /* Mode == MODE_UPDATE. */ /*******************************************************************/ - char *crlf; + const char *crlf; size_t len; int curpos = ftell(Stream); bool moved = true; @@ -1214,27 +1590,24 @@ int BLKFAM::WriteBuffer(PGLOBAL g) /***********************************************************************/ /* Table file close routine for DOS access method. */ /***********************************************************************/ -void BLKFAM::CloseTableFile(PGLOBAL g) +void BLKFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc, wrc = RC_OK; + Abort = abort; + if (UseTemp && T_Stream) { - if (Tdbp->GetMode() == MODE_UPDATE) { + if (Tdbp->GetMode() == MODE_UPDATE && !Abort) { // Copy eventually remaining lines bool b; fseek(Stream, 0, SEEK_END); Fpos = ftell(Stream); - rc = MoveIntermediateLines(g, &b); - } else - rc = RC_OK; - - if (rc == RC_OK) - // Delete the old file and rename the new temp file. - rc = RenameTempFile(g); // Also close all files - else - rc = PlugCloseFile(g, To_Fb); + Abort = MoveIntermediateLines(g, &b) != RC_OK; + } // endif Abort + // Delete the old file and rename the new temp file. + rc = RenameTempFile(g); // Also close all files } else { // Closing is True if last Write was in error if (Tdbp->GetMode() == MODE_INSERT && CurNum && !Closing) { diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h index c3ee96ada1a..b89d58965f9 100644 --- a/storage/connect/filamtxt.h +++ b/storage/connect/filamtxt.h @@ -1,7 +1,7 @@ /************** FilAMTxt H Declares Source Code File (.H) **************/ -/* Name: FILAMTXT.H Version 1.2 */ +/* Name: FILAMTXT.H Version 1.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* This file contains the file access method classes declares. 
*/ /***********************************************************************/ @@ -10,6 +10,7 @@ #define __FILAMTXT_H #include "block.h" +#include "array.h" typedef class TXTFAM *PTXF; typedef class DOSFAM *PDOSFAM; @@ -53,26 +54,38 @@ class DllExport TXTFAM : public BLOCK { virtual void Reset(void); virtual int GetFileLength(PGLOBAL g); virtual int Cardinality(PGLOBAL g); + virtual int MaxBlkSize(PGLOBAL g, int s); virtual bool AllocateBuffer(PGLOBAL g) {return false;} virtual void ResetBuffer(PGLOBAL g) {} virtual int GetNerr(void) {return 0;} virtual int GetRowID(void) = 0; virtual bool RecordPos(PGLOBAL g) = 0; - virtual bool SetPos(PGLOBAL g, int recpos) = 0; + virtual bool SetPos(PGLOBAL g, int recpos) = 0; virtual int SkipRecord(PGLOBAL g, bool header) = 0; virtual bool OpenTableFile(PGLOBAL g) = 0; virtual bool DeferReading(void) {IsRead = false; return true;} virtual int ReadBuffer(PGLOBAL g) = 0; virtual int WriteBuffer(PGLOBAL g) = 0; - virtual int DeleteRecords(PGLOBAL g, int irc) = 0; - virtual void CloseTableFile(PGLOBAL g) = 0; + virtual int DeleteRecords(PGLOBAL g, int irc) = 0; + virtual void CloseTableFile(PGLOBAL g, bool abort) = 0; virtual void Rewind(void) = 0; + virtual int InitDelete(PGLOBAL g, int fpos, int spos); + bool AddListValue(PGLOBAL g, int type, void *val, PPARM *top); + int StoreValues(PGLOBAL g, bool upd); + int UpdateSortedRows(PGLOBAL g); + int DeleteSortedRows(PGLOBAL g); protected: // Members PTDBDOS Tdbp; // To table class PSZ To_File; // Points to table file name PFBLOCK To_Fb; // Pointer to file block + PPARM To_Pos; // Pointer to position list + PPARM To_Sos; // Pointer to start position list + PPARM To_Upd; // Pointer to udated line list + PARRAY Posar; // Pointer to position array + PARRAY Sosar; // Pointer to start position array + PARRAY Updar; // Pointer to udated lines array bool Placed; // true if Recpos was externally set bool IsRead; // false for deferred reading bool Blocked; // true if using blocked I/O @@ -97,8 +110,12 @@ class DllExport TXTFAM : public BLOCK { int Modif; // Number of modified lines in block int Blksize; // Size of padded blocks int Ending; // Length of line end + int Fpos; // Position of last read record + int Spos; // Start position for update/delete move + int Tpos; // Target Position for delete move bool Padded; // true if fixed size blocks are padded bool Eof; // true if an EOF (0xA) character exists + bool Abort; // To abort on error char *CrLf; // End of line character(s) }; // end of class TXTFAM @@ -111,6 +128,7 @@ class DllExport DOSFAM : public TXTFAM { // Constructor DOSFAM(PDOSDEF tdp); DOSFAM(PDOSFAM txfp); + DOSFAM(PBLKFAM tdfp, PDOSDEF tdp); // Implementation virtual AMT GetAmType(void) {return TYPE_AM_DOS;} @@ -124,31 +142,30 @@ class DllExport DOSFAM : public TXTFAM { virtual void Reset(void); virtual int GetFileLength(PGLOBAL g); virtual int Cardinality(PGLOBAL g); + virtual int MaxBlkSize(PGLOBAL g, int s); virtual bool AllocateBuffer(PGLOBAL g); virtual int GetRowID(void); virtual bool RecordPos(PGLOBAL g); - virtual bool SetPos(PGLOBAL g, int recpos); + virtual bool SetPos(PGLOBAL g, int recpos); virtual int SkipRecord(PGLOBAL g, bool header); virtual bool OpenTableFile(PGLOBAL g); virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); protected: virtual bool OpenTempFile(PGLOBAL g); virtual bool 
MoveIntermediateLines(PGLOBAL g, bool *b); virtual int RenameTempFile(PGLOBAL g); + virtual int InitDelete(PGLOBAL g, int fpos, int spos); // Members FILE *Stream; // Points to Dos file structure FILE *T_Stream; // Points to temporary file structure PFBLOCK To_Fbt; // Pointer to temp file block - int Fpos; // Position of last read record - int Tpos; // Target Position for delete move - int Spos; // Start position for delete move - bool UseTemp; // True to use a temporary file in Delete + bool UseTemp; // True to use a temporary file in Upd/Del bool Bin; // True to force binary mode }; // end of class DOSFAM @@ -172,14 +189,15 @@ class DllExport BLKFAM : public DOSFAM { // Methods virtual void Reset(void); virtual int Cardinality(PGLOBAL g); + virtual int MaxBlkSize(PGLOBAL g, int s); virtual bool AllocateBuffer(PGLOBAL g); virtual int GetRowID(void); virtual bool RecordPos(PGLOBAL g); - virtual bool SetPos(PGLOBAL g, int recpos); + virtual bool SetPos(PGLOBAL g, int recpos); virtual int SkipRecord(PGLOBAL g, bool header); virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); - virtual void CloseTableFile(PGLOBAL g); + virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); protected: diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index edadc25b50b..b93adbd13dd 100755 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -1,16 +1,16 @@ /*********** File AM Vct C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMVCT */ /* ------------- */ -/* Version 2.4 */ +/* Version 2.6 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ /* This program are the VCT file access method classes. */ -/* Added in version 2: F */ +/* Added in version 2: */ /* - Split Vec format. */ /* - Partial delete. */ /* - Use of tempfile for update. */ @@ -29,7 +29,7 @@ #endif // __BORLAND__ //#include <windows.h> #include <sys/stat.h> -#else // !WIN32 F +#else // !WIN32 #if defined(UNIX) #include <sys/types.h> #include <sys/stat.h> @@ -137,6 +137,39 @@ VCTFAM::VCTFAM(PVCTFAM txfp) : FIXFAM(txfp) } // end of VCTFAM copy constructor /***********************************************************************/ +/* VCT GetFileLength: returns file size in number of bytes. */ +/* This function is here to be accessible by VECFAM and VMPFAM. */ +/***********************************************************************/ +int VCTFAM::GetFileLength(PGLOBAL g) + { + if (Split) { + // Get the total file length + char filename[_MAX_PATH]; + char *savfile = To_File; + int i, len = 0; + + // Initialize the array of file structures + if (!Colfn) { + // Prepare the column file name pattern and set Ncol + Colfn = (char*)PlugSubAlloc(g, NULL, _MAX_PATH); + Ncol = ((PVCTDEF)Tdbp->GetDef())->MakeFnPattern(Colfn); + } // endif Colfn + + To_File = filename; + + for (i = 0; i < Ncol; i++) { + sprintf(filename, Colfn, i+1); + len += TXTFAM::GetFileLength(g); + } // endfor i + + To_File = savfile; + return len; + } else + return TXTFAM::GetFileLength(g); + + } // end of GetFileLength + +/***********************************************************************/ /* Reset read/write position values. 
*/ /***********************************************************************/ void VCTFAM::Reset(void) @@ -170,7 +203,7 @@ int VCTFAM::GetBlockInfo(PGLOBAL g) if ((h = global_open(g, MSGID_CANNOT_OPEN, filename, O_RDONLY)) == -1 || !_filelength(h)) { // Consider this is a void table - Last = Nrec; + Last = Nrec; Block = 0; if (h != -1) @@ -179,7 +212,7 @@ int VCTFAM::GetBlockInfo(PGLOBAL g) return n; } else if (Header == 3) k = lseek(h, -(int)sizeof(VECHEADER), SEEK_END); - + if ((k = read(h, &vh, sizeof(vh))) != sizeof(vh)) { sprintf(g->Message, "Error reading header file %s", filename); n = -1; @@ -187,7 +220,7 @@ int VCTFAM::GetBlockInfo(PGLOBAL g) sprintf(g->Message, "MaxRec=%d doesn't match MaxBlk=%d Nrec=%d", vh.MaxRec, MaxBlk, Nrec); n = -1; - } else { + } else { Block = (vh.NumRec > 0) ? (vh.NumRec + Nrec - 1) / Nrec : 0; Last = (vh.NumRec + Nrec - 1) % Nrec + 1; } // endif s @@ -245,6 +278,26 @@ bool VCTFAM::SetBlockInfo(PGLOBAL g) } // end of SetBlockInfo /***********************************************************************/ +/* Use BlockTest to reduce the table estimated size. */ +/***********************************************************************/ +int VCTFAM::MaxBlkSize(PGLOBAL g, int s) + { + int rc = RC_OK, savcur = CurBlk; + int size; + + // Roughly estimate the table size as the sum of blocks + // that can contain good rows + for (size = 0, CurBlk = 0; CurBlk < Block; CurBlk++) + if ((rc = Tdbp->TestBlock(g)) == RC_OK) + size += (CurBlk == Block - 1) ? Last : Nrec; + else if (rc == RC_EF) + break; + + CurBlk = savcur; + return size; + } // end of MaxBlkSize + +/***********************************************************************/ /* VCT Cardinality: returns table cardinality in number of rows. */ /* This function can be called with a null argument to test the */ /* availability of Cardinality implementation (1 yes, 0 no). */ @@ -264,20 +317,20 @@ int VCTFAM::Cardinality(PGLOBAL g) PSZ savfn = To_File; int len, clen, card = -1; PCOLDEF cdp = Tdbp->GetDef()->GetCols(); - + if (!Colfn) { // Prepare the column file name pattern Colfn = (char*)PlugSubAlloc(g, NULL, _MAX_PATH); Ncol = ((VCTDEF*)Tdbp->GetDef())->MakeFnPattern(Colfn); } // endif Colfn - + // Use the first column file to calculate the cardinality clen = cdp->GetClen(); sprintf(filename, Colfn, 1); To_File = filename; - len = GetFileLength(g); + len = TXTFAM::GetFileLength(g); To_File = savfn; - + if (len >= 0) { if (!(len % clen)) card = len / clen; // Fixed length file @@ -289,7 +342,7 @@ int VCTFAM::Cardinality(PGLOBAL g) } else card = 0; - + // Set number of blocks for later use Block = (card > 0) ? (card + Nrec - 1) / Nrec : 0; Last = (card + Nrec - 1) % Nrec + 1; @@ -301,7 +354,7 @@ int VCTFAM::Cardinality(PGLOBAL g) } // endif split - return (int)((Block - 1) * Nrec + Last); + return (Block) ? ((Block - 1) * Nrec + Last) : 0; } // end of Cardinality /***********************************************************************/ @@ -310,7 +363,7 @@ int VCTFAM::Cardinality(PGLOBAL g) int VCTFAM::GetRowID(void) { return 1 + ((CurBlk < Block) ? 
CurNum + Nrec * CurBlk - : (Block - 1) * Nrec + Last); + : (Block - 1) * Nrec + Last); } // end of GetRowID /***********************************************************************/ @@ -394,7 +447,7 @@ bool VCTFAM::OpenTableFile(PGLOBAL g) return true; strcpy(opmode, "r+b"); // Required to update empty blocks - } else if (Last == Nrec) + } else if (!Block || Last == Nrec) strcpy(opmode, "ab"); else strcpy(opmode, "r+b"); // Required to update the last block @@ -429,7 +482,7 @@ bool VCTFAM::OpenTableFile(PGLOBAL g) return ResetTableSize(g, 0, Nrec); num_read = num_there = num_write = 0; - + // Allocate the table and column block buffer return AllocateBuffer(g); } // end of OpenTableFile @@ -555,9 +608,21 @@ int VCTFAM::ReadBuffer(PGLOBAL g) /*******************************************************************/ CurNum = 0; + next: if (++CurBlk == Block) return RC_EF; // End of file + /*******************************************************************/ + /* Before reading a new block, check whether block optimizing */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc + num_there++; } // endif CurNum @@ -684,7 +749,7 @@ int VCTFAM::WriteBuffer(PGLOBAL g) int VCTFAM::DeleteRecords(PGLOBAL g, int irc) { bool eof = false; - + if (trace) htrc("VCT DeleteDB: rc=%d UseTemp=%d Fpos=%d Tpos=%d Spos=%d\n", irc, UseTemp, Fpos, Tpos, Spos); @@ -694,7 +759,7 @@ int VCTFAM::DeleteRecords(PGLOBAL g, int irc) /* EOF: position Fpos at the end-of-file position. */ /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - + if (trace) htrc("Fpos placed at file end=%d\n", Fpos); @@ -843,7 +908,7 @@ bool VCTFAM::OpenTempFile(PGLOBAL g) bool VCTFAM::MoveIntermediateLines(PGLOBAL g, bool *b) { int i, dep, off; - int n; + int n; bool eof = (b) ? *b : false; size_t req, len; @@ -1010,11 +1075,13 @@ bool VCTFAM::CleanUnusedSpace(PGLOBAL g) /***********************************************************************/ /* Data Base close routine for VCT access method. */ /***********************************************************************/ -void VCTFAM::CloseTableFile(PGLOBAL g) +void VCTFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = 0, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + if (mode == MODE_INSERT) { if (Closing) wrc = RC_FX; // Last write was in error @@ -1093,10 +1160,10 @@ bool VCTFAM::ResetTableSize(PGLOBAL g, int block, int last) // Update catalog values for Block and Last PVCTDEF defp = (PVCTDEF)Tdbp->GetDef(); LPCSTR name = Tdbp->GetName(); - + defp->SetBlock(Block); defp->SetLast(Last); - + if (!defp->SetIntCatInfo("Blocks", Block) || !defp->SetIntCatInfo("Last", Last)) { sprintf(g->Message, MSG(UPDATE_ERROR), "Header"); @@ -1189,7 +1256,7 @@ bool VCTFAM::WriteBlock(PGLOBAL g, PVCTCOL colp) /* Calculate the offset and size of the block to write. */ /*********************************************************************/ if (MaxBlk) // File has Vector format - len = Headlen + len = Headlen + Nrec * (colp->Deplac * MaxBlk + colp->Clen * colp->ColBlk); else // Old VCT format len = Nrec * (colp->Deplac + Lrecl * colp->ColBlk); @@ -1212,7 +1279,7 @@ bool VCTFAM::WriteBlock(PGLOBAL g, PVCTCOL colp) (size_t)colp->Clen, n, T_Stream)) { sprintf(g->Message, MSG(WRITE_STRERROR), (UseTemp) ? 
To_Fbt->Fname : To_File, strerror(errno)); - + if (trace) htrc("Write error: %s\n", strerror(errno)); @@ -1537,9 +1604,6 @@ int VCMFAM::WriteBuffer(PGLOBAL g) /***********************************************************************/ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) { - int i; - int m, n; - if (trace) htrc("VCM DeleteDB: irc=%d tobuf=%p Tpos=%p Spos=%p\n", irc, To_Buf, Tpos, Spos); @@ -1549,59 +1613,21 @@ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) /* EOF: position Fpos at the top of map position. */ /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - + if (trace) htrc("Fpos placed at file top=%p\n", Fpos); } else // Fpos is the Deleted line position Fpos = CurBlk * Nrec + CurNum; - if (Tpos == Spos) + if (Tpos == Spos) { /*******************************************************************/ /* First line to delete. Move of eventual preceding lines is */ /* not required here, just setting of future Spos and Tpos. */ /*******************************************************************/ - Tpos = Fpos; // Spos is set below - else if (Fpos > Spos) { - /*******************************************************************/ - /* Non consecutive line to delete. Move intermediate lines. */ - /*******************************************************************/ - if (!MaxBlk) { - // Old VCT format, moving must respect block limits - char *ps, *pt; - int req, soff, toff; - - for (n = Fpos - Spos; n > 0; n -= req) { - soff = Spos % Nrec; - toff = Tpos % Nrec; - req = (size_t)MY_MIN(n, Nrec - MY_MAX(soff, toff)); - - for (i = 0; i < Ncol; i++) { - ps = Memcol[i] + (Spos / Nrec) * Blksize + soff * Clens[i]; - pt = Memcol[i] + (Tpos / Nrec) * Blksize + toff * Clens[i]; - memmove(pt, ps, req * Clens[i]); - } // endfor i - - Tpos += req; - Spos += req; - } // endfor n - - } else { - // True vector format, all is simple... - n = Fpos - Spos; - - for (i = 0; i < Ncol; i++) { - m = Clens[i]; - memmove(Memcol[i] + Tpos * m, Memcol[i] + Spos * m, n * m); - } // endfor i - - Tpos += n; - } // endif MaxBlk - - if (trace) - htrc("move %d bytes\n", n); - - } // endif n + Tpos = Spos = Fpos; + } else + (void)MoveIntermediateLines(g); if (irc == RC_OK) { Spos = Fpos + 1; // New start position @@ -1611,8 +1637,12 @@ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) } else { /*******************************************************************/ - /* Last call after EOF has been reached. Reset the Block and */ - /* Last values for TDBVCT::MakeBlockValues. */ + /* Last call after EOF has been reached. */ + /*******************************************************************/ + int i, m, n; + + /*******************************************************************/ + /* Reset the Block and Last values for TDBVCT::MakeBlockValues. */ /*******************************************************************/ Block = (Tpos > 0) ? (Tpos + Nrec - 1) / Nrec : 0; Last = (Tpos + Nrec - 1) % Nrec + 1; @@ -1681,9 +1711,58 @@ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) } // end of DeleteRecords /***********************************************************************/ +/* Move intermediate deleted or updated lines. */ +/***********************************************************************/ +bool VCMFAM::MoveIntermediateLines(PGLOBAL g, bool *b) + { + int i, m, n; + + if ((n = Fpos - Spos) > 0) { + /*******************************************************************/ + /* Non consecutive line to delete. Move intermediate lines. 
*/ + /*******************************************************************/ + if (!MaxBlk) { + // Old VCT format, moving must respect block limits + char *ps, *pt; + int req, soff, toff; + + for (; n > 0; n -= req) { + soff = Spos % Nrec; + toff = Tpos % Nrec; + req = (size_t)MY_MIN(n, Nrec - MY_MAX(soff, toff)); + + for (i = 0; i < Ncol; i++) { + ps = Memcol[i] + (Spos / Nrec) * Blksize + soff * Clens[i]; + pt = Memcol[i] + (Tpos / Nrec) * Blksize + toff * Clens[i]; + memmove(pt, ps, req * Clens[i]); + } // endfor i + + Tpos += req; + Spos += req; + } // endfor n + + } else { + // True vector format, all is simple... + for (i = 0; i < Ncol; i++) { + m = Clens[i]; + memmove(Memcol[i] + Tpos * m, Memcol[i] + Spos * m, n * m); + } // endfor i + + Tpos += n; + } // endif MaxBlk + + if (trace) + htrc("move %d bytes\n", n); + + } // endif n + + return false; + } // end of MoveIntermediate Lines + +/***********************************************************************/ /* Data Base close routine for VMP access method. */ /***********************************************************************/ -void VCMFAM::CloseTableFile(PGLOBAL g) +void VCMFAM::CloseTableFile(PGLOBAL g, bool abort) { int wrc = RC_OK; MODE mode = Tdbp->GetMode(); @@ -1710,7 +1789,7 @@ void VCMFAM::CloseTableFile(PGLOBAL g) if (wrc != RC_FX) /*rc =*/ ResetTableSize(g, Block, Last); - } else if (mode != MODE_DELETE) + } else if (mode != MODE_DELETE || Abort) PlugCloseFile(g, To_Fb); } // end of CloseTableFile @@ -1838,7 +1917,7 @@ bool VECFAM::OpenTableFile(PGLOBAL g) // Selective delete, pass thru case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); - strcpy(opmode, (UseTemp) ? "r": "r+"); + strcpy(opmode, (UseTemp) ? "rb": "r+b"); break; case MODE_INSERT: strcpy(opmode, "ab"); @@ -1897,10 +1976,13 @@ bool VECFAM::OpenTableFile(PGLOBAL g) // Check for void table or missing columns for (i = 0, cp = (PVCTCOL)Tdbp->GetColumns(); cp; cp = (PVCTCOL)cp->Next) - if (!i++) - b = !Streams[cp->Index - 1]; - else if (b != !Streams[cp->Index - 1]) - return true; + if (!cp->IsSpecial()) { + if (!i++) + b = !Streams[cp->Index - 1]; + else if (b != !Streams[cp->Index - 1]) + return true; + + } // endif Special } // endif mode @@ -2114,18 +2196,10 @@ int VECFAM::WriteBuffer(PGLOBAL g) /***********************************************************************/ /* Data Base delete line routine for split vertical access methods. */ /* Note: lines are moved directly in the files (ooops...) */ +/* Using temp file depends on the Check setting, false by default. */ /***********************************************************************/ int VECFAM::DeleteRecords(PGLOBAL g, int irc) { - /*********************************************************************/ - /* There is an alternative here: */ - /* 1 - use a temporary file in which are copied all not deleted */ - /* lines, at the end the original file will be deleted and */ - /* the temporary file renamed to the original file name. */ - /* 2 - directly move the not deleted lines inside the original */ - /* file, and at the end erase all trailing records. */ - /* This depends on the Check setting, false by default. */ - /*********************************************************************/ if (trace) htrc("VEC DeleteDB: rc=%d UseTemp=%d Fpos=%d Tpos=%d Spos=%d\n", irc, UseTemp, Fpos, Tpos, Spos); @@ -2135,14 +2209,14 @@ int VECFAM::DeleteRecords(PGLOBAL g, int irc) /* EOF: position Fpos at the end-of-file position. 
*/ /*******************************************************************/ Fpos = Cardinality(g); - + if (trace) htrc("Fpos placed at file end=%d\n", Fpos); } else // Fpos is the Deleted line position Fpos = CurBlk * Nrec + CurNum; - if (Tpos == Spos) + if (Tpos == Spos) { // First line to delete if (UseTemp) { /*****************************************************************/ @@ -2158,6 +2232,8 @@ int VECFAM::DeleteRecords(PGLOBAL g, int irc) /*****************************************************************/ Spos = Tpos = Fpos; + } // endif Tpos == Spos + /*********************************************************************/ /* Move any intermediate lines. */ /*********************************************************************/ @@ -2180,7 +2256,7 @@ int VECFAM::DeleteRecords(PGLOBAL g, int irc) if (!UseTemp) { /*****************************************************************/ /* Because the chsize functionality is only accessible with a */ - /* system call we must close the file and reopen it with the */ + /* system call we must close the files and reopen them with the */ /* open function (_fopen for MS??) this is still to be checked */ /* for compatibility with other OS's. */ /*****************************************************************/ @@ -2288,8 +2364,7 @@ bool VECFAM::MoveLines(PGLOBAL g) /***********************************************************************/ bool VECFAM::MoveIntermediateLines(PGLOBAL g, bool *bn) { - int i; - int n; + int i, n; bool b = false; size_t req, len; @@ -2366,25 +2441,30 @@ int VECFAM::RenameTempFile(PGLOBAL g) continue; tempname = (char*)T_Fbs[i]->Fname; - sprintf(filename, Colfn, i+1); - PlugSetPath(filename, filename, Tdbp->GetPath()); - strcat(PlugRemoveType(filetemp, filename), ".ttt"); - remove(filetemp); // May still be there from previous error - - if (rename(filename, filetemp)) { // Save file for security - sprintf(g->Message, MSG(RENAME_ERROR), - filename, filetemp, strerror(errno)); - rc = RC_FX; - } else if (rename(tempname, filename)) { - sprintf(g->Message, MSG(RENAME_ERROR), - tempname, filename, strerror(errno)); - rc = rename(filetemp, filename); // Restore saved file - rc = RC_FX; - } else if (remove(filetemp)) { - sprintf(g->Message, MSG(REMOVE_ERROR), - filetemp, strerror(errno)); - rc = RC_INFO; // Acceptable - } // endif's + + if (!Abort) { + sprintf(filename, Colfn, i+1); + PlugSetPath(filename, filename, Tdbp->GetPath()); + strcat(PlugRemoveType(filetemp, filename), ".ttt"); + remove(filetemp); // May still be there from previous error + + if (rename(filename, filetemp)) { // Save file for security + sprintf(g->Message, MSG(RENAME_ERROR), + filename, filetemp, strerror(errno)); + rc = RC_FX; + } else if (rename(tempname, filename)) { + sprintf(g->Message, MSG(RENAME_ERROR), + tempname, filename, strerror(errno)); + rc = rename(filetemp, filename); // Restore saved file + rc = RC_FX; + } else if (remove(filetemp)) { + sprintf(g->Message, MSG(REMOVE_ERROR), + filetemp, strerror(errno)); + rc = RC_INFO; // Acceptable + } // endif's + + } else + remove(tempname); } // endfor i @@ -2394,11 +2474,13 @@ int VECFAM::RenameTempFile(PGLOBAL g) /***********************************************************************/ /* Data Base close routine for VEC access method. 
*/ /***********************************************************************/ -void VECFAM::CloseTableFile(PGLOBAL g) +void VECFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = 0, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + if (mode == MODE_INSERT) { if (Closing) wrc = RC_FX; // Last write was in error @@ -2421,10 +2503,10 @@ void VECFAM::CloseTableFile(PGLOBAL g) longjmp(g->jumper[g->jump_level], 44); } else if (mode == MODE_UPDATE) { - if (UseTemp && !InitUpdate) { + if (UseTemp && !InitUpdate && !Abort) { // Write any intermediate lines to temp file Fpos = OldBlk * Nrec; - wrc = MoveIntermediateLines(g); + Abort = MoveIntermediateLines(g) != RC_OK; // Spos = Fpos + Nrec; } // endif UseTemp @@ -2434,20 +2516,17 @@ void VECFAM::CloseTableFile(PGLOBAL g) colp; colp = (PVCTCOL)colp->Next) colp->WriteBlock(g); - if (wrc == RC_OK && UseTemp && !InitUpdate) { + if (wrc == RC_OK && UseTemp && !InitUpdate && !Abort) { // Write any intermediate lines to temp file Fpos = (Block - 1) * Nrec + Last; - wrc = MoveIntermediateLines(g); + Abort = MoveIntermediateLines(g) != RC_OK; } // endif UseTemp } // endif's mode if (UseTemp && !InitUpdate) { // If they are errors, leave files unchanged - if (wrc == RC_OK) - rc = RenameTempFile(g); - else - longjmp(g->jumper[g->jump_level], 44); + rc = RenameTempFile(g); } else if (Streams) for (int i = 0; i < Ncol; i++) @@ -2553,7 +2632,7 @@ bool VECFAM::WriteBlock(PGLOBAL g, PVCTCOL colp) sprintf(fn, (UseTemp) ? Tempat : Colfn, colp->Index); sprintf(g->Message, MSG(WRITE_STRERROR), fn, strerror(errno)); - + if (trace) htrc("Write error: %s\n", strerror(errno)); @@ -2593,7 +2672,7 @@ VMPFAM::VMPFAM(PVMPFAM txfp) : VCMFAM(txfp) bool VMPFAM::OpenTableFile(PGLOBAL g) { int i; - bool b; + bool b = false; MODE mode = Tdbp->GetMode(); PCOLDEF cdp; PVCTCOL cp; @@ -2639,7 +2718,7 @@ bool VMPFAM::OpenTableFile(PGLOBAL g) } else { /*******************************************************************/ - /* Open the files corresponding updated columns of the query. */ + /* Open the files corresponding to updated columns of the query. */ /*******************************************************************/ for (cp = (PVCTCOL)((PTDBVCT)Tdbp)->To_SetCols; cp; cp = (PVCTCOL)cp->Next) @@ -2654,14 +2733,18 @@ bool VMPFAM::OpenTableFile(PGLOBAL g) if (MapColumnFile(g, MODE_READ, cp->Index - 1)) return true; - } // endif mode + // Check for void table or missing columns + for (i = 0, cp = (PVCTCOL)Tdbp->GetColumns(); cp; + cp = (PVCTCOL)cp->Next) + if (!cp->IsSpecial()) { + if (!i++) + b = !Memcol[cp->Index - 1]; + else if (b != !Memcol[cp->Index - 1]) + return true; - /*********************************************************************/ - /* Check for void table or missing columns */ - /*********************************************************************/ - for (b = !Memcol[0], i = 1; i < Ncol; i++) - if (b != !Memcol[i]) - return true; + } // endif Special + + } // endif mode /*********************************************************************/ /* Allocate the table and column block buffer. */ @@ -2825,20 +2908,20 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) /* EOF: position Fpos at the top of map position. 
*/ /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - + if (trace) htrc("Fpos placed at file top=%p\n", Fpos); } else // Fpos is the Deleted line position Fpos = CurBlk * Nrec + CurNum; - if (Tpos == Spos) + if (Tpos == Spos) { /*******************************************************************/ /* First line to delete. Move of eventual preceding lines is */ /* not required here, just setting of future Spos and Tpos. */ /*******************************************************************/ Tpos = Fpos; // Spos is set below - else if ((n = Fpos - Spos) > 0) { + } else if ((n = Fpos - Spos) > 0) { /*******************************************************************/ /* Non consecutive line to delete. Move intermediate lines. */ /*******************************************************************/ @@ -2868,6 +2951,12 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) /*******************************************************************/ PFBLOCK fp; + /*******************************************************************/ + /* Reset the Block and Last values for TDBVCT::MakeBlockValues. */ + /*******************************************************************/ +// Block = (Tpos > 0) ? (Tpos + Nrec - 1) / Nrec : 0; +// Last = (Tpos + Nrec - 1) % Nrec + 1; + for (i = 0; i < Ncol; i++) { fp = To_Fbs[i]; CloseMemMap(fp->Memory, (size_t)fp->Length); @@ -2918,7 +3007,7 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) /***********************************************************************/ /* Data Base close routine for VMP access method. */ /***********************************************************************/ -void VMPFAM::CloseTableFile(PGLOBAL g) +void VMPFAM::CloseTableFile(PGLOBAL g, bool abort) { if (Tdbp->GetMode() == MODE_DELETE) { // Set Block and Nrec values for TDBVCT::MakeBlockValues @@ -3011,7 +3100,7 @@ bool BGVFAM::BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req) } // endelse brc sprintf(g->Message, MSG(READ_ERROR), To_File, buf); - + if (trace) htrc("BIGREAD: %s\n", g->Message); @@ -3025,7 +3114,7 @@ bool BGVFAM::BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req) const char *fn = (h == Hfile) ? To_File : "Tempfile"; sprintf(g->Message, MSG(READ_ERROR), fn, strerror(errno)); - + if (trace) htrc("BIGREAD: nbr=%d len=%d errno=%d %s\n", nbr, len, errno, g->Message); @@ -3079,7 +3168,7 @@ bool BGVFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) const char *fn = (h == Hfile) ? To_File : "Tempfile"; sprintf(g->Message, MSG(WRITE_STRERROR), fn, strerror(errno)); - + if (trace) htrc("BIGWRITE: nbw=%d len=%d errno=%d %s\n", nbw, len, errno, g->Message); @@ -3132,17 +3221,17 @@ int BGVFAM::GetBlockInfo(PGLOBAL g) // Consider this is a void table if (trace) htrc("Void table h=%d\n", h); - - Last = Nrec; + + Last = Nrec; Block = 0; if (h != INVALID_HANDLE_VALUE) CloseFileHandle(h); return n; - } else if (Header == 3) + } else if (Header == 3) /*b = */ BigSeek(g, h, -(BIGINT)sizeof(vh), true); - + if (BigRead(g, h, &vh, sizeof(vh))) { sprintf(g->Message, "Error reading header file %s", filename); n = -1; @@ -3153,10 +3242,10 @@ int BGVFAM::GetBlockInfo(PGLOBAL g) } else { Block = (vh.NumRec > 0) ? 
(vh.NumRec + Nrec - 1) / Nrec : 0; Last = (vh.NumRec + Nrec - 1) % Nrec + 1; - + if (trace) htrc("Block=%d Last=%d\n", Block, Last); - + } // endif's CloseFileHandle(h); @@ -3297,7 +3386,7 @@ bool BGVFAM::MakeEmptyFile(PGLOBAL g, char *fn) if (h == -1) return true; - + pos = (BIGINT)n + (BIGINT)MaxBlk * (BIGINT)Blksize - (BIGINT)1; if (trace) @@ -3738,7 +3827,7 @@ int BGVFAM::DeleteRecords(PGLOBAL g, int irc) /* EOF: position Fpos at the end-of-file position. */ /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - + if (trace) htrc("Fpos placed at file end=%d\n", Fpos); @@ -4040,11 +4129,13 @@ bool BGVFAM::CleanUnusedSpace(PGLOBAL g) /***********************************************************************/ /* Data Base close routine for huge VEC access method. */ /***********************************************************************/ -void BGVFAM::CloseTableFile(PGLOBAL g) +void BGVFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = 0, wrc = RC_OK; MODE mode = Tdbp->GetMode(); + Abort = abort; + if (mode == MODE_INSERT) { if (Closing) wrc = RC_FX; // Last write was in error diff --git a/storage/connect/filamvct.h b/storage/connect/filamvct.h index 0dd1c06ad8b..be66232acfb 100644 --- a/storage/connect/filamvct.h +++ b/storage/connect/filamvct.h @@ -37,9 +37,11 @@ class DllExport VCTFAM : public FIXFAM { virtual AMT GetAmType(void) {return TYPE_AM_VCT;} virtual PTXF Duplicate(PGLOBAL g) {return (PTXF)new(g) VCTFAM(this);} + virtual int GetFileLength(PGLOBAL g); // Methods virtual void Reset(void); + virtual int MaxBlkSize(PGLOBAL g, int s); virtual bool AllocateBuffer(PGLOBAL g); virtual bool InitInsert(PGLOBAL g); virtual void ResetBuffer(PGLOBAL g) {} @@ -50,8 +52,8 @@ class DllExport VCTFAM : public FIXFAM { virtual bool OpenTableFile(PGLOBAL g); virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual int DeleteRecords(PGLOBAL g, int irc); + virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); // Specific functions @@ -59,19 +61,19 @@ class DllExport VCTFAM : public FIXFAM { virtual bool WriteBlock(PGLOBAL g, PVCTCOL colp); protected: - virtual bool MakeEmptyFile(PGLOBAL g, char *fn); + virtual bool MakeEmptyFile(PGLOBAL g, char *fn); virtual bool OpenTempFile(PGLOBAL g); virtual bool MoveLines(PGLOBAL g) {return false;} virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); virtual bool CleanUnusedSpace(PGLOBAL g); - virtual int GetBlockInfo(PGLOBAL g); - virtual bool SetBlockInfo(PGLOBAL g); + virtual int GetBlockInfo(PGLOBAL g); + virtual bool SetBlockInfo(PGLOBAL g); bool ResetTableSize(PGLOBAL g, int block, int last); // Members char *NewBlock; // To block written on Insert - char *Colfn; // Pattern for column file names (VER) - char *Tempat; // Pattern for temp file names (VER) + char *Colfn; // Pattern for column file names (VEC) + char *Tempat; // Pattern for temp file names (VEC) int *Clens; // Pointer to col size array int *Deplac; // Pointer to col start position array bool *Isnum; // Pointer to buffer type isnum result @@ -107,10 +109,12 @@ class DllExport VCMFAM : public VCTFAM { // Database routines virtual bool OpenTableFile(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual int DeleteRecords(PGLOBAL g, int irc); + virtual void CloseTableFile(PGLOBAL g, bool abort); + 
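[Editor's note — illustrative sketch, not part of the patch.] The MaxBlkSize() method declared above for VCTFAM (and added likewise to BLKFAM and ZBKFAM) repeats one estimation loop: walk the blocks, let the table's TestBlock() decide whether a block can still contain qualifying rows, and sum Nrec per accepted block, Last for the final partial block. A condensed sketch of that loop with the class members passed in as parameters (names here are hypothetical stand-ins):

    #include <functional>

    // Stand-ins for the CONNECT return codes used by TestBlock().
    enum { RC_OK = 0, RC_NF = 1, RC_EF = 2 };

    // Estimate how many rows a block-optimized scan can still return.
    static int estimate_max_size(int blocks, int nrec, int last,
                                 const std::function<int(int)> &test_block)
    {
      int size = 0;

      for (int blk = 0; blk < blocks; blk++) {
        int rc = test_block(blk);

        if (rc == RC_OK)
          size += (blk == blocks - 1) ? last : nrec;  // last block is partial
        else if (rc == RC_EF)
          break;                                      // no later block can match
      }                                               // RC_NF: just skip the block

      return size;
    }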
protected: // Specific functions + virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); virtual bool ReadBlock(PGLOBAL g, PVCTCOL colp); virtual bool WriteBlock(PGLOBAL g, PVCTCOL colp); @@ -144,15 +148,15 @@ class DllExport VECFAM : public VCTFAM { // Database routines virtual bool OpenTableFile(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual int DeleteRecords(PGLOBAL g, int irc); + virtual void CloseTableFile(PGLOBAL g, bool abort); // Specific functions virtual bool ReadBlock(PGLOBAL g, PVCTCOL colp); virtual bool WriteBlock(PGLOBAL g, PVCTCOL colp); protected: - virtual bool OpenTempFile(PGLOBAL g); + virtual bool OpenTempFile(PGLOBAL g); virtual bool MoveLines(PGLOBAL g); virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); virtual int RenameTempFile(PGLOBAL g); @@ -189,7 +193,7 @@ class DllExport VMPFAM : public VCMFAM { // Database routines virtual bool OpenTableFile(PGLOBAL g); virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual void CloseTableFile(PGLOBAL g, bool abort); protected: bool MapColumnFile(PGLOBAL g, MODE mode, int i); @@ -220,7 +224,7 @@ class BGVFAM : public VCTFAM { virtual bool OpenTableFile(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); + virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); // Specific functions diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index 0ec9e65c17e..8473011ab8b 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -1,11 +1,11 @@ /*********** File AM Zip C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMZIP */ /* ------------- */ -/* Version 1.4 */ +/* Version 1.5 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -306,10 +306,27 @@ int ZIPFAM::ReadBuffer(PGLOBAL g) /*******************************************************************/ /* Record file position in case of UPDATE or DELETE. */ /*******************************************************************/ + next: if (RecordPos(g)) return RC_FX; CurBlk = Rows++; // Update RowID + + /*******************************************************************/ + /* Check whether optimization on ROWID */ + /* can be done, as well as for join as for local filtering. */ + /*******************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + // Skip this record + if ((rc = SkipRecord(g, FALSE)) != RC_OK) + return rc; + + goto next; + } // endswitch rc + } else Placed = false; @@ -369,7 +386,7 @@ int ZIPFAM::DeleteRecords(PGLOBAL g, int irc) /***********************************************************************/ /* Data Base close routine for DOS access method. 
*/ /***********************************************************************/ -void ZIPFAM::CloseTableFile(PGLOBAL g) +void ZIPFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = gzclose(Zfile); @@ -402,7 +419,7 @@ ZBKFAM::ZBKFAM(PDOSDEF tdp) : ZIPFAM(tdp) CurLine = NULL; NxtLine = NULL; Closing = false; - BlkPos = NULL; + BlkPos = tdp->GetTo_Pos(); } // end of ZBKFAM standard constructor ZBKFAM::ZBKFAM(PZBKFAM txfp) : ZIPFAM(txfp) @@ -413,15 +430,33 @@ ZBKFAM::ZBKFAM(PZBKFAM txfp) : ZIPFAM(txfp) } // end of ZBKFAM copy constructor /***********************************************************************/ +/* Use BlockTest to reduce the table estimated size. */ +/***********************************************************************/ +int ZBKFAM::MaxBlkSize(PGLOBAL g, int s) + { + int rc = RC_OK, savcur = CurBlk; + int size; + + // Roughly estimate the table size as the sum of blocks + // that can contain good rows + for (size = 0, CurBlk = 0; CurBlk < Block; CurBlk++) + if ((rc = Tdbp->TestBlock(g)) == RC_OK) + size += (CurBlk == Block - 1) ? Last : Nrec; + else if (rc == RC_EF) + break; + + CurBlk = savcur; + return size; + } // end of MaxBlkSize + +/***********************************************************************/ /* ZBK Cardinality: returns table cardinality in number of rows. */ /* This function can be called with a null argument to test the */ /* availability of Cardinality implementation (1 yes, 0 no). */ /***********************************************************************/ int ZBKFAM::Cardinality(PGLOBAL g) { - // Should not be called in this version - return (g) ? -1 : 0; -//return (g) ? (int)((Block - 1) * Nrec + Last) : 1; + return (g) ? (int)((Block - 1) * Nrec + Last) : 1; } // end of Cardinality /***********************************************************************/ @@ -491,8 +526,80 @@ int ZBKFAM::SkipRecord(PGLOBAL g, bool header) /***********************************************************************/ int ZBKFAM::ReadBuffer(PGLOBAL g) { - strcpy(g->Message, "This AM cannot be used in this version"); - return RC_FX; + int n, skip, rc = RC_OK; + + /*********************************************************************/ + /* Sequential reading when Placed is not true. */ + /*********************************************************************/ + if (++CurNum < Rbuf) { + CurLine = NxtLine; + + // Get the position of the next line in the buffer + while (*NxtLine++ != '\n') ; + + // Set caller line buffer + n = NxtLine - CurLine - Ending; + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + return RC_OK; + } else if (Rbuf < Nrec && CurBlk != -1) + return RC_EF; + + /*********************************************************************/ + /* New block. */ + /*********************************************************************/ + CurNum = 0; + skip = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*********************************************************************/ + /* Before using the new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. 
*/ + /*********************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + skip++; + goto next; + } // endswitch rc + + if (skip) + // Skip blocks rejected by block optimization + for (int i = CurBlk - skip; i < CurBlk; i++) { + BlkLen = BlkPos[i + 1] - BlkPos[i]; + + if (gzseek(Zfile, (z_off_t)BlkLen, SEEK_CUR) < 0) + return Zerror(g); + + } // endfor i + + BlkLen = BlkPos[CurBlk + 1] - BlkPos[CurBlk]; + + if (!(n = gzread(Zfile, To_Buf, BlkLen))) { + rc = RC_EF; + } else if (n > 0) { + // Get the position of the current line + CurLine = To_Buf; + + // Now get the position of the next line + for (NxtLine = CurLine; *NxtLine++ != '\n';) ; + + // Set caller line buffer + n = NxtLine - CurLine - Ending; + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + Rbuf = (CurBlk == Block - 1) ? Last : Nrec; + IsRead = true; + rc = RC_OK; + num_read++; + } else + rc = Zerror(g); + + return rc; } // end of ReadBuffer /***********************************************************************/ @@ -562,7 +669,7 @@ int ZBKFAM::DeleteRecords(PGLOBAL g, int irc) /***********************************************************************/ /* Data Base close routine for ZBK access method. */ /***********************************************************************/ -void ZBKFAM::CloseTableFile(PGLOBAL g) +void ZBKFAM::CloseTableFile(PGLOBAL g, bool abort) { int rc = RC_OK; @@ -701,6 +808,32 @@ int ZIXFAM::ReadBuffer(PGLOBAL g) CurNum = 0; Tdbp->SetLine(To_Buf); + int skip = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*********************************************************************/ + /* Before using the new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. */ + /*********************************************************************/ + switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + skip++; + goto next; + } // endswitch rc + + if (skip) + // Skip blocks rejected by block optimization + for (int i = 0; i < skip; i++) { + if (gzseek(Zfile, (z_off_t)Buflen, SEEK_CUR) < 0) + return Zerror(g); + + } // endfor i + if (!(n = gzread(Zfile, To_Buf, Buflen))) { rc = RC_EF; } else if (n > 0) { @@ -746,4 +879,544 @@ int ZIXFAM::WriteBuffer(PGLOBAL g) return RC_OK; } // end of WriteBuffer +/* --------------------------- Class ZLBFAM -------------------------- */ + +/***********************************************************************/ +/* Constructors. */ +/***********************************************************************/ +ZLBFAM::ZLBFAM(PDOSDEF tdp) : BLKFAM(tdp) + { + Zstream = NULL; + Zbuffer = NULL; + Zlenp = NULL; + Optimized = tdp->IsOptimized(); + } // end of ZLBFAM standard constructor + +ZLBFAM::ZLBFAM(PZLBFAM txfp) : BLKFAM(txfp) + { + Zstream = txfp->Zstream; + Zbuffer = txfp->Zbuffer; + Zlenp = txfp->Zlenp; + Optimized = txfp->Optimized; + } // end of ZLBFAM (dummy?) copy constructor + +/***********************************************************************/ +/* ZLB GetFileLength: returns an estimate of what would be the */ +/* uncompressed file size in number of bytes. */ +/***********************************************************************/ +int ZLBFAM::GetFileLength(PGLOBAL g) + { + int len = (Optimized) ? 
BlkPos[Block] : BLKFAM::GetFileLength(g); + + if (len > 0) + // Estimate size reduction to a max of 5 + len *= 5; + + return len; + } // end of GetFileLength + +/***********************************************************************/ +/* Allocate the line buffer. For mode Delete a bigger buffer has to */ +/* be allocated because is it also used to move lines into the file. */ +/***********************************************************************/ +bool ZLBFAM::AllocateBuffer(PGLOBAL g) + { + char *msg; + int n, zrc; + +#if 0 + if (!Optimized && Tdbp->NeedIndexing(g)) { + strcpy(g->Message, MSG(NOP_ZLIB_INDEX)); + return TRUE; + } // endif indexing +#endif // 0 + +#if defined(NOLIB) + if (!zlib && LoadZlib()) { + sprintf(g->Message, MSG(DLL_LOAD_ERROR), GetLastError(), "zlib.dll"); + return TRUE; + } // endif zlib +#endif + + BLKFAM::AllocateBuffer(g); +//Buflen = Nrec * (Lrecl + 2); +//Rbuf = Nrec; + + // Allocate the compressed buffer + n = Buflen + 16; // ????????????????????????????????? + Zlenp = (int*)PlugSubAlloc(g, NULL, n); + Zbuffer = (Byte*)(Zlenp + 1); + + // Allocate and initialize the Z stream + Zstream = (z_streamp)PlugSubAlloc(g, NULL, sizeof(z_stream)); + Zstream->zalloc = (alloc_func)0; + Zstream->zfree = (free_func)0; + Zstream->opaque = (voidpf)0; + Zstream->next_in = NULL; + Zstream->avail_in = 0; + + if (Tdbp->GetMode() == MODE_READ) { + msg = "inflateInit"; + zrc = inflateInit(Zstream); + } else { + msg = "deflateInit"; + zrc = deflateInit(Zstream, Z_DEFAULT_COMPRESSION); + } // endif Mode + + if (zrc != Z_OK) { + if (Zstream->msg) + sprintf(g->Message, "%s error: %s", msg, Zstream->msg); + else + sprintf(g->Message, "%s error: %d", msg, zrc); + + return TRUE; + } // endif zrc + + if (Tdbp->GetMode() == MODE_INSERT) { + // Write the file header block + if (Last == Nrec) { + CurBlk = Block; + CurNum = 0; + + if (!GetFileLength(g)) { + // Write the zlib header as an extra block + strcpy(To_Buf, "PlugDB"); + BlkLen = strlen("PlugDB") + 1; + + if (WriteCompressedBuffer(g)) + return TRUE; + + } // endif void file + + } else { + // In mode insert, if Last != Nrec, last block must be updated + CurBlk = Block - 1; + CurNum = Last; + + strcpy(g->Message, MSG(NO_PAR_BLK_INS)); + return TRUE; + } // endif Last + + } else { // MODE_READ + // First thing to do is to read the header block + void *rdbuf; + + if (Optimized) { + BlkLen = BlkPos[0]; + rdbuf = Zlenp; + } else { + // Get the stored length from the file itself + if (fread(Zlenp, sizeof(int), 1, Stream) != 1) + return FALSE; // Empty file + + BlkLen = *Zlenp; + rdbuf = Zbuffer; + } // endif Optimized + + switch (ReadCompressedBuffer(g, rdbuf)) { + case RC_EF: + return FALSE; + case RC_FX: +#if defined(UNIX) + sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(errno)); +#else + sprintf(g->Message, MSG(READ_ERROR), To_File, _strerror(NULL)); +#endif + case RC_NF: + return TRUE; + } // endswitch + + // Some old tables can have PlugDB in their header + if (strcmp(To_Buf, "PlugDB")) { + sprintf(g->Message, MSG(BAD_HEADER), Tdbp->GetFile(g)); + return TRUE; + } // endif strcmp + + } // endif Mode + + return FALSE; + } // end of AllocateBuffer + +/***********************************************************************/ +/* GetPos: return the position of last read record. */ +/***********************************************************************/ +int ZLBFAM::GetPos(void) + { + return (Optimized) ? 
(CurNum + Nrec * CurBlk) : Fpos; + } // end of GetPos + +/***********************************************************************/ +/* GetNextPos: should not be called for this class. */ +/***********************************************************************/ +int ZLBFAM::GetNextPos(void) + { + if (Optimized) { + assert(FALSE); + return 0; + } else + return ftell(Stream); + + } // end of GetNextPos + +/***********************************************************************/ +/* SetPos: Replace the table at the specified position. */ +/***********************************************************************/ +bool ZLBFAM::SetPos(PGLOBAL g, int pos) + { + sprintf(g->Message, MSG(NO_SETPOS_YET), "ZIP"); + return true; +#if 0 // All this must be checked + if (pos < 0) { + strcpy(g->Message, MSG(INV_REC_POS)); + return true; + } // endif recpos + + CurBlk = pos / Nrec; + CurNum = pos % Nrec; +#if defined(_DEBUG) + num_eq[(CurBlk == OldBlk) ? 1 : 0]++; +#endif + + // Indicate the table position was externally set + Placed = true; + return false; +#endif // 0 + } // end of SetPos + +/***********************************************************************/ +/* ReadBuffer: Read one line for a text file. */ +/***********************************************************************/ +int ZLBFAM::ReadBuffer(PGLOBAL g) + { + int n; + void *rdbuf; + + /*********************************************************************/ + /* Sequential reading when Placed is not true. */ + /*********************************************************************/ + if (Placed) { + Placed = FALSE; + } else if (++CurNum < Rbuf) { + CurLine = NxtLine; + + // Get the position of the next line in the buffer + if (Tdbp->GetFtype() == RECFM_VAR) + while (*NxtLine++ != '\n') ; + else + NxtLine += Lrecl; + + // Set caller line buffer + n = NxtLine - CurLine - ((Tdbp->GetFtype() == RECFM_BIN) ? 0 : Ending); + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + return RC_OK; + } else if (Rbuf < Nrec && CurBlk != -1) { + CurNum--; // To have a correct Last value when optimizing + return RC_EF; + } else { + /*******************************************************************/ + /* New block. */ + /*******************************************************************/ + CurNum = 0; + + next: + if (++CurBlk >= Block) + return RC_EF; + + /*******************************************************************/ + /* Before reading a new block, check whether block optimization */ + /* can be done, as well as for join as for local filtering. 
*/ + /*******************************************************************/ + if (Optimized) switch (Tdbp->TestBlock(g)) { + case RC_EF: + return RC_EF; + case RC_NF: + goto next; + } // endswitch rc + + } // endif's + + if (OldBlk == CurBlk) + goto ok; // Block is already there + + if (Optimized) { + // Store the position of next block + Fpos = BlkPos[CurBlk]; + + // fseek is required only in non sequential reading + if (CurBlk != OldBlk + 1) + if (fseek(Stream, Fpos, SEEK_SET)) { + sprintf(g->Message, MSG(FSETPOS_ERROR), Fpos); + return RC_FX; + } // endif fseek + + // Calculate the length of block to read + BlkLen = BlkPos[CurBlk + 1] - Fpos; + rdbuf = Zlenp; + } else { // !Optimized + if (CurBlk != OldBlk + 1) { + strcpy(g->Message, MSG(INV_RAND_ACC)); + return RC_FX; + } else + Fpos = ftell(Stream); // Used when optimizing + + // Get the stored length from the file itself + if (fread(Zlenp, sizeof(int), 1, Stream) != 1) { + if (feof(Stream)) + return RC_EF; + + goto err; + } // endif fread + + BlkLen = *Zlenp; + rdbuf = Zbuffer; + } // endif Optimized + + // Read the next block + switch (ReadCompressedBuffer(g, rdbuf)) { + case RC_FX: goto err; + case RC_NF: return RC_FX; + case RC_EF: return RC_EF; + default: Rbuf = (CurBlk == Block - 1) ? Last : Nrec; + } // endswitch ReadCompressedBuffer + + ok: + if (Tdbp->GetFtype() == RECFM_VAR) { + int i; + + // Get the position of the current line + for (i = 0, CurLine = To_Buf; i < CurNum; i++) + while (*CurLine++ != '\n') ; // What about Unix ??? + + // Now get the position of the next line + for (NxtLine = CurLine; *NxtLine++ != '\n';) ; + + // Set caller line buffer + n = NxtLine - CurLine - Ending; + } else { + CurLine = To_Buf + CurNum * Lrecl; + NxtLine = CurLine + Lrecl; + n = Lrecl - ((Tdbp->GetFtype() == RECFM_BIN) ? 0 : Ending); + } // endif Ftype + + memcpy(Tdbp->GetLine(), CurLine, n); + Tdbp->GetLine()[n] = '\0'; + + OldBlk = CurBlk; // Last block actually read + IsRead = TRUE; // Is read indeed + return RC_OK; + + err: +#if defined(UNIX) + sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(errno)); +#else + sprintf(g->Message, MSG(READ_ERROR), To_File, _strerror(NULL)); +#endif + return RC_FX; + } // end of ReadBuffer + +/***********************************************************************/ +/* Read and decompress a block from the stream. */ +/***********************************************************************/ +int ZLBFAM::ReadCompressedBuffer(PGLOBAL g, void *rdbuf) + { + if (fread(rdbuf, 1, (size_t)BlkLen, Stream) == (unsigned)BlkLen) { + int zrc; + + num_read++; + + if (Optimized && BlkLen != signed(*Zlenp + sizeof(int))) { + sprintf(g->Message, MSG(BAD_BLK_SIZE), CurBlk + 1); + return RC_NF; + } // endif BlkLen + + // HERE WE MUST INFLATE THE BLOCK + Zstream->next_in = Zbuffer; + Zstream->avail_in = (uInt)(*Zlenp); + Zstream->next_out = (Byte*)To_Buf; + Zstream->avail_out = Buflen; + zrc = inflate(Zstream, Z_SYNC_FLUSH); + + if (zrc != Z_OK) { + if (Zstream->msg) + sprintf(g->Message, MSG(FUNC_ERR_S), "inflate", Zstream->msg); + else + sprintf(g->Message, MSG(FUNCTION_ERROR), "inflate", (int)zrc); + + return RC_NF; + } // endif zrc + + } else if (feof(Stream)) { + return RC_EF; + } else + return RC_FX; + + return RC_OK; + } // end of ReadCompressedBuffer + +/***********************************************************************/ +/* WriteBuffer: File write routine for DOS access method. */ +/* Update is directly written back into the file, */ +/* with this (fast) method, record size cannot change. 
*/ +/***********************************************************************/ +int ZLBFAM::WriteBuffer(PGLOBAL g) + { + assert (Tdbp->GetMode() == MODE_INSERT); + + /*********************************************************************/ + /* Prepare the write buffer. */ + /*********************************************************************/ + if (!Closing) { + if (Tdbp->GetFtype() == RECFM_BIN) + memcpy(CurLine, Tdbp->GetLine(), Lrecl); + else + strcat(strcpy(CurLine, Tdbp->GetLine()), CrLf); + +#if defined(_DEBUG) + if (Tdbp->GetFtype() == RECFM_FIX && + (signed)strlen(CurLine) != Lrecl + (signed)strlen(CrLf)) { + strcpy(g->Message, MSG(BAD_LINE_LEN)); + Closing = TRUE; + return RC_FX; + } // endif Lrecl +#endif // _DEBUG + } // endif Closing + + /*********************************************************************/ + /* In Insert mode, blocs are added sequentialy to the file end. */ + /*********************************************************************/ + if (++CurNum != Rbuf) { + if (Tdbp->GetFtype() == RECFM_VAR) + CurLine += strlen(CurLine); + else + CurLine += Lrecl; + + return RC_OK; // We write only full blocks + } // endif CurNum + + // HERE WE MUST DEFLATE THE BLOCK + if (Tdbp->GetFtype() == RECFM_VAR) + NxtLine = CurLine + strlen(CurLine); + else + NxtLine = CurLine + Lrecl; + + BlkLen = NxtLine - To_Buf; + + if (WriteCompressedBuffer(g)) { + Closing = TRUE; // To tell CloseDB about a Write error + return RC_FX; + } // endif WriteCompressedBuffer + + CurBlk++; + CurNum = 0; + CurLine = To_Buf; + return RC_OK; + } // end of WriteBuffer + +/***********************************************************************/ +/* Compress the buffer and write the deflated output to stream. */ +/***********************************************************************/ +bool ZLBFAM::WriteCompressedBuffer(PGLOBAL g) + { + int zrc; + + Zstream->next_in = (Byte*)To_Buf; + Zstream->avail_in = (uInt)BlkLen; + Zstream->next_out = Zbuffer; + Zstream->avail_out = Buflen + 16; + Zstream->total_out = 0; + zrc = deflate(Zstream, Z_FULL_FLUSH); + + if (zrc != Z_OK) { + if (Zstream->msg) + sprintf(g->Message, MSG(FUNC_ERR_S), "deflate", Zstream->msg); + else + sprintf(g->Message, MSG(FUNCTION_ERROR), "deflate", (int)zrc); + + return TRUE; + } else + *Zlenp = Zstream->total_out; + + // Now start the writing process. + BlkLen = *Zlenp + sizeof(int); + + if (fwrite(Zlenp, 1, BlkLen, Stream) != (size_t)BlkLen) { + sprintf(g->Message, MSG(FWRITE_ERROR), strerror(errno)); + return TRUE; + } // endif size + + return FALSE; + } // end of WriteCompressedBuffer + +/***********************************************************************/ +/* Table file close routine for DOS access method. 
*/ +/***********************************************************************/ +void ZLBFAM::CloseTableFile(PGLOBAL g, bool abort) + { + int rc = RC_OK; + + if (Tdbp->GetMode() == MODE_INSERT) { + LPCSTR name = Tdbp->GetName(); + PDOSDEF defp = (PDOSDEF)Tdbp->GetDef(); + + // Closing is True if last Write was in error + if (CurNum && !Closing) { + // Some more inserted lines remain to be written + Last = (Nrec - Rbuf) + CurNum; + Block = CurBlk + 1; + Rbuf = CurNum--; + Closing = TRUE; + rc = WriteBuffer(g); + } else if (Rbuf == Nrec) { + Last = Nrec; + Block = CurBlk; + } // endif CurNum + + if (rc != RC_FX) { + defp->SetBlock(Block); + defp->SetLast(Last); + defp->SetIntCatInfo("Blocks", Block); + defp->SetIntCatInfo("Last", Last); + } // endif + + fclose(Stream); + } else + rc = fclose(Stream); + + if (trace) + htrc("ZLB CloseTableFile: closing %s mode=%d rc=%d\n", + To_File, Tdbp->GetMode(), rc); + + Stream = NULL; // So we can know whether table is open + To_Fb->Count = 0; // Avoid double closing by PlugCloseAll + + if (Tdbp->GetMode() == MODE_READ) + rc = inflateEnd(Zstream); + else + rc = deflateEnd(Zstream); + + } // end of CloseTableFile + +/***********************************************************************/ +/* Rewind routine for ZLIB access method. */ +/***********************************************************************/ +void ZLBFAM::Rewind(void) + { + // We must be positioned after the header block + if (CurBlk >= 0) { // Nothing to do if no block read yet + if (!Optimized) { // If optimized, fseek will be done in ReadBuffer + rewind(Stream); + fread(Zlenp, sizeof(int), 1, Stream); + fseek(Stream, *Zlenp + sizeof(int), SEEK_SET); + OldBlk = -1; + } // endif Optimized + + CurBlk = -1; + CurNum = Rbuf; + } // endif CurBlk + +//OldBlk = -1; +//Rbuf = 0; commented out in case we reuse last read block + } // end of Rewind + /* ------------------------ End of ZipFam ---------------------------- */ diff --git a/storage/connect/filamzip.h b/storage/connect/filamzip.h index 37cc130311c..6d27cb67e81 100644 --- a/storage/connect/filamzip.h +++ b/storage/connect/filamzip.h @@ -1,169 +1,170 @@ -/************** FilAmZip H Declares Source Code File (.H) **************/ -/* Name: FILAMZIP.H Version 1.1 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2012 */ -/* */ -/* This file contains the GZIP access method classes declares. */ -/***********************************************************************/ -#ifndef __FILAMZIP_H -#define __FILAMZIP_H - -#include "zlib.h" - -typedef class ZIPFAM *PZIPFAM; -typedef class ZBKFAM *PZBKFAM; -typedef class ZIXFAM *PZIXFAM; -typedef class ZLBFAM *PZLBFAM; - -/***********************************************************************/ -/* This is the access method class declaration for not optimized */ -/* variable record length files compressed using the gzip library */ -/* functions. File is accessed record by record (row). */ -/***********************************************************************/ -class DllExport ZIPFAM : public TXTFAM { -// friend class DOSCOL; - public: - // Constructor - ZIPFAM(PDOSDEF tdp) : TXTFAM(tdp) {Zfile = NULL; Zpos = 0;} - ZIPFAM(PZIPFAM txfp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_ZIP;} - virtual int GetPos(void); - virtual int GetNextPos(void); - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) ZIPFAM(this);} - - // Methods - virtual void Reset(void); - virtual int GetFileLength(PGLOBAL g); - virtual int Cardinality(PGLOBAL g) {return (g) ? 
-1 : 0;} - virtual bool AllocateBuffer(PGLOBAL g); - virtual int GetRowID(void); - virtual bool RecordPos(PGLOBAL g); +/************** FilAmZip H Declares Source Code File (.H) **************/
+/* Name: FILAMZIP.H Version 1.2 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */
+/* */
+/* This file contains the GZIP access method class declarations. */
+/***********************************************************************/
+#ifndef __FILAMZIP_H
+#define __FILAMZIP_H
+
+#include "zlib.h"
+
+typedef class ZIPFAM *PZIPFAM;
+typedef class ZBKFAM *PZBKFAM;
+typedef class ZIXFAM *PZIXFAM;
+typedef class ZLBFAM *PZLBFAM;
+
+/***********************************************************************/
+/* This is the access method class declaration for non-optimized */
+/* variable record length files compressed using the gzip library */
+/* functions. The file is accessed record by record (row). */
+/***********************************************************************/
+class DllExport ZIPFAM : public TXTFAM {
+// friend class DOSCOL;
+ public:
+ // Constructor
+ ZIPFAM(PDOSDEF tdp) : TXTFAM(tdp) {Zfile = NULL; Zpos = 0;}
+ ZIPFAM(PZIPFAM txfp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_ZIP;}
+ virtual int GetPos(void);
+ virtual int GetNextPos(void);
+ virtual PTXF Duplicate(PGLOBAL g)
+ {return (PTXF)new(g) ZIPFAM(this);}
+
+ // Methods
+ virtual void Reset(void);
+ virtual int GetFileLength(PGLOBAL g);
+ virtual int Cardinality(PGLOBAL g) {return (g) ? -1 : 0;}
+ virtual int MaxBlkSize(PGLOBAL g, int s) {return s;}
+ virtual bool AllocateBuffer(PGLOBAL g);
+ virtual int GetRowID(void);
+ virtual bool RecordPos(PGLOBAL g);
+ virtual bool SetPos(PGLOBAL g, int recpos);
+ virtual int SkipRecord(PGLOBAL g, bool header);
+ virtual bool OpenTableFile(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+ virtual int DeleteRecords(PGLOBAL g, int irc);
+ virtual void CloseTableFile(PGLOBAL g, bool abort);
+ virtual void Rewind(void);
+
+ protected:
+ int Zerror(PGLOBAL g); // GZ error function
+
+ // Members
+ gzFile Zfile; // Points to GZ file structure
+ z_off_t Zpos; // Uncompressed file position
+ }; // end of class ZIPFAM
+
+/***********************************************************************/
+/* This is the access method class declaration for optimized variable */
+/* record length files compressed using the gzip library functions. */
+/* The file is accessed by block (requires an opt file). */
+/***********************************************************************/
+class DllExport ZBKFAM : public ZIPFAM {
+ public:
+ // Constructor
+ ZBKFAM(PDOSDEF tdp);
+ ZBKFAM(PZBKFAM txfp);
+
+ // Implementation
+ virtual int GetPos(void);
+ virtual int GetNextPos(void) {return 0;}
+ virtual PTXF Duplicate(PGLOBAL g)
+ {return (PTXF)new(g) ZBKFAM(this);}
+
+ // Methods
+ virtual int Cardinality(PGLOBAL g);
+ virtual int MaxBlkSize(PGLOBAL g, int s);
+ virtual bool AllocateBuffer(PGLOBAL g);
+ virtual int GetRowID(void);
+ virtual bool RecordPos(PGLOBAL g);
+ virtual int SkipRecord(PGLOBAL g, bool header);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+ virtual int DeleteRecords(PGLOBAL g, int irc);
+ virtual void CloseTableFile(PGLOBAL g, bool abort);
+ virtual void Rewind(void);
+
+ protected:
+ // Members
+ char *CurLine; // Position of current line in buffer
+ char *NxtLine; // Position of Next line in buffer
+ bool Closing; // True when closing on Insert
+ }; // end of class ZBKFAM
+
+/***********************************************************************/
+/* This is the access method class declaration for fixed record */
+/* length files compressed using the gzip library functions. */
+/* The file is always accessed by block. */
+/***********************************************************************/
+class DllExport ZIXFAM : public ZBKFAM {
+ public:
+ // Constructor
+ ZIXFAM(PDOSDEF tdp);
+ ZIXFAM(PZIXFAM txfp) : ZBKFAM(txfp) {}
+
+ // Implementation
+ virtual int GetNextPos(void) {return 0;}
+ virtual PTXF Duplicate(PGLOBAL g)
+ {return (PTXF)new(g) ZIXFAM(this);}
+
+ // Methods
+ virtual int Cardinality(PGLOBAL g);
+ virtual bool AllocateBuffer(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+
+ protected:
+ // No additional Members
+ }; // end of class ZIXFAM
+
+/***********************************************************************/
+/* This is the DOS/UNIX Access Method class declaration for PlugDB */
+/* fixed/variable files compressed using the zlib library functions. */
+/* Physically these are written and read using the same technique */
+/* as blocked variable files, except that the content of each block */
+/* is compressed with the zlib deflate function. The purpose of this */
+/* specific format is to provide a fast mechanism for direct access */
+/* to records, so block optimization and direct access (joins) are */
+/* both possible. Note that the block length is written ahead of */
+/* each block, enabling reads when no optimization file is available. */
+/***********************************************************************/
+class DllExport ZLBFAM : public BLKFAM {
+ public:
+ // Constructor
+ ZLBFAM(PDOSDEF tdp);
+ ZLBFAM(PZLBFAM txfp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_ZLIB;}
+ virtual int GetPos(void);
+ virtual int GetNextPos(void);
+ virtual PTXF Duplicate(PGLOBAL g)
+ {return (PTXF)new(g) ZLBFAM(this);}
+ inline void SetOptimized(bool b) {Optimized = b;}
+
+ // Methods
+ virtual int GetFileLength(PGLOBAL g);
virtual bool SetPos(PGLOBAL g, int recpos); - virtual int SkipRecord(PGLOBAL g, bool header); - virtual bool OpenTableFile(PGLOBAL g); - virtual int ReadBuffer(PGLOBAL g); - virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); - virtual void Rewind(void); - - protected: - int Zerror(PGLOBAL g); // GZ error function - - // Members - gzFile Zfile; // Points to GZ file structure - z_off_t Zpos; // Uncompressed file position - }; // end of class ZIPFAM - -/***********************************************************************/ -/* This is the access method class declaration for optimized variable */ -/* record length files compressed using the gzip library functions. */ -/* The File is accessed by block (requires an opt file). */ -/***********************************************************************/ -class DllExport ZBKFAM : public ZIPFAM { - public: - // Constructor - ZBKFAM(PDOSDEF tdp); - ZBKFAM(PZBKFAM txfp); - - // Implementation - virtual int GetPos(void); - virtual int GetNextPos(void) {return 0;} - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) ZBKFAM(this);} - - // Methods - virtual int Cardinality(PGLOBAL g); - virtual bool AllocateBuffer(PGLOBAL g); - virtual int GetRowID(void); - virtual bool RecordPos(PGLOBAL g); - virtual int SkipRecord(PGLOBAL g, bool header); - virtual int ReadBuffer(PGLOBAL g); - virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g); - virtual void Rewind(void); - - protected: - // Members - char *CurLine; // Position of current line in buffer - char *NxtLine; // Position of Next line in buffer - bool Closing; // True when closing on Insert - }; // end of class ZBKFAM - -/***********************************************************************/ -/* This is the access method class declaration for fixed record */ -/* length files compressed using the gzip library functions. */ -/* The file is always accessed by block. */ -/***********************************************************************/ -class DllExport ZIXFAM : public ZBKFAM { - public: - // Constructor - ZIXFAM(PDOSDEF tdp); - ZIXFAM(PZIXFAM txfp) : ZBKFAM(txfp) {} - - // Implementation - virtual int GetNextPos(void) {return 0;} - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) ZIXFAM(this);} - - // Methods - virtual int Cardinality(PGLOBAL g); - virtual bool AllocateBuffer(PGLOBAL g); - virtual int ReadBuffer(PGLOBAL g); - virtual int WriteBuffer(PGLOBAL g); - - protected: - // No additional Members - }; // end of class ZIXFAM - -#if 0 -/***********************************************************************/ -/* This is the DOS/UNIX Access Method class declaration for PlugDB */ -/* fixed/variable files compressed using the zlib library functions. */ -/* Physically these are written and read using the same technique */ -/* than blocked variable files, only the contain of each block is */ -/* compressed using the deflate zlib function. The purpose of this */ -/* specific format is to have a fast mechanism for direct access of */ -/* records so blocked optimization is fast and direct access (joins) */ -/* is allowed. Note that the block length is written ahead of each */ -/* block to enable reading when optimization file is not available. 
*/ -/***********************************************************************/ -class DllExport ZLBFAM : public BLKFAM { - public: - // Constructor - ZLBFAM(PDOSDEF tdp); - ZLBFAM(PZLBFAM txfp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_ZLIB;} - virtual int GetPos(void); - virtual int GetNextPos(void); - virtual PTXF Duplicate(PGLOBAL g) - {return (PTXF)new(g) ZLBFAM(this);} - inline void SetOptimized(bool b) {Optimized = b;} - - // Methods - virtual int GetFileLength(PGLOBAL g); - virtual bool AllocateBuffer(PGLOBAL g); - virtual int ReadBuffer(PGLOBAL g); - virtual int WriteBuffer(PGLOBAL g); - virtual void CloseTableFile(PGLOBAL g); - virtual void Rewind(void); - - protected: - bool WriteCompressedBuffer(PGLOBAL g); - int ReadCompressedBuffer(PGLOBAL g, void *rdbuf); - - // Members - z_streamp Zstream; // Compression/decompression stream - Byte *Zbuffer; // Compressed block buffer - int *Zlenp; // Pointer to block length - bool Optimized; // true when opt file is available - }; // end of class ZLBFAM -#endif // 0 - -#endif // __FILAMZIP_H + virtual bool AllocateBuffer(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+ virtual void CloseTableFile(PGLOBAL g, bool abort);
+ virtual void Rewind(void);
+
+ protected:
+ bool WriteCompressedBuffer(PGLOBAL g);
+ int ReadCompressedBuffer(PGLOBAL g, void *rdbuf);
+
+ // Members
+ z_streamp Zstream; // Compression/decompression stream
+ Byte *Zbuffer; // Compressed block buffer
+ int *Zlenp; // Pointer to block length
+ bool Optimized; // true when opt file is available
+ }; // end of class ZLBFAM
+
+#endif // __FILAMZIP_H
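The ZLBFAM comment above describes the on-disk layout: every deflated block is preceded by a 4-byte length word, so a reader can walk the file block by block even when the optimization file is missing. The sketch below is a minimal, self-contained illustration of reading one such length-prefixed block with zlib; it is not the CONNECT implementation. It assumes each block is an independent zlib stream and that the caller supplies a buffer large enough for the uncompressed block; the helper name ReadPrefixedBlock is hypothetical (the real ZLBFAM code instead keeps one persistent z_stream across all blocks, which is why it deflates with Z_FULL_FLUSH and inflates with Z_SYNC_FLUSH).

#include <cstdio>
#include <cstring>
#include <vector>
#include <zlib.h>

// Read one block stored as [int compressed-length][compressed bytes] and
// inflate it into the caller's buffer. Illustrative helper only; assumes
// the block is a standalone zlib stream and 'out'/'outlen' describe a
// buffer sized for the uncompressed data.
static bool ReadPrefixedBlock(FILE *f, Bytef *out, uInt outlen)
{
  int blklen = 0;                         // compressed length written ahead of the block
  if (fread(&blklen, sizeof(int), 1, f) != 1 || blklen <= 0)
    return false;                         // EOF, read error, or bad length

  std::vector<Bytef> zbuf((size_t)blklen);
  if (fread(zbuf.data(), 1, zbuf.size(), f) != zbuf.size())
    return false;                         // truncated block

  z_stream zs;
  memset(&zs, 0, sizeof(zs));             // zalloc/zfree/opaque default to Z_NULL
  if (inflateInit(&zs) != Z_OK)
    return false;

  zs.next_in   = zbuf.data();
  zs.avail_in  = (uInt)zbuf.size();
  zs.next_out  = out;
  zs.avail_out = outlen;

  int zrc = inflate(&zs, Z_SYNC_FLUSH);   // one-shot inflate of this block
  inflateEnd(&zs);
  return zrc == Z_OK || zrc == Z_STREAM_END;
}

A practical consequence of the length prefix is that a sequential scan can skip a block without inflating it, simply by seeking forward by the stored length, which is what allows the table to be read even when the block-position optimization file is absent.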
diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp new file mode 100644 index 00000000000..9212432cdde --- /dev/null +++ b/storage/connect/filter.cpp @@ -0,0 +1,1733 @@ +/***************** Filter C++ Class Filter Code (.CPP) *****************/ +/* Name: FILTER.CPP Version 3.9 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */ +/* */ +/* This file contains the class FILTER function code. */ +/***********************************************************************/ + +/***********************************************************************/ +/* Include relevant MariaDB header file. */ +/***********************************************************************/ +#include "my_global.h" +#include "sql_class.h" +//#include "sql_time.h" + +#if defined(WIN32) +//#include <windows.h> +#else // !WIN32 +#include <string.h> +#include <sys/types.h> +#include <sys/stat.h> +#endif // !WIN32 + + +/***********************************************************************/ +/* Include required application header files */ +/* global.h is header containing all global Plug declarations. */ +/* plgdbsem.h is header containing the DB applic. declarations. */ +/* xobject.h is header containing the XOBJECT derived classes dcls. */ +/***********************************************************************/ +#include "global.h" +#include "plgdbsem.h" +#include "tabcol.h" +#include "xtable.h" +#include "array.h" +//#include "subquery.h" +#include "filter.h" +//#include "token.h" +//#include "select.h" +#include "xindex.h" + +/***********************************************************************/ +/* Static variables. */ +/***********************************************************************/ +extern "C" int trace; + +/***********************************************************************/ +/* Utility routines. */ +/***********************************************************************/ +void PlugConvertConstant(PGLOBAL, void* &, short&); +//void *PlugCopyDB(PTABS, void*, INT); +void NewPointer(PTABS, void*, void*); +void AddPointer(PTABS, void*); + +static PPARM MakeParm(PGLOBAL g, PXOB xp) + { + PPARM pp = (PPARM)PlugSubAlloc(g, NULL, sizeof(PARM)); + pp->Type = TYPE_XOBJECT; + pp->Value = xp; + pp->Domain = 0; + pp->Next = NULL; + return pp; + } // end of MakeParm + +/***********************************************************************/ +/* Routines called externally by FILTER function. */ +/***********************************************************************/ +bool PlugEvalLike(PGLOBAL, LPCSTR, LPCSTR, bool); +//bool ReadSubQuery(PGLOBAL, PSUBQ); +//PSUBQ OpenSubQuery(PGLOBAL, PSQL); +//void PlugCloseDB(PGLOBAL, PSQL); +BYTE OpBmp(PGLOBAL g, OPVAL opc); +PARRAY MakeValueArray(PGLOBAL g, PPARM pp); + +/***********************************************************************/ +/* Routines called externally by CondFilter. 
*/ +/***********************************************************************/ +PFIL MakeFilter(PGLOBAL g, PFIL fp1, OPVAL vop, PFIL fp2) + { + PFIL filp = new(g) FILTER(g, vop); + + filp->Arg(0) = fp1; + filp->Arg(1) = fp2; + + if (filp->Convert(g, false)) + return NULL; + + return filp; + } // end of MakeFilter + +PFIL MakeFilter(PGLOBAL g, PCOL *colp, POPER pop, PPARM pfirst, bool neg) +{ + PPARM parmp, pp[2]; + PFIL fp1, fp2, filp = NULL; + + if (pop->Val == OP_IN) { + PARRAY par = MakeValueArray(g, pfirst); + + if (par) { + pp[0] = MakeParm(g, colp[0]); + pp[1] = MakeParm(g, par); + fp1 = new(g) FILTER(g, pop, pp); + + if (fp1->Convert(g, false)) + return NULL; + + filp = (neg) ? MakeFilter(g, fp1, OP_NOT, NULL) : fp1; + } // endif par + + } else if (pop->Val == OP_XX) { // BETWEEN + if (pfirst && pfirst->Next) { + pp[0] = MakeParm(g, colp[0]); + pp[1] = pfirst; + fp1 = new(g) FILTER(g, neg ? OP_LT : OP_GE, pp); + + if (fp1->Convert(g, false)) + return NULL; + + pp[1] = pfirst->Next; + fp2 = new(g) FILTER(g, neg ? OP_GT : OP_LE, pp); + + if (fp2->Convert(g, false)) + return NULL; + + filp = MakeFilter(g, fp1, neg ? OP_OR : OP_AND, fp2); + } // endif parmp + + } else { + parmp = pfirst; + + for (int i = 0; i < 2; i++) + if (colp[i]) { + pp[i] = MakeParm(g, colp[i]); + } else { + if (!parmp || parmp->Domain != i) + return NULL; // Logical error, should never happen + + pp[i] = parmp; + parmp = parmp->Next; + } // endif colp + + filp = new(g) FILTER(g, pop, pp); + + if (filp->Convert(g, false)) + return NULL; + + } // endif's Val + + return filp; +} // end of MakeFilter + +/* --------------------------- Class FILTER -------------------------- */ + +/***********************************************************************/ +/* FILTER public constructors. */ +/***********************************************************************/ +FILTER::FILTER(PGLOBAL g, POPER pop, PPARM *tp) + { + Constr(g, pop->Val, pop->Mod, tp); + } // end of FILTER constructor + +FILTER::FILTER(PGLOBAL g, OPVAL opc, PPARM *tp) + { + Constr(g, opc, 0, tp); + } // end of FILTER constructor + +void FILTER::Constr(PGLOBAL g, OPVAL opc, int opm, PPARM *tp) + { + Next = NULL; + Opc = opc; + Opm = opm; + Bt = 0x00; + + for (int i = 0; i < 2; i++) { + Test[i].B_T = TYPE_VOID; + + if (tp && tp[i]) { + PlugConvertConstant(g, tp[i]->Value, tp[i]->Type); +#if defined(_DEBUG) + assert(tp[i]->Type == TYPE_XOBJECT); +#endif + Arg(i) = (PXOB)tp[i]->Value; + } else + Arg(i) = pXVOID; + + Val(i) = NULL; + Test[i].Conv = FALSE; + } // endfor i + + } // end of Constr + +/***********************************************************************/ +/* FILTER copy constructor. */ +/***********************************************************************/ +FILTER::FILTER(PFIL fil1) + { + Next = NULL; + Opc = fil1->Opc; + Opm = fil1->Opm; + Test[0] = fil1->Test[0]; + Test[1] = fil1->Test[1]; + } // end of FILTER copy constructor + +#if 0 +/***********************************************************************/ +/* Linearize: Does the linearization of the filter tree: */ +/* Independent filters (not implied in OR/NOT) will be separated */ +/* from others and filtering operations will be automated by */ +/* making a list of filter operations in polish operation style. */ +/* Returned value points to the first filter of the list, which ends */ +/* with the filter that was pointed by the first call argument, */ +/* except for separators, in which case a loop is needed to find it. 
*/ +/* Note: a loop is used now in all cases (was not for OP_NOT) to be */ +/* able to handle the case of filters whose arguments are already */ +/* linearized, as it is done in LNA semantic routines. Indeed for */ +/* already linearized chains, the first filter is never an OP_AND, */ +/* OP_OR or OP_NOT filter, so this function just returns 'this'. */ +/***********************************************************************/ +PFIL FILTER::Linearize(bool nosep) + { + int i; + PFIL lfp[2], ffp[2] = {NULL,NULL}; + + switch (Opc) { + case OP_NOT: + if (GetArgType(0) == TYPE_FILTER) { + lfp[0] = (PFIL)Arg(0); + ffp[0] = lfp[0]->Linearize(TRUE); + } /* endif */ + + if (!ffp[0]) + return NULL; + + while (lfp[0]->Next) // See Note above + lfp[0] = lfp[0]->Next; + + Arg(0) = lfp[0]; + lfp[0]->Next = this; + break; + case OP_OR: + nosep = TRUE; + case OP_AND: + for (i = 0; i < 2; i++) { + if (GetArgType(i) == TYPE_FILTER) { + lfp[i] = (PFIL)Arg(i); + ffp[i] = lfp[i]->Linearize(nosep); + } /* endif */ + + if (!ffp[i]) + return NULL; + + while (lfp[i]->Next) + lfp[i] = lfp[i]->Next; + + Arg(i) = lfp[i]; + } /* endfor i */ + + if (nosep) { + lfp[0]->Next = ffp[1]; + lfp[1]->Next = this; + } else { + lfp[0]->Next = this; + Opc = OP_SEP; + Arg(1) = pXVOID; + Next = ffp[1]; + } /* endif */ + + break; + default: + ffp[0] = this; + } /* endswitch */ + + return (ffp[0]); + } // end of Linearize + +/***********************************************************************/ +/* Link the fil2 filter chain to the fil1(this) filter chain. */ +/***********************************************************************/ +PFIL FILTER::Link(PGLOBAL g, PFIL fil2) + { + PFIL fil1; + + if (trace) + htrc("Linking filter %p with op=%d... to filter %p with op=%d\n", + this, Opc, fil2, (fil2) ? fil2->Opc : 0); + + for (fil1 = this; fil1->Next; fil1 = fil1->Next) ; + + if (fil1->Opc == OP_SEP) + fil1->Next = fil2; // Separator already exists + else { + // Create a filter separator and insert it between the chains + PFIL filp = new(g) FILTER(g, OP_SEP); + + filp->Arg(0) = fil1; + filp->Next = fil2; + fil1->Next = filp; + } // endelse + + return (this); + } // end of Link + +/***********************************************************************/ +/* Remove eventual last separator from a filter chain. */ +/***********************************************************************/ +PFIL FILTER::RemoveLastSep(void) + { + PFIL filp, gfp = NULL; + + // Find last filter block (filp) and previous one (gfp). + for (filp = this; filp->Next; filp = filp->Next) + gfp = filp; + + // If last filter is a separator, remove it + if (filp->Opc == OP_SEP) + if (gfp) + gfp->Next = NULL; + else + return NULL; // chain is now empty + + return this; + } // end of RemoveLastSep + +/***********************************************************************/ +/* CheckColumn: Checks references to Columns in the filter and change */ +/* them into references to Col Blocks. */ +/* Returns the number of column references or -1 in case of column */ +/* not found and -2 in case of unrecoverable error. */ +/* WHERE filters are called with *aggreg == AGG_NO. */ +/* HAVING filters are called with *aggreg == AGG_ANY. 
*/ +/***********************************************************************/ +int FILTER::CheckColumn(PGLOBAL g, PSQL sqlp, PXOB &p, int &ag) + { + char errmsg[MAX_STR] = ""; + int agg, k, n = 0; + + if (trace) + htrc("FILTER CheckColumn: sqlp=%p ag=%d\n", sqlp, ag); + + switch (Opc) { + case OP_SEP: + case OP_AND: + case OP_OR: + case OP_NOT: + return 0; // This because we are called for a linearized filter + default: + break; + } // endswitch Opc + + // Check all arguments even in case of error for when we are called + // from CheckHaving, where references to an alias raise an error but + // we must have all other arguments to be set. + for (int i = 0; i < 2; i++) { + if (GetArgType(i) == TYPE_FILTER) // Should never happen in + return 0; // current implementation + + agg = ag; + + if ((k = Arg(i)->CheckColumn(g, sqlp, Arg(i), agg)) < -1) { + return k; + } else if (k < 0) { + if (!*errmsg) // Keep first error message + strcpy(errmsg, g->Message); + + } else + n += k; + + } // endfor i + + if (*errmsg) { + strcpy(g->Message, errmsg); + return -1; + } else + return n; + + } // end of CheckColumn + +/***********************************************************************/ +/* RefNum: Find the number of references correlated sub-queries make */ +/* to the columns of the outer query (pointed by sqlp). */ +/***********************************************************************/ +int FILTER::RefNum(PSQL sqlp) + { + int n = 0; + + for (int i = 0; i < 2; i++) + n += Arg(i)->RefNum(sqlp); + + return n; + } // end of RefNum + +/***********************************************************************/ +/* CheckSubQuery: see SUBQUERY::CheckSubQuery for comment. */ +/***********************************************************************/ +PXOB FILTER::CheckSubQuery(PGLOBAL g, PSQL sqlp) + { + switch (Opc) { + case OP_SEP: + case OP_AND: + case OP_OR: + case OP_NOT: + break; + default: + for (int i = 0; i < 2; i++) + if (!(Arg(i) = (PXOB)Arg(i)->CheckSubQuery(g, sqlp))) + return NULL; + + break; + } // endswitch Opc + + return this; + } // end of CheckSubQuery + +/***********************************************************************/ +/* SortJoin: function that places ahead of the list the 'good' groups */ +/* for join filtering. These are groups with only one filter that */ +/* specify equality between two different table columns, at least */ +/* one is a table key column. Doing so the join filter will be in */ +/* general compatible with linearization of the joined table tree. */ +/* This function has been added a further sorting on column indexing. */ +/***********************************************************************/ +PFIL FILTER::SortJoin(PGLOBAL g) + { + int k; + PCOL cp1, cp2; + PTDBASE tp1, tp2; + PFIL fp, filp, gfp, filstart = this, filjoin = NULL, lfp = NULL; + bool join = TRUE, key = TRUE; + + // This routine requires that the chain ends with a separator + // So check for it and eventually add one if necessary + for (filp = this; filp->Next; filp = filp->Next) ; + + if (filp->Opc != OP_SEP) + filp->Next = new(g) FILTER(g, OP_SEP); + + again: + for (k = (key) ? 0 : MAX_MULT_KEY; k <= MAX_MULT_KEY; k++) + for (gfp = NULL, fp = filp = filstart; filp; filp = filp->Next) + switch (filp->Opc) { + case OP_SEP: + if (join) { + // Put this filter group into the join filter group list. 
+ if (!lfp) + filjoin = fp; + else + lfp->Next = fp; + + if (!gfp) + filstart = filp->Next; + else + gfp->Next = filp->Next; + + lfp = filp; // last block of join filter list + } else + gfp = filp; // last block of bad filter list + + join = TRUE; + fp = filp->Next; + break; + case OP_LOJ: + case OP_ROJ: + case OP_DTJ: + join &= TRUE; + break; + case OP_EQ: + if (join && k > 0 // So specific join operators come first + && filp->GetArgType(0) == TYPE_COLBLK + && filp->GetArgType(1) == TYPE_COLBLK) { + cp1 = (PCOL)filp->Arg(0); + cp2 = (PCOL)filp->Arg(1); + tp1 = (PTDBASE)cp1->GetTo_Tdb(); + tp2 = (PTDBASE)cp2->GetTo_Tdb(); + + if (tp1->GetTdb_No() != tp2->GetTdb_No()) { + if (key) + join &= (cp1->GetKey() == k || cp2->GetKey() == k); + else + join &= (tp1->GetColIndex(cp1) || tp2->GetColIndex(cp2)); + + } else + join = FALSE; + + } else + join = FALSE; + + break; + default: + join = FALSE; + } // endswitch filp->Opc + + if (key) { + key = FALSE; + goto again; + } // endif key + + if (filjoin) { + lfp->Next = filstart; + filstart = filjoin; + } // endif filjoin + + // Removing last separator is perhaps unuseful, but it was so + return filstart->RemoveLastSep(); + } // end of SortJoin + +/***********************************************************************/ +/* Check that this filter is a good join filter. */ +/* If so the opj block will be set accordingly. */ +/* opj points to the join block, fprec to the filter block to which */ +/* the rest of the chain must be linked in case of success. */ +/* teq, tek and tk2 indicates the severity of the tests: */ +/* tk2 == TRUE means both columns must be primary keys. */ +/* tc2 == TRUE means both args must be columns (not expression). */ +/* tek == TRUE means at least one column must be a primary key. */ +/* teq == TRUE means the filter operator must be OP_EQ. */ +/* tix == TRUE means at least one column must be a simple index key. */ +/* thx == TRUE means at least one column must be a leading index key. */ +/***********************************************************************/ +bool FILTER::FindJoinFilter(POPJOIN opj, PFIL fprec, bool teq, bool tek, + bool tk2, bool tc2, bool tix, bool thx) + { + if (trace) + htrc("FindJoinFilter: opj=%p fprec=%p tests=(%d,%d,%d,%d)\n", + opj, fprec, teq, tek, tk2, tc2); + + // Firstly check that this filter is an independent filter + // meaning that it is the only one in its own group. + if (Next && Next->Opc != OP_SEP) + return (Opc < 0); + + // Keep only equi-joins and specific joins (Outer and Distinct) + // Normally specific join operators comme first because they have + // been placed first by SortJoin. + if (teq && Opc > OP_EQ) + return FALSE; + + // We have a candidate for join filter, now check that it + // fulfil the requirement about its operands, to point to + // columns of respectively the two TDB's of that join. 
+ int col1 = 0, col2 = 0; + bool key = tk2; + bool idx = FALSE, ihx = FALSE; + PIXDEF pdx; + + for (int i = 0; i < 2; i++) + if (GetArgType(i) == TYPE_COLBLK) { + PCOL colp = (PCOL)Arg(i); + + if (tk2) + key &= (colp->IsKey()); + else + key |= (colp->IsKey()); + + pdx = ((PTDBASE)colp->GetTo_Tdb())->GetColIndex(colp); + idx |= (pdx && pdx->GetNparts() == 1); + ihx |= (pdx != NULL); + + if (colp->VerifyColumn(opj->GetTbx1())) + col1 = i + 1; + else if (colp->VerifyColumn(opj->GetTbx2())) + col2 = i + 1; + + } else if (!tc2 && GetArgType(i) != TYPE_CONST) { + PXOB xp = Arg(i); + + if (xp->VerifyColumn(opj->GetTbx1())) + col1 = i + 1; + else if (xp->VerifyColumn(opj->GetTbx2())) + col2 = i + 1; + + } else + return (Opc < 0); + + if (col1 == 0 || col2 == 0) + return (Opc < 0); + + if (((tek && !key) || (tix && !idx) || (thx && !ihx)) && Opc != OP_DTJ) + return FALSE; + + // This is the join filter, set the join block. + if (col1 == 1) { + opj->SetCol1(Arg(0)); + opj->SetCol2(Arg(1)); + } else { + opj->SetCol1(Arg(1)); + opj->SetCol2(Arg(0)); + + switch (Opc) { +// case OP_GT: Opc = OP_LT; break; +// case OP_LT: Opc = OP_GT; break; +// case OP_GE: Opc = OP_LE; break; +// case OP_LE: Opc = OP_GE; break; + case OP_LOJ: + case OP_ROJ: + case OP_DTJ: + // For expended join operators, the filter must indicate + // the way the join should be done, and not the order of + // appearance of tables in the table list (which is kept + // because tables are sorted in AddTdb). Therefore the + // join is inversed, not the filter. + opj->InverseJoin(); + default: break; + } // endswitch Opc + + } // endif col1 + + if (Opc < 0) { + // For join operators, special processing is needed + int knum = 0; + PFIL fp; + + switch (Opc) { + case OP_LOJ: + opj->SetJtype(JT_LEFT); + knum = opj->GetCol2()->GetKey(); + break; + case OP_ROJ: + opj->SetJtype(JT_RIGHT); + knum = opj->GetCol1()->GetKey(); + break; + case OP_DTJ: + for (knum = 1, fp = this->Next; fp; fp = fp->Next) + if (fp->Opc == OP_DTJ) + knum++; + else if (fp->Opc != OP_SEP) + break; + + opj->SetJtype(JT_DISTINCT); + opj->GetCol2()->SetKey(knum); + break; + default: + break; + } // endswitch Opc + + if (knum > 1) { + // Lets take care of a multiple key join + // We do a minimum of checking here as it will done later + int k = 1; + OPVAL op; + BYTE tmp[sizeof(Test[0])]; + + for (fp = this->Next; k < knum && fp; fp = fp->Next) { + switch (op = fp->Opc) { + case OP_SEP: + continue; + case OP_LOJ: + if (Opc == OP_ROJ) { + op = Opc; + memcpy(tmp, &fp->Test[0], sizeof(Test[0])); + fp->Test[0] = fp->Test[1]; + memcpy(&fp->Test[1], tmp, sizeof(Test[0])); + } // endif Opc + + k++; + break; + case OP_ROJ: + if (Opc == OP_LOJ) { + op = Opc; + memcpy(tmp, &fp->Test[0], sizeof(Test[0])); + fp->Test[0] = fp->Test[1]; + memcpy(&fp->Test[1], tmp, sizeof(Test[0])); + } // endif Opc + + k++; + break; + case OP_DTJ: + if (op == Opc && fp->GetArgType(1) == TYPE_COLBLK) + ((PCOL)fp->Arg(1))->SetKey(knum); + + k++; + break; + default: + break; + } // endswitch op + + if (op != Opc) + return TRUE; + + fp->Opc = OP_EQ; + } // endfor fp + + } // endif k + + Opc = OP_EQ; + } // endif Opc + + // Set the join filter operator + opj->SetOpc(Opc); + + // Now mark the columns involved in the join filter because + // this information will be used by the linearize program. + // Note: this should be replaced in the future by something + // enabling to mark tables as Parent or Child. 
+ opj->GetCol1()->MarkCol(U_J_EXT); + opj->GetCol2()->MarkCol(U_J_EXT); + + // Remove the filter from the filter chain. If the filter is + // not last in the chain, also remove the SEP filter after it. + if (Next) // Next->Opc == OP_SEP + Next = Next->Next; + + if (!fprec) + opj->SetFilter(Next); + else + fprec->Next = Next; + + return FALSE; + } // end of FindJoinFilter + +/***********************************************************************/ +/* CheckHaving: check and process a filter of an HAVING clause. */ +/* Check references to Columns and Functions in the filter. */ +/* All these references can correspond to items existing in the */ +/* SELECT list, else if it is a function, allocate a SELECT block */ +/* to be added to the To_Sel list (non projected blocks). */ +/***********************************************************************/ +bool FILTER::CheckHaving(PGLOBAL g, PSQL sqlp) + { + int agg = AGG_ANY; + PXOB xp; + +//sqlp->SetOk(TRUE); // Ok to look into outer queries for filters + + switch (Opc) { + case OP_SEP: + case OP_AND: + case OP_OR: + case OP_NOT: + return FALSE; + default: + if (CheckColumn(g, sqlp, xp, agg) < -1) + return TRUE; // Unrecovable error + + break; + } // endswitch Opc + + sqlp->SetOk(TRUE); // Ok to look into outer queries for filters + + for (int i = 0; i < 2; i++) + if (!(xp = Arg(i)->SetSelect(g, sqlp, TRUE))) + return TRUE; + else if (xp != Arg(i)) { + Arg(i) = xp; + Val(i) = Arg(i)->GetValue(); + } // endif + + sqlp->SetOk(FALSE); + return FALSE; + } // end of CheckHaving + +/***********************************************************************/ +/* Used while building a table index. This function split the filter */ +/* attached to the tdbp table into the local and not local part. */ +/* The local filter is used to restrict the size of the index and the */ +/* not local part remains to be executed later. This has been added */ +/* recently and not only to improve the performance but chiefly to */ +/* avoid loosing rows when processing distinct joins. */ +/* Returns: */ +/* 0: the whole filter is local (both arguments are) */ +/* 1: the whole filter is not local */ +/* 2: the filter was split in local (attached to fp[0]) and */ +/* not local (attached to fp[1]). */ +/***********************************************************************/ +int FILTER::SplitFilter(PFIL *fp) + { + int i, rc[2]; + + if (Opc == OP_AND) { + for (i = 0; i < 2; i++) + rc[i] = ((PFIL)Arg(i))->SplitFilter(fp); + + // Filter first argument should never be split because of the + // algorithm used to de-linearize the filter. + assert(rc[0] != 2); + + if (rc[0] != rc[1]) { + // Splitting to be done + if (rc[1] == 2) { + // 2nd argument already split, add 1st to the proper filter + assert(fp[*rc]); + Arg(1) = fp[*rc]; + Val(1) = fp[*rc]->GetValue(); + fp[*rc] = this; + } else for (i = 0; i < 2; i++) { + // Split the filter arguments + assert(!fp[rc[i]]); + fp[rc[i]] = (PFIL)Arg(i); + } // endfor i + + *rc = 2; + } // endif rc + + } else + *rc = (CheckLocal(NULL)) ? 0 : 1; + + return *rc; + } // end of SplitFilter + +/***********************************************************************/ +/* This function is called when making a Kindex after the filter was */ +/* split in local and nolocal part in the case of many to many joins. */ +/* Indeed the whole filter must be reconstructed to take care of next */ +/* same values when doing the explosive join. 
In addition, the link */ +/* must be done respecting the way filters are de-linearized, no AND */ +/* filter in the first argument of an AND filter, because this is */ +/* expected to be true if SplitFilter is used again on this filter. */ +/***********************************************************************/ +PFIL FILTER::LinkFilter(PGLOBAL g, PFIL fp2) + { + PFIL fp1, filp, filand = NULL; + + assert(fp2); // Test must be made by caller + + // Find where the new AND filter must be attached + for (fp1 = this; fp1->Opc == OP_AND; fp1 = (PFIL)fp1->Arg(1)) + filand = fp1; + + filp = new(g) FILTER(g, OP_AND); + filp->Arg(0) = fp1; + filp->Val(0) = fp1->GetValue(); + filp->Test[0].B_T = TYPE_INT; + filp->Test[0].Conv = FALSE; + filp->Arg(1) = fp2; + filp->Val(1) = fp2->GetValue(); + filp->Test[1].B_T = TYPE_INT; + filp->Test[1].Conv = FALSE; + filp->Value = AllocateValue(g, TYPE_INT); + + if (filand) { + // filp must be inserted here + filand->Arg(1) = filp; + filand->Val(1) = filp->GetValue(); + filp = this; + } // endif filand + + return filp; + } // end of LinkFilter + +/***********************************************************************/ +/* Checks whether filter contains reference to a previous table that */ +/* is not logically joined to the currently openned table, or whether */ +/* it is a Sub-Select filter. In any case, local is set to FALSE. */ +/* Note: This function is now applied to de-linearized filters. */ +/***********************************************************************/ +bool FILTER::CheckLocal(PTDB tdbp) + { + bool local = TRUE; + + if (trace) { + if (tdbp) + htrc("CheckLocal: filp=%p R%d\n", this, tdbp->GetTdb_No()); + else + htrc("CheckLocal: filp=%p\n", this); + } // endif trace + + for (int i = 0; local && i < 2; i++) + local = Arg(i)->CheckLocal(tdbp); + + if (trace) + htrc("FCL: returning %d\n", local); + + return (local); + } // end of CheckLocal + +/***********************************************************************/ +/* This routine is used to split the filter attached to the tdbp */ +/* table into the local and not local part where "local" means that */ +/* it applies "locally" to the FILEID special column with crit = 2 */ +/* and to the SERVID and/or TABID special columns with crit = 3. */ +/* Returns: */ +/* 0: the whole filter is local (both arguments are) */ +/* 1: the whole filter is not local */ +/* 2: the filter was split in local (attached to fp[0]) and */ +/* not local (attached to fp[1]). */ +/* Note: "Locally" means that the "local" filter can be evaluated */ +/* before opening the table. This implies that the special column be */ +/* compared only with constants and that this filter not to be or'ed */ +/* with a non "local" filter. */ +/***********************************************************************/ +int FILTER::SplitFilter(PFIL *fp, PTDB tp, int crit) + { + int i, rc[2]; + + if (Opc == OP_AND) { + for (i = 0; i < 2; i++) + rc[i] = ((PFIL)Arg(i))->SplitFilter(fp, tp, crit); + + // Filter first argument should never be split because of the + // algorithm used to de-linearize the filter. 
+ assert(rc[0] != 2); + + if (rc[0] != rc[1]) { + // Splitting to be done + if (rc[1] == 2) { + // 2nd argument already split, add 1st to the proper filter + assert(fp[*rc]); + Arg(1) = fp[*rc]; + Val(1) = fp[*rc]->GetValue(); + fp[*rc] = this; + } else for (i = 0; i < 2; i++) { + // Split the filter arguments + assert(!fp[rc[i]]); + fp[rc[i]] = (PFIL)Arg(i); + } // endfor i + + *rc = 2; + } // endif rc + + } else + *rc = (CheckSpcCol(tp, crit) == 1) ? 0 : 1; + + return *rc; + } // end of SplitFilter + +/***********************************************************************/ +/* Checks whether filter contains only references to FILEID, SERVID, */ +/* or TABID with constants or pseudo constants. */ +/***********************************************************************/ +int FILTER::CheckSpcCol(PTDB tdbp, int n) + { + int n1 = Arg(0)->CheckSpcCol(tdbp, n); + int n2 = Arg(1)->CheckSpcCol(tdbp, n); + + return max(n1, n2); + } // end of CheckSpcCol +#endif // 0 + +/***********************************************************************/ +/* Reset the filter arguments to non evaluated yet. */ +/***********************************************************************/ +void FILTER::Reset(void) + { + for (int i = 0; i < 2; i++) + Arg(i)->Reset(); + + } // end of Reset + +/***********************************************************************/ +/* Init: called when reinitializing a query (Correlated subqueries) */ +/***********************************************************************/ +bool FILTER::Init(PGLOBAL g) + { + for (int i = 0; i < 2; i++) + Arg(i)->Init(g); + + return FALSE; + } // end of Init + +/***********************************************************************/ +/* Convert: does all filter setting and conversions. */ +/* (having = TRUE for Having Clauses, FALSE for Where Clauses) */ +/* Note: hierarchy of types is implied by the ConvertType */ +/* function, currently FLOAT, int, STRING and TOKEN. */ +/* Returns FALSE if successful or TRUE in case of error. */ +/* Note on result type for filters: */ +/* Currently the result type is of TYPE_INT (should be TYPE_BOOL). */ +/* This avoids to introduce a new type and perhaps will permit */ +/* conversions. However the boolean operators will result in a */ +/* boolean int result, meaning that result shall be only 0 or 1 . */ +/***********************************************************************/ +bool FILTER::Convert(PGLOBAL g, bool having) + { + int i, comtype = TYPE_ERROR; + + if (trace) + htrc("converting(?) %s %p opc=%d\n", + (having) ? "having" : "filter", this, Opc); + + for (i = 0; i < 2; i++) { + switch (GetArgType(i)) { + case TYPE_COLBLK: + if (((PCOL)Arg(i))->InitValue(g)) + return TRUE; + + break; + case TYPE_ARRAY: + if ((Opc != OP_IN && !Opm) || i == 0) { + strcpy(g->Message, MSG(BAD_ARRAY_OPER)); + return TRUE; + } // endif + + if (((PARRAY)Arg(i))->Sort(g)) // Sort the array + return TRUE; // Error + + break; + case TYPE_VOID: + if (i == 1) { + Val(0) = Arg(0)->GetValue(); + goto TEST; // Filter has only one argument + } // endif i + + strcpy(g->Message, MSG(VOID_FIRST_ARG)); + return TRUE; + } // endswitch + + if (trace) + htrc("Filter(%d): Arg type=%d\n", i, GetArgType(i)); + + // Set default values + Test[i].B_T = Arg(i)->GetResultType(); + Test[i].Conv = FALSE; + + // Special case of the LIKE operator. 
+ if (Opc == OP_LIKE) { + if (!IsTypeChar((int)Test[i].B_T)) { + sprintf(g->Message, MSG(BAD_TYPE_LIKE), i, Test[i].B_T); + return TRUE; + } // endif + + comtype = TYPE_STRING; + } else { + // Set the common type for both (eventually converted) arguments + int argtyp = Test[i].B_T; + + if (GetArgType(i) == TYPE_CONST && argtyp == TYPE_INT) { + // If possible, downcast the type to smaller types to avoid + // convertion as much as possible. + int n = Arg(i)->GetValue()->GetIntValue(); + + if (n >= INT_MIN8 && n <= INT_MAX8) + argtyp = TYPE_TINY; + else if (n >= INT_MIN16 && n <= INT_MAX16) + argtyp = TYPE_SHORT; + + } else if (GetArgType(i) == TYPE_ARRAY) { + // If possible, downcast int arrays target type to TYPE_SHORT + // to take care of filters written like shortcol in (34,35,36). + if (((PARRAY)Arg(i))->CanBeShort()) + argtyp = TYPE_SHORT; + + } // endif TYPE_CONST + + comtype = ConvertType(comtype, argtyp, CNV_ANY); + } // endif Opc + + if (comtype == TYPE_ERROR) { + strcpy(g->Message, MSG(ILL_FILTER_CONV)); + return TRUE; + } // endif + + if (trace) + htrc(" comtype=%d, B_T(%d)=%d Val(%d)=%p\n", + comtype, i, Test[i].B_T, i, Val(i)); + + } // endfor i + + // Set or allocate the filter argument values and buffers + for (i = 0; i < 2; i++) { + if (trace) + htrc(" conv type %d ? i=%d B_T=%d comtype=%d\n", + GetArgType(i), i, Test[i].B_T, comtype); + + if (Test[i].B_T == comtype) { + // No conversion, set Value to argument Value + Val(i) = Arg(i)->GetValue(); +#if defined(_DEBUG) + assert (Val(i) && Val(i)->GetType() == Test[i].B_T); +#endif + } else { + // Conversion between filter arguments to be done. + // Note that the argument must be converted, not only the + // buffer and buffer type, so GetArgType() returns the new type. + switch (GetArgType(i)) { + case TYPE_CONST: + if (comtype == TYPE_DATE && Test[i].B_T == TYPE_STRING) { + // Convert according to the format of the other argument + Val(i) = AllocateValue(g, comtype, Arg(i)->GetLength()); + + if (((DTVAL*)Val(i))->SetFormat(g, Val(1-i))) + return TRUE; + + Val(i)->SetValue_psz(Arg(i)->GetValue()->GetCharValue()); + } else { + ((PCONST)Arg(i))->Convert(g, comtype); + Val(i) = Arg(i)->GetValue(); + } // endif comtype + + break; + case TYPE_ARRAY: + // Conversion PSZ or int array to int or double FLOAT. + if (((PARRAY)Arg(i))->Convert(g, comtype, Val(i-1)) == TYPE_ERROR) + return TRUE; + + break; + case TYPE_FILTER: + strcpy(g->Message, MSG(UNMATCH_FIL_ARG)); + return TRUE; + default: + // Conversion from Column, Select/Func, Expr, Scalfnc... + // The argument requires conversion during Eval + // A separate Value block must be allocated. + // Note: the test on comtype is to prevent unnecessary + // domain initialization and get the correct length in + // case of Token -> numeric conversion. + Val(i) = AllocateValue(g, comtype, (comtype == TYPE_STRING) + ? Arg(i)->GetLengthEx() : Arg(i)->GetLength()); + + if (comtype == TYPE_DATE && Test[i].B_T == TYPE_STRING) + // Convert according to the format of the other argument + if (((DTVAL*)Val(i))->SetFormat(g, Val(1 - i))) + return TRUE; + + Test[i].Conv = TRUE; + break; + } // endswitch GetType + + Test[i].B_T = comtype; + } // endif comtype + + } // endfor i + + // Last check to be sure all is correct. 
+ if (Test[0].B_T != Test[1].B_T) { + sprintf(g->Message, MSG(BAD_FILTER_CONV), Test[0].B_T, Test[1].B_T); + return TRUE; +//} else if (Test[0].B_T == TYPE_LIST && +// ((LSTVAL*)Val(0))->GetN() != ((LSTVAL*)Val(1))->GetN()) { +// sprintf(g->Message, MSG(ROW_ARGNB_ERR), +// ((LSTVAL*)Val(0))->GetN(), ((LSTVAL*)Val(1))->GetN()); +// return TRUE; + } // endif's B_T + + + TEST: // Test for possible Eval optimization + + if (trace) + htrc("Filp %p op=%d argtypes=(%d,%d)\n", + this, Opc, GetArgType(0), GetArgType(1)); + + // Check whether we have a "simple" filter and in that case + // change its class so an optimized Eval function will be used + if (!Test[0].Conv && !Test[1].Conv) { + if (Opm) switch (Opc) { + case OP_EQ: + case OP_NE: + case OP_GT: + case OP_GE: + case OP_LT: + case OP_LE: + if (GetArgType(1) != TYPE_ARRAY) + break; // On subquery, do standard processing + + // Change the FILTER class to FILTERIN + new(this) FILTERIN; + break; + default: + break; + } // endswitch Opc + + else switch (Opc) { +#if 0 + case OP_EQ: new(this) FILTEREQ; break; + case OP_NE: new(this) FILTERNE; break; + case OP_GT: new(this) FILTERGT; break; + case OP_GE: new(this) FILTERGE; break; + case OP_LT: new(this) FILTERLT; break; + case OP_LE: new(this) FILTERLE; break; +#endif // 0 + case OP_EQ: + case OP_NE: + case OP_GT: + case OP_GE: + case OP_LT: + case OP_LE: new(this) FILTERCMP(g); break; + case OP_AND: new(this) FILTERAND; break; + case OP_OR: new(this) FILTEROR; break; + case OP_NOT: new(this) FILTERNOT; break; + case OP_EXIST: + if (GetArgType(1) == TYPE_VOID) { + // For EXISTS it is the first argument that should be null + Arg(1) = Arg(0); + Arg(0) = pXVOID; + } // endif void + + // pass thru + case OP_IN: + // For IN operator do optimize if operand is an array + if (GetArgType(1) != TYPE_ARRAY) + break; // IN on subquery, do standard processing + + // Change the FILTER class to FILTERIN + new(this) FILTERIN; + break; + default: + break; + } // endswitch Opc + + } // endif Conv + + // The result value (should be TYPE_BOOL ???) + Value = AllocateValue(g, TYPE_INT); + return FALSE; + } // end of Convert + +/***********************************************************************/ +/* Eval: Compute filter result value. */ +/* New algorithm: evaluation is now done from the root for each group */ +/* so Eval is now a recursive process for FILTER operands. */ +/***********************************************************************/ +bool FILTER::Eval(PGLOBAL g) + { + int i; // n = 0; +//PSUBQ subp = NULL; + PARRAY ap = NULL; + PDBUSER dup = PlgGetUser(g); + + if (Opc <= OP_XX) + for (i = 0; i < 2; i++) + // Evaluate the object and eventually convert it. + if (Arg(i)->Eval(g)) + return TRUE; + else if (Test[i].Conv) + Val(i)->SetValue_pval(Arg(i)->GetValue()); + + if (trace) + htrc(" Filter: op=%d type=%d %d B_T=%d %d val=%p %p\n", + Opc, GetArgType(0), GetArgType(1), Test[0].B_T, Test[1].B_T, + Val(0), Val(1)); + + // Main switch on filtering according to operator type. + switch (Opc) { + case OP_EQ: + case OP_NE: + case OP_GT: + case OP_GE: + case OP_LT: + case OP_LE: + if (!Opm) { + // Comparison boolean operators. 
+#if defined(_DEBUG) + if (Val(0)->GetType() != Val(1)->GetType()) + goto FilterError; +#endif + // Compare the two arguments + // New algorithm to take care of TYPE_LIST + Bt = OpBmp(g, Opc); + Value->SetValue_bool(!(Val(0)->TestValue(Val(1)) & Bt)); + break; + } // endif Opm + + // For modified operators, pass thru + case OP_IN: + case OP_EXIST: + // For IN operations, special processing is done here + switch (GetArgType(1)) { + case TYPE_ARRAY: + ap = (PARRAY)Arg(1); + break; + default: + strcpy(g->Message, MSG(IN_WITHOUT_SUB)); + goto FilterError; + } // endswitch Type + + if (trace) { + htrc(" IN filtering: ap=%p\n", ap); + + if (ap) + htrc(" Array: type=%d size=%d other_type=%d\n", + ap->GetType(), ap->GetSize(), Test[0].B_T); + + } // endif trace + + /*****************************************************************/ + /* Implementation note: The Find function is now able to do a */ + /* conversion but limited to SHORT, int, and FLOAT arrays. */ + /*****************************************************************/ +// Value->SetValue_bool(ap->Find(g, Val(0))); + + if (ap) + Value->SetValue_bool(ap->FilTest(g, Val(0), Opc, Opm)); + + break; + + case OP_LIKE: +#if defined(_DEBUG) + if (!IsTypeChar((int)Test[0].B_T) || !IsTypeChar((int)Test[1].B_T)) + goto FilterError; +#endif + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue_bool(PlugEvalLike(g, Val(0)->GetCharValue(), + Val(1)->GetCharValue(), + Val(0)->IsCi())); + break; + + case OP_AND: +#if defined(_DEBUG) + if (Test[0].B_T != TYPE_INT || Test[1].B_T != TYPE_INT) + goto FilterError; +#endif + + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue(Val(0)->GetIntValue()); + + if (!Value->GetIntValue()) + return FALSE; // No need to evaluate 2nd argument + + if (Arg(1)->Eval(g)) + return TRUE; + + Value->SetValue(Val(1)->GetIntValue()); + break; + + case OP_OR: +#if defined(_DEBUG) + if (Test[0].B_T != TYPE_INT || Test[1].B_T != TYPE_INT) + goto FilterError; +#endif + + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue(Val(0)->GetIntValue()); + + if (Value->GetIntValue()) + return FALSE; // No need to evaluate 2nd argument + + if (Arg(1)->Eval(g)) + return TRUE; + + Value->SetValue(Val(1)->GetIntValue()); + break; + + case OP_NOT: +#if defined(_DEBUG) + if (Test[0].B_T != TYPE_INT) // Should be type bool ??? 
+ goto FilterError; +#endif + + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue_bool(!Val(0)->GetIntValue()); + break; + + case OP_SEP: // No more used while evaluating + default: + goto FilterError; + } // endswitch Opc + + if (trace) + htrc("Eval: filter %p Opc=%d result=%d\n", + this, Opc, Value->GetIntValue()); + + return FALSE; + + FilterError: + sprintf(g->Message, MSG(BAD_FILTER), + Opc, Test[0].B_T, Test[1].B_T, GetArgType(0), GetArgType(1)); + return TRUE; + } // end of Eval + +#if 0 +/***********************************************************************/ +/* Called by PlugCopyDB to make a copy of a (linearized) filter chain.*/ +/***********************************************************************/ +PFIL FILTER::Copy(PTABS t) + { + int i; + PFIL fil1, fil2, newfilchain = NULL, fprec = NULL; + + for (fil1 = this; fil1; fil1 = fil1->Next) { + fil2 = new(t->G) FILTER(fil1); + + if (!fprec) + newfilchain = fil2; + else + fprec->Next = fil2; + + NewPointer(t, fil1, fil2); + + for (i = 0; i < 2; i++) + if (fil1->GetArgType(i) == TYPE_COLBLK || + fil1->GetArgType(i) == TYPE_FILTER) + AddPointer(t, &fil2->Arg(i)); + + fprec = fil2; + } /* endfor fil1 */ + + return newfilchain; + } // end of Copy +#endif // 0 + +/*********************************************************************/ +/* Make file output of FILTER contents. */ +/*********************************************************************/ +void FILTER::Print(PGLOBAL g, FILE *f, uint n) + { + char m[64]; + + memset(m, ' ', n); // Make margin string + m[n] = '\0'; + + bool lin = (Next != NULL); // lin == TRUE if linearized + + for (PFIL fp = this; fp; fp = fp->Next) { + fprintf(f, "%sFILTER: at %p opc=%d lin=%d result=%d\n", + m, fp, fp->Opc, lin, + (Value) ? Value->GetIntValue() : 0); + + for (int i = 0; i < 2; i++) { + fprintf(f, "%s Arg(%d) type=%d value=%p B_T=%d val=%p\n", + m, i, fp->GetArgType(i), fp->Arg(i), + fp->Test[i].B_T, fp->Val(i)); + + if (lin && fp->GetArgType(i) == TYPE_FILTER) + fprintf(f, "%s Filter at %p\n", m, fp->Arg(i)); + else + fp->Arg(i)->Print(g, f, n + 2); + + } // endfor i + + } // endfor fp + + } // end of Print + +/***********************************************************************/ +/* Make string output of TABLE contents (z should be checked). 
*/ +/***********************************************************************/ +void FILTER::Print(PGLOBAL g, char *ps, uint z) + { + #define FLEN 100 + + typedef struct _bc { + struct _bc *Next; + char Cold[FLEN+1]; + } BC, *PBC; + + char *p; + int n; + PFIL fp; + PBC bxp, bcp = NULL; + + *ps = '\0'; + + for (fp = this; fp && z > 0; fp = fp->Next) { + if (fp->Opc < OP_CNC || fp->Opc == OP_IN || fp->Opc == OP_NULL + || fp->Opc == OP_LIKE || fp->Opc == OP_EXIST) { + if (!(bxp = new BC)) { + strncat(ps, "Filter(s)", z); + return; + } /* endif */ + + bxp->Next = bcp; + bcp = bxp; + p = bcp->Cold; + n = FLEN; + fp->Arg(0)->Print(g, p, n); + n = FLEN - strlen(p); + + switch (fp->Opc) { + case OP_EQ: + strncat(bcp->Cold, "=", n); + break; + case OP_NE: + strncat(bcp->Cold, "!=", n); + break; + case OP_GT: + strncat(bcp->Cold, ">", n); + break; + case OP_GE: + strncat(bcp->Cold, ">=", n); + break; + case OP_LT: + strncat(bcp->Cold, "<", n); + break; + case OP_LE: + strncat(bcp->Cold, "<=", n); + break; + case OP_IN: + strncat(bcp->Cold, " in ", n); + break; + case OP_NULL: + strncat(bcp->Cold, " is null", n); + break; + case OP_LIKE: + strncat(bcp->Cold, " like ", n); + break; + case OP_EXIST: + strncat(bcp->Cold, " exists ", n); + break; + case OP_AND: + strncat(bcp->Cold, " and ", n); + break; + case OP_OR: + strncat(bcp->Cold, " or ", n); + break; + default: + strncat(bcp->Cold, "?", n); + } // endswitch Opc + + n = FLEN - strlen(p); + p += strlen(p); + fp->Arg(1)->Print(g, p, n); + } else + if (!bcp) { + strncat(ps, "???", z); + z -= 3; + } else + switch (fp->Opc) { + case OP_SEP: // Filter list separator + strncat(ps, bcp->Cold, z); + z -= strlen(bcp->Cold); + strncat(ps, ";", z--); + bxp = bcp->Next; + delete bcp; + bcp = bxp; + break; + case OP_NOT: // Filter NOT operator + for (n = MY_MIN((int)strlen(bcp->Cold), FLEN-3); n >= 0; n--) + bcp->Cold[n+2] = bcp->Cold[n]; + bcp->Cold[0] = '^'; + bcp->Cold[1] = '('; + strcat(bcp->Cold, ")"); + break; + default: + for (n = MY_MIN((int)strlen(bcp->Cold), FLEN-4); n >= 0; n--) + bcp->Cold[n+3] = bcp->Cold[n]; + bcp->Cold[0] = ')'; + switch (fp->Opc) { + case OP_AND: bcp->Cold[1] = '&'; break; + case OP_OR: bcp->Cold[1] = '|'; break; + default: bcp->Cold[1] = '?'; + } // endswitch + bcp->Cold[2] = '('; + strcat(bcp->Cold, ")"); + bxp = bcp->Next; + for (n = MY_MIN((int)strlen(bxp->Cold), FLEN-1); n >= 0; n--) + bxp->Cold[n+1] = bxp->Cold[n]; + bxp->Cold[0] = '('; + strncat(bxp->Cold, bcp->Cold, FLEN-strlen(bxp->Cold)); + delete bcp; + bcp = bxp; + } // endswitch + + } // endfor fp + + n = 0; + + if (!bcp) + strncat(ps, "Null-Filter", z); + else do { + if (z > 0) { + if (n++ > 0) { + strncat(ps, "*?*", z); + z = MY_MAX(0, (int)z-3); + } // endif + strncat(ps, bcp->Cold, z); + z -= strlen(bcp->Cold); + } // endif + + bxp = bcp->Next; + delete bcp; + bcp = bxp; + } while (bcp); // enddo + + } // end of Print + + +/* -------------------- Derived Classes Functions -------------------- */ + +/***********************************************************************/ +/* FILTERCMP constructor. */ +/***********************************************************************/ +FILTERCMP::FILTERCMP(PGLOBAL g) + { + Bt = OpBmp(g, Opc); + } // end of FILTERCMP constructor + +/***********************************************************************/ +/* Eval: Compute result value for comparison operators. 
*/ +/***********************************************************************/ +bool FILTERCMP::Eval(PGLOBAL g) + { + if (Arg(0)->Eval(g) || Arg(1)->Eval(g)) + return TRUE; + + Value->SetValue_bool(!(Val(0)->TestValue(Val(1)) & Bt)); + return FALSE; + } // end of Eval + +/***********************************************************************/ +/* Eval: Compute result value for AND filters. */ +/***********************************************************************/ +bool FILTERAND::Eval(PGLOBAL g) + { + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue(Val(0)->GetIntValue()); + + if (!Value->GetIntValue()) + return FALSE; // No need to evaluate 2nd argument + + if (Arg(1)->Eval(g)) + return TRUE; + + Value->SetValue(Val(1)->GetIntValue()); + return FALSE; + } // end of Eval + +/***********************************************************************/ +/* Eval: Compute result value for OR filters. */ +/***********************************************************************/ +bool FILTEROR::Eval(PGLOBAL g) + { + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue(Val(0)->GetIntValue()); + + if (Value->GetIntValue()) + return FALSE; // No need to evaluate 2nd argument + + if (Arg(1)->Eval(g)) + return TRUE; + + Value->SetValue(Val(1)->GetIntValue()); + return FALSE; + } // end of Eval + +/***********************************************************************/ +/* Eval: Compute result value for NOT filters. */ +/***********************************************************************/ +bool FILTERNOT::Eval(PGLOBAL g) + { + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue_bool(!Val(0)->GetIntValue()); + return FALSE; + } // end of Eval + +/***********************************************************************/ +/* Eval: Compute result value for IN filters. */ +/***********************************************************************/ +bool FILTERIN::Eval(PGLOBAL g) + { + if (Arg(0)->Eval(g)) + return TRUE; + + Value->SetValue_bool(((PARRAY)Arg(1))->FilTest(g, Val(0), Opc, Opm)); + return FALSE; + } // end of Eval + +/***********************************************************************/ +/* FILTERTRUE does nothing and returns TRUE. */ +/***********************************************************************/ +void FILTERTRUE::Reset(void) + { + } // end of Reset + +bool FILTERTRUE::Eval(PGLOBAL) + { + return FALSE; + } // end of Eval + +/* ------------------------- Friend Functions ------------------------ */ + +#if 0 +/***********************************************************************/ +/* Prepare: prepare a filter for execution. This implies two things: */ +/* 1) de-linearize the filter to be able to evaluate it recursively. */ +/* This permit to conditionally evaluate only the first argument */ +/* of OP_OR and OP_AND filters without having to pass by an */ +/* intermediate Apply function (as this has a performance cost). */ +/* 2) do all the necessary conversion for all filter block arguments. 
*/ +/***********************************************************************/ +PFIL PrepareFilter(PGLOBAL g, PFIL fp, bool having) + { + PFIL filp = NULL; + + if (trace) + htrc("PrepareFilter: fp=%p having=%d\n", fp, having); +//if (fp) +// fp->Print(g, debug, 0); + + while (fp) { + if (fp->Opc == OP_SEP) + // If separator is not last transform it into an AND filter + if (fp->Next) { + filp = PrepareFilter(g, fp->Next, having); + fp->Arg(1) = filp; + fp->Opc = OP_AND; + fp->Next = NULL; // This will end the loop + } else + break; // Remove eventual ending separator(s) + +// if (fp->Convert(g, having)) +// longjmp(g->jumper[g->jump_level], TYPE_FILTER); + + filp = fp; + fp = fp->Next; + filp->Next = NULL; + } // endwhile + + if (trace) + htrc(" returning filp=%p\n", filp); +//if (filp) +// filp->Print(g, debug, 0); + + return filp; + } // end of PrepareFilter +#endif // 0 + +/***********************************************************************/ +/* ApplyFilter: Apply filtering for a table (where or having clause). */ +/* New algorithm: evaluate from the root a de-linearized filter so */ +/* AND/OR clauses can be optimized throughout the whole tree. */ +/***********************************************************************/ +DllExport bool ApplyFilter(PGLOBAL g, PFIL filp) + { + if (!filp) + return TRUE; + + // Must be done for null tables + filp->Reset(); + +//if (tdbp && tdbp->IsNull()) +// return TRUE; + + if (filp->Eval(g)) + longjmp(g->jumper[g->jump_level], TYPE_FILTER); + + if (trace > 1) + htrc("PlugFilter filp=%p result=%d\n", + filp, filp->GetResult()); + + return filp->GetResult(); + } // end of ApplyFilter diff --git a/storage/connect/filter.h b/storage/connect/filter.h new file mode 100644 index 00000000000..78e066d9ab7 --- /dev/null +++ b/storage/connect/filter.h @@ -0,0 +1,178 @@ +/*************** Filter H Declares Source Code File (.H) ***************/ +/* Name: FILTER.H Version 1.2 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2010-2012 */ +/* */ +/* This file contains the FILTER and derived classes declares. */ +/***********************************************************************/ +#ifndef __FILTER__ +#define __FILTER__ + +/***********************************************************************/ +/* Include required application header files */ +/***********************************************************************/ +#include "xobject.h" + +/***********************************************************************/ +/* Utilities for WHERE condition building. */ +/***********************************************************************/ +PFIL MakeFilter(PGLOBAL g, PFIL filp, OPVAL vop, PFIL fp); +PFIL MakeFilter(PGLOBAL g, PCOL *colp, POPER pop, PPARM pfirst, bool neg); + +/***********************************************************************/ +/* Definition of class FILTER with all its method functions. */ +/* Note: Most virtual implementation functions are not in use yet */ +/* but could be in future system evolution. 
*/ +/***********************************************************************/ +class DllExport FILTER : public XOBJECT { /* Filter description block */ +//friend PFIL PrepareFilter(PGLOBAL, PFIL, bool); + friend DllExport bool ApplyFilter(PGLOBAL, PFIL); + public: + // Constructors + FILTER(PGLOBAL g, POPER pop, PPARM *tp = NULL); + FILTER(PGLOBAL g, OPVAL opc, PPARM *tp = NULL); + FILTER(PFIL fil1); + + // Implementation + virtual int GetType(void) {return TYPE_FILTER;} + virtual int GetResultType(void) {return TYPE_INT;} + virtual int GetLength(void) {return 1;} + virtual int GetLengthEx(void) {assert(FALSE); return 0;} + virtual int GetScale() {return 0;}; + PFIL GetNext(void) {return Next;} + OPVAL GetOpc(void) {return Opc;} + int GetOpm(void) {return Opm;} + int GetArgType(int i) {return Arg(i)->GetType();} + bool GetResult(void) {return Value->GetIntValue() != 0;} + PXOB &Arg(int i) {return Test[i].Arg;} + PVAL &Val(int i) {return Test[i].Value;} + bool &Conv(int i) {return Test[i].Conv;} + void SetNext(PFIL filp) {Next = filp;} + + // Methods + virtual void Reset(void); + virtual bool Compare(PXOB) {return FALSE;} // Not used yet + virtual bool Init(PGLOBAL); + virtual bool Eval(PGLOBAL); + virtual bool SetFormat(PGLOBAL, FORMAT&) {return TRUE;} // NUY +//virtual int CheckColumn(PGLOBAL g, PSQL sqlp, PXOB &xp, int &ag); +//virtual int RefNum(PSQL); +//virtual PXOB SetSelect(PGLOBAL, PSQL, bool) {return NULL;} // NUY +//virtual PXOB CheckSubQuery(PGLOBAL, PSQL); +//virtual bool CheckLocal(PTDB); +//virtual int CheckSpcCol(PTDB tdbp, int n); + virtual void Print(PGLOBAL g, FILE *f, uint n); + virtual void Print(PGLOBAL g, char *ps, uint z); +// PFIL Linearize(bool nosep); +// PFIL Link(PGLOBAL g, PFIL fil2); +// PFIL RemoveLastSep(void); +// PFIL SortJoin(PGLOBAL g); +// bool FindJoinFilter(POPJOIN opj, PFIL fprec, bool teq, +// bool tek, bool tk2, bool tc2, bool tix, bool thx); +// bool CheckHaving(PGLOBAL g, PSQL sqlp); + bool Convert(PGLOBAL g, bool having); +// int SplitFilter(PFIL *fp); +// int SplitFilter(PFIL *fp, PTDB tp, int n); +// PFIL LinkFilter(PGLOBAL g, PFIL fp2); +// PFIL Copy(PTABS t); + + protected: + FILTER(void) {} // Standard constructor not to be used + void Constr(PGLOBAL g, OPVAL opc, int opm, PPARM *tp); + + // Members + PFIL Next; // Used for linearization + OPVAL Opc; // Comparison operator + int Opm; // Modificator + BYTE Bt; // Operator bitmap + struct { + int B_T; // Buffer type + PXOB Arg; // Points to argument + PVAL Value; // Points to argument value + bool Conv; // TRUE if argument must be converted + } Test[2]; + }; // end of class FILTER + +/***********************************************************************/ +/* Derived class FILTERX: used to replace a filter by a derived class */ +/* using an Eval method optimizing the filtering evaluation. */ +/* Note: this works only if the members of the derived class are the */ +/* same than the ones of the original class (NO added members). 
*/ +/***********************************************************************/ +class FILTERX : public FILTER { + public: + // Methods + virtual bool Eval(PGLOBAL) = 0; // just to prevent direct FILTERX use + + // Fake operator new used to change a filter into a derived filter + void * operator new(size_t size, PFIL filp) {return filp;} +#if defined(WIN32) + // Avoid warning C4291 by defining a matching dummy delete operator + void operator delete(void *, PFIL) {} +#else + void operator delete(void *) {} +#endif + }; // end of class FILTERX + +/***********************************************************************/ +/* Derived class FILTEREQ: OP_EQ, no conversion and Xobject args. */ +/***********************************************************************/ +class FILTERCMP : public FILTERX { + public: + // Constructor + FILTERCMP(PGLOBAL g); + + // Methods + virtual bool Eval(PGLOBAL); + }; // end of class FILTEREQ + +/***********************************************************************/ +/* Derived class FILTERAND: OP_AND, no conversion and Xobject args. */ +/***********************************************************************/ +class FILTERAND : public FILTERX { + public: + // Methods + virtual bool Eval(PGLOBAL); + }; // end of class FILTERAND + +/***********************************************************************/ +/* Derived class FILTEROR: OP_OR, no conversion and Xobject args. */ +/***********************************************************************/ +class FILTEROR : public FILTERX { + public: + // Methods + virtual bool Eval(PGLOBAL); + }; // end of class FILTEROR + +/***********************************************************************/ +/* Derived class FILTERNOT: OP_NOT, no conversion and Xobject args. */ +/***********************************************************************/ +class FILTERNOT : public FILTERX { + public: + // Methods + virtual bool Eval(PGLOBAL); + }; // end of class FILTERNOT + +/***********************************************************************/ +/* Derived class FILTERIN: OP_IN, no conversion and Array 2nd arg. */ +/***********************************************************************/ +class FILTERIN : public FILTERX { + public: + // Methods + virtual bool Eval(PGLOBAL); + }; // end of class FILTERIN + +/***********************************************************************/ +/* Derived class FILTERTRUE: Always returns TRUE. */ +/***********************************************************************/ +class FILTERTRUE : public FILTERX { + public: + // Constructor + FILTERTRUE(PVAL valp) {Value = valp; Value->SetValue_bool(TRUE);} + + // Methods + virtual void Reset(void); + virtual bool Eval(PGLOBAL); + }; // end of class FILTERTRUE + +#endif // __FILTER__ diff --git a/storage/connect/global.h b/storage/connect/global.h index c0746e37db7..d35cef2de6f 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -1,6 +1,6 @@ /***********************************************************************/ /* GLOBAL.H: Declaration file used by all CONNECT implementations. */ -/* (C) Copyright Olivier Bertrand 1993-2012 */ +/* (C) Copyright Olivier Bertrand 1993-2014 */ /***********************************************************************/ /***********************************************************************/ @@ -23,12 +23,12 @@ #define XML_SUPPORT 1 #endif -#if defined(XMSG) +#if defined(XMSG) // Definition used to read messages from message file. 
#include "msgid.h" #define MSG(I) PlugReadMessage(NULL, MSG_##I, #I) #define STEP(I) PlugReadMessage(g, MSG_##I, #I) -#elif defined(NEWMSG) +#elif defined(NEWMSG) // Definition used to get messages from resource. #include "msgid.h" #define MSG(I) PlugGetMessage(NULL, MSG_##I) @@ -85,6 +85,7 @@ #define TYPE_INT 7 #define TYPE_DECIM 9 #define TYPE_BIN 10 +#define TYPE_PCHAR 11 #if defined(OS32) #define SYS_STAMP "OS32" @@ -223,6 +224,7 @@ typedef struct _global { /* Global structure */ int Createas; /* To pass info to created table */ void *Xchk; /* indexes in create/alter */ short Alchecked; /* Checked for ALTER */ + short Mrr; /* True when doing mrr */ short Trace; int jump_level; jmp_buf jumper[MAX_JUMP + 2]; diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index d29e9cd4422..93327fa4530 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -28,7 +28,7 @@ ha_connect will let you create/open/delete tables, the created table can be done specifying an already existing file, the drop table command will just suppress the table definition but not the eventual data file. - Indexes are not supported for all table types but data can be inserted, + Indexes are not supported for all table types but data can be inserted, updated or deleted. You can enable the CONNECT storage engine in your build by doing the @@ -108,7 +108,6 @@ #define MYSQL_SERVER 1 #define DONT_DEFINE_VOID -//#include "sql_partition.h" #include "sql_class.h" #include "create_options.h" #include "mysql_com.h" @@ -116,6 +115,10 @@ #include "sql_parse.h" #include "sql_base.h" #include <sys/stat.h> +#if defined(NEW_WAY) +#include "sql_table.h" +#endif // NEW_WAY +#include "sql_partition.h" #undef OFFSET #define NOPARSE @@ -167,55 +170,72 @@ #define SZWMIN 4194304 // Minimum work area size 4M extern "C" { - char version[]= "Version 1.02.0002 March 16, 2014"; + char version[]= "Version 1.03.0003 August 22, 2014"; + char compver[]= "Version 1.03.0003 " __DATE__ " " __TIME__; + +#if defined(WIN32) + char slash= '\\'; +#else // !WIN32 + char slash= '/'; +#endif // !WIN32 #if defined(XMSG) - char msglang[]; // Default message language + char msglang[]; // Default message language #endif int trace= 0; // The general trace value int xconv= 0; // The type conversion option int zconv= SZCONV; // The text conversion size + USETEMP Use_Temp= TMP_AUTO; // The temporary file use } // extern "C" #if defined(XMAP) bool xmap= false; #endif // XMAP + bool xinfo= false; uint worksize= SZWORK; ulong ha_connect::num= 0; //int DTVAL::Shift= 0; -/* CONNECT system variables */ +/* CONNECT system variables */ static int xtrace= 0; static int conv_size= SZCONV; static uint work_size= SZWORK; static ulong type_conv= 0; +static ulong use_tempfile= 1; #if defined(XMAP) static my_bool indx_map= 0; #endif // XMAP +static my_bool exact_info= 0; /***********************************************************************/ /* Utility functions. 
*/ /***********************************************************************/ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); void PushWarning(PGLOBAL g, THD *thd, int level); +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, + const char *db, char *tab, const char *src, int port); + static PCONNECT GetUser(THD *thd, PCONNECT xp); static PGLOBAL GetPlug(THD *thd, PCONNECT& lxp); static handler *connect_create_handler(handlerton *hton, - TABLE_SHARE *table, - MEM_ROOT *mem_root); + TABLE_SHARE *table, + MEM_ROOT *mem_root); static int connect_assisted_discovery(handlerton *hton, THD* thd, TABLE_SHARE *table_s, HA_CREATE_INFO *info); +/***********************************************************************/ +/* Global variables update functions. */ +/***********************************************************************/ static void update_connect_xtrace(MYSQL_THD thd, struct st_mysql_sys_var *var, void *var_ptr, const void *save) { - xtrace= *(int *)var_ptr= *(int *)save; + trace= *(int *)var_ptr= *(int *)save; } // end of update_connect_xtrace static void update_connect_zconv(MYSQL_THD thd, @@ -239,6 +259,13 @@ static void update_connect_worksize(MYSQL_THD thd, worksize= (uint)(*(ulong *)var_ptr= *(ulong *)save); } // end of update_connect_worksize +static void update_connect_usetemp(MYSQL_THD thd, + struct st_mysql_sys_var *var, + void *var_ptr, const void *save) +{ + Use_Temp= (USETEMP)(*(ulong *)var_ptr= *(ulong *)save); +} // end of update_connect_usetemp + #if defined(XMAP) static void update_connect_xmap(MYSQL_THD thd, struct st_mysql_sys_var *var, @@ -248,6 +275,13 @@ static void update_connect_xmap(MYSQL_THD thd, } // end of update_connect_xmap #endif // XMAP +static void update_connect_xinfo(MYSQL_THD thd, + struct st_mysql_sys_var *var, + void *var_ptr, const void *save) +{ + xinfo= (bool)(*(my_bool *)var_ptr= *(my_bool *)save); +} // end of update_connect_xinfo + /***********************************************************************/ /* The CONNECT handlerton object. */ /***********************************************************************/ @@ -304,13 +338,29 @@ ha_create_table_option connect_table_option_list[]= ha_create_table_option connect_field_option_list[]= { HA_FOPTION_NUMBER("FLAG", offset, (ulonglong) -1, 0, INT_MAX32, 1), + HA_FOPTION_NUMBER("MAX_DIST", freq, 0, 0, INT_MAX32, 1), // BLK_INDX +//HA_FOPTION_NUMBER("DISTRIB", opt, 0, 0, 2, 1), // used for BLK_INDX HA_FOPTION_NUMBER("FIELD_LENGTH", fldlen, 0, 0, INT_MAX32, 1), HA_FOPTION_STRING("DATE_FORMAT", dateformat), HA_FOPTION_STRING("FIELD_FORMAT", fieldformat), HA_FOPTION_STRING("SPECIAL", special), + HA_FOPTION_ENUM("DISTRIB", opt, "scattered,clustered,sorted", 0), HA_FOPTION_END }; +/* + CREATE TABLE option list (index options) + + These can be specified in the CREATE TABLE per index: + CREATE TABLE ( field ..., .., INDEX .... *here*, ... ) +*/ +ha_create_table_option connect_index_option_list[]= +{ + HA_IOPTION_BOOL("DYNAM", dynamic, 0), + HA_IOPTION_BOOL("MAPPED", mapped, 0), + HA_IOPTION_END +}; + /***********************************************************************/ /* Push G->Message as a MySQL warning. 
*/ /***********************************************************************/ @@ -390,7 +440,7 @@ DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR name, LPCSTR dir) */ static const char *ha_connect_exts[]= { ".dos", ".fix", ".csv", ".bin", ".fmt", ".dbf", ".xml", ".ini", ".vec", - ".dnx", ".fnx", ".bnx", ".vnx", ".dbx", + ".dnx", ".fnx", ".bnx", ".vnx", ".dbx", ".dop", ".fop", ".bop", ".vop", NULL}; /** @@ -401,7 +451,7 @@ static int connect_init_func(void *p) { DBUG_ENTER("connect_init_func"); - sql_print_information("CONNECT: %s", version); + sql_print_information("CONNECT: %s", compver); // xtrace is now a system variable trace= xtrace; @@ -415,9 +465,11 @@ static int connect_init_func(void *p) connect_hton= (handlerton *)p; connect_hton->state= SHOW_OPTION_YES; connect_hton->create= connect_create_handler; - connect_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED | HTON_NO_PARTITION; +//connect_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED | HTON_NO_PARTITION; + connect_hton->flags= HTON_TEMPORARY_NOT_SUPPORTED; connect_hton->table_options= connect_table_option_list; connect_hton->field_options= connect_field_option_list; + connect_hton->index_options= connect_index_option_list; connect_hton->tablefile_extensions= ha_connect_exts; connect_hton->discover_table_structure= connect_assisted_discovery; @@ -426,7 +478,7 @@ static int connect_init_func(void *p) DTVAL::SetTimeShift(); // Initialize time zone shift once for all DBUG_RETURN(0); -} +} // end of connect_init_func /** @@ -456,13 +508,13 @@ static int connect_done_func(void *p) } // endfor pc DBUG_RETURN(error); -} +} // end of connect_done_func /** @brief Example of simple lock controls. The "share" it creates is a - structure we will pass to each example handler. Do you have to have + structure we will pass to each CONNECT handler. Do you have to have one of these? Well, you have pieces that are used for locking, and they are needed to function. */ @@ -470,20 +522,22 @@ static int connect_done_func(void *p) CONNECT_SHARE *ha_connect::get_share() { CONNECT_SHARE *tmp_share; + lock_shared_ha_data(); - if (!(tmp_share= static_cast<CONNECT_SHARE*>(get_ha_share_ptr()))) - { + + if (!(tmp_share= static_cast<CONNECT_SHARE*>(get_ha_share_ptr()))) { tmp_share= new CONNECT_SHARE; if (!tmp_share) goto err; mysql_mutex_init(con_key_mutex_CONNECT_SHARE_mutex, &tmp_share->mutex, MY_MUTEX_INIT_FAST); set_ha_share_ptr(static_cast<Handler_share*>(tmp_share)); - } -err: + } // endif tmp_share + + err: unlock_shared_ha_data(); return tmp_share; -} +} // end of get_share static handler* connect_create_handler(handlerton *hton, @@ -509,20 +563,29 @@ ha_connect::ha_connect(handlerton *hton, TABLE_SHARE *table_arg) xp= (table) ? GetUser(ha_thd(), NULL) : NULL; if (xp) xp->SetHandler(this); +#if defined(WIN32) + datapath= ".\\"; +#else // !WIN32 + datapath= "./"; +#endif // !WIN32 tdbp= NULL; sdvalin= NULL; sdvalout= NULL; xmod= MODE_ANY; istable= false; -//*tname= '\0'; + *partname= 0; bzero((char*) &xinfo, sizeof(XINFO)); valid_info= false; valid_query_id= 0; creat_query_id= (table && table->in_use) ? 
table->in_use->query_id : 0; stop= false; alter= false; + mrr= false; + nox= true; + abort= false; indexing= -1; locked= 0; + part_id= NULL; data_file_name= NULL; index_file_name= NULL; enable_activate_all_index= 0; @@ -640,7 +703,13 @@ TABTYPE ha_connect::GetRealType(PTOS pos) const char *ha_connect::index_type(uint inx) { switch (GetIndexType(GetRealType())) { - case 1: return "XPLUG"; + case 1: + if (table_share) + return (GetIndexOption(&table_share->key_info[inx], "Dynamic")) + ? "KINDEX" : "XINDEX"; + else + return "XINDEX"; + case 2: return "REMOTE"; } // endswitch @@ -696,12 +765,15 @@ ulonglong ha_connect::table_flags() const if (pos) { TABTYPE type= hp->GetRealType(pos); - + + if (IsFileType(type)) + flags|= HA_FILE_BASED; + if (IsExactType(type)) flags|= (HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT); // No data change on ALTER for outward tables - if (!IsFileType(type) || hp->FileExists(pos->filename)) + if (!IsFileType(type) || hp->FileExists(pos->filename, true)) flags|= HA_NO_COPY_ON_ALTER; } // endif pos @@ -710,7 +782,7 @@ ulonglong ha_connect::table_flags() const } // end of table_flags /****************************************************************************/ -/* Return the value of an option specified in the option list. */ +/* Return the value of an option specified in an option list. */ /****************************************************************************/ char *GetListOption(PGLOBAL g, const char *opname, const char *oplist, const char *def) @@ -771,6 +843,22 @@ PTOS ha_connect::GetTableOptionStruct(TABLE_SHARE *s) } // end of GetTableOptionStruct /****************************************************************************/ +/* Return the string eventually formatted with partition name. */ +/****************************************************************************/ +char *ha_connect::GetRealString(const char *s) +{ + char *sv; + + if (IsPartitioned() && s) { + sv= (char*)PlugSubAlloc(xp->g, NULL, strlen(s) + strlen(partname)); + sprintf(sv, s, partname); + } else + sv= (char*)s; + + return sv; +} // end of GetRealString + +/****************************************************************************/ /* Return the value of a string option or NULL if not specified. */ /****************************************************************************/ char *ha_connect::GetStringOption(char *opname, char *sdef) @@ -778,16 +866,26 @@ char *ha_connect::GetStringOption(char *opname, char *sdef) char *opval= NULL; PTOS options= GetTableOptionStruct(); - if (!options) + if (!stricmp(opname, "Connect")) { + LEX_STRING cnc= (tshp) ? 
tshp->connect_string : table->s->connect_string; + + if (cnc.length) + opval= GetRealString(cnc.str); + + } else if (!stricmp(opname, "Query_String")) + opval= thd_query_string(table->in_use)->str; + else if (!stricmp(opname, "Partname")) + opval= partname; + else if (!options) ; else if (!stricmp(opname, "Type")) opval= (char*)options->type; else if (!stricmp(opname, "Filename")) - opval= (char*)options->filename; + opval= GetRealString(options->filename); else if (!stricmp(opname, "Optname")) opval= (char*)options->optname; else if (!stricmp(opname, "Tabname")) - opval= (char*)options->tabname; + opval= GetRealString(options->tabname); else if (!stricmp(opname, "Tablist")) opval= (char*)options->tablist; else if (!stricmp(opname, "Database") || @@ -795,8 +893,6 @@ char *ha_connect::GetStringOption(char *opname, char *sdef) opval= (char*)options->dbname; else if (!stricmp(opname, "Separator")) opval= (char*)options->separator; - else if (!stricmp(opname, "Connect")) - opval= (tshp) ? tshp->connect_string.str : table->s->connect_string.str; else if (!stricmp(opname, "Qchar")) opval= (char*)options->qchar; else if (!stricmp(opname, "Module")) @@ -811,8 +907,6 @@ char *ha_connect::GetStringOption(char *opname, char *sdef) opval= (char*)options->colist; else if (!stricmp(opname, "Data_charset")) opval= (char*)options->data_charset; - else if (!stricmp(opname, "Query_String")) - opval= thd_query_string(table->in_use)->str; if (!opval && options && options->oplist) opval= GetListOption(xp->g, opname, options->oplist); @@ -823,7 +917,7 @@ char *ha_connect::GetStringOption(char *opname, char *sdef) if (!stricmp(opname, "Dbname") || !stricmp(opname, "Database")) opval= (char*)GetDBName(NULL); // Current database else if (!stricmp(opname, "Type")) // Default type - opval= (!options) ? NULL : + opval= (!options) ? NULL : (options->srcdef) ? (char*)"MYSQL" : (options->tabname) ? (char*)"PROXY" : (char*)"DOS"; else if (!stricmp(opname, "User")) // Connected user @@ -897,21 +991,21 @@ bool ha_connect::SetBooleanOption(char *opname, bool b) /****************************************************************************/ int ha_connect::GetIntegerOption(char *opname) { - ulonglong opval= NO_IVAL; - char *pv; - PTOS options= GetTableOptionStruct(); + ulonglong opval= NO_IVAL; + char *pv; + PTOS options= GetTableOptionStruct(); + TABLE_SHARE *tsp= (tshp) ? tshp : table_share; - if (!options) + if (!stricmp(opname, "Avglen")) + opval= (ulonglong)tsp->avg_row_length; + else if (!stricmp(opname, "Estimate")) + opval= (ulonglong)tsp->max_rows; + else if (!options) ; else if (!stricmp(opname, "Lrecl")) opval= options->lrecl; else if (!stricmp(opname, "Elements")) opval= options->elements; - else if (!stricmp(opname, "Estimate")) -// opval= options->estimate; - opval= (int)table->s->max_rows; - else if (!stricmp(opname, "Avglen")) - opval= (int)table->s->avg_row_length; else if (!stricmp(opname, "Multiple")) opval= options->multiple; else if (!stricmp(opname, "Header")) @@ -925,7 +1019,7 @@ int ha_connect::GetIntegerOption(char *opname) if (opval == (ulonglong)NO_IVAL && options && options->oplist) if ((pv= GetListOption(xp->g, opname, options->oplist))) - opval= CharToNumber(pv, strlen(pv), ULONGLONG_MAX, true); + opval= CharToNumber(pv, strlen(pv), ULONGLONG_MAX, true); return (int)opval; } // end of GetIntegerOption @@ -1012,6 +1106,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) } // endif special pcf->Scale= 0; + pcf->Opt= (fop) ? 
(int)fop->opt : 0; if ((pcf->Length= fp->field_length) < 0) pcf->Length= 256; // BLOB? @@ -1020,10 +1115,12 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) if (fop) { pcf->Offset= (int)fop->offset; + pcf->Freq= (int)fop->freq; pcf->Datefmt= (char*)fop->dateformat; pcf->Fieldfmt= (char*)fop->fieldformat; } else { pcf->Offset= -1; + pcf->Freq= 0; pcf->Datefmt= NULL; pcf->Fieldfmt= NULL; } // endif fop @@ -1050,6 +1147,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) // Find if collation name ends by _ci if (!strcmp(cp + strlen(cp) - 3, "_ci")) { pcf->Scale= 1; // Case insensitive + pcf->Opt= 0; // Prevent index opt until it is safe } // endif ci break; @@ -1065,7 +1163,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) // Field_length is only used for DATE columns if (fop && fop->fldlen) pcf->Length= (int)fop->fldlen; - else { + else { int len; if (pcf->Datefmt) { @@ -1119,6 +1217,50 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) } // end of GetColumnOption /****************************************************************************/ +/* Return an index option structure. */ +/****************************************************************************/ +PXOS ha_connect::GetIndexOptionStruct(KEY *kp) +{ + return kp->option_struct; +} // end of GetIndexOptionStruct + +/****************************************************************************/ +/* Return a Boolean index option or false if not specified. */ +/****************************************************************************/ +bool ha_connect::GetIndexOption(KEY *kp, char *opname) +{ + bool opval= false; + PXOS options= GetIndexOptionStruct(kp); + + if (options) { + if (!stricmp(opname, "Dynamic")) + opval= options->dynamic; + else if (!stricmp(opname, "Mapped")) + opval= options->mapped; + + } else if (kp->comment.str != NULL) { + char *pv, *oplist= kp->comment.str; + + if ((pv= GetListOption(xp->g, opname, oplist))) + opval= (!*pv || *pv == 'y' || *pv == 'Y' || atoi(pv) != 0); + + } // endif comment + + return opval; +} // end of GetIndexOption + +/****************************************************************************/ +/* Returns the index description structure used to make the index. */ +/****************************************************************************/ +bool ha_connect::IsUnique(uint n) +{ + TABLE_SHARE *s= (table) ? table->s : NULL; + KEY kp= s->key_info[n]; + + return (kp.flags & 1) != 0; +} // end of IsUnique + +/****************************************************************************/ /* Returns the index description structure used to make the index. 
*/ /****************************************************************************/ PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s) @@ -1162,7 +1304,7 @@ PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s) #if 0 // NIY // Index on auto increment column can be an XXROW index - if (kp.key_part[k].field->flags & AUTO_INCREMENT_FLAG && + if (kp.key_part[k].field->flags & AUTO_INCREMENT_FLAG && kp.uder_defined_key_parts == 1) { char *type= GetStringOption("Type", "DOS"); TABTYPE typ= GetTypeID(type); @@ -1180,6 +1322,8 @@ PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s) } // endfor k xdp->SetNParts(kp.user_defined_key_parts); + xdp->Dynamic= GetIndexOption(&kp, "Dynamic"); + xdp->Mapped= GetIndexOption(&kp, "Mapped"); if (pxd) pxd->SetNext(xdp); @@ -1192,6 +1336,17 @@ PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s) return toidx; } // end of GetIndexInfo +bool ha_connect::IsPartitioned(void) +{ + if (tshp) + return tshp->partition_info_str_len > 0; + else if (table && table->part_info) + return true; + else + return false; + +} // end of IsPartitioned + const char *ha_connect::GetDBName(const char* name) { return (name) ? name : table->s->db.str; @@ -1202,6 +1357,11 @@ const char *ha_connect::GetTableName(void) return (tshp) ? tshp->table_name.str : table_share->table_name.str; } // end of GetTableName +char *ha_connect::GetPartName(void) +{ + return (IsPartitioned()) ? partname : (char*)GetTableName(); +} // end of GetTableName + #if 0 /****************************************************************************/ /* Returns the column real or special name length of a field. */ @@ -1247,6 +1407,14 @@ void ha_connect::AddColName(char *cp, Field *fp) } // end of AddColName #endif // 0 +/***********************************************************************/ +/* This function sets the current database path. */ +/***********************************************************************/ +void ha_connect::SetDataPath(PGLOBAL g, const char *path) +{ + datapath= SetPath(g, path); +} // end of SetDataPath + /****************************************************************************/ /* Get the table description block of a CONNECT table. */ /****************************************************************************/ @@ -1270,7 +1438,7 @@ PTDB ha_connect::GetTDB(PGLOBAL g) tp->SetMode(xmod); } else if ((tp= CntGetTDB(g, table_name, xmod, this))) { valid_query_id= xp->last_query_id; - tp->SetMode(xmod); +// tp->SetMode(xmod); } else htrc("GetTDB: %s\n", g->Message); @@ -1349,6 +1517,17 @@ int ha_connect::OpenTable(PGLOBAL g, bool del) for (field= table->field; fp= *field; field++) if (bitmap_is_set(ump, fp->field_index)) { strcpy(p, (char*)fp->field_name); + + if (part_id && bitmap_is_set(part_id, fp->field_index)) { + // Trying to update a column used for partitioning + // This cannot be currently done because it may require + // a row to be moved in another partition. + sprintf(g->Message, + "Cannot update column %s because it is used for partitioning", + p); + return HA_ERR_INTERNAL_ERROR; + } // endif part_id + p+= (strlen(p) + 1); } // endif used field @@ -1381,6 +1560,50 @@ int ha_connect::OpenTable(PGLOBAL g, bool del) /****************************************************************************/ +/* CheckColumnList: check that all bitmap columns do exist. 
*/ +/****************************************************************************/ +bool ha_connect::CheckColumnList(PGLOBAL g) +{ + // Check the list of used fields (columns) + int rc; + bool brc= false; + PCOL colp; + Field* *field; + Field* fp; + MY_BITMAP *map= table->read_set; + + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return true; + } // endif jump_level + + if ((rc= setjmp(g->jumper[++g->jump_level])) == 0) { + for (field= table->field; fp= *field; field++) + if (bitmap_is_set(map, fp->field_index)) { + if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name, 0))) { + sprintf(g->Message, "Column %s not found in %s", + fp->field_name, tdbp->GetName()); + brc= true; + goto fin; + } // endif colp + + if ((brc= colp->InitValue(g))) + goto fin; + + colp->AddColUse(U_P); // For PLG tables + } // endif + + } else + brc= true; + + fin: + g->jump_level--; + return brc; +} // end of CheckColumnList + + +/****************************************************************************/ /* IsOpened: returns true if the table is already opened. */ /****************************************************************************/ bool ha_connect::IsOpened(void) @@ -1395,12 +1618,14 @@ bool ha_connect::IsOpened(void) /****************************************************************************/ int ha_connect::CloseTable(PGLOBAL g) { - int rc= CntCloseTable(g, tdbp); + int rc= CntCloseTable(g, tdbp, nox, abort); tdbp= NULL; sdvalin=NULL; sdvalout=NULL; valid_info= false; indexing= -1; + nox= true; + abort= false; return rc; } // end of CloseTable @@ -1447,10 +1672,14 @@ int ha_connect::MakeRecord(char *buf) if (bitmap_is_set(map, fp->field_index) || alter) { // This is a used field, fill the buffer with value for (colp= tdbp->GetColumns(); colp; colp= colp->GetNext()) - if (!stricmp(colp->GetName(), (char*)fp->field_name)) + if ((!mrr || colp->GetKcol()) && + !stricmp(colp->GetName(), (char*)fp->field_name)) break; if (!colp) { + if (mrr) + continue; + htrc("Column %s not found\n", fp->field_name); dbug_tmp_restore_column_map(table->write_set, org_bitmap); DBUG_RETURN(HA_ERR_WRONG_IN_RECORD); @@ -1465,7 +1694,7 @@ int ha_connect::MakeRecord(char *buf) case TYPE_DATE: if (!sdvalout) sdvalout= AllocateValue(xp->g, TYPE_STRING, 20); - + switch (fp->type()) { case MYSQL_TYPE_DATE: fmt= "%Y-%m-%d"; @@ -1480,7 +1709,7 @@ int ha_connect::MakeRecord(char *buf) fmt= "%Y-%m-%d %H:%M:%S"; break; } // endswitch type - + // Get date in the format required by MySQL fields value->FormatValue(sdvalout, fmt); p= sdvalout->GetCharValue(); @@ -1502,10 +1731,10 @@ int ha_connect::MakeRecord(char *buf) // Store functions returns 1 on overflow and -1 on fatal error if (rc > 0) { - char buf[128]; + char buf[256]; THD *thd= ha_thd(); - sprintf(buf, "Out of range value %s for column '%s' at row %ld", + sprintf(buf, "Out of range value %.140s for column '%s' at row %ld", value->GetCharString(val), fp->field_name, thd->get_stmt_da()->current_row_for_warning()); @@ -1524,6 +1753,11 @@ int ha_connect::MakeRecord(char *buf) } // endfor field + // This is sometimes required for partition tables because the buf + // can be different from the table->record[0] buffer + if (buf != (char*)table->record[0]) + memcpy(buf, table->record[0], table->s->stored_rec_length); + // This is copied from ha_tina and is necessary to avoid asserts dbug_tmp_restore_column_map(table->write_set, org_bitmap); DBUG_RETURN(rc); @@ -1674,7 +1908,11 @@ bool 
ha_connect::MakeKeyWhere(PGLOBAL g, char *qry, OPVAL op, char *q, KEY_PART_INFO *kpart; if (active_index == MAX_KEY) - return 0; + return false; + else if (!key) { + strcpy(g->Message, "MakeKeyWhere: No key"); + return true; + } // endif key strcat(qry, " WHERE ("); kfp= &table->key_info[active_index]; @@ -1815,6 +2053,196 @@ const char *ha_connect::GetValStr(OPVAL vop, bool neg) return val; } // end of GetValStr +#if 0 +/***********************************************************************/ +/* Check the WHERE condition and return a CONNECT filter. */ +/***********************************************************************/ +PFIL ha_connect::CheckFilter(PGLOBAL g) +{ + return CondFilter(g, (Item *)pushed_cond); +} // end of CheckFilter +#endif // 0 + +/***********************************************************************/ +/* Check the WHERE condition and return a CONNECT filter. */ +/***********************************************************************/ +PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) +{ + unsigned int i; + bool ismul= false; + OPVAL vop= OP_XX; + PFIL filp= NULL; + + if (!cond) + return NULL; + + if (xtrace) + htrc("Cond type=%d\n", cond->type()); + + if (cond->type() == COND::COND_ITEM) { + PFIL fp; + Item_cond *cond_item= (Item_cond *)cond; + + if (xtrace) + htrc("Cond: Ftype=%d name=%s\n", cond_item->functype(), + cond_item->func_name()); + + switch (cond_item->functype()) { + case Item_func::COND_AND_FUNC: vop= OP_AND; break; + case Item_func::COND_OR_FUNC: vop= OP_OR; break; + default: return NULL; + } // endswitch functype + + List<Item>* arglist= cond_item->argument_list(); + List_iterator<Item> li(*arglist); + Item *subitem; + + for (i= 0; i < arglist->elements; i++) + if ((subitem= li++)) { + if (!(fp= CondFilter(g, subitem))) { + if (vop == OP_OR) + return NULL; + } else + filp= (filp) ? 
MakeFilter(g, filp, vop, fp) : fp; + + } else + return NULL; + + } else if (cond->type() == COND::FUNC_ITEM) { + unsigned int i; + bool iscol, neg= FALSE; + PCOL colp[2]= {NULL,NULL}; + PPARM pfirst= NULL, pprec= NULL; + POPER pop; + Item_func *condf= (Item_func *)cond; + Item* *args= condf->arguments(); + + if (xtrace) + htrc("Func type=%d argnum=%d\n", condf->functype(), + condf->argument_count()); + + switch (condf->functype()) { + case Item_func::EQUAL_FUNC: + case Item_func::EQ_FUNC: vop= OP_EQ; break; + case Item_func::NE_FUNC: vop= OP_NE; break; + case Item_func::LT_FUNC: vop= OP_LT; break; + case Item_func::LE_FUNC: vop= OP_LE; break; + case Item_func::GE_FUNC: vop= OP_GE; break; + case Item_func::GT_FUNC: vop= OP_GT; break; + case Item_func::IN_FUNC: vop= OP_IN; + case Item_func::BETWEEN: + ismul= true; + neg= ((Item_func_opt_neg *)condf)->negated; + break; + default: return NULL; + } // endswitch functype + + pop= (POPER)PlugSubAlloc(g, NULL, sizeof(OPER)); + pop->Name= NULL; + pop->Val=vop; + pop->Mod= 0; + + if (condf->argument_count() < 2) + return NULL; + + for (i= 0; i < condf->argument_count(); i++) { + if (xtrace) + htrc("Argtype(%d)=%d\n", i, args[i]->type()); + + if (i >= 2 && !ismul) { + if (xtrace) + htrc("Unexpected arg for vop=%d\n", vop); + + continue; + } // endif i + + if ((iscol= args[i]->type() == COND::FIELD_ITEM)) { + Item_field *pField= (Item_field *)args[i]; + + // IN and BETWEEN clauses should be col VOP list + if (i && ismul) + return NULL; + + if (pField->field->table != table || + !(colp[i]= tdbp->ColDB(g, (PSZ)pField->field->field_name, 0))) + return NULL; // Column does not belong to this table + + if (xtrace) { + htrc("Field index=%d\n", pField->field->field_index); + htrc("Field name=%s\n", pField->field->field_name); + } // endif xtrace + + } else { + char buff[256]; + String *res, tmp(buff, sizeof(buff), &my_charset_bin); + Item_basic_constant *pval= (Item_basic_constant *)args[i]; + PPARM pp= (PPARM)PlugSubAlloc(g, NULL, sizeof(PARM)); + + // IN and BETWEEN clauses should be col VOP list + if (!i && (ismul)) + return NULL; + + if ((res= pval->val_str(&tmp)) == NULL) + return NULL; // To be clarified + + switch (args[i]->real_type()) { + case COND::STRING_ITEM: + pp->Type= TYPE_STRING; + pp->Value= PlugSubAlloc(g, NULL, res->length() + 1); + strncpy((char*)pp->Value, res->ptr(), res->length() + 1); + break; + case COND::INT_ITEM: + pp->Type= TYPE_INT; + pp->Value= PlugSubAlloc(g, NULL, sizeof(int)); + *((int*)pp->Value)= (int)pval->val_int(); + break; + case COND::DATE_ITEM: + pp->Type= TYPE_DATE; + pp->Value= PlugSubAlloc(g, NULL, sizeof(int)); + *((int*)pp->Value)= (int)pval->val_int_from_date(); + break; + case COND::REAL_ITEM: + pp->Type= TYPE_DOUBLE; + pp->Value= PlugSubAlloc(g, NULL, sizeof(double)); + *((double*)pp->Value)= pval->val_real(); + break; + case COND::DECIMAL_ITEM: + pp->Type= TYPE_DOUBLE; + pp->Value= PlugSubAlloc(g, NULL, sizeof(double)); + *((double*)pp->Value)= pval->val_real_from_decimal(); + break; + case COND::CACHE_ITEM: // Possible ??? 
+ case COND::NULL_ITEM: // TODO: handle this + default: + return NULL; + } // endswitch type + + if (xtrace) + htrc("Value=%.*s\n", res->length(), res->ptr()); + + // Append the value to the argument list + if (pprec) + pprec->Next= pp; + else + pfirst= pp; + + pp->Domain= i; + pp->Next= NULL; + pprec= pp; + } // endif type + + } // endfor i + + filp= MakeFilter(g, colp, pop, pfirst, neg); + } else { + if (xtrace) + htrc("Unsupported condition\n"); + + return NULL; + } // endif's type + + return filp; +} // end of CondFilter /***********************************************************************/ /* Check the WHERE condition and return a MYSQL/ODBC/WQL filter. */ @@ -1901,7 +2329,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, AMT tty, Item *cond) case Item_func::GE_FUNC: vop= OP_GE; break; case Item_func::GT_FUNC: vop= OP_GT; break; case Item_func::IN_FUNC: vop= OP_IN; - case Item_func::BETWEEN: + case Item_func::BETWEEN: ismul= true; neg= ((Item_func_opt_neg *)condf)->negated; break; @@ -2071,35 +2499,36 @@ const COND *ha_connect::cond_push(const COND *cond) DBUG_ENTER("ha_connect::cond_push"); if (tdbp) { - AMT tty= tdbp->GetAmType(); - bool x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC); - bool b= (tty == TYPE_AM_WMI || tty == TYPE_AM_ODBC || - tty == TYPE_AM_TBL || tty == TYPE_AM_MYSQL || - tty == TYPE_AM_PLG || x); + PGLOBAL& g= xp->g; + AMT tty= tdbp->GetAmType(); + bool x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC); + bool b= (tty == TYPE_AM_WMI || tty == TYPE_AM_ODBC || + tty == TYPE_AM_TBL || tty == TYPE_AM_MYSQL || + tty == TYPE_AM_PLG || x); if (b) { - PGLOBAL& g= xp->g; PCFIL filp= (PCFIL)PlugSubAlloc(g, NULL, sizeof(CONDFIL)); filp->Body= (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0); *filp->Body= 0; filp->Op= OP_XX; filp->Cmds= NULL; - + if (CheckCond(g, filp, tty, (Item *)cond)) { if (xtrace) htrc("cond_push: %s\n", filp->Body); - + if (!x) PlugSubAlloc(g, NULL, strlen(filp->Body) + 1); else cond= NULL; // Does this work? - + tdbp->SetCondFil(filp); } else if (x && cond) tdbp->SetCondFil(filp); // Wrong filter - } // endif b + } else + tdbp->SetFilter(CondFilter(g, (Item *)cond)); } // endif tdbp @@ -2116,7 +2545,7 @@ ha_rows ha_connect::records() if (!valid_info) info(HA_STATUS_VARIABLE); - if (tdbp && tdbp->Cardinality(NULL)) + if (tdbp) return stats.records; else return HA_POS_ERROR; @@ -2158,6 +2587,21 @@ bool ha_connect::get_error_message(int error, String* buf) DBUG_RETURN(false); } // end of get_error_message +/** + Convert a filename partition name to system +*/ +static char *decode(PGLOBAL g, const char *pn) + { + char *buf= (char*)PlugSubAlloc(g, NULL, strlen(pn) + 1); + uint dummy_errors; + uint32 len= copy_and_convert(buf, strlen(pn) + 1, + system_charset_info, + pn, strlen(pn), + &my_charset_filename, + &dummy_errors); + buf[len]= '\0'; + return buf; + } // end of decode /** @brief @@ -2197,9 +2641,29 @@ int ha_connect::open(const char *name, int mode, uint test_if_locked) PGLOBAL g= (xp) ? xp->g : NULL; // Try to set the database environment - if (g) + if (g) { rc= (CntCheckDB(g, this, name)) ? 
(-2) : 0; - else + + if (g->Mrr) { + // This should only happen for the mrr secondary handler + mrr= true; + g->Mrr= false; + } else + mrr= false; + +#if defined(WITH_PARTITION_STORAGE_ENGINE) + if (table->part_info) { + if (GetStringOption("Filename") || GetStringOption("Tabname") + || GetStringOption("Connect")) { + strcpy(partname, decode(g, strrchr(name, '#') + 1)); +// strcpy(partname, table->part_info->curr_part_elem->partition_name); + part_id= &table->part_info->full_part_field_set; + } else // Inward table + strcpy(partname, strrchr(name, slash) + 1); + part_id= &table->part_info->full_part_field_set; // Temporary + } // endif part_info +#endif // WITH_PARTITION_STORAGE_ENGINE + } else rc= HA_ERR_INTERNAL_ERROR; DBUG_RETURN(rc); @@ -2221,20 +2685,17 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT* check_opt) dup->Check |= CHK_OPT; if (tdbp) { - if (((PTDBASE)tdbp)->GetDef()->Indexable() == 2) { - // Nothing to do for remote index - } else if (!((PTDBASE)tdbp)->GetDef()->Indexable()) { - sprintf(g->Message, "optimize: Table %s is not indexable", tdbp->GetName()); - my_message(ER_INDEX_REBUILD, g->Message, MYF(0)); - rc= HA_ERR_UNSUPPORTED; - } else if ((rc= ((PTDBASE)tdbp)->ResetTableOpt(g, true))) { + bool dop= IsTypeIndexable(GetRealType(NULL)); + bool dox= (((PTDBASE)tdbp)->GetDef()->Indexable() == 1); + + if ((rc= ((PTDBASE)tdbp)->ResetTableOpt(g, dop, dox))) { if (rc == RC_INFO) { push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); rc= 0; } else rc= HA_ERR_INTERNAL_ERROR; - } // endif's + } // endif rc } else rc= HA_ERR_INTERNAL_ERROR; @@ -2306,8 +2767,14 @@ int ha_connect::write_row(uchar *buf) DBUG_ENTER("ha_connect::write_row"); // This is not tested yet - if (xmod == MODE_ALTER) + if (xmod == MODE_ALTER) { + if (IsPartitioned() && GetStringOption("Filename", NULL)) + // Why does this happen now that check_if_supported_inplace_alter is called? 
+ DBUG_RETURN(0); // Alter table on an outward partition table + xmod= MODE_INSERT; + } else if (xmod == MODE_ANY) + DBUG_RETURN(0); // Probably never met // Open the table if it was not opened yet (locked) if (!IsOpened() || xmod != tdbp->GetMode()) { @@ -2319,9 +2786,6 @@ int ha_connect::write_row(uchar *buf) } // endif isopened - if (tdbp->GetMode() == MODE_ANY) - DBUG_RETURN(0); - #if 0 // AUTO_INCREMENT NIY if (table->next_number_field && buf == table->record[0]) { int error; @@ -2341,7 +2805,8 @@ int ha_connect::write_row(uchar *buf) DBUG_PRINT("write_row", ("%s", g->Message)); htrc("write_row: %s\n", g->Message); rc= HA_ERR_INTERNAL_ERROR; - } // endif RC + } else // Table is modified + nox= false; // Indexes to be remade DBUG_RETURN(rc); } // end of write_row @@ -2380,13 +2845,14 @@ int ha_connect::update_row(const uchar *old_data, uchar *new_data) // Check values for possible change in indexed column if ((rc= CheckRecord(g, old_data, new_data))) - return rc; + DBUG_RETURN(rc); if (CntUpdateRow(g, tdbp)) { DBUG_PRINT("update_row", ("%s", g->Message)); htrc("update_row CONNECT: %s\n", g->Message); rc= HA_ERR_INTERNAL_ERROR; - } // endif RC + } else + nox= false; // Table is modified DBUG_RETURN(rc); } // end of update_row @@ -2419,7 +2885,8 @@ int ha_connect::delete_row(const uchar *buf) if (CntDeleteRow(xp->g, tdbp, false)) { rc= HA_ERR_INTERNAL_ERROR; htrc("delete_row CONNECT: %s\n", xp->g->Message); - } // endif DeleteRow + } else + nox= false; // To remake indexes DBUG_RETURN(rc); } // end of delete_row @@ -2438,12 +2905,19 @@ int ha_connect::index_init(uint idx, bool sorted) htrc("index_init: this=%p idx=%u sorted=%d\n", this, idx, sorted); if (GetIndexType(GetRealType()) == 2) { - // This is a remote index - xmod= MODE_READX; + if (xmod == MODE_READ) + // This is a remote index + xmod= MODE_READX; if (!(rc= rnd_init(0))) { - active_index= idx; - indexing= 2; // TO DO: mul? +// if (xmod == MODE_READX) { + active_index= idx; + indexing= IsUnique(idx) ? 
1 : 2; +// } else { +// active_index= MAX_KEY; +// indexing= 0; +// } // endif xmod + } //endif rc DBUG_RETURN(rc); @@ -2459,27 +2933,29 @@ int ha_connect::index_init(uint idx, bool sorted) DBUG_RETURN(0); } // endif locked - indexing= CntIndexInit(g, tdbp, (signed)idx); + indexing= CntIndexInit(g, tdbp, (signed)idx, sorted); if (indexing <= 0) { DBUG_PRINT("index_init", ("%s", g->Message)); htrc("index_init CONNECT: %s\n", g->Message); active_index= MAX_KEY; rc= HA_ERR_INTERNAL_ERROR; - } else { + } else if (((PTDBDOX)tdbp)->To_Kindex) { if (((PTDBDOX)tdbp)->To_Kindex->GetNum_K()) { if (((PTDBASE)tdbp)->GetFtype() != RECFM_NAF) ((PTDBDOX)tdbp)->GetTxfp()->ResetBuffer(g); active_index= idx; - } else // Void table - indexing= 0; +// } else { // Void table +// active_index= MAX_KEY; +// indexing= 0; + } // endif Num rc= 0; } // endif indexing if (xtrace) - htrc("index_init: rc=%d indexing=%d active_index=%d\n", + htrc("index_init: rc=%d indexing=%d active_index=%d\n", rc, indexing, active_index); DBUG_RETURN(rc); @@ -2492,6 +2968,7 @@ int ha_connect::index_end() { DBUG_ENTER("index_end"); active_index= MAX_KEY; + ds_mrr.dsmrr_close(); DBUG_RETURN(rnd_end()); } // end of index_end @@ -2505,7 +2982,7 @@ int ha_connect::ReadIndexed(uchar *buf, OPVAL op, const uchar *key, uint key_len //statistic_increment(ha_read_key_count, &LOCK_status); - switch (CntIndexRead(xp->g, tdbp, op, key, (int)key_len)) { + switch (CntIndexRead(xp->g, tdbp, op, key, (int)key_len, mrr)) { case RC_OK: xp->fnd++; rc= MakeRecord((char*)buf); @@ -2563,16 +3040,22 @@ int ha_connect::index_read(uchar * buf, const uchar * key, uint key_len, case HA_READ_KEY_EXACT: op= OP_EQ; break; case HA_READ_AFTER_KEY: op= OP_GT; break; case HA_READ_KEY_OR_NEXT: op= OP_GE; break; - default: DBUG_RETURN(-1); break; + default: DBUG_RETURN(-1); break; } // endswitch find_flag if (xtrace > 1) htrc("%p index_read: op=%d\n", this, op); - if (indexing > 0) + if (indexing > 0) { rc= ReadIndexed(buf, op, key, key_len); - else - rc= HA_ERR_INTERNAL_ERROR; + + if (rc == HA_ERR_INTERNAL_ERROR) { + nox= true; // To block making indexes + abort= true; // Don't rename temp file + } // endif rc + + } else + rc= HA_ERR_INTERNAL_ERROR; // HA_ERR_KEY_NOT_FOUND ? DBUG_RETURN(rc); } // end of index_read @@ -2666,7 +3149,7 @@ int ha_connect::index_last(uchar *buf) rc= ReadIndexed(buf, OP_LAST); DBUG_RETURN(rc); -} // end of index_last +} /****************************************************************************/ @@ -2718,7 +3201,7 @@ int ha_connect::rnd_init(bool scan) } // endif xmod if (xtrace) - htrc("rnd_init: this=%p scan=%d xmod=%d alter=%d\n", + htrc("rnd_init: this=%p scan=%d xmod=%d alter=%d\n", this, scan, xmod, alter); if (!g || !table || xmod == MODE_INSERT) @@ -2726,7 +3209,11 @@ int ha_connect::rnd_init(bool scan) // Do not close the table if it was opened yet (locked?) if (IsOpened()) { - if (tdbp->OpenDB(g)) // Rewind table + if (IsPartitioned() && xmod != MODE_INSERT) + if (CheckColumnList(g)) // map can have been changed + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + + if (tdbp->OpenDB(g)) // Rewind table DBUG_RETURN(HA_ERR_INTERNAL_ERROR); else DBUG_RETURN(0); @@ -2735,7 +3222,7 @@ int ha_connect::rnd_init(bool scan) tdbp= NULL; // Not valid anymore // When updating, to avoid skipped update, force the table - // handler to retrieve write-only fields to be able to compare + // handler to retrieve write-only fields to be able to compare // records and detect data change. 
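The hunk just below unions the write set into the read set when the coming scan is for an UPDATE, so that columns which are only written are still fetched and the old and new row images can be compared. A minimal standalone sketch of that idea, using std::bitset in place of the server's MY_BITMAP (the column count and bit values are illustrative only, not taken from the engine):

#include <bitset>
#include <cstdio>

int main()
{
  // Stand-ins for the handler's column bitmaps: bit i is set when
  // column i is read (read_set) or assigned by the UPDATE (write_set).
  std::bitset<8> read_set("00000101");   // columns 0 and 2 are read
  std::bitset<8> write_set("00000110");  // columns 1 and 2 are written

  // Equivalent in spirit to bitmap_union(table->read_set, table->write_set):
  // every updated column is also retrieved, so a skipped update can be
  // detected by comparing the stored value with the new one.
  read_set |= write_set;

  std::printf("read_set after union: %s\n", read_set.to_string().c_str());
  return 0;
}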
if (xmod == MODE_UPDATE) bitmap_union(table->read_set, table->write_set); @@ -2768,6 +3255,7 @@ int ha_connect::rnd_end() // if (tdbp && xp->last_query_id == valid_query_id) // rc= CloseTable(xp->g); + ds_mrr.dsmrr_close(); DBUG_RETURN(rc); } // end of rnd_end @@ -2861,6 +3349,10 @@ void ha_connect::position(const uchar *record) DBUG_ENTER("ha_connect::position"); //if (((PTDBASE)tdbp)->GetDef()->Indexable()) my_store_ptr(ref, ref_length, (my_off_t)((PTDBASE)tdbp)->GetRecpos()); + + if (trace) + htrc("position: pos=%d\n", ((PTDBASE)tdbp)->GetRecpos()); + DBUG_VOID_RETURN; } // end of position @@ -2887,9 +3379,13 @@ int ha_connect::rnd_pos(uchar *buf, uchar *pos) PTDBASE tp= (PTDBASE)tdbp; DBUG_ENTER("ha_connect::rnd_pos"); - if (!tp->SetRecpos(xp->g, (int)my_get_ptr(pos, ref_length))) + if (!tp->SetRecpos(xp->g, (int)my_get_ptr(pos, ref_length))) { + if (trace) + htrc("rnd_pos: %d\n", tp->GetRecpos()); + + tp->SetFilter(NULL); rc= rnd_next(buf); - else + } else rc= HA_ERR_KEY_NOT_FOUND; DBUG_RETURN(rc); @@ -2944,27 +3440,32 @@ int ha_connect::info(uint flag) if (xtrace) htrc("%p In info: flag=%u valid_info=%d\n", this, flag, valid_info); - if (!valid_info) { - // tdbp must be available to get updated info - if (xp->CheckQuery(valid_query_id) || !tdbp) { - PDBUSER dup= PlgGetUser(g); - PCATLG cat= (dup) ? dup->Catalog : NULL; - - if (xmod == MODE_ANY || xmod == MODE_ALTER) { - // Pure info, not a query - pure= true; - xp->CheckCleanup(); - } // endif xmod - - // This is necessary for getting file length - if (cat && table) - cat->SetDataPath(g, table->s->db.str); - else - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen + // tdbp must be available to get updated info + if (xp->CheckQuery(valid_query_id) || !tdbp) { + PDBUSER dup= PlgGetUser(g); + PCATLG cat= (dup) ? 
dup->Catalog : NULL; - tdbp= GetTDB(g); - } // endif tdbp + if (xmod == MODE_ANY || xmod == MODE_ALTER) { + // Pure info, not a query + pure= true; + xp->CheckCleanup(); + } // endif xmod + // This is necessary for getting file length +// if (cat && table) +// cat->SetDataPath(g, table->s->db.str); + if (table) + SetDataPath(g, table->s->db.str); + else + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen + + if (!(tdbp= GetTDB(g))) + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen + + valid_info = false; + } // endif tdbp + + if (!valid_info) { valid_info= CntInfo(g, tdbp, &xinfo); if (((signed)xinfo.records) < 0) @@ -3070,7 +3571,8 @@ int ha_connect::delete_all_rows() if (CntDeleteRow(g, tdbp, true)) { htrc("%s\n", g->Message); rc= HA_ERR_INTERNAL_ERROR; - } // endif + } else + nox= false; } // endif rc @@ -3106,15 +3608,15 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn) if (options->filename && *options->filename) { char *s, path[FN_REFLEN], dbpath[FN_REFLEN]; #if defined(WIN32) - s= "\\"; + s= "\\"; #else // !WIN32 - s= "/"; + s= "/"; #endif // !WIN32 strcpy(dbpath, mysql_real_data_home); - + if (db) strcat(strcat(dbpath, db), s); - + (void) fn_format(path, options->filename, dbpath, "", MY_RELATIVE_PATH | MY_UNPACK_FILENAME); @@ -3148,7 +3650,7 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn) return true; } // end of check_privileges -// Check that two indexes are equivalent +// Check that two indexes are equivalent bool ha_connect::IsSameIndex(PIXDEF xp1, PIXDEF xp2) { bool b= true; @@ -3173,7 +3675,7 @@ bool ha_connect::IsSameIndex(PIXDEF xp1, PIXDEF xp2) return b; } // end of IsSameIndex -MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, +MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, MODE newmode, bool *chk, bool *cras) { if ((trace= xtrace)) { @@ -3216,11 +3718,6 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, case SQLCOM_RENAME_TABLE: newmode= MODE_ANY; break; - case SQLCOM_DROP_INDEX: - case SQLCOM_CREATE_INDEX: - newmode= MODE_ANY; -// stop= true; - break; case SQLCOM_CREATE_VIEW: case SQLCOM_DROP_VIEW: newmode= MODE_ANY; @@ -3228,6 +3725,13 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, case SQLCOM_ALTER_TABLE: newmode= MODE_ALTER; break; + case SQLCOM_DROP_INDEX: + case SQLCOM_CREATE_INDEX: +// if (!IsPartitioned()) { + newmode= MODE_ANY; + break; +// } // endif partitioned + default: htrc("Unsupported sql_command=%d\n", thd_sql_command(thd)); strcpy(g->Message, "CONNECT Unsupported command"); @@ -3257,10 +3761,6 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, case SQLCOM_LOCK_TABLES: locked= 1; break; - case SQLCOM_DROP_INDEX: - case SQLCOM_CREATE_INDEX: - *chk= true; -// stop= true; case SQLCOM_DROP_TABLE: case SQLCOM_RENAME_TABLE: newmode= MODE_ANY; @@ -3273,6 +3773,14 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, *chk= true; newmode= MODE_ALTER; break; + case SQLCOM_DROP_INDEX: + case SQLCOM_CREATE_INDEX: +// if (!IsPartitioned()) { + *chk= true; + newmode= MODE_ANY; + break; +// } // endif partitioned + default: htrc("Unsupported sql_command=%d\n", thd_sql_command(thd)); strcpy(g->Message, "CONNECT Unsupported command"); @@ -3361,7 +3869,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) if (xtrace) htrc("external_lock: this=%p thd=%p xp=%p g=%p lock_type=%d\n", this, thd, xp, g, lock_type); - + if (!g) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); @@ -3389,7 +3897,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) sprintf(g->Message, "external_lock: unexpected command %d", sqlcom); 
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); DBUG_RETURN(0); - } else if (g->Xchk) { + } else if (g->Xchk) { if (!tdbp) { if (!(tdbp= GetTDB(g))) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); @@ -3493,6 +4001,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) } // endif Close locked= 0; + xmod= MODE_ANY; // For info commands DBUG_RETURN(rc); } // endif MODE_ANY @@ -3526,7 +4035,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) } // endif Xchk } else - g->Xchk= NULL; + g->Xchk= NULL; #endif // 0 if (cras) @@ -3650,11 +4159,6 @@ filename_to_dbname_and_tablename(const char *filename, char *database, size_t database_size, char *table, size_t table_size) { -#if defined(WIN32) - char slash= '\\'; -#else // !WIN32 - char slash= '/'; -#endif // !WIN32 LEX_CSTRING d, t; size_t length= strlen(filename); @@ -3679,10 +4183,10 @@ filename_to_dbname_and_tablename(const char *filename, /** @brief Used to delete or rename a table. By the time delete_table() has been - called all opened references to this table will have been closed + called all opened references to this table will have been closed (and your globally shared references released) ===> too bad!!! The variable name will just be the name of the table. - You will need to remove or rename any files you have created at + You will need to remove or rename any files you have created at this point. @details @@ -3708,7 +4212,7 @@ int ha_connect::delete_or_rename_table(const char *name, const char *to) if (xtrace) { if (to) - htrc("rename_table: this=%p thd=%p sqlcom=%d from=%s to=%s\n", + htrc("rename_table: this=%p thd=%p sqlcom=%d from=%s to=%s\n", this, thd, sqlcom, name, to); else htrc("delete_table: this=%p thd=%p sqlcom=%d name=%s\n", @@ -3729,29 +4233,27 @@ int ha_connect::delete_or_rename_table(const char *name, const char *to) // If a temporary file exists, all the tests below were passed // successfully when making it, so they are not needed anymore // in particular because they sometimes cause DBUG_ASSERT crash. - if (*tabname != '#') { + // Also, for partitioned tables, no test can be done because when + // this function is called, the .par file is already deleted and + // this causes the open_table_def function to fail. + // Not having any other clues (table and table_share are NULL) + // the only mean we have to test for partitioning is this: + if (*tabname != '#' && !strstr(tabname, "#P#")) { // We have to retrieve the information about this table options. ha_table_option_struct *pos; char key[MAX_DBKEY_LENGTH]; uint key_length; TABLE_SHARE *share; +// if ((p= strstr(tabname, "#P#"))) won't work, see above +// *p= 0; // Get the main the table name + key_length= tdc_create_key(key, db, tabname); // share contains the option struct that we need if (!(share= alloc_table_share(db, tabname, key, key_length))) DBUG_RETURN(rc); -#if 0 - if (*tabname == '#') { - // These are in ???? 
charset after renaming - char *p= strchr(share->path.str, '@'); - strcpy(p, share->table_name.str); - share->path.length= strlen(share->path.str); - share->normalized_path.length= share->path.length; - } // endif tabname -#endif // 0 - // Get the share info from the .frm file if (!open_table_def(thd, share)) { // Now we can work @@ -3761,7 +4263,7 @@ int ha_connect::delete_or_rename_table(const char *name, const char *to) else if (IsFileType(GetRealType(pos)) && !pos->filename) ok= true; - + } // endif pos } else // Avoid infamous DBUG_ASSERT @@ -3840,10 +4342,10 @@ ha_rows ha_connect::records_in_range(uint inx, key_range *min_key, else rows= (ha_rows)nval; - } else if (indexing < 0) - rows= HA_POS_ERROR; - else + } else if (indexing == 0) rows= 100000000; // Don't use missing index + else + rows= HA_POS_ERROR; DBUG_RETURN(rows); } // end of records_in_range @@ -3851,7 +4353,7 @@ ha_rows ha_connect::records_in_range(uint inx, key_range *min_key, /** Convert an ISO-8859-1 column name to UTF-8 */ -static char *encode(PGLOBAL g, char *cnm) +static char *encode(PGLOBAL g, const char *cnm) { char *buf= (char*)PlugSubAlloc(g, NULL, strlen(cnm) * 3); uint dummy_errors; @@ -3862,7 +4364,7 @@ static char *encode(PGLOBAL g, char *cnm) &dummy_errors); buf[len]= '\0'; return buf; - } // end of Encode + } // end of encode /** Store field definition for create. @@ -3870,6 +4372,83 @@ static char *encode(PGLOBAL g, char *cnm) @return Return 0 if ok */ +#if defined(NEW_WAY) +static bool add_fields(PGLOBAL g, + THD *thd, + Alter_info *alter_info, + char *name, + int typ, int len, int dec, + uint type_modifier, + char *rem, +// CHARSET_INFO *cs, +// void *vcolinfo, +// engine_option_value *create_options, + int flg, + bool dbf, + char v) +{ + register Create_field *new_field; + char *length, *decimals= NULL; + enum_field_types type; +//Virtual_column_info *vcol_info= (Virtual_column_info *)vcolinfo; + engine_option_value *crop; + LEX_STRING *comment; + LEX_STRING *field_name; + + DBUG_ENTER("ha_connect::add_fields"); + + if (len) { + if (!v && typ == TYPE_STRING && len > 255) + v= 'V'; // Change CHAR to VARCHAR + + length= (char*)PlugSubAlloc(g, NULL, 8); + sprintf(length, "%d", len); + + if (typ == TYPE_DOUBLE) { + decimals= (char*)PlugSubAlloc(g, NULL, 8); + sprintf(decimals, "%d", min(dec, (min(len, 31) - 1))); + } // endif dec + + } else + length= NULL; + + if (!rem) + rem= ""; + + type= PLGtoMYSQL(typ, dbf, v); + comment= thd->make_lex_string(rem, strlen(rem)); + field_name= thd->make_lex_string(name, strlen(name)); + + switch (v) { + case 'Z': type_modifier|= ZEROFILL_FLAG; + case 'U': type_modifier|= UNSIGNED_FLAG; break; + } // endswitch v + + if (flg) { + engine_option_value *start= NULL, *end= NULL; + LEX_STRING *flag= thd->make_lex_string("flag", 4); + + crop= new(thd->mem_root) engine_option_value(*flag, (ulonglong)flg, + &start, &end, thd->mem_root); + } else + crop= NULL; + + if (check_string_char_length(field_name, "", NAME_CHAR_LEN, + system_charset_info, 1)) { + my_error(ER_TOO_LONG_IDENT, MYF(0), field_name->str); /* purecov: inspected */ + DBUG_RETURN(1); /* purecov: inspected */ + } // endif field_name + + if (!(new_field= new Create_field()) || + new_field->init(thd, field_name->str, type, length, decimals, + type_modifier, NULL, NULL, comment, NULL, + NULL, NULL, 0, NULL, crop, true)) + DBUG_RETURN(1); + + alter_info->create_list.push_back(new_field); + DBUG_RETURN(0); +} // end of add_fields +#else // !NEW_WAY static bool add_field(String *sql, const char *field_name, int typ, int 
len, int dec, uint tm, const char *rem, char *dft, char *xtra, int flag, bool dbf, char v) @@ -3899,7 +4478,7 @@ static bool add_field(String *sql, const char *field_name, int typ, error|= sql->append(')'); } // endif len - + if (v == 'U') error|= sql->append(" UNSIGNED"); else if (v == 'Z') @@ -3939,6 +4518,7 @@ static bool add_field(String *sql, const char *field_name, int typ, error|= sql->append(','); return error; } // end of add_field +#endif // !NEW_WAY /** Initialise the table share with the new columns. @@ -3946,8 +4526,112 @@ static bool add_field(String *sql, const char *field_name, int typ, @return Return 0 if ok */ -static int init_table_share(THD* thd, - TABLE_SHARE *table_s, +#if defined(NEW_WAY) +//static bool sql_unusable_for_discovery(THD *thd, const char *sql); + +static int init_table_share(THD *thd, + TABLE_SHARE *table_s, + HA_CREATE_INFO *create_info, + Alter_info *alter_info) +{ + KEY *not_used_1; + uint not_used_2; + int rc= 0; + handler *file; + LEX_CUSTRING frm= {0,0}; + + DBUG_ENTER("init_table_share"); + +#if 0 + ulonglong saved_mode= thd->variables.sql_mode; + CHARSET_INFO *old_cs= thd->variables.character_set_client; + Parser_state parser_state; + char *sql_copy; + LEX *old_lex; + Query_arena *arena, backup; + LEX tmp_lex; + + /* + Ouch. Parser may *change* the string it's working on. + Currently (2013-02-26) it is used to permanently disable + conditional comments. + Anyway, let's copy the caller's string... + */ + if (!(sql_copy= thd->strmake(sql, sql_length))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + + if (parser_state.init(thd, sql_copy, sql_length)) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + + thd->variables.sql_mode= MODE_NO_ENGINE_SUBSTITUTION | MODE_NO_DIR_IN_CREATE; + thd->variables.character_set_client= system_charset_info; + old_lex= thd->lex; + thd->lex= &tmp_lex; + + arena= thd->stmt_arena; + + if (arena->is_conventional()) + arena= 0; + else + thd->set_n_backup_active_arena(arena, &backup); + + lex_start(thd); + + if ((error= parse_sql(thd, & parser_state, NULL))) + goto ret; + + if (table_s->sql_unusable_for_discovery(thd, NULL)) { + my_error(ER_SQL_DISCOVER_ERROR, MYF(0), plugin_name(db_plugin)->str, + db.str, table_name.str, sql_copy); + goto ret; + } // endif unusable + + thd->lex->create_info.db_type= plugin_data(db_plugin, handlerton *); + + if (tabledef_version.str) + thd->lex->create_info.tabledef_version= tabledef_version; +#endif // 0 + + tmp_disable_binlog(thd); + + file= mysql_create_frm_image(thd, table_s->db.str, table_s->table_name.str, + create_info, alter_info, C_ORDINARY_CREATE, + ¬_used_1, ¬_used_2, &frm); + if (file) + delete file; + else + rc= OPEN_FRM_CORRUPTED; + + if (!rc && frm.str) { + table_s->option_list= 0; // cleanup existing options ... + table_s->option_struct= 0; // ... 
if it's an assisted discovery + rc= table_s->init_from_binary_frm_image(thd, true, frm.str, frm.length); + } // endif frm + +//ret: + my_free(const_cast<uchar*>(frm.str)); + reenable_binlog(thd); +#if 0 + lex_end(thd->lex); + thd->lex= old_lex; + if (arena) + thd->restore_active_arena(arena, &backup); + thd->variables.sql_mode= saved_mode; + thd->variables.character_set_client= old_cs; +#endif // 0 + + if (thd->is_error() || rc) { + thd->clear_error(); + my_error(ER_NO_SUCH_TABLE, MYF(0), table_s->db.str, + table_s->table_name.str); + DBUG_RETURN(HA_ERR_NOT_A_TABLE); + } else + DBUG_RETURN(0); + +} // end of init_table_share +#else // !NEW_WAY +static int init_table_share(THD* thd, + TABLE_SHARE *table_s, HA_CREATE_INFO *create_info, // char *dsn, String *sql) @@ -4037,9 +4721,26 @@ static int init_table_share(THD* thd, return table_s->init_from_sql_statement_string(thd, true, sql->ptr(), sql->length()); } // end of init_table_share +#endif // !NEW_WAY + +// Add an option to the create_info option list +static void add_option(THD* thd, HA_CREATE_INFO *create_info, + const char *opname, const char *opval) +{ +#if defined(NEW_WAY) + LEX_STRING *opn= thd->make_lex_string(opname, strlen(opname)); + LEX_STRING *val= thd->make_lex_string(opval, strlen(opval)); + engine_option_value *pov, **start= &create_info->option_list, *end= NULL; + + for (pov= *start; pov; pov= pov->next) + end= pov; + + pov= new(thd->mem_root) engine_option_value(*opn, *val, false, start, &end); +#endif // NEW_WAY +} // end of add_option // Used to check whether a MYSQL table is created on itself -static bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, const char *db, char *tab, const char *src, int port) { if (src) @@ -4077,7 +4778,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, const char *fncn= "?"; const char *user, *fn, *db, *host, *pwd, *sep, *tbl, *src; const char *col, *ocl, *rnk, *pic, *fcl, *skc; - char *tab, *dsn, *shm; + char *tab, *dsn, *shm, *dpath; #if defined(WIN32) char *nsp= NULL, *cls= NULL; #endif // WIN32 @@ -4093,10 +4794,15 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, PDBUSER dup= PlgGetUser(g); PCATLG cat= (dup) ? dup->Catalog : NULL; PTOS topt= table_s->option_struct; +#if defined(NEW_WAY) +//CHARSET_INFO *cs; + Alter_info alter_info; +#else // !NEW_WAY char buf[1024]; String sql(buf, sizeof(buf), system_charset_info); sql.copy(STRING_WITH_LEN("CREATE TABLE whatever ("), system_charset_info); +#endif // !NEW_WAY if (!g) return HA_ERR_INTERNAL_ERROR; @@ -4156,6 +4862,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, ttp= GetTypeID(topt->type); sprintf(g->Message, "No table_type. 
Was set to %s", topt->type); push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + add_option(thd, create_info, "table_type", topt->type); } else if (ttp == TAB_NIY) { sprintf(g->Message, "Unsupported table type %s", topt->type); my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); @@ -4188,6 +4895,9 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, } else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL))) tab= table_s->table_name.str; // Default value +#if defined(NEW_WAY) +// add_option(thd, create_info, "tabname", tab); +#endif // NEW_WAY } // endif tab switch (ttp) { @@ -4262,7 +4972,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, } else if (!user) user= "root"; - if (CheckSelf(g, table_s, host, db, tab, src, port)) + if (ok && CheckSelf(g, table_s, host, db, tab, src, port)) ok= false; break; @@ -4313,10 +5023,12 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, char *cnm, *rem, *dft, *xtra; int i, len, prec, dec, typ, flg; - if (cat) - cat->SetDataPath(g, table_s->db.str); - else - return HA_ERR_INTERNAL_ERROR; // Should never happen +// if (cat) +// cat->SetDataPath(g, table_s->db.str); +// else +// return HA_ERR_INTERNAL_ERROR; // Should never happen + + dpath= SetPath(g, table_s->db.str); if (src && ttp != TAB_PIVOT && ttp != TAB_ODBC) { qrp= SrcColumns(g, host, db, user, pwd, src, port); @@ -4329,7 +5041,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, } else switch (ttp) { case TAB_DBF: - qrp= DBFColumns(g, fn, fnc == FNC_COL); + qrp= DBFColumns(g, dpath, fn, fnc == FNC_COL); break; #if defined(ODBC_SUPPORT) case TAB_ODBC: @@ -4339,7 +5051,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, if (src) { qrp= ODBCSrcCols(g, dsn, (char*)src); src= NULL; // for next tests - } else + } else qrp= ODBCColumns(g, dsn, shm, tab, NULL, mxr, fnc == FNC_COL); break; @@ -4361,12 +5073,12 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, #endif // ODBC_SUPPORT #if defined(MYSQL_SUPPORT) case TAB_MYSQL: - qrp= MyColumns(g, thd, host, db, user, pwd, tab, + qrp= MyColumns(g, thd, host, db, user, pwd, tab, NULL, port, fnc == FNC_COL); break; #endif // MYSQL_SUPPORT case TAB_CSV: - qrp= CSVColumns(g, fn, spc, qch, hdr, mxe, fnc == FNC_COL); + qrp= CSVColumns(g, dpath, fn, spc, qch, hdr, mxe, fnc == FNC_COL); break; #if defined(WIN32) case TAB_WMI: @@ -4415,15 +5127,19 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, dec= crp->Prec; flg= crp->Flag; v= crp->Var; - + if (!len && typ == TYPE_STRING) len= 256; // STRBLK's have 0 length // Now add the field +#if defined(NEW_WAY) + rc= add_fields(g, thd, &alter_info, cnm, typ, len, dec, + NOT_NULL_FLAG, "", flg, dbf, v); +#else // !NEW_WAY if (add_field(&sql, cnm, typ, len, dec, NOT_NULL_FLAG, NULL, NULL, NULL, flg, dbf, v)) rc= HA_ERR_OUT_OF_MEM; - +#endif // !NEW_WAY } // endfor crp } else { @@ -4443,7 +5159,12 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, tm= NOT_NULL_FLAG; cnm= (char*)"noname"; dft= xtra= NULL; +#if defined(NEW_WAY) + rem= ""; +// cs= NULL; +#else // !NEW_WAY rem= NULL; +#endif // !NEW_WAY for (crp= qrp->Colresp; crp; crp= crp->Next) switch (crp->Fld) { @@ -4472,7 +5193,7 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, case FLD_REM: rem= crp->Kdata->GetCharValue(i); break; -// case FLD_CHARSET: +// case FLD_CHARSET: // No good because remote table is already translated // if (*(csn= crp->Kdata->GetCharValue(i))) // cs= 
get_charset_by_name(csn, 0); @@ -4525,16 +5246,25 @@ static int connect_assisted_discovery(handlerton *hton, THD* thd, prec= len; // Now add the field +#if defined(NEW_WAY) + rc= add_fields(g, thd, &alter_info, cnm, typ, prec, dec, + tm, rem, 0, dbf, v); +#else // !NEW_WAY if (add_field(&sql, cnm, typ, prec, dec, tm, rem, dft, xtra, 0, dbf, v)) rc= HA_ERR_OUT_OF_MEM; +#endif // !NEW_WAY } // endfor i } // endif fnc +#if defined(NEW_WAY) + rc= init_table_share(thd, table_s, create_info, &alter_info); +#else // !NEW_WAY if (!rc) rc= init_table_share(thd, table_s, create_info, &sql); // rc= init_table_share(thd, table_s, create_info, dsn, &sql); +#endif // !NEW_WAY return rc; } // endif ok @@ -4594,12 +5324,15 @@ int ha_connect::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) { int rc= RC_OK; - bool dbf; + bool dbf, inward; Field* *field; Field *fp; TABTYPE type; TABLE *st= table; // Probably unuseful - THD *thd= ha_thd(); + THD *thd= ha_thd(); +#if defined(WITH_PARTITION_STORAGE_ENGINE) + partition_info *part_info= table_arg->part_info; +#endif // WITH_PARTITION_STORAGE_ENGINE xp= GetUser(thd, xp); PGLOBAL g= xp->g; @@ -4619,7 +5352,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, // Check table type if (type == TAB_UNDEF) { - options->type= (options->srcdef) ? "MYSQL" : + options->type= (options->srcdef) ? "MYSQL" : (options->tabname) ? "PROXY" : "DOS"; type= GetTypeID(options->type); sprintf(g->Message, "No table_type. Will be set to %s", options->type); @@ -4636,6 +5369,8 @@ int ha_connect::create(const char *name, TABLE *table_arg, if (check_privileges(thd, options, GetDBfromName(name))) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + inward= IsFileType(type) && !options->filename; + if (options->data_charset) { const CHARSET_INFO *data_charset; @@ -4687,6 +5422,9 @@ int ha_connect::create(const char *name, TABLE *table_arg, } // endif tabname case TAB_MYSQL: +#if defined(WITH_PARTITION_STORAGE_ENGINE) + if (!part_info) +#endif // WITH_PARTITION_STORAGE_ENGINE {const char *src= options->srcdef; char *host, *db, *tab= (char*)options->tabname; int port; @@ -4730,7 +5468,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } // endif CheckSelf - }break; + }break; default: /* do nothing */; break; } // endswitch ttp @@ -4826,8 +5564,8 @@ int ha_connect::create(const char *name, TABLE *table_arg, sprintf(g->Message, "Unsupported 0 length for column %s", fp->field_name); rc= HA_ERR_INTERNAL_ERROR; - my_printf_error(ER_UNKNOWN_ERROR, - "Unsupported 0 length for column %s", + my_printf_error(ER_UNKNOWN_ERROR, + "Unsupported 0 length for column %s", MYF(0), fp->field_name); DBUG_RETURN(rc); } // endif fp @@ -4854,7 +5592,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, } // endswitch type if ((fp)->real_maybe_null() && !IsTypeNullable(type)) { - my_printf_error(ER_UNKNOWN_ERROR, + my_printf_error(ER_UNKNOWN_ERROR, "Table type %s does not support nullable columns", MYF(0), options->type); DBUG_RETURN(HA_ERR_UNSUPPORTED); @@ -4880,8 +5618,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, } // endfor field - if ((sqlcom == SQLCOM_CREATE_TABLE || *GetTableName() == '#') - && IsFileType(type) && !options->filename) { + if ((sqlcom == SQLCOM_CREATE_TABLE || *GetTableName() == '#') && inward) { // The file name is not specified, create a default file in // the database directory named table_name.table_type. 
// (temporarily not done for XML because a void file causes @@ -4889,8 +5626,6 @@ int ha_connect::create(const char *name, TABLE *table_arg, char buf[256], fn[_MAX_PATH], dbpath[128], lwt[12]; int h; - strcpy(buf, GetTableName()); - // Check for incompatible options if (options->sepindex) { my_message(ER_UNKNOWN_ERROR, @@ -4899,12 +5634,12 @@ int ha_connect::create(const char *name, TABLE *table_arg, DBUG_RETURN(HA_ERR_UNSUPPORTED); } else if (GetTypeID(options->type) == TAB_VEC) if (!table->s->max_rows || options->split) { - my_printf_error(ER_UNKNOWN_ERROR, + my_printf_error(ER_UNKNOWN_ERROR, "%s tables whose file name is unspecified cannot be split", MYF(0), options->type); DBUG_RETURN(HA_ERR_UNSUPPORTED); } else if (options->header == 2) { - my_printf_error(ER_UNKNOWN_ERROR, + my_printf_error(ER_UNKNOWN_ERROR, "header=2 is not allowed for %s tables whose file name is unspecified", MYF(0), options->type); DBUG_RETURN(HA_ERR_UNSUPPORTED); @@ -4917,16 +5652,31 @@ int ha_connect::create(const char *name, TABLE *table_arg, break; } else lwt[i]= tolower(options->type[i]); - - strcat(strcat(buf, "."), lwt); - sprintf(g->Message, "No file name. Table will use %s", buf); - if (sqlcom == SQLCOM_CREATE_TABLE) - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); +#if defined(WITH_PARTITION_STORAGE_ENGINE) + if (part_info) { + char *p; + + strcpy(dbpath, name); + p= strrchr(dbpath, slash); + strcpy(partname, ++p); + strcat(strcat(strcpy(buf, p), "."), lwt); + *p= 0; + } else { +#endif // WITH_PARTITION_STORAGE_ENGINE + strcat(strcat(strcpy(buf, GetTableName()), "."), lwt); + sprintf(g->Message, "No file name. Table will use %s", buf); + + if (sqlcom == SQLCOM_CREATE_TABLE) + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + + strcat(strcat(strcpy(dbpath, "./"), table->s->db.str), "/"); +#if defined(WITH_PARTITION_STORAGE_ENGINE) + } // endif part_info +#endif // WITH_PARTITION_STORAGE_ENGINE - strcat(strcat(strcpy(dbpath, "./"), table->s->db.str), "/"); PlugSetPath(fn, buf, dbpath); - + if ((h= ::open(fn, O_CREAT | O_EXCL, 0666)) == -1) { if (errno == EEXIST) sprintf(g->Message, "Default file %s already exists", fn); @@ -4941,32 +5691,47 @@ int ha_connect::create(const char *name, TABLE *table_arg, push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, "Congratulation, you just created a read-only void table!"); - } // endif + } // endif sqlcom if (xtrace) htrc("xchk=%p createas=%d\n", g->Xchk, g->Createas); - // To check whether indices have to be made or remade + // To check whether indexes have to be made or remade if (!g->Xchk) { PIXDEF xdp; - // We should be in CREATE TABLE or ALTER_TABLE - if (sqlcom != SQLCOM_CREATE_TABLE && sqlcom != SQLCOM_ALTER_TABLE) + // We should be in CREATE TABLE, ALTER_TABLE or CREATE INDEX + if (!(sqlcom == SQLCOM_CREATE_TABLE || sqlcom == SQLCOM_ALTER_TABLE || + sqlcom == SQLCOM_CREATE_INDEX || sqlcom == SQLCOM_DROP_INDEX)) +// (sqlcom == SQLCOM_CREATE_INDEX && part_info) || +// (sqlcom == SQLCOM_DROP_INDEX && part_info))) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, - "Wrong command in create, please contact CONNECT team"); - - if (sqlcom == SQLCOM_ALTER_TABLE && g->Alchecked == 0 && - (!IsFileType(type) || FileExists(options->filename))) { - // This is an ALTER to CONNECT from another engine. - // It cannot be accepted because the table data would be lost - // except when the target file does not exist. - strcpy(g->Message, "Operation denied. 
Table data would be lost."); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + "Unexpected command in create, please contact CONNECT team"); + +#if defined(WITH_PARTITION_STORAGE_ENGINE) + if (part_info && !inward) + strcpy(partname, decode(g, strrchr(name, '#') + 1)); +// strcpy(partname, part_info->curr_part_elem->partition_name); +#endif // WITH_PARTITION_STORAGE_ENGINE + + if (g->Alchecked == 0 && + (!IsFileType(type) || FileExists(options->filename, false))) { + if (part_info) { + sprintf(g->Message, "Data repartition in %s is unchecked", partname); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + } else if (sqlcom == SQLCOM_ALTER_TABLE) { + // This is an ALTER to CONNECT from another engine. + // It cannot be accepted because the table data would be modified + // except when the target file does not exist. + strcpy(g->Message, "Operation denied. Table data would be modified."); + my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } // endif part_info + } // endif outward // Get the index definitions - if (xdp= GetIndexInfo()) { + if ((xdp= GetIndexInfo()) || sqlcom == SQLCOM_DROP_INDEX) { if (options->multiple) { strcpy(g->Message, "Multiple tables are not indexable"); my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); @@ -4978,17 +5743,25 @@ int ha_connect::create(const char *name, TABLE *table_arg, } else if (GetIndexType(type) == 1) { PDBUSER dup= PlgGetUser(g); PCATLG cat= (dup) ? dup->Catalog : NULL; - + + SetDataPath(g, table_arg->s->db.str); + if (cat) { - cat->SetDataPath(g, table_arg->s->db.str); - +// cat->SetDataPath(g, table_arg->s->db.str); + +#if defined(WITH_PARTITION_STORAGE_ENGINE) + if (part_info) + strcpy(partname, + decode(g, strrchr(name, (inward ? slash : '#')) + 1)); +#endif // WITH_PARTITION_STORAGE_ENGINE + if ((rc= optimize(table->in_use, NULL))) { htrc("Create rc=%d %s\n", rc, g->Message); my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); rc= HA_ERR_INTERNAL_ERROR; } else CloseTable(g); - + } // endif cat } else if (!GetIndexType(type)) { @@ -5016,13 +5789,16 @@ int ha_connect::create(const char *name, TABLE *table_arg, - file does not exist or is void - user has file privilege */ -bool ha_connect::FileExists(const char *fn) +bool ha_connect::FileExists(const char *fn, bool bf) { if (!fn || !*fn) return false; + else if (IsPartitioned() && bf) + return true; if (table) { - char *s, filename[_MAX_PATH], path[128]; + char *s, tfn[_MAX_PATH], filename[_MAX_PATH], path[128]; + bool b= false; int n; struct stat info; @@ -5031,13 +5807,22 @@ bool ha_connect::FileExists(const char *fn) return true; #if defined(WIN32) - s= "\\"; + s= "\\"; #else // !WIN32 - s= "/"; + s= "/"; #endif // !WIN32 + if (IsPartitioned()) { + sprintf(tfn, fn, GetPartName()); + + // This is to avoid an initialization error raised by the + // test on check_table_flags made in ha_partition::open + // that can fail if some partition files are empty. + b= true; + } else + strcpy(tfn, fn); strcat(strcat(strcat(strcpy(path, "."), s), table->s->db.str), s); - PlugSetPath(filename, fn, path); + PlugSetPath(filename, tfn, path); n= stat(filename, &info); if (n < 0) { @@ -5051,7 +5836,7 @@ bool ha_connect::FileExists(const char *fn) return false; } else - return (info.st_size) ? true : false; + return (info.st_size || b) ? 
true : false; } // endif table @@ -5174,7 +5959,7 @@ bool ha_connect::NoFieldOptionChange(TABLE *tab) */ enum_alter_inplace_result ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, - Alter_inplace_info *ha_alter_info) + Alter_inplace_info *ha_alter_info) { DBUG_ENTER("check_if_supported_alter"); @@ -5211,11 +5996,11 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, // Index operations Alter_inplace_info::HA_ALTER_FLAGS index_operations= - Alter_inplace_info::ADD_INDEX | + Alter_inplace_info::ADD_INDEX | Alter_inplace_info::DROP_INDEX | - Alter_inplace_info::ADD_UNIQUE_INDEX | + Alter_inplace_info::ADD_UNIQUE_INDEX | Alter_inplace_info::DROP_UNIQUE_INDEX | - Alter_inplace_info::ADD_PK_INDEX | + Alter_inplace_info::ADD_PK_INDEX | Alter_inplace_info::DROP_PK_INDEX; Alter_inplace_info::HA_ALTER_FLAGS inplace_offline_operations= @@ -5223,7 +6008,8 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, Alter_inplace_info::ALTER_COLUMN_NAME | Alter_inplace_info::ALTER_COLUMN_DEFAULT | Alter_inplace_info::CHANGE_CREATE_OPTION | - Alter_inplace_info::ALTER_RENAME | index_operations; + Alter_inplace_info::ALTER_RENAME | + Alter_inplace_info::ALTER_PARTITIONED | index_operations; if (ha_alter_info->handler_flags & index_operations || !SameString(altered_table, "optname") || @@ -5277,7 +6063,7 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, char *fn= GetStringOption("filename"); tshp= NULL; - if (FileExists(fn)) { + if (FileExists(fn, false)) { strcpy(g->Message, "Operation denied. Table data would be lost."); my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); DBUG_RETURN(HA_ALTER_ERROR); @@ -5321,7 +6107,7 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, // This was in check_if_incompatible_data if (NoFieldOptionChange(altered_table) && - type == newtyp && + type == newtyp && SameInt(altered_table, "lrecl") && SameInt(altered_table, "elements") && SameInt(altered_table, "header") && @@ -5333,12 +6119,14 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, fin: if (idx) { // Indexing is only supported inplace - my_message(ER_ALTER_OPERATION_NOT_SUPPORTED, + my_message(ER_ALTER_OPERATION_NOT_SUPPORTED, "Alter operations not supported together by CONNECT", MYF(0)); DBUG_RETURN(HA_ALTER_ERROR); } else if (outward) { - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, - "This is an outward table, table data were not modified."); + if (IsFileType(type)) + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, + "This is an outward table, table data were not modified."); + DBUG_RETURN(HA_ALTER_INPLACE_EXCLUSIVE_LOCK); } else DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); @@ -5363,15 +6151,95 @@ bool ha_connect::check_if_incompatible_data(HA_CREATE_INFO *info, { DBUG_ENTER("ha_connect::check_if_incompatible_data"); // TO DO: really implement and check it. 
- push_warning(ha_thd(), Sql_condition::WARN_LEVEL_WARN, 0, + push_warning(ha_thd(), Sql_condition::WARN_LEVEL_WARN, 0, "Unexpected call to check_if_incompatible_data."); DBUG_RETURN(COMPATIBLE_DATA_NO); } // end of check_if_incompatible_data +/**************************************************************************** + * CONNECT MRR implementation: use DS-MRR + This is just copied from myisam + ***************************************************************************/ + +int ha_connect::multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param, + uint n_ranges, uint mode, + HANDLER_BUFFER *buf) +{ + return ds_mrr.dsmrr_init(this, seq, seq_init_param, n_ranges, mode, buf); +} // end of multi_range_read_init + +int ha_connect::multi_range_read_next(range_id_t *range_info) +{ + return ds_mrr.dsmrr_next(range_info); +} // end of multi_range_read_next + +ha_rows ha_connect::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq, + void *seq_init_param, + uint n_ranges, uint *bufsz, + uint *flags, Cost_estimate *cost) +{ + /* + This call is here because there is no location where this->table would + already be known. + TODO: consider moving it into some per-query initialization call. + */ + ds_mrr.init(this, table); + + // MMR is implemented for "local" file based tables only + if (!IsFileType(GetRealType(GetTableOptionStruct()))) + *flags|= HA_MRR_USE_DEFAULT_IMPL; + + ha_rows rows= ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param, n_ranges, + bufsz, flags, cost); + xp->g->Mrr= !(*flags & HA_MRR_USE_DEFAULT_IMPL); + return rows; +} // end of multi_range_read_info_const + +ha_rows ha_connect::multi_range_read_info(uint keyno, uint n_ranges, uint keys, + uint key_parts, uint *bufsz, + uint *flags, Cost_estimate *cost) +{ + ds_mrr.init(this, table); + + // MMR is implemented for "local" file based tables only + if (!IsFileType(GetRealType(GetTableOptionStruct()))) + *flags|= HA_MRR_USE_DEFAULT_IMPL; + + ha_rows rows= ds_mrr.dsmrr_info(keyno, n_ranges, keys, key_parts, bufsz, + flags, cost); + xp->g->Mrr= !(*flags & HA_MRR_USE_DEFAULT_IMPL); + return rows; +} // end of multi_range_read_info + + +int ha_connect::multi_range_read_explain_info(uint mrr_mode, char *str, + size_t size) +{ + return ds_mrr.dsmrr_explain_info(mrr_mode, str, size); +} // end of multi_range_read_explain_info + +/* CONNECT MRR implementation ends */ + +#if 0 +// Does this make sens for CONNECT? +Item *ha_connect::idx_cond_push(uint keyno_arg, Item* idx_cond_arg) +{ + pushed_idx_cond_keyno= keyno_arg; + pushed_idx_cond= idx_cond_arg; + in_range_check_pushed_down= TRUE; + if (active_index == pushed_idx_cond_keyno) + mi_set_index_cond_func(file, handler_index_cond_check, this); + return NULL; +} +#endif // 0 + struct st_mysql_storage_engine connect_storage_engine= { MYSQL_HANDLERTON_INTERFACE_VERSION }; +/***********************************************************************/ +/* CONNECT global variables definitions. 
*/ +/***********************************************************************/ // Tracing: 0 no, 1 yes, >1 more tracing static MYSQL_SYSVAR_INT(xtrace, xtrace, PLUGIN_VAR_RQCMDARG, "Console trace value.", @@ -5409,6 +6277,35 @@ static MYSQL_SYSVAR_ENUM( 0, // def (no) &xconv_typelib); // typelib +/** + Temporary file usage: + no: Not using temporary file + auto: Using temporary file when needed + yes: Allways using temporary file + force: Force using temporary file (no MAP) + test: Reserved +*/ +const char *usetemp_names[]= +{ + "NO", "AUTO", "YES", "FORCE", "TEST", NullS +}; + +TYPELIB usetemp_typelib= +{ + array_elements(usetemp_names) - 1, "usetemp_typelib", + usetemp_names, NULL +}; + +static MYSQL_SYSVAR_ENUM( + use_tempfile, // name + use_tempfile, // varname + PLUGIN_VAR_RQCMDARG, // opt + "Temporary file use.", // comment + NULL, // check + update_connect_usetemp, // update function + 1, // def (AUTO) + &usetemp_typelib); // typelib + #if defined(XMAP) // Using file mapping for indexes if true static MYSQL_SYSVAR_BOOL(indx_map, indx_map, PLUGIN_VAR_RQCMDARG, @@ -5421,6 +6318,11 @@ static MYSQL_SYSVAR_UINT(work_size, work_size, PLUGIN_VAR_RQCMDARG, "Size of the CONNECT work area.", NULL, update_connect_worksize, SZWORK, SZWMIN, UINT_MAX, 1); +// Getting exact info values +static MYSQL_SYSVAR_BOOL(exact_info, exact_info, PLUGIN_VAR_RQCMDARG, + "Getting exact info values", + NULL, update_connect_xinfo, 0); + static struct st_mysql_sys_var* connect_system_variables[]= { MYSQL_SYSVAR(xtrace), MYSQL_SYSVAR(conv_size), @@ -5429,6 +6331,8 @@ static struct st_mysql_sys_var* connect_system_variables[]= { MYSQL_SYSVAR(indx_map), #endif // XMAP MYSQL_SYSVAR(work_size), + MYSQL_SYSVAR(use_tempfile), + MYSQL_SYSVAR(exact_info), NULL }; @@ -5442,10 +6346,10 @@ maria_declare_plugin(connect) PLUGIN_LICENSE_GPL, connect_init_func, /* Plugin Init */ connect_done_func, /* Plugin Deinit */ - 0x0102, /* version number (1.02) */ + 0x0103, /* version number (1.03) */ NULL, /* status variables */ connect_system_variables, /* system variables */ - "1.02", /* string version */ + "1.03", /* string version */ MariaDB_PLUGIN_MATURITY_BETA /* maturity */ } maria_declare_plugin_end; diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h index a8d0be4c03e..9a73c85cdc7 100644 --- a/storage/connect/ha_connect.h +++ b/storage/connect/ha_connect.h @@ -1,4 +1,4 @@ -/* Copyright (C) Olivier Bertrand 2004 - 2013 +/* Copyright (C) Olivier Bertrand 2004 - 2014 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -50,7 +50,7 @@ typedef struct _xinfo { class XCHK : public BLOCK { public: - XCHK(void) {oldsep= newsep= false; + XCHK(void) {oldsep= newsep= false; oldopn= newopn= NULL; oldpix= newpix= NULL;} @@ -71,7 +71,8 @@ public: typedef class XCHK *PCHK; typedef class user_connect *PCONNECT; typedef struct ha_table_option_struct TOS, *PTOS; -typedef struct ha_field_option_struct FOS, *PFOS; +typedef struct ha_field_option_struct FOS, *PFOS; +typedef struct ha_index_option_struct XOS, *PXOS; extern handlerton *connect_hton; @@ -122,12 +123,27 @@ struct ha_table_option_struct { struct ha_field_option_struct { ulonglong offset; + ulonglong freq; ulonglong fldlen; + uint opt; const char *dateformat; const char *fieldformat; char *special; }; +/* + index options can be declared similarly + using the ha_index_option_struct structure. 
+ + Their values can be specified in the CREATE TABLE per index: + CREATE TABLE ( field ..., .., INDEX .... *here*, ... ) +*/ +struct ha_index_option_struct +{ + bool dynamic; + bool mapped; +}; + /** @brief CONNECT_SHARE is a structure that will be shared among all open handlers. This example implements the minimum of what you will probably need. @@ -166,32 +182,41 @@ public: static bool connect_init(void); static bool connect_end(void); TABTYPE GetRealType(PTOS pos= NULL); + char *GetRealString(const char *s); char *GetStringOption(char *opname, char *sdef= NULL); PTOS GetTableOptionStruct(TABLE_SHARE *s= NULL); bool GetBooleanOption(char *opname, bool bdef); bool SetBooleanOption(char *opname, bool b); int GetIntegerOption(char *opname); + bool GetIndexOption(KEY *kp, char *opname); bool CheckString(const char *str1, const char *str2); bool SameString(TABLE *tab, char *opn); bool SetIntegerOption(char *opname, int n); bool SameInt(TABLE *tab, char *opn); bool SameBool(TABLE *tab, char *opn); - bool FileExists(const char *fn); + bool FileExists(const char *fn, bool bf); bool NoFieldOptionChange(TABLE *tab); PFOS GetFieldOptionStruct(Field *fp); void *GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf); + PXOS GetIndexOptionStruct(KEY *kp); PIXDEF GetIndexInfo(TABLE_SHARE *s= NULL); const char *GetDBName(const char *name); const char *GetTableName(void); + char *GetPartName(void); //int GetColNameLen(Field *fp); //char *GetColName(Field *fp); //void AddColName(char *cp, Field *fp); TABLE *GetTable(void) {return table;} bool IsSameIndex(PIXDEF xp1, PIXDEF xp2); + bool IsPartitioned(void); + bool IsUnique(uint n); + char *GetDataPath(void) {return (char*)datapath;} + void SetDataPath(PGLOBAL g, const char *path); PTDB GetTDB(PGLOBAL g); int OpenTable(PGLOBAL g, bool del= false); - bool IsOpened(void); + bool CheckColumnList(PGLOBAL g); + bool IsOpened(void); int CloseTable(PGLOBAL g); int MakeRecord(char *buf); int ScanRecord(PGLOBAL g, uchar *buf); @@ -318,17 +343,19 @@ public: @note The pushed conditions form a stack (from which one can remove the last pushed condition using cond_pop). - The table handler filters out rows using (pushed_cond1 AND pushed_cond2 + The table handler filters out rows using (pushed_cond1 AND pushed_cond2 AND ... AND pushed_condN) or less restrictive condition, depending on handler's capabilities. handler->ha_reset() call empties the condition stack. Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the condition stack. - */ + */ virtual const COND *cond_push(const COND *cond); PCFIL CheckCond(PGLOBAL g, PCFIL filp, AMT tty, Item *cond); const char *GetValStr(OPVAL vop, bool neg); +PFIL CondFilter(PGLOBAL g, Item *cond); +//PFIL CheckFilter(PGLOBAL g); /** Number of rows in table. 
It will only be called if @@ -336,7 +363,7 @@ const char *GetValStr(OPVAL vop, bool neg); */ virtual ha_rows records(); - /** + /** Type of table for caching query CONNECT should not use caching because its tables are external data prone to me modified out of MariaDB @@ -463,6 +490,28 @@ int index_prev(uchar *buf); enum thr_lock_type lock_type); ///< required int optimize(THD* thd, HA_CHECK_OPT* check_opt); + /** + * Multi Range Read interface + */ + int multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param, + uint n_ranges, uint mode, HANDLER_BUFFER *buf); + int multi_range_read_next(range_id_t *range_info); + ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq, + void *seq_init_param, + uint n_ranges, uint *bufsz, + uint *flags, Cost_estimate *cost); + ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys, + uint key_parts, uint *bufsz, + uint *flags, Cost_estimate *cost); + int multi_range_read_explain_info(uint mrr_mode, char *str, size_t size); + + int reset(void) {ds_mrr.dsmrr_close(); return 0;} + + /* Index condition pushdown implementation */ +// Item *idx_cond_push(uint keyno, Item* idx_cond); +private: + DsMrr_impl ds_mrr; + protected: bool check_privileges(THD *thd, PTOS options, char *dbn); MODE CheckMode(PGLOBAL g, THD *thd, MODE newmode, bool *chk, bool *cras); @@ -474,22 +523,27 @@ protected: ulong hnum; // The number of this handler query_id_t valid_query_id; // The one when tdbp was allocated query_id_t creat_query_id; // The one when handler was allocated + char *datapath; // Is the Path of DB data directory PTDB tdbp; // To table class object PVAL sdvalin; // Used to convert date values PVAL sdvalout; // Used to convert date values bool istable; // True for table handler -//char tname[64]; // The table name + char partname[64]; // The partition name MODE xmod; // Table mode XINFO xinfo; // The table info structure bool valid_info; // True if xinfo is valid bool stop; // Used when creating index bool alter; // True when converting to other engine + bool mrr; // True when getting index positions + bool nox; // True when index should not be made + bool abort; // True after error in UPDATE/DELETE int indexing; // Type of indexing for CONNECT int locked; // Table lock + MY_BITMAP *part_id; // Columns used for partition func THR_LOCK_DATA lock_data; public: - TABLE_SHARE *tshp; // Used by called tables + TABLE_SHARE *tshp; // Used by called tables char *data_file_name; char *index_file_name; uint int_table_flags; // Inherited from MyISAM diff --git a/storage/connect/macutil.cpp b/storage/connect/macutil.cpp index 3069aa71cd6..4d3022b91b6 100644 --- a/storage/connect/macutil.cpp +++ b/storage/connect/macutil.cpp @@ -103,7 +103,7 @@ int MACINFO::GetNadap(PGLOBAL g) } // endif MaxSize return N; - } // end of GetMaxSize + } // end of GetNadap /***********************************************************************/ /* GetMacInfo: Get info for all found adapters. */ diff --git a/storage/connect/maputil.cpp b/storage/connect/maputil.cpp index 7104259ebad..97c638b4254 100644 --- a/storage/connect/maputil.cpp +++ b/storage/connect/maputil.cpp @@ -154,7 +154,7 @@ HANDLE CreateFileMap(PGLOBAL g, LPCSTR fileName, } // endswitch // Try to open the addressed file. - fd= global_open(g, MSGID_NONE, fileName, openMode); + fd= global_open(g, MSGID_NONE, fileName, openMode); if (fd != INVALID_HANDLE_VALUE && mode != MODE_INSERT) { /* We must know about the size of the file. 
*/ @@ -164,17 +164,19 @@ HANDLE CreateFileMap(PGLOBAL g, LPCSTR fileName, return INVALID_HANDLE_VALUE; } // endif fstat - filesize = st.st_size; - - // Now we are ready to load the file. If mmap() is available we try - // this first. If not available or it failed we try to load it. - mm->memory = mmap(NULL, filesize, protmode, MAP_SHARED, fd, 0); + if ((filesize = st.st_size)) + // Now we are ready to load the file. If mmap() is available we try + // this first. If not available or it failed we try to load it. + mm->memory = mmap(NULL, filesize, protmode, MAP_SHARED, fd, 0); + else + mm->memory = 0; if (mm->memory != MAP_FAILED) { mm->lenL = (mm->memory != 0) ? filesize : 0; mm->lenH = 0; } else { strcpy(g->Message, "Memory mapping failed"); + close(fd); return INVALID_HANDLE_VALUE; } // endif memory diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc index 10b12c0809b..660e2adec2f 100644 --- a/storage/connect/mycat.cc +++ b/storage/connect/mycat.cc @@ -405,9 +405,9 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info) CATALOG::CATALOG(void) { #if defined(WIN32) - DataPath= ".\\"; +//DataPath= ".\\"; #else // !WIN32 - DataPath= "./"; +//DataPath= "./"; #endif // !WIN32 memset(&Ctb, 0, sizeof(CURTAB)); Cbuf= NULL; @@ -433,6 +433,7 @@ void MYCAT::Reset(void) { } // end of Reset +#if 0 /***********************************************************************/ /* This function sets the current database path. */ /***********************************************************************/ @@ -463,6 +464,7 @@ void MYCAT::SetPath(PGLOBAL g, LPCSTR *datapath, const char *path) } // endif path } // end of SetDataPath +#endif // 0 /***********************************************************************/ /* GetTableDesc: retrieve a table descriptor. 
*/ @@ -560,7 +562,7 @@ PTDB MYCAT::GetTable(PGLOBAL g, PTABLE tablep, MODE mode, LPCSTR type) printf("tdb=%p type=%s\n", tdp, tdp->GetType()); if (tablep->GetQualifier()) - SetPath(g, &tdp->Database, tablep->GetQualifier()); + tdp->Database = SetPath(g, tablep->GetQualifier()); tdbp= tdp->GetTable(g, mode); } // endif tdp @@ -571,6 +573,7 @@ PTDB MYCAT::GetTable(PGLOBAL g, PTABLE tablep, MODE mode, LPCSTR type) tdbp->GetAmType()); tablep->SetTo_Tdb(tdbp); tdbp->SetTable(tablep); + tdbp->SetMode(mode); } // endif tdbp return (tdbp); diff --git a/storage/connect/mycat.h b/storage/connect/mycat.h index b45d3a08725..1aaee4ed1e8 100644 --- a/storage/connect/mycat.h +++ b/storage/connect/mycat.h @@ -56,8 +56,8 @@ class MYCAT : public CATALOG { // Methods void Reset(void); - void SetDataPath(PGLOBAL g, const char *path) - {SetPath(g, &DataPath, path);} +//void SetDataPath(PGLOBAL g, const char *path) +// {SetPath(g, &DataPath, path);} bool StoreIndex(PGLOBAL g, PTABDEF defp) {return false;} // Temporary PRELDEF GetTableDesc(PGLOBAL g, LPCSTR name, LPCSTR type, PRELDEF *prp = NULL); @@ -67,7 +67,7 @@ class MYCAT : public CATALOG { protected: PRELDEF MakeTableDesc(PGLOBAL g, LPCSTR name, LPCSTR am); - void SetPath(PGLOBAL g, LPCSTR *datapath, const char *path); +//void SetPath(PGLOBAL g, LPCSTR *datapath, const char *path); // Members ha_connect *Hc; // The Connect handler diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp index 8700a24ac96..0c4b50f1d0b 100644 --- a/storage/connect/myconn.cpp +++ b/storage/connect/myconn.cpp @@ -364,7 +364,7 @@ PQRYRES SrcColumns(PGLOBAL g, const char *host, const char *db, if (!port) port = mysqld_port; - if (!strnicmp(srcdef, "select ", 7)) { + if (!strnicmp(srcdef, "select ", 7)) { query = (char *)PlugSubAlloc(g, NULL, strlen(srcdef) + 9); strcat(strcpy(query, srcdef), " LIMIT 0"); } else @@ -686,19 +686,48 @@ int MYSQLC::ExecSQL(PGLOBAL g, const char *query, int *w) rc = RC_NF; } // endif field count -if (w) -//*w = mysql_warning_count(m_DB); - *w = m_DB->warning_count; + if (w) +// *w = mysql_warning_count(m_DB); + *w = m_DB->warning_count; return rc; } // end of ExecSQL /***********************************************************************/ +/* Get table size by executing "select count(*) from table_name". 
*/ +/***********************************************************************/ +int MYSQLC::GetTableSize(PGLOBAL g, PSZ query) + { + if (mysql_real_query(m_DB, query, strlen(query))) { +#if defined(_DEBUG) + char *msg = (char*)PlugSubAlloc(g, NULL, 512 + strlen(query)); + + sprintf(msg, "(%d) %s [%s]", mysql_errno(m_DB), + mysql_error(m_DB), query); + strncpy(g->Message, msg, sizeof(g->Message) - 1); + g->Message[sizeof(g->Message) - 1] = 0; +#endif // _DEBUG + return -2; + } // endif mysql_real_query + + if (!(m_Res = mysql_store_result(m_DB))) + return -3; + + // Get the resulting count value + m_Rows = (int)mysql_num_rows(m_Res); // Should be 1 + + if (m_Rows && (m_Row = mysql_fetch_row(m_Res))) + return atoi(*m_Row); + + return -4; + } // end of GetTableSize + +/***********************************************************************/ /* Move to a specific row and column */ /***********************************************************************/ void MYSQLC::DataSeek(my_ulonglong row) { - MYSQL_ROWS *tmp=0; + MYSQL_ROWS *tmp = 0; //DBUG_PRINT("info",("mysql_data_seek(%ld)",(long) row)); if (m_Res->data) @@ -873,7 +902,7 @@ PQRYRES MYSQLC::GetResult(PGLOBAL g, bool pdb) else { if (!*row && crp->Nulls) crp->Nulls[n] = '*'; // Null value - + crp->Kdata->Reset(n); } // endelse *row } @@ -970,7 +999,7 @@ void MYSQLC::DiscardResults(void) while (!mysql_next_result(m_DB)) { res = mysql_store_result(m_DB); mysql_free_result(res); - } // endwhile next result + } // endwhile next result } // end of DiscardResults #endif // 0 diff --git a/storage/connect/myconn.h b/storage/connect/myconn.h index 7e892eece34..65e6531aee4 100644 --- a/storage/connect/myconn.h +++ b/storage/connect/myconn.h @@ -64,6 +64,7 @@ class DllItem MYSQLC { // Methods int GetResultSize(PGLOBAL g, PSZ sql); + int GetTableSize(PGLOBAL g, PSZ query); int Open(PGLOBAL g, const char *host, const char *db, const char *user= "root", const char *pwd= "*", int pt= 0); diff --git a/storage/connect/mysql-test/connect/r/alter.result b/storage/connect/mysql-test/connect/r/alter.result index ccfae3f4ddb..77d775220ec 100644 --- a/storage/connect/mysql-test/connect/r/alter.result +++ b/storage/connect/mysql-test/connect/r/alter.result @@ -21,8 +21,8 @@ DROP INDEX xd ON t1; ALTER TABLE t1 ADD INDEX xc (c), ADD INDEX xd (d); SHOW INDEX FROM t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment -t1 1 xc 1 c A NULL NULL NULL XPLUG -t1 1 xd 1 d A NULL NULL NULL XPLUG +t1 1 xc 1 c A NULL NULL NULL XINDEX +t1 1 xd 1 d A NULL NULL NULL XINDEX ALTER TABLE t1 DROP INDEX xc, DROP INDEX xd; SHOW INDEX FROM t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment @@ -111,8 +111,8 @@ t1 CREATE TABLE `t1` ( ) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`=DBF SHOW INDEX FROM t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment -t1 1 xc 1 c A NULL NULL NULL XPLUG -t1 1 xd 1 d A NULL NULL NULL XPLUG +t1 1 xc 1 c A NULL NULL NULL XINDEX +t1 1 xd 1 d A NULL NULL NULL XINDEX SELECT * FROM t1; c d 1 One @@ -143,8 +143,8 @@ line ALTER TABLE t1 ADD INDEX xc (c), ADD INDEX xd (d); SHOW INDEX FROM t1; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment -t1 1 xc 1 c A NULL NULL NULL XPLUG -t1 1 xd 1 d A NULL NULL NULL XPLUG +t1 1 xc 1 c A NULL NULL NULL XINDEX +t1 1 
xd 1 d A NULL NULL NULL XINDEX SELECT d FROM t1 WHERE c = 2; d Two @@ -218,13 +218,22 @@ Three 3 # Changing to another engine is Ok # However, the data file is not deleted. # -ALTER TABLE t1 ENGINE=MARIA; +ALTER TABLE t1 ENGINE=ARIA; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `d` char(10) NOT NULL /* `FLAG`=11 */, + `c` int(11) NOT NULL /* `FLAG`=0 */ +) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 /* `TABLE_TYPE`=fix `FILE_NAME`='tf1.txt' `ENDING`=1 */ +set @old_sql_mode=@@sql_mode; +set sql_mode=ignore_bad_table_options; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `d` char(10) NOT NULL `FLAG`=11, `c` int(11) NOT NULL `FLAG`=0 ) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 `TABLE_TYPE`=fix `FILE_NAME`='tf1.txt' `ENDING`=1 +set sql_mode=@old_sql_mode; SELECT * from t1; d c One 1 @@ -240,7 +249,7 @@ line # Sure enough, the data file was not deleted. # ALTER TABLE t1 ENGINE=CONNECT; -ERROR HY000: Operation denied. Table data would be lost. +ERROR HY000: Operation denied. Table data would be modified. # # But changing back to CONNECT succeed # if the data file does not exist. diff --git a/storage/connect/mysql-test/connect/r/alter_xml.result b/storage/connect/mysql-test/connect/r/alter_xml.result index bd3b281b05b..f2250b78d2d 100644 --- a/storage/connect/mysql-test/connect/r/alter_xml.result +++ b/storage/connect/mysql-test/connect/r/alter_xml.result @@ -35,7 +35,7 @@ Warning 1105 No table_type. Will be set to DOS SELECT * FROM t2; line <?xml version="1.0" encoding="UTF-8"?> -<!-- Created by CONNECT Version 1.02.0002 March 16, 2014 --> +<!-- Created by the MariaDB CONNECT Storage Engine--> <t1> <row> <TH>c</TH> @@ -71,7 +71,7 @@ t1 CREATE TABLE `t1` ( SELECT * FROM t2; line <?xml version="1.0" encoding="UTF-8"?> -<!-- Created by CONNECT Version 1.02.0002 March 16, 2014 --> +<!-- Created by the MariaDB CONNECT Storage Engine--> <t1> <row d="One"> <c>1</c> diff --git a/storage/connect/mysql-test/connect/r/mysql.result b/storage/connect/mysql-test/connect/r/mysql.result index fc2fe2418cf..29f077c3d9f 100644 --- a/storage/connect/mysql-test/connect/r/mysql.result +++ b/storage/connect/mysql-test/connect/r/mysql.result @@ -282,8 +282,6 @@ a 20 30 ALTER TABLE t2 MODIFY a TINYINT; -Warnings: -Warning 1105 This is an outward table, table data were not modified. SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( diff --git a/storage/connect/mysql-test/connect/r/occur.result b/storage/connect/mysql-test/connect/r/occur.result index fbcda4660be..a497dfc9942 100644 --- a/storage/connect/mysql-test/connect/r/occur.result +++ b/storage/connect/mysql-test/connect/r/occur.result @@ -193,8 +193,6 @@ Kevin 8 Lisbeth 2 Mary 2 ALTER TABLE xpet MODIFY number INT NOT NULL; -Warnings: -Warning 1105 This is an outward table, table data were not modified. 
SELECT * FROM xpet; name race number John dog 2 diff --git a/storage/connect/mysql-test/connect/r/part_file.result b/storage/connect/mysql-test/connect/r/part_file.result new file mode 100644 index 00000000000..c679ed95062 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/part_file.result @@ -0,0 +1,346 @@ +set @@global.connect_exact_info=ON; +# This will be used to see what data files are created +CREATE TABLE dr1 ( +fname VARCHAR(256) NOT NULL FLAG=2, +ftype CHAR(8) NOT NULL FLAG=3 +# ,FSIZE INT(6) NOT NULL FLAG=5 removed because Unix size != Windows size +) engine=CONNECT table_type=DIR file_name='t1#P#*.*'; +# +# Testing partitioning on inward table +# +CREATE TABLE t1 ( +id INT NOT NULL, +msg VARCHAR(32) +) ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10 +PARTITION BY RANGE(id) ( +PARTITION first VALUES LESS THAN(10), +PARTITION middle VALUES LESS THAN(50), +PARTITION last VALUES LESS THAN(MAXVALUE)); +INSERT INTO t1 VALUES(4, 'four'),(24, 'twenty four'); +INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'); +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +first 2 +middle 3 +last 2 +SELECT * FROM t1; +id msg +4 four +7 seven +24 twenty four +10 ten +40 forty +60 sixty +81 eighty one +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id > 50; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 last ALL NULL NULL NULL NULL 3 Using where +SELECT * FROM t1 WHERE id > 50; +id msg +60 sixty +81 eighty one +UPDATE t1 set id = 41 WHERE msg = 'four'; +ERROR HY000: Got error 174 'Cannot update column id because it is used for partitioning' from CONNECT +UPDATE t1 set msg = 'quatre' WHERE id = 4; +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +t1#P#first .csv +t1#P#last .csv +t1#P#middle .csv +# +# Altering partitioning on inward table +# +ALTER TABLE t1 +PARTITION by range(id) ( +PARTITION first VALUES LESS THAN(11), +PARTITION middle VALUES LESS THAN(50), +PARTITION last VALUES LESS THAN(MAXVALUE)); +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +first 3 +middle 2 +last 2 +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +t1#P#first .csv +t1#P#last .csv +t1#P#middle .csv +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id=10; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 first ALL NULL NULL NULL NULL 3 Using where +SELECT * FROM t1 WHERE id=10; +id msg +10 ten +DELETE FROM t1 WHERE id in (4,60); +SELECT * FROM t1; +id msg +7 seven +10 ten +24 twenty four +40 forty +81 eighty one +DROP TABLE t1; +# +# Testing partitioning on a void outward table +# +ALTER TABLE dr1 FILE_NAME='part*.*'; +CREATE TABLE t1 ( +rwid INT(6) DEFAULT 0 SPECIAL=ROWID, +rnum INT(6) DEFAULT 0 SPECIAL=ROWNUM, +prtn VARCHAR(64) DEFAULT '' SPECIAL=PARTID, +tbn VARCHAR(64) DEFAULT '' SPECIAL=TABID, +fid VARCHAR(256) DEFAULT '' SPECIAL=FNAME, +id INT KEY NOT NULL, +msg VARCHAR(32) +) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='part%s.txt'; +ALTER TABLE t1 +PARTITION by range columns(id) ( +PARTITION `1` VALUES LESS THAN(10), +PARTITION `2` VALUES LESS THAN(50), +PARTITION `3` VALUES LESS THAN(MAXVALUE)); +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 id A NULL NULL NULL XINDEX +INSERT INTO t1(id,msg) VALUES(4, 'four'); +SELECT * FROM dr1 ORDER 
BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +INSERT INTO t1(id,msg) VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'); +INSERT INTO t1(id,msg) VALUES(72,'seventy two'),(20,'twenty'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 4 +2 4 +3 3 +SELECT * FROM t1; +rwid rnum prtn tbn fid id msg +1 1 1 t1 part1 4 four +2 2 1 t1 part1 7 seven +3 3 1 t1 part1 1 one +4 4 1 t1 part1 8 eight +1 1 2 t1 part2 10 ten +2 2 2 t1 part2 40 forty +3 3 2 t1 part2 20 twenty +4 4 2 t1 part2 35 thirty five +1 1 3 t1 part3 60 sixty +2 2 3 t1 part3 81 eighty one +3 3 3 t1 part3 72 seventy two +SELECT * FROM t1 order by id; +rwid rnum prtn tbn fid id msg +3 3 1 t1 part1 1 one +1 1 1 t1 part1 4 four +2 2 1 t1 part1 7 seven +4 4 1 t1 part1 8 eight +1 1 2 t1 part2 10 ten +3 3 2 t1 part2 20 twenty +4 4 2 t1 part2 35 thirty five +2 2 2 t1 part2 40 forty +1 1 3 t1 part3 60 sixty +3 3 3 t1 part3 72 seventy two +2 2 3 t1 part3 81 eighty one +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 2 const PRIMARY PRIMARY 4 const 1 +SELECT * FROM t1 WHERE id = 10; +rwid rnum prtn tbn fid id msg +1 1 2 t1 part2 10 ten +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id >= 10; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 2,3 range PRIMARY PRIMARY 4 NULL 7 Using where +SELECT * FROM t1 WHERE id >= 10; +rwid rnum prtn tbn fid id msg +1 1 2 t1 part2 10 ten +3 3 2 t1 part2 20 twenty +4 4 2 t1 part2 35 thirty five +2 2 2 t1 part2 40 forty +1 1 3 t1 part3 60 sixty +3 3 3 t1 part3 72 seventy two +2 2 3 t1 part3 81 eighty one +SELECT count(*) FROM t1 WHERE id < 10; +count(*) +4 +SELECT case when id < 10 then 1 when id < 50 then 2 else 3 end as pn, count(*) FROM t1 group by pn; +pn count(*) +1 4 +2 4 +3 3 +SELECT prtn, count(*) FROM t1 group by prtn; +prtn count(*) +1 4 +2 4 +3 3 +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id > 50; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 3 range PRIMARY PRIMARY 4 NULL 3 Using where +SELECT * FROM t1 WHERE id = 35; +rwid rnum prtn tbn fid id msg +4 4 2 t1 part2 35 thirty five +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +# This does not change the partition file data and is WRONG +ALTER TABLE t1 +PARTITION by range columns(id) ( +PARTITION `1` VALUES LESS THAN(11), +PARTITION `2` VALUES LESS THAN(70), +PARTITION `3` VALUES LESS THAN(MAXVALUE)); +Warnings: +Warning 1105 Data repartition in 1 is unchecked +Warning 1105 Data repartition in 2 is unchecked +Warning 1105 Data repartition in 3 is unchecked +SELECT CASE WHEN id < 11 THEN 1 WHEN id < 70 THEN 2 ELSE 3 END AS pn, COUNT(*) FROM t1 GROUP BY pn; +pn COUNT(*) +1 5 +2 4 +3 2 +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 4 +2 4 +3 3 +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +# +# This is the correct way to change partitioning: +# Save table values, erase the table, then re-insert saved values in modified table +# +CREATE TABLE t2 ( +id INT NOT NULL, +msg VARCHAR(32) +) ENGINE=CONNECT TABLE_TYPE=FIX; +Warnings: +Warning 1105 No file name. 
Table will use t2.fix +INSERT INTO t2 SELECT id, msg FROM t1; +DELETE FROM t1; +INSERT INTO t1(id,msg) SELECT * FROM t2; +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 5 +2 4 +3 2 +SELECT * FROM t1; +rwid rnum prtn tbn fid id msg +1 1 1 t1 part1 4 four +2 2 1 t1 part1 7 seven +3 3 1 t1 part1 1 one +4 4 1 t1 part1 8 eight +5 5 1 t1 part1 10 ten +1 1 2 t1 part2 40 forty +2 2 2 t1 part2 20 twenty +3 3 2 t1 part2 35 thirty five +4 4 2 t1 part2 60 sixty +1 1 3 t1 part3 81 eighty one +2 2 3 t1 part3 72 seventy two +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +DROP TABLE t2; +DROP TABLE t1; +# +# Testing partitioning on a populated outward table +# +CREATE TABLE t1 ( +id INT NOT NULL, +msg VARCHAR(32) +) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='part%s.txt' +PARTITION by range columns(id) ( +PARTITION `1` VALUES LESS THAN(11), +PARTITION `2` VALUES LESS THAN(70), +PARTITION `3` VALUES LESS THAN(MAXVALUE)); +Warnings: +Warning 1105 Data repartition in 1 is unchecked +Warning 1105 Data repartition in 2 is unchecked +Warning 1105 Data repartition in 3 is unchecked +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 5 +2 4 +3 2 +SELECT * FROM t1 WHERE id < 11; +id msg +4 four +7 seven +1 one +8 eight +10 ten +SELECT * FROM t1 WHERE id >= 70; +id msg +81 eighty one +72 seventy two +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +# +# Testing indexing on a partitioned table +# +CREATE INDEX XID ON t1(id); +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 1 XID 1 id A NULL NULL NULL XINDEX +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 1 ref XID XID 4 const 1 +DROP INDEX XID ON t1; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .txt +part2 .txt +part3 .txt +ALTER TABLE t1 ADD PRIMARY KEY (id); +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 id A NULL NULL NULL XINDEX +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .fnx +part1 .txt +part2 .fnx +part2 .txt +part3 .fnx +part3 .txt +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 1 const PRIMARY PRIMARY 4 const 1 +ALTER TABLE t1 DROP PRIMARY KEY; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +SELECT * FROM dr1 ORDER BY fname, ftype; +fname ftype +part1 .txt +part2 .txt +part3 .txt +DROP TABLE t1; +DROP TABLE dr1; +set @@global.connect_exact_info=OFF; diff --git a/storage/connect/mysql-test/connect/r/part_table.result b/storage/connect/mysql-test/connect/r/part_table.result new file mode 100644 index 00000000000..122c328fa59 --- /dev/null +++ 
b/storage/connect/mysql-test/connect/r/part_table.result @@ -0,0 +1,197 @@ +set @@global.connect_exact_info=ON; +CREATE TABLE xt1 ( +id INT KEY NOT NULL, +msg VARCHAR(32)) +ENGINE=MyISAM; +INSERT INTO xt1 VALUES(4, 'four'),(7,'seven'),(1,'one'),(8,'eight'); +SELECT * FROM xt1; +id msg +4 four +7 seven +1 one +8 eight +CREATE TABLE xt2 ( +id INT KEY NOT NULL, +msg VARCHAR(32)); +INSERT INTO xt2 VALUES(10,'ten'),(40,'forty'),(11,'eleven'),(35,'thirty five'); +SELECT * FROM xt2; +id msg +10 ten +40 forty +11 eleven +35 thirty five +CREATE TABLE xt3 ( +id INT KEY NOT NULL, +msg VARCHAR(32)) +ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10; +Warnings: +Warning 1105 No file name. Table will use xt3.csv +INSERT INTO xt3 VALUES(60,'sixty'),(81,'eighty one'),(72,'seventy two'); +SELECT * FROM xt3; +id msg +60 sixty +81 eighty one +72 seventy two +CREATE TABLE t1 ( +id INT NOT NULL, +msg VARCHAR(32)) +ENGINE=CONNECT TABLE_TYPE=PROXY TABNAME='xt%s' +PARTITION BY RANGE COLUMNS(id) ( +PARTITION `1` VALUES LESS THAN(10), +PARTITION `2` VALUES LESS THAN(50), +PARTITION `3` VALUES LESS THAN(MAXVALUE)); +Warnings: +Warning 1105 Data repartition in 1 is unchecked +Warning 1105 Data repartition in 2 is unchecked +Warning 1105 Data repartition in 3 is unchecked +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 4 +2 4 +3 3 +SELECT * FROM t1; +id msg +4 four +7 seven +1 one +8 eight +10 ten +40 forty +11 eleven +35 thirty five +60 sixty +81 eighty one +72 seventy two +DELETE FROM t1; +Warnings: +Note 1105 xt1: 4 affected rows +Note 1105 xt2: 4 affected rows +ALTER TABLE t1 ADD INDEX XID(id); +ERROR HY000: Table type PROXY is not indexable +INSERT INTO t1 VALUES(4, 'four'); +INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'); +INSERT INTO t1 VALUES(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 4 +2 4 +3 3 +SELECT * FROM t1; +id msg +4 four +7 seven +1 one +8 eight +10 ten +40 forty +11 eleven +35 thirty five +60 sixty +81 eighty one +72 seventy two +EXPLAIN PARTITIONS +SELECT * FROM t1 WHERE id = 81; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 3 ALL NULL NULL NULL NULL 4 Using where +DELETE FROM t1; +Warnings: +Note 1105 xt1: 4 affected rows +Note 1105 xt2: 4 affected rows +DROP TABLE t1; +CREATE TABLE t1 ( +id INT KEY NOT NULL, +msg VARCHAR(32)) +ENGINE=CONNECT TABLE_TYPE=MYSQL TABNAME='xt%s' +PARTITION BY RANGE COLUMNS(id) ( +PARTITION `1` VALUES LESS THAN(10), +PARTITION `2` VALUES LESS THAN(50), +PARTITION `3` VALUES LESS THAN(MAXVALUE)); +Warnings: +Warning 1105 Data repartition in 1 is unchecked +Warning 1105 Data repartition in 2 is unchecked +Warning 1105 Data repartition in 3 is unchecked +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 0 PRIMARY 1 id NULL NULL NULL NULL REMOTE +INSERT INTO t1 VALUES(4, 'four'); +INSERT INTO t1 VALUES(40, 'forty'); +INSERT INTO t1 VALUES(72,'seventy two'); +INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(60,'sixty'),(81,'eighty one'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1'; +partition_name table_rows +1 4 +2 4 +3 3 +SELECT * FROM t1; +id msg +4 four +7 
seven +1 one +8 eight +40 forty +10 ten +11 eleven +35 thirty five +72 seventy two +60 sixty +81 eighty one +EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 81; +id select_type table partitions type possible_keys key key_len ref rows Extra +1 SIMPLE t1 3 const PRIMARY PRIMARY 4 const 1 +SELECT * FROM t1 WHERE id = 7; +id msg +7 seven +SELECT * FROM t1 WHERE id = 35; +id msg +35 thirty five +UPDATE t1 SET msg = 'number' WHERE id in (60,72); +Warnings: +Note 1105 xt3: 2 affected rows +Note 1105 xt3: 0 affected rows +UPDATE t1 SET msg = 'soixante' WHERE id = 60; +Warnings: +Note 1105 xt3: 1 affected rows +SELECT * FROM t1 WHERE id > 50; +id msg +60 soixante +72 number +81 eighty one +UPDATE t1 SET msg = 'big' WHERE id > 50; +Warnings: +Note 1105 xt3: 3 affected rows +UPDATE t1 SET msg = 'sept' WHERE id = 7; +Warnings: +Note 1105 xt1: 1 affected rows +SELECT * FROM t1; +id msg +4 four +7 sept +1 one +8 eight +40 forty +10 ten +11 eleven +35 thirty five +72 big +60 big +81 big +DELETE FROM t1 WHERE id in (60,72); +Warnings: +Note 1105 xt3: 2 affected rows +Note 1105 xt3: 0 affected rows +SELECT * FROM t1; +id msg +4 four +7 sept +1 one +8 eight +40 forty +10 ten +11 eleven +35 thirty five +81 big +DROP TABLE t1; +DROP TABLE xt1; +DROP TABLE xt2; +DROP TABLE xt3; +set @@global.connect_exact_info=OFF; diff --git a/storage/connect/mysql-test/connect/r/pivot.result b/storage/connect/mysql-test/connect/r/pivot.result index 4b39a21d3d9..349db89fa35 100644 --- a/storage/connect/mysql-test/connect/r/pivot.result +++ b/storage/connect/mysql-test/connect/r/pivot.result @@ -59,8 +59,6 @@ Joe 5 14.00 0.00 12.00 # Restricting the columns in a Pivot Table # ALTER TABLE pivex DROP COLUMN week; -Warnings: -Warning 1105 This is an outward table, table data were not modified. SELECT * FROM pivex; Who Beer Car Food Beth 51.00 0.00 29.00 diff --git a/storage/connect/mysql-test/connect/r/temporary.result b/storage/connect/mysql-test/connect/r/temporary.result new file mode 100644 index 00000000000..c4e992f2a64 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/temporary.result @@ -0,0 +1,6 @@ +CREATE TEMPORARY TABLE t1 (a int not null) +ENGINE=CONNECT table_type=MYSQL CONNECTION='mysql://root@127.0.0.1/test/t2'; +ERROR HY000: Table storage engine 'CONNECT' does not support the create option 'TEMPORARY' +CREATE TEMPORARY TABLE t1 +ENGINE=CONNECT table_type=MYSQL CONNECTION='mysql://root@127.0.0.1/test/t2'; +ERROR HY000: Table storage engine 'CONNECT' does not support the create option 'TEMPORARY' diff --git a/storage/connect/mysql-test/connect/r/updelx.result b/storage/connect/mysql-test/connect/r/updelx.result new file mode 100644 index 00000000000..2aed1e06928 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/updelx.result @@ -0,0 +1,2570 @@ +# +# Testing indexed UPDATE and DELETE for all table types +# +# CSV table +CREATE TABLE t1 ( +id INT KEY NOT NULL, +msg VARCHAR(32)) +ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=6; +Warnings: +Warning 1105 No file name. 
Table will use t1.csv +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER 
TABLE t1 MAPPED=NO BLOCK_SIZE=6; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id 
msg +DROP TABLE t1; +# DOS table +CREATE TABLE t1 ( +id INT(4) KEY NOT NULL, +msg VARCHAR(16)) +ENGINE=CONNECT TABLE_TYPE=DOS; +Warnings: +Warning 1105 No file name. Table will use t1.dos +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); 
+SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=NO BLOCK_SIZE=4; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id 
IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +DROP TABLE t1; +# FIX table +CREATE TABLE t1 ( +id INT(4) KEY NOT NULL, +msg VARCHAR(16) CHARSET BINARY DISTRIB=CLUSTERED) +ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=4; +Warnings: +Warning 1105 No file name. Table will use t1.fix +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; 
+id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=NO HUGE=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +DROP TABLE t1; +# BIN table +CREATE TABLE t1 ( +id INT(4) KEY NOT NULL, +msg VARCHAR(16) CHARSET BINARY DISTRIB=CLUSTERED) +ENGINE=CONNECT TABLE_TYPE=BIN BLOCK_SIZE=8; +Warnings: +Warning 1105 No file name. 
Table will use t1.bin +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER 
TABLE t1 MAPPED=NO HUGE=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +DROP TABLE t1; +# DBF table +CREATE TABLE t1 ( +id INT(4) KEY NOT NULL, +msg VARCHAR(16)) +ENGINE=CONNECT TABLE_TYPE=DBF BLOCK_SIZE=12; +Warnings: +Warning 1105 No file name. 
Table will use t1.dbf +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +DROP 
TABLE t1; +# VEC table +CREATE TABLE t1 ( +id INT(4) KEY NOT NULL, +msg VARCHAR(16)) +ENGINE=CONNECT TABLE_TYPE=VEC BLOCK_SIZE=6 MAX_ROWS=16; +Warnings: +Warning 1105 No file name. Table will use t1.vec +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN 
(81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=NO HUGE=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +DROP TABLE t1; +# Split VEC table (outward) +CREATE TABLE t1 ( +id INT(4) KEY NOT NULL, +msg VARCHAR(16)) +ENGINE=CONNECT TABLE_TYPE=VEC BLOCK_SIZE=6 FILE_NAME='tx.vec'; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 
updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +ALTER TABLE t1 MAPPED=YES; +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 thirty five +8 eight +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 sixty +81 eighty one +72 seventy two +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 big +81 big +72 big +11 eleven +1 one +35 bof +8 eight +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +id msg +4 four +7 seven +10 ten +40 forty +60 updated +81 big +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +id msg +4 four +7 seven +10 twin +40 forty +60 updated +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +id msg +60 sixty +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +id msg +7 seven +10 twin +40 forty +60 sixty +81 twin +72 updated +11 eleven +1 one +35 updated +8 updated +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +id msg +7 seven +10 twin +60 sixty +81 twin +72 updated +1 one +8 updated +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +msg +seven +twin +twin +updated +updated +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +id +7 +8 +10 +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +id msg +8 updated +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; +id msg +DROP TABLE t1; diff --git a/storage/connect/mysql-test/connect/r/xml.result b/storage/connect/mysql-test/connect/r/xml.result index 5018eec47fc..eea53bf55c7 100644 --- a/storage/connect/mysql-test/connect/r/xml.result +++ b/storage/connect/mysql-test/connect/r/xml.result @@ -416,7 +416,7 @@ DROP TABLE t1; SET @a=LOAD_FILE('MYSQLD_DATADIR/test/t1.xml'); SELECT CAST(@a AS CHAR CHARACTER SET latin1); CAST(@a AS CHAR CHARACTER SET latin1) <?xml version="1.0" encoding="iso-8859-1"?> -<!-- Created by CONNECT Version 1.02.0002 March 16, 2014 --> +<!-- Created by the MariaDB CONNECT Storage Engine--> <t1> <line> <node>ÀÃÂÃ</node> diff --git a/storage/connect/mysql-test/connect/t/alter.test b/storage/connect/mysql-test/connect/t/alter.test index 299381b925a..49f34996bbd 100644 --- a/storage/connect/mysql-test/connect/t/alter.test +++ b/storage/connect/mysql-test/connect/t/alter.test @@ -105,8 +105,12 @@ SELECT * FROM t1; --echo # Changing to another engine is Ok
--echo # However, the data file is not deleted.
--echo #
-ALTER TABLE t1 ENGINE=MARIA;
+ALTER TABLE t1 ENGINE=ARIA;
SHOW CREATE TABLE t1;
+set @old_sql_mode=@@sql_mode;
+set sql_mode=ignore_bad_table_options;
+SHOW CREATE TABLE t1;
+set sql_mode=@old_sql_mode;
SELECT * from t1;
SELECT * from t2;
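The extra SHOW CREATE TABLE under sql_mode=ignore_bad_table_options is there because, once the table has been moved to Aria, the CONNECT-specific options (TABLE_TYPE, FILE_NAME and so on) are no longer recognized by the new engine. A minimal sketch of the behaviour this relies on (table name and comments purely illustrative, not part of the test):

SET sql_mode='';
SHOW CREATE TABLE t1;   -- unrecognized engine options are displayed inside /* ... */ comments
SET sql_mode='ignore_bad_table_options';
SHOW CREATE TABLE t1;   -- the same options are shown (and accepted) without the comments
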
diff --git a/storage/connect/mysql-test/connect/t/part_file.test b/storage/connect/mysql-test/connect/t/part_file.test new file mode 100644 index 00000000000..6efd2b9b580 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/part_file.test @@ -0,0 +1,166 @@ +--source include/have_partition.inc
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+set @@global.connect_exact_info=ON;
+
+--echo # This will be used to see what data files are created
+CREATE TABLE dr1 (
+ fname VARCHAR(256) NOT NULL FLAG=2,
+ ftype CHAR(8) NOT NULL FLAG=3
+# ,FSIZE INT(6) NOT NULL FLAG=5 removed because Unix size != Windows size
+) engine=CONNECT table_type=DIR file_name='t1#P#*.*';
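dr1 is a CONNECT DIR table: each row describes one file in the datadir that matches the FILE_NAME pattern, and the FLAG column option picks which file attribute the column returns (FLAG=2 the base name, FLAG=3 the extension; FLAG=5 would be the size, left out above because it differs between Unix and Windows). A sketch of the same idea for the outward 'part*' files used further down, with the purely illustrative name dr_parts:

CREATE TABLE dr_parts (
  fname VARCHAR(256) NOT NULL FLAG=2,  -- file base name
  ftype CHAR(8) NOT NULL FLAG=3        -- file extension
) ENGINE=CONNECT TABLE_TYPE=DIR FILE_NAME='part*.*';
SELECT * FROM dr_parts ORDER BY fname, ftype;  -- sketch: lists part1.txt, part1.fnx, ... as they get created
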
+
+--echo #
+--echo # Testing partitioning on inward table
+--echo #
+CREATE TABLE t1 (
+ id INT NOT NULL,
+ msg VARCHAR(32)
+) ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10
+PARTITION BY RANGE(id) (
+PARTITION first VALUES LESS THAN(10),
+PARTITION middle VALUES LESS THAN(50),
+PARTITION last VALUES LESS THAN(MAXVALUE));
+INSERT INTO t1 VALUES(4, 'four'),(24, 'twenty four');
+INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one');
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id > 50;
+SELECT * FROM t1 WHERE id > 50;
+# TODO: SHOW TABLE STATUS output differs between Linux and Windows
+#SHOW TABLE STATUS LIKE 't1';
+--error ER_GET_ERRMSG
+UPDATE t1 set id = 41 WHERE msg = 'four';
+UPDATE t1 set msg = 'quatre' WHERE id = 4;
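The failing UPDATE above is intentional: with CONNECT each partition is a separate data file and the engine cannot move a row from one file to another, so updating the partitioning column is rejected (error 174, 'Cannot update column id because it is used for partitioning'), while updates of other columns such as msg go through normally. A sketch of the usual workaround (illustrative values, not part of the test) is to delete the row and re-insert it under the new key so it gets routed to the right partition file:

-- sketch only: move the id=4 row to the partition that holds 41
DELETE FROM t1 WHERE id = 4;
INSERT INTO t1 VALUES(41, 'quatre');
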
+SELECT * FROM dr1 ORDER BY fname, ftype;
+--echo #
+--echo # Altering partitioning on inward table
+--echo #
+ALTER TABLE t1
+PARTITION by range(id) (
+PARTITION first VALUES LESS THAN(11),
+PARTITION middle VALUES LESS THAN(50),
+PARTITION last VALUES LESS THAN(MAXVALUE));
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM dr1 ORDER BY fname, ftype;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id=10;
+SELECT * FROM t1 WHERE id=10;
+DELETE FROM t1 WHERE id in (4,60);
+SELECT * FROM t1;
+DROP TABLE t1;
+# TODO: this fails on Linux
+#SELECT * FROM dr1;
+
+--echo #
+--echo # Testing partitioning on a void outward table
+--echo #
+ALTER TABLE dr1 FILE_NAME='part*.*';
+CREATE TABLE t1 (
+ rwid INT(6) DEFAULT 0 SPECIAL=ROWID,
+ rnum INT(6) DEFAULT 0 SPECIAL=ROWNUM,
+ prtn VARCHAR(64) DEFAULT '' SPECIAL=PARTID,
+ tbn VARCHAR(64) DEFAULT '' SPECIAL=TABID,
+ fid VARCHAR(256) DEFAULT '' SPECIAL=FNAME,
+ id INT KEY NOT NULL,
+ msg VARCHAR(32)
+) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='part%s.txt';
+--replace_result $MYSQLD_DATADIR "DATADIR/"
+ALTER TABLE t1
+PARTITION by range columns(id) (
+PARTITION `1` VALUES LESS THAN(10),
+PARTITION `2` VALUES LESS THAN(50),
+PARTITION `3` VALUES LESS THAN(MAXVALUE));
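FILE_NAME='part%s.txt' together with the partition names `1`, `2` and `3` means CONNECT substitutes the partition name for %s, so the partitions are stored in part1.txt, part2.txt and part3.txt (exactly the files the dr1 listing reports below). Because these are outward files, each one can also be opened on its own with a plain CONNECT table; a sketch, using the purely illustrative name p2_only:

CREATE TABLE p2_only (
  id INT NOT NULL,
  msg VARCHAR(32)
) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='part2.txt';
SELECT * FROM p2_only;  -- sketch: only the rows that were routed to partition `2`
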
+SHOW INDEX FROM t1;
+# TODO: this fails on Linux
+#SELECT * FROM dr1 ORDER BY fname, ftype;
+INSERT INTO t1(id,msg) VALUES(4, 'four');
+SELECT * FROM dr1 ORDER BY fname, ftype;
+INSERT INTO t1(id,msg) VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one');
+INSERT INTO t1(id,msg) VALUES(72,'seventy two'),(20,'twenty'),(1,'one'),(35,'thirty five'),(8,'eight');
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+SELECT * FROM t1 order by id;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10;
+SELECT * FROM t1 WHERE id = 10;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id >= 10;
+SELECT * FROM t1 WHERE id >= 10;
+SELECT count(*) FROM t1 WHERE id < 10;
+SELECT case when id < 10 then 1 when id < 50 then 2 else 3 end as pn, count(*) FROM t1 group by pn;
+SELECT prtn, count(*) FROM t1 group by prtn;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id > 50;
+SELECT * FROM t1 WHERE id = 35;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+--echo # This does not change the partition file data and is WRONG
+ALTER TABLE t1
+PARTITION by range columns(id) (
+PARTITION `1` VALUES LESS THAN(11),
+PARTITION `2` VALUES LESS THAN(70),
+PARTITION `3` VALUES LESS THAN(MAXVALUE));
+SELECT CASE WHEN id < 11 THEN 1 WHEN id < 70 THEN 2 ELSE 3 END AS pn, COUNT(*) FROM t1 GROUP BY pn;
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM dr1 ORDER BY fname, ftype;
+--echo #
+--echo # This is the correct way to change partitioning:
+--echo # Save the table values, erase the table, then re-insert the saved values into the modified table
+--echo #
+CREATE TABLE t2 (
+ id INT NOT NULL,
+ msg VARCHAR(32)
+) ENGINE=CONNECT TABLE_TYPE=FIX;
+INSERT INTO t2 SELECT id, msg FROM t1;
+DELETE FROM t1;
+INSERT INTO t1(id,msg) SELECT * FROM t2;
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+DROP TABLE t2;
+DROP TABLE t1;
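The statements above are the pattern to follow whenever the partitioning of an outward CONNECT table has to change: ALTER ... PARTITION only rewrites the definition and does not move rows between the existing data files, so the rows are first copied to a scratch table, the partitioned table is emptied, and the rows are re-inserted so that each one is written into the file of its new partition. Condensed into a sketch (t2 stands for any scratch table, as in the test):

CREATE TABLE t2 AS SELECT id, msg FROM t1;       -- save the current rows
DELETE FROM t1;                                  -- empty every partition file
ALTER TABLE t1
PARTITION BY RANGE COLUMNS(id) (
PARTITION `1` VALUES LESS THAN(11),
PARTITION `2` VALUES LESS THAN(70),
PARTITION `3` VALUES LESS THAN(MAXVALUE));       -- new boundaries
INSERT INTO t1(id, msg) SELECT id, msg FROM t2;  -- redistribute into the new files
DROP TABLE t2;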
+
+--echo #
+--echo # Testing partitioning on a populated outward table
+--echo #
+CREATE TABLE t1 (
+ id INT NOT NULL,
+ msg VARCHAR(32)
+) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='part%s.txt'
+PARTITION by range columns(id) (
+PARTITION `1` VALUES LESS THAN(11),
+PARTITION `2` VALUES LESS THAN(70),
+PARTITION `3` VALUES LESS THAN(MAXVALUE));
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1 WHERE id < 11;
+SELECT * FROM t1 WHERE id >= 70;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+
+--echo #
+--echo # Testing indexing on a partitioned table
+--echo #
+CREATE INDEX XID ON t1(id);
+SHOW INDEX FROM t1;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10;
+DROP INDEX XID ON t1;
+SHOW INDEX FROM t1;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+ALTER TABLE t1 ADD PRIMARY KEY (id);
+SHOW INDEX FROM t1;
+SELECT * FROM dr1 ORDER BY fname, ftype;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10;
+ALTER TABLE t1 DROP PRIMARY KEY;
+SHOW INDEX FROM t1;
+SELECT * FROM dr1 ORDER BY fname, ftype;
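Indexing a partitioned CONNECT table creates one index file per partition, which is why the commented-out cleanup below mentions part1.fnx, part2.fnx and part3.fnx; the dr1 listings surrounding each CREATE/DROP INDEX and ADD/DROP PRIMARY KEY are there to show those .fnx files appearing and disappearing next to the data files. A sketch of the same check run by hand (the LIKE pattern is an assumption about how the extension is reported):

SELECT fname, ftype FROM dr1 WHERE ftype LIKE '%fnx%' ORDER BY fname;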
+DROP TABLE t1;
+DROP TABLE dr1;
+
+#
+# Clean up
+#
+set @@global.connect_exact_info=OFF;
+
+--remove_file $MYSQLD_DATADIR/test/part1.txt
+--remove_file $MYSQLD_DATADIR/test/part2.txt
+--remove_file $MYSQLD_DATADIR/test/part3.txt
+#--remove_file $MYSQLD_DATADIR/test/part%s.fnx
+#--remove_file $MYSQLD_DATADIR/test/part1.fnx
+#--remove_file $MYSQLD_DATADIR/test/part2.fnx
+#--remove_file $MYSQLD_DATADIR/test/part3.fnx
diff --git a/storage/connect/mysql-test/connect/t/part_table.test b/storage/connect/mysql-test/connect/t/part_table.test new file mode 100644 index 00000000000..c5358d63c8e --- /dev/null +++ b/storage/connect/mysql-test/connect/t/part_table.test @@ -0,0 +1,92 @@ +--source include/not_embedded.inc
+--source include/have_partition.inc
+
+set @@global.connect_exact_info=ON;
+
+#
+# These tables will back the partitions of the partitioned table t1
+#
+CREATE TABLE xt1 (
+id INT KEY NOT NULL,
+msg VARCHAR(32))
+ENGINE=MyISAM;
+INSERT INTO xt1 VALUES(4, 'four'),(7,'seven'),(1,'one'),(8,'eight');
+SELECT * FROM xt1;
+
+CREATE TABLE xt2 (
+id INT KEY NOT NULL,
+msg VARCHAR(32));
+INSERT INTO xt2 VALUES(10,'ten'),(40,'forty'),(11,'eleven'),(35,'thirty five');
+SELECT * FROM xt2;
+
+CREATE TABLE xt3 (
+id INT KEY NOT NULL,
+msg VARCHAR(32))
+ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=10;
+INSERT INTO xt3 VALUES(60,'sixty'),(81,'eighty one'),(72,'seventy two');
+SELECT * FROM xt3;
+
+#
+# Being based on PROXY, the table is not indexable
+#
+CREATE TABLE t1 (
+id INT NOT NULL,
+msg VARCHAR(32))
+ENGINE=CONNECT TABLE_TYPE=PROXY TABNAME='xt%s'
+PARTITION BY RANGE COLUMNS(id) (
+PARTITION `1` VALUES LESS THAN(10),
+PARTITION `2` VALUES LESS THAN(50),
+PARTITION `3` VALUES LESS THAN(MAXVALUE));
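TABNAME='xt%s' works like the %s in FILE_NAME earlier: the partition name is substituted into the option, so partition `1` is served by xt1, `2` by xt2 and `3` by xt3, and the first SELECT below returns the union of the three backing tables even though nothing has been inserted through t1 yet. Because a PROXY table merely relays rows to the table it points to, it cannot carry a CONNECT index of its own, which is why the ADD INDEX attempt below is expected to fail with ER_UNKNOWN_ERROR. A hedged way to see the mapping directly:

-- ids from 10 to 49 fall in partition `2`, which is backed by xt2
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id BETWEEN 10 AND 49;
SELECT * FROM xt2;   -- the same rows, read from the backing table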
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+DELETE FROM t1;
+--error ER_UNKNOWN_ERROR
+ALTER TABLE t1 ADD INDEX XID(id);
+INSERT INTO t1 VALUES(4, 'four');
+INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one');
+INSERT INTO t1 VALUES(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight');
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+EXPLAIN PARTITIONS
+SELECT * FROM t1 WHERE id = 81;
+DELETE FROM t1;
+DROP TABLE t1;
+
+#
+# Being based on MYSQL, the table is indexable
+#
+CREATE TABLE t1 (
+id INT KEY NOT NULL,
+msg VARCHAR(32))
+ENGINE=CONNECT TABLE_TYPE=MYSQL TABNAME='xt%s'
+PARTITION BY RANGE COLUMNS(id) (
+PARTITION `1` VALUES LESS THAN(10),
+PARTITION `2` VALUES LESS THAN(50),
+PARTITION `3` VALUES LESS THAN(MAXVALUE));
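Unlike PROXY, a partition of TABLE_TYPE=MYSQL reaches its backing table through the MySQL API, and the KEY declared on id can be mapped onto the backing tables' own keys, so the partitioned table is indexable: a point query can be pruned to a single partition and resolved through an index there. A short illustration on the same schema (values taken from the inserts below):

EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 40;  -- expected to touch only partition `2`
SELECT * FROM t1 WHERE id = 40;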
+SHOW INDEX FROM t1;
+INSERT INTO t1 VALUES(4, 'four');
+INSERT INTO t1 VALUES(40, 'forty');
+INSERT INTO t1 VALUES(72,'seventy two');
+INSERT INTO t1 VALUES(7,'seven'),(10,'ten'),(60,'sixty'),(81,'eighty one'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight');
+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
+SELECT * FROM t1;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 81;
+SELECT * FROM t1 WHERE id = 7;
+SELECT * FROM t1 WHERE id = 35;
+UPDATE t1 SET msg = 'number' WHERE id in (60,72);
+UPDATE t1 SET msg = 'soixante' WHERE id = 60;
+SELECT * FROM t1 WHERE id > 50;
+UPDATE t1 SET msg = 'big' WHERE id > 50;
+UPDATE t1 SET msg = 'sept' WHERE id = 7;
+SELECT * FROM t1;
+DELETE FROM t1 WHERE id in (60,72);
+SELECT * FROM t1;
+DROP TABLE t1;
+DROP TABLE xt1;
+DROP TABLE xt2;
+DROP TABLE xt3;
+
+#
+# Clean up
+#
+set @@global.connect_exact_info=OFF;
diff --git a/storage/connect/mysql-test/connect/t/temporary.test b/storage/connect/mysql-test/connect/t/temporary.test new file mode 100644 index 00000000000..dda066c874b --- /dev/null +++ b/storage/connect/mysql-test/connect/t/temporary.test @@ -0,0 +1,13 @@ +# +# CONNECT tables cannot be TEMPORARY +# + +--error ER_ILLEGAL_HA_CREATE_OPTION +CREATE TEMPORARY TABLE t1 (a int not null) + ENGINE=CONNECT table_type=MYSQL CONNECTION='mysql://root@127.0.0.1/test/t2'; + +# also with assisted discovery +--error ER_ILLEGAL_HA_CREATE_OPTION +CREATE TEMPORARY TABLE t1 + ENGINE=CONNECT table_type=MYSQL CONNECTION='mysql://root@127.0.0.1/test/t2'; + diff --git a/storage/connect/mysql-test/connect/t/updelx.inc b/storage/connect/mysql-test/connect/t/updelx.inc new file mode 100644 index 00000000000..f38a59b9983 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/updelx.inc @@ -0,0 +1,25 @@ +DELETE FROM t1; +INSERT INTO t1 VALUES(4, 'four'),(7,'seven'),(10,'ten'),(40,'forty'),(60,'sixty'),(81,'eighty one'),(72,'seventy two'),(11,'eleven'),(1,'one'),(35,'thirty five'),(8,'eight'); +SELECT * FROM t1; +UPDATE t1 SET msg = 'bof' WHERE id = 35; +SELECT * FROM t1; +UPDATE t1 SET msg = 'big' WHERE id > 50; +SELECT * FROM t1; +UPDATE t1 SET msg = 'updated' WHERE id IN (8,35,60,72); +SELECT * FROM t1; +UPDATE t1 SET msg = 'twin' WHERE id IN (81,10); +SELECT * FROM t1; +UPDATE t1 SET msg = 'sixty' WHERE id = 60; +SELECT * FROM t1 WHERE id = 60; +DELETE FROM t1 WHERE id = 4; +SELECT * FROM t1; +DELETE FROM t1 WHERE id IN (40,11,35); +SELECT * FROM t1; +DELETE FROM t1 WHERE id IN (4,60,1); +SELECT msg FROM t1; +DELETE FROM t1 WHERE id IN (81,72); +SELECT id FROM t1; +DELETE FROM t1 WHERE id IN (7,10); +SELECT * FROM t1; +DELETE FROM t1 WHERE id = 8; +SELECT * FROM t1; diff --git a/storage/connect/mysql-test/connect/t/updelx.test b/storage/connect/mysql-test/connect/t/updelx.test new file mode 100644 index 00000000000..ab336d4b168 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/updelx.test @@ -0,0 +1,96 @@ +-- source include/not_embedded.inc
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--echo #
+--echo # Testing indexed UPDATE and DELETE for all table types
+--echo #
+
+--echo # CSV table
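Each table type below is run through the same battery of statements by sourcing updelx.inc (added earlier in this patch), and then sourcing it again after ALTER TABLE switches access options such as MAPPED, BLOCK_SIZE or HUGE, so that indexed UPDATE and DELETE are covered for every file access method. Representative of what each pass executes (statements taken from updelx.inc):

UPDATE t1 SET msg = 'bof' WHERE id = 35;    -- single row, located through the index on id
DELETE FROM t1 WHERE id IN (40,11,35);      -- several rows, still resolved through the index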
+CREATE TABLE t1 (
+id INT KEY NOT NULL,
+msg VARCHAR(32))
+ENGINE=CONNECT TABLE_TYPE=CSV AVG_ROW_LENGTH=6;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=YES;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=NO BLOCK_SIZE=6;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=YES;
+-- source updelx.inc
+DROP TABLE t1;
+
+--echo # DOS table
+CREATE TABLE t1 (
+id INT(4) KEY NOT NULL,
+msg VARCHAR(16))
+ENGINE=CONNECT TABLE_TYPE=DOS;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=YES;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=NO BLOCK_SIZE=4;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=YES;
+-- source updelx.inc
+DROP TABLE t1;
+
+--echo # FIX table
+CREATE TABLE t1 (
+id INT(4) KEY NOT NULL,
+msg VARCHAR(16) CHARSET BINARY DISTRIB=CLUSTERED)
+ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=4;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=YES;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=NO HUGE=YES;
+-- source updelx.inc
+DROP TABLE t1;
+
+--echo # BIN table
+CREATE TABLE t1 (
+id INT(4) KEY NOT NULL,
+msg VARCHAR(16) CHARSET BINARY DISTRIB=CLUSTERED)
+ENGINE=CONNECT TABLE_TYPE=BIN BLOCK_SIZE=8;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=YES;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=NO HUGE=YES;
+-- source updelx.inc
+DROP TABLE t1;
+
+--echo # DBF table
+CREATE TABLE t1 (
+id INT(4) KEY NOT NULL,
+msg VARCHAR(16))
+ENGINE=CONNECT TABLE_TYPE=DBF BLOCK_SIZE=12;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=YES;
+-- source updelx.inc
+#ALTER TABLE t1 MAPPED=NO HUGE=YES;
+#-- source updelx.inc
+DROP TABLE t1;
+
+--echo # VEC table
+CREATE TABLE t1 (
+id INT(4) KEY NOT NULL,
+msg VARCHAR(16))
+ENGINE=CONNECT TABLE_TYPE=VEC BLOCK_SIZE=6 MAX_ROWS=16;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=YES;
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=NO HUGE=YES;
+-- source updelx.inc
+DROP TABLE t1;
+
+--echo # Split VEC table (outward)
+CREATE TABLE t1 (
+id INT(4) KEY NOT NULL,
+msg VARCHAR(16))
+ENGINE=CONNECT TABLE_TYPE=VEC BLOCK_SIZE=6 FILE_NAME='tx.vec';
+-- source updelx.inc
+ALTER TABLE t1 MAPPED=YES;
+-- source updelx.inc
+DROP TABLE t1;
+
+# Cleanup
+--remove_file $MYSQLD_DATADIR/test/tx1.vec
+--remove_file $MYSQLD_DATADIR/test/tx2.vec
diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h index 9466bd83bea..7d5931285ce 100644 --- a/storage/connect/plgdbsem.h +++ b/storage/connect/plgdbsem.h @@ -28,6 +28,8 @@ enum BLKTYP {TYPE_TABLE = 50, /* Table Name/Srcdef/... Block */ TYPE_COLUMN = 51, /* Column Name/Qualifier Block */ TYPE_TDB = 53, /* Table Description Block */ TYPE_COLBLK = 54, /* Column Description Block */ + TYPE_FILTER = 55, /* Filter Description Block */ + TYPE_ARRAY = 63, /* General array type */ TYPE_PSZ = 64, /* Pointer to String ended by 0 */ TYPE_SQL = 65, /* Pointer to SQL block */ TYPE_XOBJECT = 69, /* Extended DB object */ @@ -83,6 +85,7 @@ enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */ TYPE_AM_SRVID = 5, /* SERVID type (special column) */ TYPE_AM_TABID = 6, /* TABID type (special column) */ TYPE_AM_CNSID = 7, /* CONSTID type (special column) */ + TYPE_AM_PRTID = 8, /* PARTID type (special column) */ TYPE_AM_COUNT = 10, /* CPT AM type no (count table) */ TYPE_AM_DCD = 20, /* Decode access method type no */ TYPE_AM_CMS = 30, /* CMS access method type no */ @@ -371,6 +374,7 @@ typedef class COLDEF *PCOLDEF; typedef class CONSTANT *PCONST; typedef class VALUE *PVAL; typedef class VALBLK *PVBLK; +typedef class FILTER *PFIL; typedef struct _fblock *PFBLOCK; typedef struct _mblock *PMBLOCK; @@ -416,9 +420,10 @@ typedef struct { /* User application block */ PFBLOCK Openlist; /* To file/map open list */ PMBLOCK Memlist; /* To memory block list */ PXUSED Xlist; /* To used index list */ + int Maxbmp; /* Maximum XDB2 bitmap size */ int Check; /* General level of checking */ int Numlines; /* Number of lines involved */ - USETEMP UseTemp; /* Use temporary file */ +//USETEMP UseTemp; /* Use temporary file */ int Vtdbno; /* Used for TDB number setting */ bool Remote; /* true: if remotely called */ bool Proginfo; /* true: return progress info */ @@ -461,6 +466,21 @@ typedef struct _tabs { } TABS; /***********************************************************************/ +/* Argument of expression, function, filter etc. (Xobject) */ +/***********************************************************************/ +typedef struct _arg { /* Argument */ + PXOB To_Obj; /* To the argument object */ + PVAL Value; /* Argument value */ + bool Conv; /* TRUE if conversion is required */ + } ARGBLK, *PARG; + +typedef struct _oper { /* Operator */ + PSZ Name; /* The input/output operator name */ + OPVAL Val; /* Operator numeric value */ + int Mod; /* The modificator */ + } OPER, *POPER; + +/***********************************************************************/ /* Following definitions are used to define table fields (columns). */ /***********************************************************************/ enum XFLD {FLD_NO = 0, /* Not a field definition item */ @@ -529,7 +549,9 @@ typedef struct _colres { PPARM Vcolist(PGLOBAL, PTDB, PSZ, bool); void PlugPutOut(PGLOBAL, FILE *, short, void *, uint); void PlugLineDB(PGLOBAL, PSZ, short, void *, uint); -char *PlgGetDataPath(PGLOBAL g); +//ar *PlgGetDataPath(PGLOBAL g); +char *SetPath(PGLOBAL g, const char *path); +char *ExtractFromPath(PGLOBAL, char *, char *, OPVAL); void AddPointer(PTABS, void *); PDTP MakeDateFormat(PGLOBAL, PSZ, bool, bool, int); int ExtractDate(char *, PDTP, int, int val[6]); @@ -538,14 +560,15 @@ int ExtractDate(char *, PDTP, int, int val[6]); /* Allocate the result structure that will contain result data. 
*/ /**************************************************************************/ DllExport PQRYRES PlgAllocResult(PGLOBAL g, int ncol, int maxres, int ids, - int *buftyp, XFLD *fldtyp, - unsigned int *length, + int *buftyp, XFLD *fldtyp, + unsigned int *length, bool blank, bool nonull); /***********************************************************************/ /* Exported utility routines. */ /***********************************************************************/ DllExport FILE *PlugOpenFile(PGLOBAL, LPCSTR, LPCSTR); +DllExport FILE *PlugReopenFile(PGLOBAL, PFBLOCK, LPCSTR); DllExport int PlugCloseFile(PGLOBAL, PFBLOCK, bool all = false); DllExport void PlugCleanup(PGLOBAL, bool); DllExport bool GetPromptAnswer(PGLOBAL, char *); @@ -561,7 +584,7 @@ DllExport void *PlgDBrealloc(PGLOBAL, void *, MBLOCK&, size_t); DllExport void NewPointer(PTABS, void *, void *); DllExport char *GetIni(int n= 0); DllExport void SetTrc(void); -DllExport char *GetListOption(PGLOBAL, const char *, const char *, +DllExport char *GetListOption(PGLOBAL, const char *, const char *, const char *def=NULL); #define MSGID_NONE 0 diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index c0f7fc1c253..c5b66e8f5e6 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -336,17 +336,8 @@ PDBUSER PlgMakeUser(PGLOBAL g) } // endif dbuserp memset(dbuserp, 0, sizeof(DBUSERBLK)); -//dbuserp->Act2 = g->Activityp; -//#if defined(UNIX) -// dbuserp->LineLen = 160; -//#else -// dbuserp->LineLen = 78; -//#endif -//dbuserp->Maxres = MAXRES; -//dbuserp->Maxlin = MAXLIN; -//dbuserp->Maxbmp = MAXBMP; -//dbuserp->AlgChoice = AMOD_AUTO; - dbuserp->UseTemp = TMP_AUTO; + dbuserp->Maxbmp = MAXBMP; +//dbuserp->UseTemp = TMP_AUTO; dbuserp->Check = CHK_ALL; strcpy(dbuserp->Server, "CONNECT"); return dbuserp; @@ -382,6 +373,7 @@ PCATLG PlgGetCatalog(PGLOBAL g, bool jump) return cat; } // end of PlgGetCatalog +#if 0 /***********************************************************************/ /* PlgGetDataPath: returns the default data path. */ /***********************************************************************/ @@ -391,6 +383,64 @@ char *PlgGetDataPath(PGLOBAL g) return (cat) ? cat->GetDataPath() : NULL; } // end of PlgGetDataPath +#endif // 0 + +/***********************************************************************/ +/* This function returns a database path. */ +/***********************************************************************/ +char *SetPath(PGLOBAL g, const char *path) +{ + char *buf= NULL; + + if (path) { + size_t len= strlen(path) + (*path != '.' ? 4 : 1); + + buf= (char*)PlugSubAlloc(g, NULL, len); + + if (PlugIsAbsolutePath(path)) { + strcpy(buf, path); + return buf; + } // endif path + + if (*path != '.') { +#if defined(WIN32) + char *s= "\\"; +#else // !WIN32 + char *s= "/"; +#endif // !WIN32 + strcat(strcat(strcat(strcpy(buf, "."), s), path), s); + } else + strcpy(buf, path); + + } // endif path + + return buf; +} // end of SetPath + +/***********************************************************************/ +/* Extract from a path name the required component. */ +/* This function assumes there is enough space in the buffer. 
*/ +/***********************************************************************/ +char *ExtractFromPath(PGLOBAL g, char *pBuff, char *FileName, OPVAL op) + { + char *drive = NULL, *direc = NULL, *fname = NULL, *ftype = NULL; + + switch (op) { // Determine which part to extract +#if !defined(UNIX) + case OP_FDISK: drive = pBuff; break; +#endif // !UNIX + case OP_FPATH: direc = pBuff; break; + case OP_FNAME: fname = pBuff; break; + case OP_FTYPE: ftype = pBuff; break; + default: + sprintf(g->Message, MSG(INVALID_OPER), op, "ExtractFromPath"); + return NULL; + } // endswitch op + + // Now do the extraction + _splitpath(FileName, drive, direc, fname, ftype); + return pBuff; + } // end of PlgExtractFromPath /***********************************************************************/ /* Check the occurence and matching of a pattern against a string. */ @@ -820,6 +870,23 @@ FILE *PlugOpenFile(PGLOBAL g, LPCSTR fname, LPCSTR ftype) /* Close file routine: the purpose of this routine is to avoid */ /* double closing that freeze the system on some Unix platforms. */ /***********************************************************************/ +FILE *PlugReopenFile(PGLOBAL g, PFBLOCK fp, LPCSTR md) + { + FILE *fop; + + if ((fop = global_fopen(g, MSGID_OPEN_MODE_STRERROR, fp->Fname, md))) { + fp->Count = 1; + fp->Type = TYPE_FB_FILE; + fp->File = fop; + } /* endif fop */ + + return (fop); + } // end of PlugOpenFile + +/***********************************************************************/ +/* Close file routine: the purpose of this routine is to avoid */ +/* double closing that freeze the system on some Unix platforms. */ +/***********************************************************************/ int PlugCloseFile(PGLOBAL g, PFBLOCK fp, bool all) { int rc = 0; diff --git a/storage/connect/plugutil.c b/storage/connect/plugutil.c index 201aa5a4371..c3b77544983 100644 --- a/storage/connect/plugutil.c +++ b/storage/connect/plugutil.c @@ -2,11 +2,11 @@ /* */ /* PROGRAM NAME: PLUGUTIL */ /* ------------- */ -/* Version 2.7 */ +/* Version 2.8 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 1993-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 1993-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -134,7 +134,7 @@ PGLOBAL PlugInit(LPCSTR Language, uint worksize) PGLOBAL g; if (trace > 1) - htrc("PlugInit: Language='%s'\n", + htrc("PlugInit: Language='%s'\n", ((!Language) ? "Null" : (char*)Language)); if (!(g = malloc(sizeof(GLOBAL)))) { @@ -145,6 +145,7 @@ PGLOBAL PlugInit(LPCSTR Language, uint worksize) g->Trace = 0; g->Createas = 0; g->Alchecked = 0; + g->Mrr = 0; g->Activityp = g->ActivityStart = NULL; g->Xchk = NULL; strcpy(g->Message, ""); @@ -298,7 +299,7 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) case '/': break; default: - // This supposes that defdir ends with a SLASH + // This supposes that defdir ends with a SLASH strcpy(direc, strcat(defdir, direc)); } // endswitch @@ -321,13 +322,13 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) /***********************************************************************/ /* PlugGetMessage: get a message from the message file. 
*/ /***********************************************************************/ -char *PlugReadMessage(PGLOBAL g, int mid, char *m) +char *PlugReadMessage(PGLOBAL g, int mid, char *m) { char msgfile[_MAX_PATH], msgid[32], buff[256]; char *msg; FILE *mfile = NULL; - GetPrivateProfileString("Message", msglang, "Message\\english.msg", + GetPrivateProfileString("Message", msglang, "Message\\english.msg", msgfile, _MAX_PATH, plgini); if (!(mfile = fopen(msgfile, "rt"))) { @@ -377,7 +378,7 @@ char *PlugReadMessage(PGLOBAL g, int mid, char *m) /***********************************************************************/ /* PlugGetMessage: get a message from the resource string table. */ /***********************************************************************/ -char *PlugGetMessage(PGLOBAL g, int mid) +char *PlugGetMessage(PGLOBAL g, int mid) { char *msg; @@ -442,7 +443,7 @@ void *PlugAllocMem(PGLOBAL g, uint size) htrc("Memory of %u allocated at %p\n", size, areap); else htrc("PlugAllocMem: %s\n", g->Message); - + } // endif trace return (areap); @@ -521,7 +522,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) /***********************************************************************/ char *PlugDup(PGLOBAL g, const char *str) { - char *buf; + char *buf; size_t len; if (str && (len = strlen(str))) { diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index e7a96a12908..22076b78086 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -1,11 +1,11 @@ /************* RelDef CPP Program Source Code File (.CPP) **************/ /* PROGRAM NAME: REFDEF */ /* ------------- */ -/* Version 1.3 */ +/* Version 1.4 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -49,6 +49,8 @@ #include "tabmul.h" #include "ha_connect.h" +extern "C" int trace; +extern "C" USETEMP Use_Temp; /* --------------------------- Class RELDEF -------------------------- */ @@ -127,24 +129,39 @@ int RELDEF::GetCharCatInfo(PSZ what, PSZ sdef, char *buf, int size) } // end of GetCharCatInfo /***********************************************************************/ +/* To be used by any TDB's. */ +/***********************************************************************/ +bool RELDEF::Partitioned(void) + { + return Hc->IsPartitioned(); + } // end of Partitioned + +/***********************************************************************/ /* This function returns string table information. */ /* Default parameter is "*" to get the handler default. 
*/ /***********************************************************************/ char *RELDEF::GetStringCatInfo(PGLOBAL g, PSZ what, PSZ sdef) { - char *sval= NULL, *s= Hc->GetStringOption(what, sdef); + char *name, *sval= NULL, *s= Hc->GetStringOption(what, sdef); if (s) { - sval= (char*)PlugSubAlloc(g, NULL, strlen(s) + 1); - strcpy(sval, s); + if (!Hc->IsPartitioned() || + (stricmp(what, "filename") && stricmp(what, "tabname") + && stricmp(what, "connect"))) { + sval= (char*)PlugSubAlloc(g, NULL, strlen(s) + 1); + strcpy(sval, s); + } else + sval= s; + } else if (!stricmp(what, "filename")) { // Return default file name char *ftype= Hc->GetStringOption("Type", "*"); int i, n; if (IsFileType(GetTypeID(ftype))) { - sval= (char*)PlugSubAlloc(g, NULL, strlen(Hc->GetTableName()) + 12); - strcat(strcpy(sval, Hc->GetTableName()), "."); + name= Hc->GetPartName(); + sval= (char*)PlugSubAlloc(g, NULL, strlen(name) + 12); + strcat(strcpy(sval, name), "."); n= strlen(sval); // Fold ftype to lower case @@ -211,6 +228,14 @@ bool TABDEF::Define(PGLOBAL g, PCATLG cat, LPCSTR name, LPCSTR am) } // end of Define /***********************************************************************/ +/* This function returns the database data path. */ +/***********************************************************************/ +PSZ TABDEF::GetPath(void) + { + return (Database) ? (PSZ)Database : Hc->GetDataPath(); + } // end of GetPath + +/***********************************************************************/ /* This function returns column table information. */ /***********************************************************************/ int TABDEF::GetColCatInfo(PGLOBAL g) @@ -546,7 +571,7 @@ PTDB OEMDEF::GetTable(PGLOBAL g, MODE mode) PTXF txfp = NULL; PDOSDEF defp = (PDOSDEF)Pxdef; bool map = defp->Mapped && mode != MODE_INSERT && - !(PlgGetUser(g)->UseTemp == TMP_FORCE && + !(Use_Temp == TMP_FORCE && (mode == MODE_UPDATE || mode == MODE_DELETE)); int cmpr = defp->Compressed; @@ -559,10 +584,8 @@ PTDB OEMDEF::GetTable(PGLOBAL g, MODE mode) #if defined(ZIP_SUPPORT) if (cmpr == 1) txfp = new(g) ZIPFAM(defp); - else { - strcpy(g->Message, "Compress 2 not supported yet"); - return NULL; - } // endelse + else + txfp = new(g) ZLBFAM(defp); #else // !ZIP_SUPPORT strcpy(g->Message, "Compress not supported"); return NULL; @@ -613,8 +636,10 @@ COLCRT::COLCRT(PSZ name) Offset = -1; Long = -1; Precision = -1; + Freq = -1; Key = -1; Scale = -1; + Opt = -1; DataType = '*'; } // end of COLCRT constructor for table creation @@ -628,8 +653,10 @@ COLCRT::COLCRT(void) Offset = 0; Long = 0; Precision = 0; + Freq = 0; Key = 0; Scale = 0; + Opt = 0; DataType = '*'; } // end of COLCRT constructor for table & view definition @@ -640,6 +667,14 @@ COLCRT::COLCRT(void) /***********************************************************************/ COLDEF::COLDEF(void) : COLCRT() { + To_Min = NULL; + To_Max = NULL; + To_Pos = NULL; + Xdb2 = FALSE; + To_Bmap = NULL; + To_Dval = NULL; + Ndv = 0; + Nbm = 0; Buf_Type = TYPE_ERROR; Clen = 0; Poff = 0; @@ -671,7 +706,9 @@ int COLDEF::Define(PGLOBAL g, void *memp, PCOLINFO cfp, int poff) Precision = cfp->Precision; Scale = cfp->Scale; Long = cfp->Length; + Opt = cfp->Opt; Key = cfp->Key; + Freq = cfp->Freq; if (cfp->Remark && *cfp->Remark) { Desc = (PSZ)PlugSubAlloc(g, memp, strlen(cfp->Remark) + 1); diff --git a/storage/connect/reldef.h b/storage/connect/reldef.h index 29e4bf77f44..a1dfe87dca8 100644 --- a/storage/connect/reldef.h +++ b/storage/connect/reldef.h @@ -1,7 +1,7 @@ /*************** RelDef H Declares 
Source Code File (.H) ***************/ -/* Name: RELDEF.H Version 1.3 */ +/* Name: RELDEF.H Version 1.5 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2012 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2014 */ /* */ /* This file contains the DEF classes definitions. */ /***********************************************************************/ @@ -13,7 +13,7 @@ #include "catalog.h" #include "my_sys.h" -typedef class INDEXDEF *PIXDEF; +typedef class INDEXDEF *PIXDEF; typedef class ha_connect *PHC; /***********************************************************************/ @@ -42,6 +42,7 @@ class DllExport RELDEF : public BLOCK { // Relation definition block // Methods bool GetBoolCatInfo(PSZ what, bool bdef); bool SetIntCatInfo(PSZ what, int ival); + bool Partitioned(void); int GetIntCatInfo(PSZ what, int idef); int GetSizeCatInfo(PSZ what, PSZ sdef); int GetCharCatInfo(PSZ what, PSZ sdef, char *buf, int size); @@ -78,8 +79,9 @@ class DllExport TABDEF : public RELDEF { /* Logical table descriptor */ void SetNext(PTABDEF tdfp) {Next = tdfp;} int GetMultiple(void) {return Multiple;} int GetPseudo(void) {return Pseudo;} - PSZ GetPath(void) - {return (Database) ? (PSZ)Database : Cat->GetDataPath();} + PSZ GetPath(void); +//PSZ GetPath(void) +// {return (Database) ? (PSZ)Database : Cat->GetDataPath();} bool SepIndex(void) {return GetBoolCatInfo("SepIndex", false);} bool IsReadOnly(void) {return Read_Only;} virtual AMT GetDefType(void) {return TYPE_AM_TAB;} @@ -89,7 +91,7 @@ class DllExport TABDEF : public RELDEF { /* Logical table descriptor */ const CHARSET_INFO *data_charset() {return m_data_charset;} // Methods - int GetColCatInfo(PGLOBAL g); + int GetColCatInfo(PGLOBAL g); void SetIndexInfo(void); bool DropTable(PGLOBAL g, PSZ name); virtual bool Define(PGLOBAL g, PCATLG cat, LPCSTR name, LPCSTR am); @@ -156,6 +158,8 @@ class DllExport COLCRT : public BLOCK { /* Column description block PSZ GetName(void) {return Name;} PSZ GetDecode(void) {return Decode;} PSZ GetFmt(void) {return Fmt;} + int GetOpt(void) {return Opt;} + int GetFreq(void) {return Freq;} int GetLong(void) {return Long;} int GetPrecision(void) {return Precision;} int GetOffset(void) {return Offset;} @@ -172,6 +176,8 @@ class DllExport COLCRT : public BLOCK { /* Column description block int Key; /* Key (greater than 1 if multiple) */ int Precision; /* Logical column length */ int Scale; /* Decimals for float/decimal values */ + int Opt; /* 0:Not 1:clustered 2:sorted-asc 3:desc */ + int Freq; /* Estimated number of different values */ char DataType; /* Internal data type (C, N, F, T) */ }; // end of COLCRT @@ -193,12 +199,34 @@ class DllExport COLDEF : public COLCRT { /* Column description block int GetClen(void) {return Clen;} int GetType(void) {return Buf_Type;} int GetPoff(void) {return Poff;} + void *GetMin(void) {return To_Min;} + void SetMin(void *minp) {To_Min = minp;} + void *GetMax(void) {return To_Max;} + void SetMax(void *maxp) {To_Max = maxp;} + bool GetXdb2(void) {return Xdb2;} + void SetXdb2(bool b) {Xdb2 = b;} + void *GetBmap(void) {return To_Bmap;} + void SetBmap(void *bmp) {To_Bmap = bmp;} + void *GetDval(void) {return To_Dval;} + void SetDval(void *dvp) {To_Dval = dvp;} + int GetNdv(void) {return Ndv;} + void SetNdv(int ndv) {Ndv = ndv;} + int GetNbm(void) {return Nbm;} + void SetNbm(int nbm) {Nbm = nbm;} int Define(PGLOBAL g, void *memp, PCOLINFO cfp, int poff); void Define(PGLOBAL g, PCOL colp); bool IsSpecial(void) {return (Flags & U_SPECIAL) ? 
true : false;} bool IsVirtual(void) {return (Flags & U_VIRTUAL) ? true : false;} protected: + void *To_Min; /* Point to array of block min values */ + void *To_Max; /* Point to array of block max values */ + int *To_Pos; /* Point to array of block positions */ + bool Xdb2; /* TRUE if to be optimized by XDB2 */ + void *To_Bmap; /* To array of block bitmap values */ + void *To_Dval; /* To array of column distinct values */ + int Ndv; /* Number of distinct values */ + int Nbm; /* Number of ULONG in bitmap (XDB2) */ int Buf_Type; /* Internal data type */ int Clen; /* Internal data size in chars (bytes) */ int Poff; /* Calculated offset for Packed tables */ diff --git a/storage/connect/tabcol.h b/storage/connect/tabcol.h index fdee653207e..3bfc37e69c1 100644 --- a/storage/connect/tabcol.h +++ b/storage/connect/tabcol.h @@ -97,7 +97,7 @@ class DllExport COLUMN: public XOBJECT { // Column Name/Qualifier block. /***********************************************************************/ /* Definition of class SPCCOL with all its method functions. */ /* Note: Currently the special columns are ROWID, ROWNUM, FILEID, */ -/* SERVID, TABID, and CONID. */ +/* SERVID, TABID, PARTID, and CONID. */ /***********************************************************************/ class SPCCOL: public COLUMN { // Special Column Name/Qualifier block. public: diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index e66a84f2fa4..c60c9b034f9 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -57,12 +57,29 @@ #include "tabdos.h" #include "tabfix.h" #include "tabmul.h" +#include "array.h" +#include "blkfil.h" /***********************************************************************/ /* DB static variables. */ /***********************************************************************/ int num_read, num_there, num_eq[2]; // Statistics -extern "C" int trace; + +extern "C" int trace; +extern "C" USETEMP Use_Temp; +extern bool xinfo; + +/***********************************************************************/ +/* Size of optimize file header. */ +/***********************************************************************/ +#define NZ 4 + +/***********************************************************************/ +/* Min and Max blocks contains zero ended fields (blank = false). */ +/* No conversion of block values (check = true). 
*/ +/***********************************************************************/ +PVBLK AllocValBlock(PGLOBAL, void *, int, int, int len= 0, int prec= 0, + bool check= true, bool blank= false, bool un= false); /* --------------------------- Class DOSDEF -------------------------- */ @@ -81,6 +98,9 @@ DOSDEF::DOSDEF(void) Huge = false; Accept = false; Eof = false; + To_Pos = NULL; + Optimized = 0; + AllocBlks = 0; Compressed = 0; Lrecl = 0; AvgLen = 0; @@ -90,7 +110,6 @@ DOSDEF::DOSDEF(void) Maxerr = 0; ReadMode = 0; Ending = 0; -//Mtime = 0; } // end of DOSDEF constructor /***********************************************************************/ @@ -116,8 +135,8 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) Compressed = GetIntCatInfo("Compressed", 0); Mapped = GetBoolCatInfo("Mapped", map); - Block = GetIntCatInfo("Blocks", 0); - Last = GetIntCatInfo("Last", 0); +//Block = GetIntCatInfo("Blocks", 0); +//Last = GetIntCatInfo("Last", 0); Ending = GetIntCatInfo("Ending", CRLF); if (Recfm == RECFM_FIX || Recfm == RECFM_BIN) { @@ -137,46 +156,62 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) return false; } // end of DefineAM -#if 0 /***********************************************************************/ -/* DeleteTableFile: Delete DOS/UNIX table files using platform API. */ -/* If the table file is protected (declared as read/only) we still */ -/* erase the the eventual optimize and index files but return true. */ +/* Get the full path/name of the optization file. */ /***********************************************************************/ -bool DOSDEF::DeleteTableFile(PGLOBAL g) +bool DOSDEF::GetOptFileName(PGLOBAL g, char *filename) { - char filename[_MAX_PATH]; - bool rc = false; + char *ftype; - // Now delete the table file itself if not protected - if (!IsReadOnly()) { - rc = Erase(filename); - } else - rc =true; + switch (Recfm) { + case RECFM_VAR: ftype = ".dop"; break; + case RECFM_FIX: ftype = ".fop"; break; + case RECFM_BIN: ftype = ".bop"; break; + case RECFM_VCT: ftype = ".vop"; break; + case RECFM_DBF: ftype = ".dbp"; break; + default: + sprintf(g->Message, MSG(INVALID_FTYPE), Recfm); + return true; + } // endswitch Ftype - return rc; // Return true if error - } // end of DeleteTableFile + PlugSetPath(filename, Ofn, GetPath()); + strcat(PlugRemoveType(filename, filename), ftype); + return false; + } // end of GetOptFileName /***********************************************************************/ -/* Erase: This was made a separate routine because a strange thing */ -/* happened when DeleteTablefile was defined for the VCTDEF class: */ -/* when called from Catalog, the DOSDEF routine was still called even */ -/* when the class was VCTDEF. It also minimizes the specific code. */ +/* After an optimize error occured, remove all set optimize values. 
*/ /***********************************************************************/ -bool DOSDEF::Erase(char *filename) +void DOSDEF::RemoveOptValues(PGLOBAL g) { - bool rc; - - PlugSetPath(filename, Fn, GetPath()); + char filename[_MAX_PATH]; + PCOLDEF cdp; + + // Delete settings of optimized columns + for (cdp = To_Cols; cdp; cdp = cdp->GetNext()) + if (cdp->GetOpt()) { + cdp->SetMin(NULL); + cdp->SetMax(NULL); + cdp->SetNdv(0); + cdp->SetNbm(0); + cdp->SetDval(NULL); + cdp->SetBmap(NULL); + } // endif Opt + + // Delete block position setting for not fixed tables + To_Pos = NULL; + AllocBlks = 0; + + // Delete any eventually ill formed non matching optimization file + if (!GetOptFileName(g, filename)) #if defined(WIN32) - rc = !DeleteFile(filename); + DeleteFile(filename); #else // UNIX - rc = remove(filename); -#endif // UNIX + remove(filename); +#endif // WIN32 - return rc; // Return true if error - } // end of Erase -#endif // 0 + Optimized = 0; + } // end of RemoveOptValues /***********************************************************************/ /* DeleteIndexFile: Delete DOS/UNIX index file(s) using platform API. */ @@ -191,7 +226,7 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) return false; // No index // If true indexes are in separate files - sep = GetBoolCatInfo("SepIndex", false); + sep = GetBoolCatInfo("SepIndex", false); if (!sep && pxdf) { strcpy(g->Message, MSG(NO_RECOV_SPACE)); @@ -221,6 +256,10 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) #endif char direc[_MAX_DIR]; char fname[_MAX_FNAME]; + bool all = !pxdf; + + if (all) + pxdf = To_Indx; for (; pxdf; pxdf = pxdf->GetNext()) { _splitpath(Ofn, drive, direc, fname, NULL); @@ -228,10 +267,16 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) _makepath(filename, drive, direc, fname, ftype); PlugSetPath(filename, filename, GetPath()); #if defined(WIN32) - rc |= !DeleteFile(filename); + if (!DeleteFile(filename)) + rc |= (GetLastError() != ERROR_FILE_NOT_FOUND); #else // UNIX - rc |= remove(filename); + if (remove(filename)) + rc |= (errno != ENOENT); #endif // UNIX + + if (!all) + break; + } // endfor pxdf } else { // !sep @@ -239,9 +284,11 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) PlugSetPath(filename, Ofn, GetPath()); strcat(PlugRemoveType(filename, filename), ftype); #if defined(WIN32) - rc = !DeleteFile(filename); + if (!DeleteFile(filename)) + rc = (GetLastError() != ERROR_FILE_NOT_FOUND); #else // UNIX - rc = remove(filename); + if (remove(filename)) + rc = (errno != ENOENT); #endif // UNIX } // endif sep @@ -269,7 +316,7 @@ bool DOSDEF::InvalidateIndex(PGLOBAL g) PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) { // Mapping not used for insert - USETEMP tmp = PlgGetUser(g)->UseTemp; + USETEMP tmp = Use_Temp; bool map = Mapped && mode != MODE_INSERT && !(tmp != TMP_NO && Recfm == RECFM_VAR && mode == MODE_UPDATE) && @@ -314,10 +361,9 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) #if defined(ZIP_SUPPORT) if (Compressed == 1) txfp = new(g) ZIPFAM(this); - else { - strcpy(g->Message, "Compress 2 not supported yet"); - return NULL; - } // endelse + else + txfp = new(g) ZLBFAM(this); + #else // !ZIP_SUPPORT sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); return NULL; @@ -334,6 +380,36 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) if (Multiple) tdbp = new(g) TDBMUL(tdbp); + else + /*******************************************************************/ + /* For block tables, get eventually saved optimization values. 
*/ + /*******************************************************************/ + if (tdbp->GetBlockValues(g)) { + PushWarning(g, tdbp); +// return NULL; // causes a crash when deleting index + } else if (Recfm == RECFM_VAR || Compressed > 1) { + if (IsOptimized()) { + if (map) { + txfp = new(g) MBKFAM(this); + } else if (Compressed) { +#if defined(ZIP_SUPPORT) + if (Compressed == 1) + txfp = new(g) ZBKFAM(this); + else { + txfp->SetBlkPos(To_Pos); + ((PZLBFAM)txfp)->SetOptimized(To_Pos != NULL); + } // endelse +#else + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return NULL; +#endif + } else + txfp = new(g) BLKFAM(this); + + ((PTDBDOS)tdbp)->SetTxfp(txfp); + } // endif Optimized + + } // endif Recfm return tdbp; } // end of GetTable @@ -353,7 +429,13 @@ TDBDOS::TDBDOS(PDOSDEF tdp, PTXF txfp) : TDBASE(tdp) AvgLen = tdp->AvgLen; Ftype = tdp->Recfm; To_Line = NULL; - Cardinal = -1; +//To_BlkIdx = NULL; + To_BlkFil = NULL; + SavFil = NULL; +//Xeval = 0; + Beval = 0; + Abort = false; + Indxd = false; } // end of TDBDOS standard constructor TDBDOS::TDBDOS(PGLOBAL g, PTDBDOS tdbp) : TDBASE(tdbp) @@ -363,7 +445,13 @@ TDBDOS::TDBDOS(PGLOBAL g, PTDBDOS tdbp) : TDBASE(tdbp) AvgLen = tdbp->AvgLen; Ftype = tdbp->Ftype; To_Line = tdbp->To_Line; - Cardinal = tdbp->Cardinal; +//To_BlkIdx = tdbp->To_BlkIdx; + To_BlkFil = tdbp->To_BlkFil; + SavFil = tdbp->SavFil; +//Xeval = tdbp->Xeval; + Beval = tdbp->Beval; + Abort = tdbp->Abort; + Indxd = tdbp->Indxd; } // end of TDBDOS copy constructor // Method @@ -406,37 +494,1098 @@ void TDBDOS::PrintAM(FILE *f, char *m) /***********************************************************************/ /* Remake the indexes after the table was modified. */ /***********************************************************************/ -int TDBDOS::ResetTableOpt(PGLOBAL g, bool dox) +int TDBDOS::ResetTableOpt(PGLOBAL g, bool dop, bool dox) { - int rc = RC_OK; + int prc = RC_OK, rc = RC_OK; + + if (!GetFileLength(g)) { + // Void table, delete all opt and index files + PDOSDEF defp = (PDOSDEF)To_Def; + + defp->RemoveOptValues(g); + return (defp->DeleteIndexFile(g, NULL)) ? RC_INFO : RC_OK; + } // endif GetFileLength MaxSize = -1; // Size must be recalculated Cardinal = -1; // as well as Cardinality - if (dox) { + PTXF xp = Txfp; + + To_Filter = NULL; // Disable filtering +//To_BlkIdx = NULL; // and index filtering + To_BlkFil = NULL; // and block filtering + + // After the table was modified the indexes + // are invalid and we should mark them as such... + (void)((PDOSDEF)To_Def)->InvalidateIndex(g); + + if (dop) { + Columns = NULL; // Not used anymore + + if (Txfp->Blocked) { + // MakeBlockValues must be executed in non blocked mode + // except for ZLIB access method. 
+ if (Txfp->GetAmType() == TYPE_AM_MAP) { + Txfp = new(g) MAPFAM((PDOSDEF)To_Def); +#if defined(ZIP_SUPPORT) + } else if (Txfp->GetAmType() == TYPE_AM_ZIP) { + Txfp = new(g) ZIPFAM((PDOSDEF)To_Def); + } else if (Txfp->GetAmType() == TYPE_AM_ZLIB) { + Txfp->Reset(); + ((PZLBFAM)Txfp)->SetOptimized(false); +#endif // ZIP_SUPPORT + } else if (Txfp->GetAmType() == TYPE_AM_BLK) + Txfp = new(g) DOSFAM((PDOSDEF)To_Def); + + Txfp->SetTdbp(this); + } else + Txfp->Reset(); + + Use = USE_READY; // So the table can be reopened + Mode = MODE_ANY; // Just to be clean + rc = MakeBlockValues(g); // Redo optimization + } // endif dop + + if (dox && (rc == RC_OK || rc == RC_INFO)) { // Remake eventual indexes - if (Mode != MODE_UPDATE) - To_SetCols = NULL; // Only used on Update +// if (Mode != MODE_UPDATE) + To_SetCols = NULL; // Positions are changed Columns = NULL; // Not used anymore Txfp->Reset(); // New start Use = USE_READY; // So the table can be reopened Mode = MODE_READ; // New mode + prc = rc; - if (!(PlgGetUser(g)->Check & CHK_OPT)) { - // After the table was modified the indexes - // are invalid and we should mark them as such... - rc = ((PDOSDEF)To_Def)->InvalidateIndex(g); - } else - // ... or we should remake them. + if (PlgGetUser(g)->Check & CHK_OPT) + // We must remake all indexes. rc = MakeIndex(g, NULL, false); + rc = (rc == RC_INFO) ? prc : rc; } // endif dox return rc; } // end of ResetTableOpt /***********************************************************************/ +/* Calculate the block sizes so block I/O can be used and also the */ +/* Min/Max values for clustered/sorted table columns. */ +/***********************************************************************/ +int TDBDOS::MakeBlockValues(PGLOBAL g) + { + int i, lg, nrec, rc, n = 0; + int curnum, curblk, block, savndv, savnbm; + int last __attribute__((unused)); + void *savmin, *savmax; + bool blocked, xdb2 = false; +//POOLHEADER save; + PCOLDEF cdp; + PDOSDEF defp = (PDOSDEF)To_Def; + PDOSCOL colp = NULL; + PDBUSER dup = PlgGetUser(g); + PCATLG cat = defp->GetCat(); +//void *memp = cat->GetDescp(); + + if ((nrec = defp->GetElemt()) < 2) { + if (!To_Def->Partitioned()) { + // This may be wrong to do in some cases + strcpy(g->Message, MSG(TABLE_NOT_OPT)); + return RC_INFO; // Not to be optimized + } else + return RC_OK; + + } else if (GetMaxSize(g) == 0 || !(dup->Check & CHK_OPT)) { + // Suppress the opt file firstly if the table is void, + // secondly when it was modified with OPTIMIZATION unchecked + // because it is no more valid. + defp->RemoveOptValues(g); // Erase opt file + return RC_OK; // void table + } else if (MaxSize < 0) + return RC_FX; + + defp->SetOptimized(0); + + // Estimate the number of needed blocks + block = (int)((MaxSize + (int)nrec - 1) / (int)nrec); + + // We have to use local variables because Txfp->CurBlk is set + // to Rows+1 by unblocked variable length table access methods. + curblk = -1; + curnum = nrec - 1; + last = 0; + Txfp->Block = block; // This is useful mainly for + Txfp->CurBlk = curblk; // blocked tables (ZLBFAM), for + Txfp->CurNum = curnum; // others it is just to be clean. + + /*********************************************************************/ + /* Allocate the array of block starting positions. 
*/ + /*********************************************************************/ +//if (memp) +// save = *(PPOOLHEADER)memp; + + Txfp->BlkPos = (int*)PlugSubAlloc(g, NULL, (block + 1) * sizeof(int)); + + /*********************************************************************/ + /* Allocate the blocks for clustered columns. */ + /*********************************************************************/ + blocked = Txfp->Blocked; // Save + Txfp->Blocked = true; // So column block can be allocated + + for (cdp = defp->GetCols(), i = 1; cdp; cdp = cdp->GetNext(), i++) + if (cdp->GetOpt()) { + lg = cdp->GetClen(); + + if (cdp->GetFreq() && cdp->GetFreq() <= dup->Maxbmp) { + cdp->SetXdb2(true); + savndv = cdp->GetNdv(); + cdp->SetNdv(0); // Reset Dval number of values + xdb2 = true; + savmax = cdp->GetDval(); + cdp->SetDval(PlugSubAlloc(g, NULL, cdp->GetFreq() * lg)); + savnbm = cdp->GetNbm(); + cdp->SetNbm(0); // Prevent Bmap allocation +// savmin = cdp->GetBmap(); +// cdp->SetBmap(PlugSubAlloc(g, NULL, block * sizeof(int))); + + if (trace) + htrc("Dval(%p) Bmap(%p) col(%d) %s Block=%d lg=%d\n", + cdp->GetDval(), cdp->GetBmap(), i, cdp->GetName(), block, lg); + + // colp will be initialized with proper Dval VALBLK + colp = (PDOSCOL)MakeCol(g, cdp, colp, i); + colp->InitValue(g); // Allocate column value buffer + cdp->SetNbm(savnbm); +// cdp->SetBmap(savmin); // Can be reused if the new size + cdp->SetDval(savmax); // is not greater than this one. + cdp->SetNdv(savndv); + } else { + cdp->SetXdb2(false); // Maxbmp may have been reset + savmin = cdp->GetMin(); + savmax = cdp->GetMax(); + cdp->SetMin(PlugSubAlloc(g, NULL, block * lg)); + cdp->SetMax(PlugSubAlloc(g, NULL, block * lg)); + + // Valgrind complains if there are uninitialised bytes + // after the null character ending + if (IsTypeChar(cdp->GetType())) { + memset(cdp->GetMin(), 0, block * lg); + memset(cdp->GetMax(), 0, block * lg); + } // endif Type + + if (trace) + htrc("min(%p) max(%p) col(%d) %s Block=%d lg=%d\n", + cdp->GetMin(), cdp->GetMax(), i, cdp->GetName(), block, lg); + + // colp will be initialized with proper opt VALBLK's + colp = (PDOSCOL)MakeCol(g, cdp, colp, i); + colp->InitValue(g); // Allocate column value buffer + cdp->SetMin(savmin); // Can be reused if the number + cdp->SetMax(savmax); // of blocks does not change. + } // endif Freq + + } // endif Clustered + + // No optimised columns. Still useful for blocked variable tables. + if (!colp && defp->Recfm != RECFM_VAR) { + strcpy(g->Message, "No optimised columns"); + return RC_INFO; + } // endif colp + + Txfp->Blocked = blocked; + + /*********************************************************************/ + /* Now do calculate the optimization values. */ + /*********************************************************************/ + Mode = MODE_READ; + + if (OpenDB(g)) + return RC_FX; + + if (xdb2) { + /*********************************************************************/ + /* Retrieve the distinct values of XDB2 columns. 
*/ + /*********************************************************************/ + if (GetDistinctColumnValues(g, nrec)) + return RC_FX; + + OpenDB(g); // Rewind the table file + } // endif xdb2 + +#if defined(PROG_INFO) + /*********************************************************************/ + /* Initialize progress information */ + /*********************************************************************/ + char *p = (char *)PlugSubAlloc(g, NULL, 24 + strlen(Name)); + + dup->Step = strcat(strcpy(p, MSG(OPTIMIZING)), Name); + dup->ProgMax = GetProgMax(g); + dup->ProgCur = 0; +#endif // SOCKET_MODE || THREAD + + /*********************************************************************/ + /* Make block starting pos and min/max values of cluster columns. */ + /*********************************************************************/ + while ((rc = ReadDB(g)) == RC_OK) { + if (blocked) { + // A blocked FAM class handles CurNum and CurBlk (ZLBFAM) + if (!Txfp->CurNum) + Txfp->BlkPos[Txfp->CurBlk] = Txfp->GetPos(); + + } else { + if (++curnum >= nrec) { + if (++curblk >= block) { + strcpy(g->Message, MSG(BAD_BLK_ESTIM)); + goto err; + } else + curnum = 0; + + // Get block starting position + Txfp->BlkPos[curblk] = Txfp->GetPos(); + } // endif CurNum + + last = curnum + 1; // curnum is zero based + Txfp->CurBlk = curblk; // Used in COLDOS::SetMinMax + Txfp->CurNum = curnum; // Used in COLDOS::SetMinMax + } // endif blocked + + /*******************************************************************/ + /* Now calculate the min and max values for the cluster columns. */ + /*******************************************************************/ + for (colp = (PDOSCOL)Columns; colp; colp = (PDOSCOL)colp->GetNext()) + if (colp->Clustered == 2) { + if (colp->SetBitMap(g)) + goto err; + + } else + if (colp->SetMinMax(g)) + goto err; // Currently: column is not sorted + +#if defined(PROG_INFO) + if (!dup->Step) { + strcpy(g->Message, MSG(OPT_CANCELLED)); + goto err; + } else + dup->ProgCur = GetProgCur(); +#endif // PROG_INFO + + n++; // Used to calculate block and last + } // endwhile + + if (rc == RC_EF) { + Txfp->Nrec = nrec; + +#if 0 // No good because Curblk and CurNum after EOF are different + // depending on whether the file is mapped or not mapped. + if (blocked) { +// Txfp->Block = Txfp->CurBlk + 1; + Txfp->Last = (Txfp->CurNum) ? Txfp->CurNum : nrec; +// Txfp->Last = (Txfp->CurNum) ? Txfp->CurNum + 1 : nrec; + Txfp->Block = Txfp->CurBlk + (Txfp->Last == nrec ? 0 : 1); + } else { + Txfp->Block = curblk + 1; + Txfp->Last = last; + } // endif blocked +#endif // 0 + + // New values of Block and Last + Txfp->Block = (n + nrec - 1) / nrec; + Txfp->Last = (n % nrec) ? (n % nrec) : nrec; + + // This is needed to be able to calculate the last block size + Txfp->BlkPos[Txfp->Block] = Txfp->GetNextPos(); + } else + goto err; + + /*********************************************************************/ + /* Save the optimization values for this table. 
*/ + /*********************************************************************/ + if (!SaveBlockValues(g)) { + defp->Block = Txfp->Block; + defp->Last = Txfp->Last; + CloseDB(g); + defp->SetIntCatInfo("Blocks", Txfp->Block); + defp->SetIntCatInfo("Last", Txfp->Last); + return RC_OK; + } // endif SaveBlockValues + + err: + // Restore Desc memory suballocation +//if (memp) +// *(PPOOLHEADER)memp = save; + + defp->RemoveOptValues(g); + CloseDB(g); + return RC_FX; + } // end of MakeBlockValues + +/***********************************************************************/ +/* Save the block and Min/Max values for this table. */ +/* The problem here is to avoid name duplication, because more than */ +/* one data file can have the same name (but different types) and/or */ +/* the same data file can be used with different block sizes. This is */ +/* why we use Ofn that defaults to the file name but can be set to a */ +/* different name if necessary. */ +/***********************************************************************/ +bool TDBDOS::SaveBlockValues(PGLOBAL g) + { + char filename[_MAX_PATH]; + int lg, n[NZ + 2]; + size_t nbk, ndv, nbm, block = Txfp->Block; + bool rc = false; + FILE *opfile; + PDOSCOL colp; + PDOSDEF defp = (PDOSDEF)To_Def; + + if (defp->GetOptFileName(g, filename)) + return true; + + if (!(opfile = fopen(filename, "wb"))) { + sprintf(g->Message, MSG(OPEN_MODE_ERROR), + "wb", (int)errno, filename); + strcat(strcat(g->Message, ": "), strerror(errno)); + + if (trace) + htrc("%s\n", g->Message); + + return true; + } // endif opfile + + memset(n, 0, sizeof(n)); // To avoid valgrind warning + + if (Ftype == RECFM_VAR || defp->Compressed == 2) { + /*******************************************************************/ + /* Write block starting positions into the opt file. */ + /*******************************************************************/ + block++; + lg = sizeof(int); + n[0] = Txfp->Last; n[1] = lg; n[2] = Txfp->Nrec; n[3] = Txfp->Block; + + if (fwrite(n, sizeof(int), NZ, opfile) != NZ) { + sprintf(g->Message, MSG(OPT_HEAD_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + if (fwrite(Txfp->BlkPos, lg, block, opfile) != block) { + sprintf(g->Message, MSG(OPTBLK_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + block--; // = Txfp->Block; + } // endif Ftype + + /*********************************************************************/ + /* Write the Min/Max values into the opt file. */ + /*********************************************************************/ + for (colp = (PDOSCOL)Columns; colp; colp = (PDOSCOL)colp->Next) { + lg = colp->Value->GetClen(); + + // Now start the writing process + if (colp->Clustered == 2) { + // New XDB2 block optimization. Will be recognized when reading + // because the column index is negated. 
+ ndv = colp->Ndv; nbm = colp->Nbm; + nbk = nbm * block; + n[0] = -colp->Index; n[1] = lg; n[2] = Txfp->Nrec; n[3] = block; + n[4] = ndv; n[5] = nbm; + + if (fwrite(n, sizeof(int), NZ + 2, opfile) != NZ + 2) { + sprintf(g->Message, MSG(OPT_HEAD_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + if (fwrite(colp->Dval->GetValPointer(), lg, ndv, opfile) != ndv) { + sprintf(g->Message, MSG(OPT_DVAL_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + if (fwrite(colp->Bmap->GetValPointer(), sizeof(int), nbk, opfile) != nbk) { + sprintf(g->Message, MSG(OPT_BMAP_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + } else { + n[0] = colp->Index; n[1] = lg; n[2] = Txfp->Nrec; n[3] = block; + + if (fwrite(n, sizeof(int), NZ, opfile) != NZ) { + sprintf(g->Message, MSG(OPT_HEAD_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + if (fwrite(colp->Min->GetValPointer(), lg, block, opfile) != block) { + sprintf(g->Message, MSG(OPT_MIN_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + if (fwrite(colp->Max->GetValPointer(), lg, block, opfile) != block) { + sprintf(g->Message, MSG(OPT_MAX_WR_ERR), strerror(errno)); + rc = true; + } // endif size + + } // endif Clustered + + } // endfor colp + + fclose(opfile); + return rc; + } // end of SaveBlockValues + +/***********************************************************************/ +/* Read the Min/Max values for this table. */ +/* The problem here is to avoid name duplication, because more than */ +/* one data file can have the same name (but different types) and/or */ +/* the same data file can be used with different block sizes. This is */ +/* why we use Ofn that defaults to the file name but can be set to a */ +/* different name if necessary. */ +/***********************************************************************/ +bool TDBDOS::GetBlockValues(PGLOBAL g) + { + char filename[_MAX_PATH]; + int i, lg, n[NZ]; + int nrec, block = 0, last = 0, allocblk = 0; + int len; + bool newblk = false; + size_t ndv, nbm, nbk, blk; + FILE *opfile; + PCOLDEF cdp; + PDOSDEF defp = (PDOSDEF)To_Def; + PCATLG cat = defp->GetCat(); + +#if 0 + if (Mode == MODE_INSERT && Txfp->GetAmType() == TYPE_AM_DOS) + return false; +#endif // WIN32 + + if (defp->Optimized) + return false; // Already done or to be redone + + if (Ftype == RECFM_VAR || defp->Compressed == 2) { + /*******************************************************************/ + /* Variable length file that can be read by block. */ + /*******************************************************************/ + nrec = (defp->GetElemt()) ? defp->GetElemt() : 1; + + if (nrec > 1) { + // The table can be declared optimized if it is void. + // This is useful to handle Insert in optimized mode. + char filename[_MAX_PATH]; + int h; + int flen = -1; + + PlugSetPath(filename, defp->Fn, GetPath()); + h = open(filename, O_RDONLY); + flen = (h == -1 && errno == ENOENT) ? 0 : _filelength(h); + + if (h != -1) + close(h); + + if (!flen) { + defp->SetOptimized(1); + return false; + } // endif flen + + } else + return false; // Not optimisable + + cdp = defp->GetCols(); + i = 1; + } else { + /*******************************************************************/ + /* Fixed length file. Opt file exists only for clustered columns. 
*/ + /*******************************************************************/ + // Check for existence of clustered columns + for (cdp = defp->GetCols(), i = 1; cdp; cdp = cdp->GetNext(), i++) + if (cdp->GetOpt()) + break; + + if (!cdp) + return false; // No optimization needed + + if ((len = Cardinality(g)) < 0) + return true; // Table error + else if (!len) + return false; // File does not exist yet + + block = Txfp->Block; // Was set in Cardinality + nrec = Txfp->Nrec; + } // endif Ftype + + if (defp->GetOptFileName(g, filename)) + return true; + + if (!(opfile = fopen(filename, "rb"))) + return false; // No saved values + + if (Ftype == RECFM_VAR || defp->Compressed == 2) { + /*******************************************************************/ + /* Read block starting positions from the opt file. */ + /*******************************************************************/ + lg = sizeof(int); + + if (fread(n, sizeof(int), NZ, opfile) != NZ) { + sprintf(g->Message, MSG(OPT_HEAD_RD_ERR), strerror(errno)); + goto err; + } // endif size + + if (n[1] != lg || n[2] != nrec) { + sprintf(g->Message, MSG(OPT_NOT_MATCH), filename); + goto err; + } // endif + + last = n[0]; + block = n[3]; + blk = block + 1; + + defp->To_Pos = (int*)PlugSubAlloc(g, NULL, blk * lg); + + if (fread(defp->To_Pos, lg, blk, opfile) != blk) { + sprintf(g->Message, MSG(OPTBLK_RD_ERR), strerror(errno)); + goto err; + } // endif size + + } // endif Ftype + + /*********************************************************************/ + /* Read the Min/Max values from the opt file. */ + /*********************************************************************/ + for (; cdp; cdp = cdp->GetNext(), i++) + if (cdp->GetOpt()) { + lg = cdp->GetClen(); + blk = block; + + // Now start the reading process. 
+ if (fread(n, sizeof(int), NZ, opfile) != NZ) { + sprintf(g->Message, MSG(OPT_HEAD_RD_ERR), strerror(errno)); + goto err; + } // endif size + + if (n[0] == -i) { + // Read the XDB2 opt values from the opt file + if (n[1] != lg || n[2] != nrec || n[3] != block) { + sprintf(g->Message, MSG(OPT_NOT_MATCH), filename); + goto err; + } // endif + + if (fread(n, sizeof(int), 2, opfile) != 2) { + sprintf(g->Message, MSG(OPT_HEAD_RD_ERR), strerror(errno)); + goto err; + } // endif fread + + ndv = n[0]; nbm = n[1]; nbk = nbm * blk; + + if (cdp->GetNdv() < (int)ndv || !cdp->GetDval()) + cdp->SetDval(PlugSubAlloc(g, NULL, ndv * lg)); + + cdp->SetNdv((int)ndv); + + if (fread(cdp->GetDval(), lg, ndv, opfile) != ndv) { + sprintf(g->Message, MSG(OPT_DVAL_RD_ERR), strerror(errno)); + goto err; + } // endif size + + if (newblk || cdp->GetNbm() < (int)nbm || !cdp->GetBmap()) + cdp->SetBmap(PlugSubAlloc(g, NULL, nbk * sizeof(int))); + + cdp->SetNbm((int)nbm); + + if (fread(cdp->GetBmap(), sizeof(int), nbk, opfile) != nbk) { + sprintf(g->Message, MSG(OPT_BMAP_RD_ERR), strerror(errno)); + goto err; + } // endif size + + cdp->SetXdb2(true); + } else { + // Read the Min/Max values from the opt file + if (n[0] != i || n[1] != lg || n[2] != nrec || n[3] != block) { + sprintf(g->Message, MSG(OPT_NOT_MATCH), filename); + goto err; + } // endif + + if (newblk || !cdp->GetMin()) + cdp->SetMin(PlugSubAlloc(g, NULL, blk * lg)); + + if (fread(cdp->GetMin(), lg, blk, opfile) != blk) { + sprintf(g->Message, MSG(OPT_MIN_RD_ERR), strerror(errno)); + goto err; + } // endif size + + if (newblk || !cdp->GetMax()) + cdp->SetMax(PlugSubAlloc(g, NULL, blk * lg)); + + if (fread(cdp->GetMax(), lg, blk, opfile) != blk) { + sprintf(g->Message, MSG(OPT_MAX_RD_ERR), strerror(errno)); + goto err; + } // endif size + + cdp->SetXdb2(false); + } // endif n[0] (XDB2) + + } // endif Clustered + + defp->SetBlock(block); + defp->Last = last; // For Cardinality + defp->SetAllocBlks(block); + defp->SetOptimized(1); + fclose(opfile); + MaxSize = -1; // Can be refined later + return false; + + err: + defp->RemoveOptValues(g); + fclose(opfile); + + // Ignore error if not in mode CHK_OPT + return (PlgGetUser(g)->Check & CHK_OPT) != 0; + } // end of GetBlockValues + +/***********************************************************************/ +/* This function is used while making XDB2 block optimization. */ +/* It constructs, for each eligible column, the sorted list of the */ +/* distinct values existing in the column. This function uses an */ +/* algorithm that permits getting several sets of distinct values by */ +/* reading the table only once, which cannot be done using a standard */ +/* SQL query. 
*/ +/***********************************************************************/ +bool TDBDOS::GetDistinctColumnValues(PGLOBAL g, int nrec) + { + char *p; + int rc, blk, n = 0; + PDOSCOL colp; + PDBUSER dup = PlgGetUser(g); + + /*********************************************************************/ + /* Initialize progress information */ + /*********************************************************************/ + p = (char *)PlugSubAlloc(g, NULL, 48 + strlen(Name)); + dup->Step = strcat(strcpy(p, MSG(GET_DIST_VALS)), Name); + dup->ProgMax = GetProgMax(g); + dup->ProgCur = 0; + + while ((rc = ReadDB(g)) == RC_OK) { + for (colp = (PDOSCOL)Columns; colp; colp = (PDOSCOL)colp->Next) + if (colp->Clustered == 2) + if (colp->AddDistinctValue(g)) + return true; // Too many distinct values + +#if defined(SOCKET_MODE) + if (SendProgress(dup)) { + strcpy(g->Message, MSG(OPT_CANCELLED)); + return true; + } else +#elif defined(THREAD) + if (!dup->Step) { + strcpy(g->Message, MSG(OPT_CANCELLED)); + return true; + } else +#endif // THREAD + dup->ProgCur = GetProgCur(); + + n++; + } // endwhile + + if (rc != RC_EF) + return true; + + // Reset the number of table blocks +//nrec = ((PDOSDEF)To_Def)->GetElemt(); (or default value) + blk = (n + nrec - 1) / nrec; + Txfp->Block = blk; // Useful mainly for ZLBFAM ??? + + // Set Nbm, Bmap for XDB2 columns + for (colp = (PDOSCOL)Columns; colp; colp = (PDOSCOL)colp->Next) + if (colp->Clustered == 2) { +// colp->Cdp->SetNdv(colp->Ndv); + colp->Nbm = (colp->Ndv + MAXBMP - 1) / MAXBMP; + colp->Bmap = AllocValBlock(g, NULL, TYPE_INT, colp->Nbm * blk); + } // endif Clustered + + return false; + } // end of GetDistinctColumnValues + +/***********************************************************************/ +/* Analyze the filter and construct the Block Evaluation Filter. */ +/* This is possible when a filter contains predicates implying a */ +/* column marked as "clustered" or "sorted" matched to a constant */ +/* argument. It is then possible by comparison against the smallest */ +/* and largest column values in each block to determine whether the */ +/* filter condition will be always true or always false for the block.*/ +/***********************************************************************/ +PBF TDBDOS::InitBlockFilter(PGLOBAL g, PFIL filp) + { + bool blk = Txfp->Blocked; + + if (To_BlkFil) + return To_BlkFil; // Already done + else if (!filp) + return NULL; + else if (blk) { + if (Txfp->GetAmType() == TYPE_AM_DBF) + /*****************************************************************/ + /* If RowID is used in this query, block optimization cannot be */ + /* used because currently the file must be read sequentially. */ + /*****************************************************************/ + for (PCOL cp = Columns; cp; cp = cp->GetNext()) + if (cp->GetAmType() == TYPE_AM_ROWID && !((RIDBLK*)cp)->GetRnm()) + return NULL; + + } // endif blk + + int i, op = filp->GetOpc(), opm = filp->GetOpm(), n = 0; + bool cnv[2]; + PCOL colp; + PXOB arg[2] = {NULL,NULL}; + PBF *fp = NULL, bfp = NULL; + + switch (op) { + case OP_EQ: + case OP_NE: + case OP_GT: + case OP_GE: + case OP_LT: + case OP_LE: + if (! 
opm) { + for (i = 0; i < 2; i++) { + arg[i] = filp->Arg(i); + cnv[i] = filp->Conv(i); + } // endfor i + + bfp = CheckBlockFilari(g, arg, op, cnv); + break; + } // endif !opm + + // if opm, pass thru + case OP_IN: + if (filp->GetArgType(0) == TYPE_COLBLK && + filp->GetArgType(1) == TYPE_ARRAY) { + arg[0] = filp->Arg(0); + arg[1] = filp->Arg(1); + colp = (PCOL)arg[0]; + + if (colp->GetTo_Tdb() == this) { + // Block evaluation is possible for... + if (colp->GetAmType() == TYPE_AM_ROWID) { + // Special column ROWID and constant array, but + // currently we don't know how to retrieve a RowID + // from a DBF table that is not sequentially read. +// if (Txfp->GetAmType() != TYPE_AM_DBF || +// ((RIDBLK*)arg[0])->GetRnm()) + bfp = new(g) BLKSPCIN(g, this, op, opm, arg, Txfp->Nrec); + + } else if (blk && Txfp->Nrec > 1 && colp->IsClustered()) + // Clustered column and constant array + if (colp->GetClustered() == 2) + bfp = new(g) BLKFILIN2(g, this, op, opm, arg); + else + bfp = new(g) BLKFILIN(g, this, op, opm, arg); + + } // endif this + +#if 0 + } else if (filp->GetArgType(0) == TYPE_SCALF && + filp->GetArgType(1) == TYPE_ARRAY) { + arg[0] = filp->Arg(0); + arg[1] = filp->Arg(1); + + if (((PSCALF)arg[0])->GetOp() == OP_ROW && + arg[1]->GetResultType() == TYPE_LIST) { + PARRAY par = (PARRAY)arg[1]; + LSTVAL *vlp = (LSTVAL*)par->GetValue(); + + ((SFROW*)arg[0])->GetParms(n); + + if (n != vlp->GetN()) + return NULL; + else + n = par->GetNval(); + + arg[1] = new(g) CONSTANT(vlp); + fp = (PBF*)PlugSubAlloc(g, NULL, n * sizeof(PBF)); + cnv[0] = cnv[1] = false; + + if (op == OP_IN) + op = OP_EQ; + + for (i = 0; i < n; i++) { + par->GetNthValue(vlp, i); + + if (!(fp[i] = CheckBlockFilari(g, arg, op, cnv))) + return NULL; + + } // endfor i + + bfp = new(g) BLKFILLOG(this, (opm == 2 ? OP_AND : OP_OR), fp, n); + } // endif ROW +#endif // 0 + + } // endif Type + + break; + case OP_AND: + case OP_OR: + fp = (PBF*)PlugSubAlloc(g, NULL, 2 * sizeof(PBF)); + fp[0] = InitBlockFilter(g, (PFIL)(filp->Arg(0))); + fp[1] = InitBlockFilter(g, (PFIL)(filp->Arg(1))); + + if (fp[0] || fp[1]) + bfp = new(g) BLKFILLOG(this, op, fp, 2); + + break; + case OP_NOT: + fp = (PBF*)PlugSubAlloc(g, NULL, sizeof(PBF)); + + if ((*fp = InitBlockFilter(g, (PFIL)(filp->Arg(0))))) + bfp = new(g) BLKFILLOG(this, op, fp, 1); + + break; + case OP_LIKE: + default: + break; + } // endswitch op + + return bfp; + } // end of InitBlockFilter + +/***********************************************************************/ +/* Analyze the passed arguments and construct the Block Filter. */ +/***********************************************************************/ +PBF TDBDOS::CheckBlockFilari(PGLOBAL g, PXOB *arg, int op, bool *cnv) + { +//int i, n1, n2, ctype = TYPE_ERROR, n = 0, type[2] = {0,0}; +//bool conv = false, xdb2 = false, ok = false, b[2]; +//PXOB *xarg1, *xarg2 = NULL, xp[2]; + int i, n = 0, type[2] = {0,0}; + int ctype __attribute__((unused)); + bool conv = false, xdb2 = false, ok = false; + PXOB *xarg2 = NULL, xp[2]; + PCOL colp; +//LSTVAL *vlp = NULL; +//SFROW *sfr[2]; + PBF *fp = NULL, bfp = NULL; + + ctype= TYPE_ERROR; + for (i = 0; i < 2; i++) { + switch (arg[i]->GetType()) { + case TYPE_CONST: + type[i] = 1; + ctype = arg[i]->GetResultType(); + break; + case TYPE_COLBLK: + conv = cnv[i]; + colp = (PCOL)arg[i]; + + if (colp->GetTo_Tdb() == this) { + if (colp->GetAmType() == TYPE_AM_ROWID) { + // Currently we don't know how to retrieve a RowID + // from a DBF table that is not sequentially read. 
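              // Illustration (a hypothetical predicate, not taken from the
              // original source): with Txfp->Nrec rows per block, a constant
              // list such as  ROWID IN (3, 500, 12000)  can be tested block
              // by block, since the k-th block holds roughly row numbers
              // k*Nrec+1 .. (k+1)*Nrec; a block whose range contains none of
              // the listed constants can be rejected without reading it.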
+// if (Txfp->GetAmType() != TYPE_AM_DBF || +// ((RIDBLK*)arg[i])->GetRnm()) + type[i] = 5; + + } else if (Txfp->Blocked && Txfp->Nrec > 1 && + colp->IsClustered()) { + type[i] = 2; + xdb2 = colp->GetClustered() == 2; + } // endif Clustered + + } else if (colp->GetColUse(U_CORREL)) { + // This is a column pointing to the outer query of a + // correlated subquery, it has a constant value during + // each execution of the subquery. + type[i] = 1; + ctype = arg[i]->GetResultType(); + } // endif this + + break; +// case TYPE_SCALF: +// if (((PSCALF)arg[i])->GetOp() == OP_ROW) { +// sfr[i] = (SFROW*)arg[i]; +// type[i] = 7; +// } // endif Op + +// break; + default: + break; + } // endswitch ArgType + + if (!type[i]) + break; + + n += type[i]; + } // endfor i + + if (n == 3 || n == 6) { + if (conv) { + // The constant has not the good type and will not match + // the block min/max values. Warn and abort. + sprintf(g->Message, "Block opt: %s", MSG(VALTYPE_NOMATCH)); + PushWarning(g, this); + return NULL; + } // endif Conv + + if (type[0] == 1) { + // Make it always as Column-op-Value + *xp = arg[0]; + arg[0] = arg[1]; + arg[1] = *xp; + + switch (op) { + case OP_GT: op = OP_LT; break; + case OP_GE: op = OP_LE; break; + case OP_LT: op = OP_GT; break; + case OP_LE: op = OP_GE; break; + } // endswitch op + + } // endif + +#if defined(_DEBUG) +// assert(arg[0]->GetResultType() == ctype); +#endif + + if (n == 3) { + if (xdb2) { + if (((PDOSCOL)arg[0])->GetNbm() == 1) + bfp = new(g) BLKFILAR2(g, this, op, arg); + else // Multiple bitmap made of several ULONG's + bfp = new(g) BLKFILMR2(g, this, op, arg); + } else + bfp = new(g) BLKFILARI(g, this, op, arg); + + } else // n = 6 + bfp = new(g) BLKSPCARI(this, op, arg, Txfp->Nrec); + +#if 0 + } else if (n == 8 || n == 14) { + if (n == 8 && ctype != TYPE_LIST) { + // Should never happen + strcpy(g->Message, "Block opt: bad constant"); + longjmp(g->jumper[g->jump_level], 99); + } // endif Conv + + if (type[0] == 1) { + // Make it always as Column-op-Value + sfr[0] = sfr[1]; + arg[1] = arg[0]; + + switch (op) { + case OP_GT: op = OP_LT; break; + case OP_GE: op = OP_LE; break; + case OP_LT: op = OP_GT; break; + case OP_LE: op = OP_GE; break; + } // endswitch op + + } // endif + + xarg1 = sfr[0]->GetParms(n1); + + if (n == 8) { + vlp = (LSTVAL*)arg[1]->GetValue(); + n2 = vlp->GetN(); + xp[1] = new(g) CONSTANT((PVAL)NULL); + } else + xarg2 = sfr[1]->GetParms(n2); + + if (n1 != n2) + return NULL; // Should we flag an error ? + + fp = (PBF*)PlugSubAlloc(g, NULL, n1 * sizeof(PBF)); + + for (i = 0; i < n1; i++) { + xp[0] = xarg1[i]; + + if (n == 8) + ((CONSTANT*)xp[1])->SetValue(vlp->GetSubVal(i)); + else + xp[1] = xarg2[i]; + + b[0] = b[1] = (xp[0]->GetResultType() != xp[1]->GetResultType()); + ok |= ((fp[i] = CheckBlockFilari(g, xp, op, b)) != NULL); + } // endfor i + + if (ok) + bfp = new(g) BLKFILLOG(this, OP_AND, fp, n1); +#endif // 0 + + } // endif n + + return bfp; + } // end of CheckBlockFilari + +/***********************************************************************/ +/* ResetBlkFil: reset the block filter and restore filtering, or make */ +/* the block filter if To_Filter was not set when opening the table. 
*/ +/***********************************************************************/ +void TDBDOS::ResetBlockFilter(PGLOBAL g) + { + if (!To_BlkFil) { + if (To_Filter) + if ((To_BlkFil = InitBlockFilter(g, To_Filter))) { + htrc("BlkFil=%p\n", To_BlkFil); + MaxSize = -1; // To be recalculated + } // endif To_BlkFil + + return; + } // endif To_BlkFil + + To_BlkFil->Reset(g); + + if (SavFil && !To_Filter) { + // Restore filter if it was disabled by optimization + To_Filter = SavFil; + SavFil = NULL; + } // endif + + Beval = 0; + } // end of ResetBlockFilter + +/***********************************************************************/ +/* Block optimization: evaluate the block index filter against */ +/* the min and max values of this block and return: */ +/* RC_OK: if some records in the block can meet filter criteria. */ +/* RC_NF: if no record in the block can meet filter criteria. */ +/* RC_EF: if no record in the remaining file can meet filter criteria.*/ +/* In addition, temporarily suppress filtering if all the records in */ +/* the block meet filter criteria. */ +/***********************************************************************/ +int TDBDOS::TestBlock(PGLOBAL g) + { + int rc = RC_OK; + + if (To_BlkFil && Beval != 2) { + // Check for block filtering evaluation + if (Beval == 1) { + // Filter was removed for last block, restore it + To_Filter = SavFil; + SavFil = NULL; + } // endif Beval + + // Check for valid records in new block + switch (Beval = To_BlkFil->BlockEval(g)) { + case -2: // No more valid values in file + rc = RC_EF; + break; + case -1: // No valid values in block + rc = RC_NF; + break; + case 1: // All block values are valid + case 2: // All subsequent file values are Ok + // Before suppressing the filter for the block(s) it is + // necessary to reset the filtered columns to NOT_READ + // so their new values are retrieved by the SELECT list. + if (To_Filter) // Can be NULL when externally called (XDB) + To_Filter->Reset(); + + SavFil = To_Filter; + To_Filter = NULL; // So remove filter + } // endswitch Beval + + if (trace) + htrc("BF Eval Beval=%d\n", Beval); + + } // endif To_BlkFil + + return rc; + } // end of TestBlock + +/***********************************************************************/ /* Check whether we have to create/update permanent indexes. */ /***********************************************************************/ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) @@ -454,7 +1603,13 @@ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) Mode = MODE_READ; Use = USE_READY; dfp = (PDOSDEF)To_Def; - fixed = Cardinality(g) >= 0; + + if (!Cardinality(g)) { + // Void table: erase any existing index file(s) + (void)dfp->DeleteIndexFile(g, NULL); + return RC_OK; + } else + fixed = Ftype != RECFM_VAR; // Are we called from CreateTable or CreateIndex? if (pxdf) { @@ -482,7 +1637,7 @@ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) // Allocate all columns that will be used by indexes. // This must be done before opening the table so specific - // column initialization can be done ( in particular by TDBVCT) + // column initialization can be done (in particular by TDBVCT) for (n = 0, xdp = pxdf; xdp; xdp = xdp->GetNext()) for (kdp = xdp->GetToKeyParts(); kdp; kdp = kdp->GetNext()) { if (!(colp = ColDB(g, kdp->GetName(), 0))) { @@ -580,6 +1735,113 @@ err: } // end of MakeIndex /***********************************************************************/ +/* Make a dynamic index. 
*/ +/***********************************************************************/ +bool TDBDOS::InitialyzeIndex(PGLOBAL g, PIXDEF xdp, bool sorted) + { + int k, rc; + bool brc, dynamic; + PCOL colp; + PCOLDEF cdp; + PVAL valp; + PXLOAD pxp; + PKXBASE kxp; + PKPDEF kdp; + + if (!xdp && !(xdp = To_Xdp)) { + strcpy(g->Message, "NULL dynamic index"); + return true; + } else + dynamic = To_Filter && xdp->IsUnique() && xdp->IsDynamic(); +// dynamic = To_Filter && xdp->IsDynamic(); NIY + + // Allocate the key columns definition block + Knum = xdp->GetNparts(); + To_Key_Col = (PCOL*)PlugSubAlloc(g, NULL, Knum * sizeof(PCOL)); + + // Get the key column description list + for (k = 0, kdp = xdp->GetToKeyParts(); kdp; kdp = kdp->GetNext()) + if (!(colp = ColDB(g, kdp->GetName(), 0)) || colp->InitValue(g)) { + sprintf(g->Message, "Wrong column %s", kdp->GetName()); + return true; + } else + To_Key_Col[k++] = colp; + +#if defined(_DEBUG) + if (k != Knum) { + sprintf(g->Message, "Key part number mismatch for %s", + xdp->GetName()); + return 0; + } // endif k +#endif // _DEBUG + + // Allocate the pseudo constants that will contain the key values + To_Link = (PXOB*)PlugSubAlloc(g, NULL, Knum * sizeof(PXOB)); + + for (k = 0, kdp = xdp->GetToKeyParts(); kdp; k++, kdp = kdp->GetNext()) { + cdp = Key(k)->GetCdp(); + valp = AllocateValue(g, cdp->GetType(), cdp->GetLength()); + To_Link[k]= new(g) CONSTANT(valp); + } // endfor k + + // Make the index on xdp + if (!xdp->IsAuto()) { + if (!dynamic) { + if (((PDOSDEF)To_Def)->Huge) + pxp = new(g) XHUGE; + else + pxp = new(g) XFILE; + + } else + pxp = NULL; + + if (Knum == 1) // Single index + kxp = new(g) XINDXS(this, xdp, pxp, To_Key_Col, To_Link); + else // Multi-Column index + kxp = new(g) XINDEX(this, xdp, pxp, To_Key_Col, To_Link); + + } else // Column contains same values as ROWID + kxp = new(g) XXROW(this); + + // Prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return true; + } // endif + + if (!(rc = setjmp(g->jumper[++g->jump_level])) != 0) { + if (dynamic) { + ResetBlockFilter(g); + kxp->SetDynamic(dynamic); + brc = kxp->Make(g, xdp); + } else + brc = kxp->Init(g); + + if (!brc) { + if (Txfp->GetAmType() == TYPE_AM_BLK) { + // Cannot use indexing in DOS block mode + Txfp = new(g) DOSFAM((PBLKFAM)Txfp, (PDOSDEF)To_Def); + Txfp->AllocateBuffer(g); + To_BlkFil = NULL; + } // endif AmType + + To_Kindex= kxp; + + if (!(sorted && To_Kindex->IsSorted()) && + ((Mode == MODE_UPDATE && IsUsingTemp(g)) || + (Mode == MODE_DELETE && Txfp->GetAmType() != TYPE_AM_DBF))) + Indxd = true; + + } // endif brc + + } else + brc = true; + + g->jump_level--; + return brc; + } // end of InitialyzeIndex + +/***********************************************************************/ /* DOS GetProgMax: get the max value for progress information. */ /***********************************************************************/ int TDBDOS::GetProgMax(PGLOBAL g) @@ -619,11 +1881,82 @@ int TDBDOS::RowNumber(PGLOBAL g, bool b) /***********************************************************************/ int TDBDOS::Cardinality(PGLOBAL g) { + int n = Txfp->Cardinality(NULL); + if (!g) - return Txfp->Cardinality(g); + return (Mode == MODE_ANY) ? 
1 : n; + + if (Cardinal < 0) { + if (!Txfp->Blocked && n == 0) { + // Info command, we try to return exact row number + PDOSDEF dfp = (PDOSDEF)To_Def; + PIXDEF xdp = dfp->To_Indx; + + if (xdp && xdp->IsValid()) { + // Cardinality can be retrieved from one index + PXLOAD pxp; + + if (dfp->Huge) + pxp = new(g) XHUGE; + else + pxp = new(g) XFILE; + + PXINDEX kxp = new(g) XINDEX(this, xdp, pxp, NULL, NULL); + + if (!(kxp->GetAllSizes(g, Cardinal))) + return Cardinal; + + } // endif Mode + + if (Mode == MODE_ANY && xinfo) { + // Using index impossible or failed, do it the hard way + Mode = MODE_READ; + To_Line = (char*)PlugSubAlloc(g, NULL, Lrecl + 1); + + if (Txfp->OpenTableFile(g)) + return (Cardinal = Txfp->Cardinality(g)); + + for (Cardinal = 0; n != RC_EF;) + if (!(n = Txfp->ReadBuffer(g))) + Cardinal++; + + Txfp->CloseTableFile(g, false); + Mode = MODE_ANY; + } else { + // Return the best estimate + int len = GetFileLength(g); + + if (len >= 0) { + int rec; + + if (trace) + htrc("Estimating lines len=%d ending=%d\n", + len, ((PDOSDEF)To_Def)->Ending); + + /*************************************************************/ + /* Estimate the number of lines in the table (if not known) */ + /* by dividing the file length by the average record length. */ + /*************************************************************/ + rec = ((PDOSDEF)To_Def)->Ending; + + if (AvgLen <= 0) // No given average estimate + rec += EstimatedLength(g); + else // An estimate was given for the average record length + rec += AvgLen; + + Cardinal = (len + rec - 1) / rec; - if (Cardinal < 0) - Cardinal = Txfp->Cardinality(g); + if (trace) + htrc("avglen=%d MaxSize%d\n", rec, Cardinal); + + } // endif len + + } // endif Mode + + } else + Cardinal = Txfp->Cardinality(g); + + } // endif Cardinal return Cardinal; } // end of Cardinality @@ -641,30 +1974,21 @@ int TDBDOS::GetMaxSize(PGLOBAL g) int len = GetFileLength(g); if (len >= 0) { + int rec; + if (trace) - htrc("Estimating lines len=%d ending=%d\n", + htrc("Estimating lines len=%d ending=%d\n", len, ((PDOSDEF)To_Def)->Ending); /*****************************************************************/ /* Estimate the number of lines in the table (if not known) by */ - /* dividing the file length by the minimum line length assuming */ - /* only the last column can be of variable length. This will be */ - /* a ceiling estimate (as last column is never totally absent). */ + /* dividing the file length by minimum record length. 
*/ /*****************************************************************/ - int rec = ((PDOSDEF)To_Def)->Ending; // +2: CRLF +1: LF - - if (AvgLen <= 0) // No given average estimate - rec += EstimatedLength(g); - else // A lower estimate was given for the average record length - rec += (int)AvgLen; - - if (trace) - htrc(" Filen=%d min_rec=%d\n", len, rec); - + rec = EstimatedLength(g) + ((PDOSDEF)To_Def)->Ending; MaxSize = (len + rec - 1) / rec; if (trace) - htrc(" Estimated max_K=%d\n", MaxSize); + htrc("avglen=%d MaxSize%d\n", rec, MaxSize); } // endif len @@ -697,10 +2021,8 @@ int TDBDOS::EstimatedLength(PGLOBAL g) /***********************************************************************/ bool TDBDOS::IsUsingTemp(PGLOBAL g) { - USETEMP usetemp = PlgGetUser(g)->UseTemp; - - return (usetemp == TMP_YES || usetemp == TMP_FORCE || - (usetemp == TMP_AUTO && Mode == MODE_UPDATE)); + return (Use_Temp == TMP_YES || Use_Temp == TMP_FORCE || + (Use_Temp == TMP_AUTO && Mode == MODE_UPDATE)); } // end of IsUsingTemp /***********************************************************************/ @@ -723,7 +2045,7 @@ bool TDBDOS::OpenDB(PGLOBAL g) Txfp->Rewind(); // see comment in Work.log if (SkipHeader(g)) - return TRUE; + return true; } else /*****************************************************************/ @@ -731,6 +2053,7 @@ bool TDBDOS::OpenDB(PGLOBAL g) /*****************************************************************/ To_Kindex->Reset(); + ResetBlockFilter(g); return false; } // endif use @@ -739,7 +2062,7 @@ bool TDBDOS::OpenDB(PGLOBAL g) Txfp = new(g) DOSFAM((PDOSDEF)To_Def); Txfp->SetTdbp(this); } else if (Txfp->Blocked && (Mode == MODE_DELETE || - (Mode == MODE_UPDATE && PlgGetUser(g)->UseTemp != TMP_NO))) { + (Mode == MODE_UPDATE && Use_Temp != TMP_NO))) { /*******************************************************************/ /* Delete is not currently handled in block mode neither Update */ /* when using a temporary file. */ @@ -750,7 +2073,7 @@ bool TDBDOS::OpenDB(PGLOBAL g) else if (Txfp->GetAmType() == TYPE_AM_ZIP) Txfp = new(g) ZIPFAM((PDOSDEF)To_Def); #endif // ZIP_SUPPORT - else if (Txfp->GetAmType() != TYPE_AM_DOS) + else // if (Txfp->GetAmType() != TYPE_AM_DOS) ??? Txfp = new(g) DOSFAM((PDOSDEF)To_Def); Txfp->SetTdbp(this); @@ -767,6 +2090,11 @@ bool TDBDOS::OpenDB(PGLOBAL g) Use = USE_OPEN; // Do it now in case we are recursively called /*********************************************************************/ + /* Allocate the block filter tree if evaluation is possible. */ + /*********************************************************************/ + To_BlkFil = InitBlockFilter(g, To_Filter); + + /*********************************************************************/ /* Allocate the line buffer plus a null character. */ /*********************************************************************/ To_Line = (char*)PlugSubAlloc(g, NULL, Lrecl + 1); @@ -847,13 +2175,10 @@ int TDBDOS::ReadDB(PGLOBAL g) } // end of ReadDB /***********************************************************************/ -/* WriteDB: Data Base write routine for DOS access method. */ +/* PrepareWriting: Prepare the line to write. 
*/ /***********************************************************************/ -int TDBDOS::WriteDB(PGLOBAL g) +bool TDBDOS::PrepareWriting(PGLOBAL g) { - if (trace > 1) - htrc("DOS WriteDB: R%d Mode=%d \n", Tdb_No, Mode); - if (!Ftype && (Mode == MODE_INSERT || Txfp->GetUseTemp())) { char *p; @@ -868,6 +2193,20 @@ int TDBDOS::WriteDB(PGLOBAL g) *(++p) = '\0'; } // endif Mode + return false; + } // end of PrepareWriting + +/***********************************************************************/ +/* WriteDB: Data Base write routine for DOS access method. */ +/***********************************************************************/ +int TDBDOS::WriteDB(PGLOBAL g) + { + if (trace > 1) + htrc("DOS WriteDB: R%d Mode=%d \n", Tdb_No, Mode); + + // Make the line to write + (void)PrepareWriting(g); + if (trace > 1) htrc("Write: line is='%s'\n", To_Line); @@ -894,7 +2233,8 @@ void TDBDOS::CloseDB(PGLOBAL g) To_Kindex = NULL; } // endif - Txfp->CloseTableFile(g); + Txfp->CloseTableFile(g, Abort); + RestoreNrec(); } // end of CloseDB // ------------------------ DOSCOL functions ---------------------------- @@ -923,6 +2263,40 @@ DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am) Deplac = cdp->GetOffset(); Long = cdp->GetLong(); To_Val = NULL; + Clustered = cdp->GetOpt(); + Sorted = (cdp->GetOpt() == 2) ? 1 : 0; + Ndv = 0; // Currently used only for XDB2 + Nbm = 0; // Currently used only for XDB2 + Min = NULL; + Max = NULL; + Bmap = NULL; + Dval = NULL; + Buf = NULL; + + if (txfp->Blocked && Opt && (cdp->GetMin() || cdp->GetDval())) { + int nblk = txfp->GetBlock(); + + Clustered = (cdp->GetXdb2()) ? 2 : 1; + Sorted = (cdp->GetOpt() > 1) ? 1 : 0; // Currently ascending only + + if (Clustered == 1) { + Min = AllocValBlock(g, cdp->GetMin(), Buf_Type, nblk, Long, prec); + Max = AllocValBlock(g, cdp->GetMax(), Buf_Type, nblk, Long, prec); + } else { // Clustered == 2 + // Ndv is the number of distinct values in Dval. Ndv and Nbm + // may be 0 when optimizing because Ndval is not filled yet, + // but the size of the passed Dval memory block is Ok. + Ndv = cdp->GetNdv(); + Dval = AllocValBlock(g, cdp->GetDval(), Buf_Type, Ndv, Long, prec); + + // Bmap cannot be allocated when optimizing, we must know Nbm first + if ((Nbm = cdp->GetNbm())) + Bmap = AllocValBlock(g, cdp->GetBmap(), TYPE_INT, Nbm * nblk); + + } // endif Clustered + + } // endif Opt + OldVal = NULL; // Currently used only in MinMax Dsp = 0; Ldz = false; @@ -970,9 +2344,39 @@ DOSCOL::DOSCOL(DOSCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp) Dcm = col1->Dcm; OldVal = col1->OldVal; Buf = col1->Buf; + Clustered = col1->Clustered; + Sorted = col1->Sorted; + Min = col1->Min; + Max = col1->Max; + Bmap = col1->Bmap; + Dval = col1->Dval; + Ndv = col1->Ndv; + Nbm = col1->Nbm; } // end of DOSCOL copy constructor /***********************************************************************/ +/* VarSize: This function tells UpdateDB whether or not the block */ +/* optimization file must be redone if this column is updated, even */ +/* if it is not sorted or clustered. This applies to the last column of */ +/* a variable length table that is blocked, because if it is updated */ +/* using a temporary file, the block size may be modified. 
*/ +/***********************************************************************/ +bool DOSCOL::VarSize(void) + { + PTDBDOS tdbp = (PTDBDOS)To_Tdb; + PTXF txfp = tdbp->Txfp; + + if (Cdp && !Cdp->GetNext() // Must be the last column + && tdbp->Ftype == RECFM_VAR // of a DOS variable length + && txfp->Blocked // blocked table + && txfp->GetUseTemp()) // using a temporary file. + return true; + else + return false; + + } // end VarSize + +/***********************************************************************/ /* SetBuffer: prepare a column block for write operation. */ /***********************************************************************/ bool DOSCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) @@ -1260,6 +2664,144 @@ void DOSCOL::WriteColumn(PGLOBAL g) } // end of WriteColumn /***********************************************************************/ +/* SetMinMax: Calculate minimum and maximum values for one block. */ +/* Note: TYPE_STRING is stored and processed with zero ended strings */ +/* to be matching the way the FILTER Eval function processes them. */ +/***********************************************************************/ +bool DOSCOL::SetMinMax(PGLOBAL g) + { + PTDBDOS tp = (PTDBDOS)To_Tdb; + + ReadColumn(g); // Extract column value from current line + + if (CheckSorted(g)) + return true; + + if (!tp->Txfp->CurNum) { + Min->SetValue(Value, tp->Txfp->CurBlk); + Max->SetValue(Value, tp->Txfp->CurBlk); + } else { + Min->SetMin(Value, tp->Txfp->CurBlk); + Max->SetMax(Value, tp->Txfp->CurBlk); + } // endif CurNum + + return false; + } // end of SetMinMax + +/***********************************************************************/ +/* SetBitMap: Calculate the bit map of existing values in one block. */ +/* Note: TYPE_STRING is processed with zero ended strings */ +/* to be matching the way the FILTER Eval function processes them. */ +/***********************************************************************/ +bool DOSCOL::SetBitMap(PGLOBAL g) + { + int i, m, n; + uint *bmp; + PTDBDOS tp = (PTDBDOS)To_Tdb; + PDBUSER dup = PlgGetUser(g); + + n = tp->Txfp->CurNum; + bmp = (uint*)Bmap->GetValPtr(Nbm * tp->Txfp->CurBlk); + + // Extract column value from current line + ReadColumn(g); + + if (CheckSorted(g)) + return true; + + if (!n) // New block + for (m = 0; m < Nbm; m++) + bmp[m] = 0; // Reset the new bit map + + if ((i = Dval->Find(Value)) < 0) { + char buf[32]; + + sprintf(g->Message, MSG(DVAL_NOTIN_LIST), + Value->GetCharString(buf), Name); + return true; + } else if (i >= dup->Maxbmp) { + sprintf(g->Message, MSG(OPT_LOGIC_ERR), i); + return true; + } else { + m = i / MAXBMP; +#if defined(_DEBUG) + assert (m < Nbm); +#endif // _DEBUG + bmp[m] |= (1 << (i % MAXBMP)); + } // endif's i + + return false; + } // end of SetBitMap + +/***********************************************************************/ +/* Checks whether a column declared as sorted is sorted indeed. 
*/ +/***********************************************************************/ +bool DOSCOL::CheckSorted(PGLOBAL g) + { + if (Sorted) + if (OldVal) { + // Verify whether this column is sorted all right + if (OldVal->CompareValue(Value) > 0) { + // Column is no more in ascending order + sprintf(g->Message, MSG(COL_NOT_SORTED), Name, To_Tdb->GetName()); + Sorted = false; + return true; + } else + OldVal->SetValue_pval(Value); + + } else + OldVal = AllocateValue(g, Value); + + return false; + } // end of CheckSorted + +/***********************************************************************/ +/* AddDistinctValue: Check whether this value already exist in the */ +/* list and if not add it to the distinct values list. */ +/***********************************************************************/ +bool DOSCOL::AddDistinctValue(PGLOBAL g) + { + bool found = false; + int i, m, n; + + ReadColumn(g); // Extract column value from current line + + // Perhaps a better algorithm can be used when Ndv gets bigger + // Here we cannot use Find because we must get the index of where + // to insert a new value if it is not found in the array. + for (n = 0; n < Ndv; n++) { + m = Dval->CompVal(Value, n); + + if (m > 0) + continue; + else if (!m) + found = true; // Already there + + break; + } // endfor n + + if (!found) { + // Check whether we have room for an additional value + if (Ndv == Freq) { + // Too many values because of wrong Freq setting + sprintf(g->Message, MSG(BAD_FREQ_SET), Name); + return true; + } // endif Ndv + + // New value, add it to the list before the nth value + Dval->SetNval(Ndv + 1); + + for (i = Ndv; i > n; i--) + Dval->Move(i - 1, i); + + Dval->SetValue(Value, n); + Ndv++; + } // endif found + + return false; + } // end of AddDistinctValue + +/***********************************************************************/ /* Make file output of a Dos column descriptor block. */ /***********************************************************************/ void DOSCOL::Print(PGLOBAL g, FILE *f, uint n) diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h index 79a2659fb70..1c772e8bf23 100644 --- a/storage/connect/tabdos.h +++ b/storage/connect/tabdos.h @@ -12,9 +12,12 @@ #include "xtable.h" // Table base class declares #include "colblk.h" // Column base class declares #include "xindex.h" +#include "filter.h" //pedef struct _tabdesc *PTABD; // For friend setting typedef class TXTFAM *PTXF; +typedef class BLOCKFILTER *PBF; +typedef class BLOCKINDEX *PBX; /***********************************************************************/ /* DOS table. 
*/ @@ -47,6 +50,11 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ bool GetEof(void) {return Eof;} int GetBlksize(void) {return Blksize;} int GetEnding(void) {return Ending;} + bool IsOptimized(void) {return (Optimized == 1);} + void SetOptimized(int opt) {Optimized = opt;} + void SetAllocBlks(int blks) {AllocBlks = blks;} + int GetAllocBlks(void) {return AllocBlks;} + int *GetTo_Pos(void) {return To_Pos;} // Methods virtual int Indexable(void) @@ -55,6 +63,8 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff); virtual PTDB GetTable(PGLOBAL g, MODE mode); bool InvalidateIndex(PGLOBAL g); + bool GetOptFileName(PGLOBAL g, char *filename); + void RemoveOptValues(PGLOBAL g); protected: //virtual bool Erase(char *filename); @@ -69,6 +79,9 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ bool Huge; /* true for files larger than 2GB */ bool Accept; /* true if wrong lines are accepted (DBF)*/ bool Eof; /* true if an EOF (0xA) character exists */ + int *To_Pos; /* To array of block starting positions */ + int Optimized; /* 0: No, 1:Yes, 2:Redo optimization */ + int AllocBlks; /* Number of suballocated opt blocks */ int Compressed; /* 0: No, 1: gz, 2:zlib compressed file */ int Lrecl; /* Size of biggest record */ int AvgLen; /* Average size of records */ @@ -112,6 +125,7 @@ class DllExport TDBDOS : public TDBASE { virtual AMT GetAmType(void) {return Txfp->GetAmType();} virtual PSZ GetFile(PGLOBAL g) {return Txfp->To_File;} virtual void SetFile(PGLOBAL g, PSZ fn) {Txfp->To_File = fn;} + virtual void SetAbort(bool b) {Abort = b;} virtual RECFM GetFtype(void) {return Ftype;} virtual bool SkipHeader(PGLOBAL g) {return false;} virtual void RestoreNrec(void) {Txfp->SetNrec(1);} @@ -122,8 +136,15 @@ class DllExport TDBDOS : public TDBASE { virtual PTDB CopyOne(PTABS t); virtual void ResetDB(void) {Txfp->Reset();} virtual bool IsUsingTemp(PGLOBAL g); + virtual bool IsIndexed(void) {return Indxd;} virtual void ResetSize(void) {MaxSize = Cardinal = -1;} - virtual int ResetTableOpt(PGLOBAL g, bool dox); + virtual int ResetTableOpt(PGLOBAL g, bool dop, bool dox); + virtual int MakeBlockValues(PGLOBAL g); + virtual bool SaveBlockValues(PGLOBAL g); + virtual bool GetBlockValues(PGLOBAL g); + virtual PBF InitBlockFilter(PGLOBAL g, PFIL filp); +//virtual PBX InitBlockIndex(PGLOBAL g); + virtual int TestBlock(PGLOBAL g); virtual void PrintAM(FILE *f, char *m); // Database routines @@ -132,7 +153,7 @@ class DllExport TDBDOS : public TDBASE { virtual int GetFileLength(PGLOBAL g) {return Txfp->GetFileLength(g);} virtual int GetProgMax(PGLOBAL g); virtual int GetProgCur(void); - virtual int GetAffectedRows(void) {return Txfp->GetDelRows();} +//virtual int GetAffectedRows(void) {return Txfp->GetDelRows();} virtual int GetRecpos(void) {return Txfp->GetPos();} virtual bool SetRecpos(PGLOBAL g, int recpos) {return Txfp->SetPos(g, recpos);} @@ -151,15 +172,26 @@ class DllExport TDBDOS : public TDBASE { // Optimization routines virtual int MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add); + bool InitialyzeIndex(PGLOBAL g, PIXDEF xdp, bool sorted); + void ResetBlockFilter(PGLOBAL g); + bool GetDistinctColumnValues(PGLOBAL g, int nrec); protected: + virtual bool PrepareWriting(PGLOBAL g); + PBF CheckBlockFilari(PGLOBAL g, PXOB *arg, int op, bool *cnv); + // Members PTXF Txfp; // To the File access method class +//PBX To_BlkIdx; // To index test block + PBF To_BlkFil; // To evaluation block filter + 
PFIL SavFil; // Saved hidden filter char *To_Line; // Points to current processed line - int Cardinal; // Table Cardinality - RECFM Ftype; // File type: 0-var 1-fixed 2-binary (VCT) + bool Abort; // TRUE when aborting UPDATE/DELETE + bool Indxd; // TRUE for indexed UPDATE/DELETE int Lrecl; // Logical Record Length int AvgLen; // Logical Record Average Length +//int Xeval; // BlockTest return value + int Beval; // BlockEval return value }; // end of class TDBDOS /***********************************************************************/ @@ -178,19 +210,38 @@ class DllExport DOSCOL : public COLBLK { // Implementation virtual int GetAmType(void) {return TYPE_AM_DOS;} virtual void SetTo_Val(PVAL valp) {To_Val = valp;} + virtual int GetClustered(void) {return Clustered;} + virtual int IsClustered(void) {return (Clustered && + ((PDOSDEF)(((PTDBDOS)To_Tdb)->To_Def))->IsOptimized());} + virtual int IsSorted(void) {return Sorted;} + virtual PVBLK GetMin(void) {return Min;} + virtual PVBLK GetMax(void) {return Max;} + virtual int GetNdv(void) {return Ndv;} + virtual int GetNbm(void) {return Nbm;} + virtual PVBLK GetBmap(void) {return Bmap;} + virtual PVBLK GetDval(void) {return Dval;} // Methods + virtual bool VarSize(void); virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); virtual void Print(PGLOBAL g, FILE *, uint); protected: + virtual bool SetMinMax(PGLOBAL g); + virtual bool SetBitMap(PGLOBAL g); + bool CheckSorted(PGLOBAL g); + bool AddDistinctValue(PGLOBAL g); // Default constructor not to be used DOSCOL(void) {} // Members + PVBLK Min; // Array of block min values + PVBLK Max; // Array of block max values + PVBLK Bmap; // Array of block bitmap values + PVBLK Dval; // Array of column distinct values PVAL To_Val; // To value used for Update/Insert PVAL OldVal; // The previous value of the object. char *Buf; // Buffer used in read/write operations @@ -199,6 +250,10 @@ class DllExport DOSCOL : public COLBLK { bool Nod; // True if no decimal point int Dcm; // Last Dcm digits are decimals int Deplac; // Offset in dos_buf + int Clustered; // 0:No 1:Yes + int Sorted; // 0:No 1:Asc (2:Desc - NIY) + int Ndv; // Number of distinct values + int Nbm; // Number of uint in bitmap }; // end of class DOSCOL #endif // __TABDOS_H diff --git a/storage/connect/tabfix.cpp b/storage/connect/tabfix.cpp index cda08b7e3a6..91f06536272 100644 --- a/storage/connect/tabfix.cpp +++ b/storage/connect/tabfix.cpp @@ -45,11 +45,15 @@ #include "filamfix.h" #include "filamdbf.h" #include "tabfix.h" // TDBFIX, FIXCOL classes declares +#include "array.h" +#include "blkfil.h" /***********************************************************************/ /* DB static variables. 
*/ /***********************************************************************/ -extern "C" int trace; +extern "C" int trace; +extern "C" USETEMP Use_Temp; + extern int num_read, num_there, num_eq[2]; // Statistics static const longlong M2G = 0x80000000; static const longlong M4G = (longlong)2 * M2G; @@ -61,12 +65,10 @@ static const longlong M4G = (longlong)2 * M2G; /***********************************************************************/ TDBFIX::TDBFIX(PDOSDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) { -//Cardinal = -1; } // end of TDBFIX standard constructor TDBFIX::TDBFIX(PGLOBAL g, PTDBFIX tdbp) : TDBDOS(g, tdbp) { -//Cardinal = tdbp->Cardinal; } // end of TDBFIX copy constructor // Method @@ -123,10 +125,48 @@ PCOL TDBFIX::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /***********************************************************************/ /* Remake the indexes after the table was modified. */ /***********************************************************************/ -int TDBFIX::ResetTableOpt(PGLOBAL g, bool dox) +int TDBFIX::ResetTableOpt(PGLOBAL g, bool dop, bool dox) { + int prc, rc = RC_OK; + + To_Filter = NULL; // Disable filtering +//To_BlkIdx = NULL; // and block filtering + To_BlkFil = NULL; // and index filtering + Cardinality(g); // If called by create RestoreNrec(); // May have been modified - return TDBDOS::ResetTableOpt(g, dox); + MaxSize = -1; // Size must be recalculated + Cardinal = -1; // as well as Cardinality + + // After the table was modified the indexes + // are invalid and we should mark them as such... + rc = ((PDOSDEF)To_Def)->InvalidateIndex(g); + + if (dop) { + Columns = NULL; // Not used anymore + Txfp->Reset(); +// OldBlk = CurBlk = -1; +// ReadBlks = CurNum = Rbuf = Modif = 0; + Use = USE_READY; // So the table can be reopened + Mode = MODE_ANY; // Just to be clean + rc = MakeBlockValues(g); // Redo optimization + } // endif dop + + if (dox && (rc == RC_OK || rc == RC_INFO)) { + // Remake eventual indexes + Columns = NULL; // Not used anymore + Txfp->Reset(); // New start + Use = USE_READY; // So the table can be reopened + Mode = MODE_READ; // New mode + prc = rc; + + if (PlgGetUser(g)->Check & CHK_OPT) + // We must remake indexes. + rc = MakeIndex(g, NULL, FALSE); + + rc = (rc == RC_INFO) ? prc : rc; + } // endif dox + + return rc; } // end of ResetTableOpt /***********************************************************************/ @@ -138,6 +178,11 @@ void TDBFIX::RestoreNrec(void) Txfp->Nrec = (To_Def && To_Def->GetElemt()) ? To_Def->GetElemt() : DOS_BUFF_LEN; Txfp->Blksize = Txfp->Nrec * Txfp->Lrecl; + + if (Cardinal >= 0) + Txfp->Block = (Cardinal > 0) + ? 
(Cardinal + Txfp->Nrec - 1) / Txfp->Nrec : 0; + } // endif Padded } // end of RestoreNrec @@ -163,9 +208,18 @@ int TDBFIX::Cardinality(PGLOBAL g) /***********************************************************************/ int TDBFIX::GetMaxSize(PGLOBAL g) { - if (MaxSize < 0) + if (MaxSize < 0) { MaxSize = Cardinality(g); + if (MaxSize > 0 && (To_BlkFil = InitBlockFilter(g, To_Filter)) + && !To_BlkFil->Correlated()) { + // Use BlockTest to reduce the estimated size + MaxSize = Txfp->MaxBlkSize(g, MaxSize); + ResetBlockFilter(g); + } // endif To_BlkFil + + } // endif MaxSize + return MaxSize; } // end of GetMaxSize @@ -217,9 +271,11 @@ int TDBFIX::RowNumber(PGLOBAL g, bool b) /***********************************************************************/ bool TDBFIX::IsUsingTemp(PGLOBAL g) { - USETEMP usetemp = PlgGetUser(g)->UseTemp; - - return (usetemp == TMP_YES || usetemp == TMP_FORCE); + // Not ready yet to handle using a temporary file with mapping + // or while deleting from DBF files. + return ((Use_Temp == TMP_YES && Txfp->GetAmType() != TYPE_AM_MAP && + !(Mode == MODE_DELETE && Txfp->GetAmType() == TYPE_AM_DBF)) || + Use_Temp == TMP_FORCE || Use_Temp == TMP_TEST); } // end of IsUsingTemp /***********************************************************************/ @@ -246,11 +302,13 @@ bool TDBFIX::OpenDB(PGLOBAL g) else Txfp->Rewind(); // see comment in Work.log + ResetBlockFilter(g); return false; } // endif use - if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() == TYPE_AM_MAP) { - // Delete all lines. Not handled in MAP mode + if (Mode == MODE_DELETE && Txfp->GetAmType() == TYPE_AM_MAP && + (!Next || Use_Temp == TMP_FORCE)) { + // Delete all lines or using temp. Not handled in MAP mode Txfp = new(g) FIXFAM((PDOSDEF)To_Def); Txfp->SetTdbp(this); } // endif Mode @@ -277,8 +335,13 @@ bool TDBFIX::OpenDB(PGLOBAL g) /*********************************************************************/ To_Line = Txfp->GetBuf(); // For WriteDB + /*********************************************************************/ + /* Allocate the block filter tree if evaluation is possible. */ + /*********************************************************************/ + To_BlkFil = InitBlockFilter(g, To_Filter); + if (trace) - htrc("OpenDos: R%hd mode=%d\n", Tdb_No, Mode); + htrc("OpenFix: R%hd mode=%d BlkFil=%p\n", Tdb_No, Mode, To_BlkFil); /*********************************************************************/ /* Reset buffer access according to indexing and to mode. 
*/ diff --git a/storage/connect/tabfix.h b/storage/connect/tabfix.h index bcd171b37bb..7d5b964da2a 100644 --- a/storage/connect/tabfix.h +++ b/storage/connect/tabfix.h @@ -38,7 +38,7 @@ class DllExport TDBFIX : public TDBDOS { virtual void ResetDB(void); virtual bool IsUsingTemp(PGLOBAL g); virtual int RowNumber(PGLOBAL g, bool b = false); - virtual int ResetTableOpt(PGLOBAL g, bool dox); + virtual int ResetTableOpt(PGLOBAL g, bool dop, bool dox); virtual void ResetSize(void); virtual int GetBadLines(void) {return Txfp->GetNerr();} @@ -51,6 +51,8 @@ class DllExport TDBFIX : public TDBDOS { virtual int WriteDB(PGLOBAL g); protected: + virtual bool PrepareWriting(PGLOBAL g) {return false;} + // Members are inherited from TDBDOS }; // end of class TDBFIX @@ -89,7 +91,8 @@ class TDBDCL : public TDBCAT { protected: // Specific routines - virtual PQRYRES GetResult(PGLOBAL g) {return DBFColumns(g, Fn, false);} + virtual PQRYRES GetResult(PGLOBAL g) + {return DBFColumns(g, ((PTABDEF)To_Def)->GetPath(), Fn, false);} // Members char *Fn; // The DBF file (path) name diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index 7665395167d..c1119c57065 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -1,11 +1,11 @@ /************* TabFmt C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABFMT */ /* ------------- */ -/* Version 3.8 */ +/* Version 3.9 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2001 - 2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2001 - 2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -16,7 +16,7 @@ /***********************************************************************/ /***********************************************************************/ -/* Include relevant MariaDB header file. */ +/* Include relevant MariaDB header file. */ /***********************************************************************/ #include "my_global.h" @@ -66,7 +66,8 @@ #define MAXCOL 200 /* Default max column nb in result */ #define TYPE_UNKNOWN 10 /* Must be greater than other types */ -extern "C" int trace; +extern "C" int trace; +extern "C" USETEMP Use_Temp; /***********************************************************************/ /* CSVColumns: constructs the result blocks containing the description */ @@ -75,8 +76,8 @@ extern "C" int trace; /* of types (TYPE_STRING < TYPE_DOUBLE < TYPE_INT) (1 < 2 < 7). */ /* If these values are changed, this will have to be revisited. */ /***********************************************************************/ -PQRYRES CSVColumns(PGLOBAL g, const char *fn, char sep, char q, - int hdr, int mxr, bool info) +PQRYRES CSVColumns(PGLOBAL g, char *dp, const char *fn, char sep, + char q, int hdr, int mxr, bool info) { static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_SHORT}; @@ -130,7 +131,7 @@ PQRYRES CSVColumns(PGLOBAL g, const char *fn, char sep, char q, /*********************************************************************/ /* Open the input file. 
*/ /*********************************************************************/ - PlugSetPath(filename, fn, PlgGetDataPath(g)); + PlugSetPath(filename, fn, dp); if (!(infile= global_fopen(g, MSGID_CANNOT_OPEN, filename, "r"))) return NULL; @@ -145,7 +146,7 @@ PQRYRES CSVColumns(PGLOBAL g, const char *fn, char sep, char q, n = strlen(buf) + 1; buf[n - 2] = '\0'; #if defined(UNIX) - // The file can be imported from Windows + // The file can be imported from Windows if (buf[n - 3] == '\r') buf[n - 3] = 0; #endif // UNIX @@ -202,7 +203,7 @@ PQRYRES CSVColumns(PGLOBAL g, const char *fn, char sep, char q, n = strlen(buf); buf[n - 1] = '\0'; #if defined(UNIX) - // The file can be imported from Windows + // The file can be imported from Windows if (buf[n - 2] == '\r') buf[n - 2] = 0; #endif // UNIX @@ -392,7 +393,7 @@ CSVDEF::CSVDEF(void) Fmtd = Accept = Header = false; Maxerr = 0; Quoted = -1; - Sep = ','; + Sep = ','; Qot = '\0'; } // end of CSVDEF constructor @@ -441,7 +442,7 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode) PTDBASE tdbp; if (Catfunc != FNC_COL) { - USETEMP tmp = PlgGetUser(g)->UseTemp; + USETEMP tmp = Use_Temp; bool map = Mapped && mode != MODE_INSERT && !(tmp != TMP_NO && mode == MODE_UPDATE) && !(tmp == TMP_FORCE && @@ -458,10 +459,9 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode) #if defined(ZIP_SUPPORT) if (Compressed == 1) txfp = new(g) ZIPFAM(this); - else { - strcpy(g->Message, "Compress 2 not supported yet"); - return NULL; - } // endelse + else + txfp = new(g) ZLBFAM(this); + #else // !ZIP_SUPPORT strcpy(g->Message, "Compress not supported"); return NULL; @@ -480,6 +480,36 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode) if (Multiple) tdbp = new(g) TDBMUL(tdbp); + else + /*****************************************************************/ + /* For block tables, get eventually saved optimization values. */ + /*****************************************************************/ + if (tdbp->GetBlockValues(g)) { + PushWarning(g, tdbp); +// return NULL; // causes a crash when deleting index + } else { + if (IsOptimized()) { + if (map) { + txfp = new(g) MBKFAM(this); + } else if (Compressed) { +#if defined(ZIP_SUPPORT) + if (Compressed == 1) + txfp = new(g) ZBKFAM(this); + else { + txfp->SetBlkPos(To_Pos); + ((PZLBFAM)txfp)->SetOptimized(To_Pos != NULL); + } // endelse +#else + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return NULL; +#endif + } else + txfp = new(g) BLKFAM(this); + + ((PTDBDOS)tdbp)->SetTxfp(txfp); + } // endif Optimized + + } // endelse } else tdbp = new(g)TDBCCL(this); @@ -591,34 +621,27 @@ bool TDBCSV::CheckErr(void) /***********************************************************************/ int TDBCSV::EstimatedLength(PGLOBAL g) { + int n = 0; + PCOLDEF cdp; + if (trace) htrc("EstimatedLength: Fields=%d Columns=%p\n", Fields, Columns); - - if (!Fields) { - PCSVCOL colp; - - for (colp = (PCSVCOL)Columns; colp; colp = (PCSVCOL)colp->Next) - if (!colp->IsSpecial() && !colp->IsVirtual()) // A true column - Fields = MY_MAX(Fields, (int)colp->Fldnum); - - if (Columns) - Fields++; // Fldnum was 0 based - } // endif Fields + for (cdp = To_Def->GetCols(); cdp; cdp = cdp->GetNext()) + if (!cdp->IsSpecial() && !cdp->IsVirtual()) // A true column + n++; - return (int)Fields; // Number of separators if all fields are null + return --n; // Number of separators if all fields are null } // end of Estimated Length #if 0 /***********************************************************************/ -/* CSV tables favor the use temporary files for Update. 
*/ +/* CSV tables need to use temporary files for Update. */ /***********************************************************************/ bool TDBCSV::IsUsingTemp(PGLOBAL g) { - USETEMP usetemp = PlgGetUser(g)->UseTemp; - - return (usetemp == TMP_YES || usetemp == TMP_FORCE || - (usetemp == TMP_AUTO && Mode == MODE_UPDATE)); + return (Use_Temp == TMP_YES || Use_Temp == TMP_FORCE || + (Use_Temp == TMP_AUTO && Mode == MODE_UPDATE)); } // end of IsUsingTemp #endif // 0 (Same as TDBDOS one) @@ -649,7 +672,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) } else for (cdp = tdp->GetCols(); cdp; cdp = cdp->GetNext()) - if (!cdp->IsVirtual()) + if (!cdp->IsSpecial() && !cdp->IsVirtual()) Fields++; Offset = (int*)PlugSubAlloc(g, NULL, sizeof(int) * Fields); @@ -686,7 +709,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) } else // MODE_UPDATE for (cdp = tdp->GetCols(); cdp; cdp = cdp->GetNext()) - if (!cdp->IsVirtual()) { + if (!cdp->IsSpecial() && !cdp->IsVirtual()) { i = cdp->GetOffset() - 1; len = cdp->GetLength(); Field[i] = (PSZ)PlugSubAlloc(g, NULL, len + 1); @@ -905,9 +928,9 @@ int TDBCSV::ReadBuffer(PGLOBAL g) } // end of ReadBuffer /***********************************************************************/ -/* Data Base write routine CSV file access method. */ +/* Prepare the line to write. */ /***********************************************************************/ -int TDBCSV::WriteDB(PGLOBAL g) +bool TDBCSV::PrepareWriting(PGLOBAL g) { char sep[2], qot[2]; int i, nlen, oldlen = strlen(To_Line); @@ -918,7 +941,7 @@ int TDBCSV::WriteDB(PGLOBAL g) // Before writing the line we must check its length if ((nlen = CheckWrite(g)) < 0) - return RC_FX; + return true; // Before writing the line we must make it sep[0] = Sep; @@ -981,6 +1004,18 @@ int TDBCSV::WriteDB(PGLOBAL g) if (trace > 1) htrc("Write: line is=%s", To_Line); + return false; + } // end of PrepareWriting + +/***********************************************************************/ +/* Data Base write routine CSV file access method. */ +/***********************************************************************/ +int TDBCSV::WriteDB(PGLOBAL g) + { + // Before writing the line we must check and prepare it + if (PrepareWriting(g)) + return RC_FX; + /*********************************************************************/ /* Now start the writing process. */ /*********************************************************************/ @@ -1080,7 +1115,7 @@ PCOL TDBFMT::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) int TDBFMT::EstimatedLength(PGLOBAL g) { // This is rather stupid !!! - return ((PDOSDEF)To_Def)->GetEnding() + (int)((Lrecl / 10) + 1); + return ((PDOSDEF)To_Def)->GetEnding() + (int)((Lrecl / 10) + 1); } // end of EstimatedLength /***********************************************************************/ @@ -1118,7 +1153,8 @@ bool TDBFMT::OpenDB(PGLOBAL g) // Get the column formats for (cdp = tdp->GetCols(); cdp; cdp = cdp->GetNext()) - if (!cdp->IsVirtual() && (i = cdp->GetOffset() - 1) < Fields) { + if (!cdp->IsSpecial() && !cdp->IsVirtual() + && (i = cdp->GetOffset() - 1) < Fields) { if (!(pfm = cdp->GetFmt())) { sprintf(g->Message, MSG(NO_FLD_FORMAT), i + 1, Name); return true; @@ -1275,6 +1311,25 @@ CSVCOL::CSVCOL(CSVCOL *col1, PTDB tdbp) : DOSCOL(col1, tdbp) } // end of CSVCOL copy constructor /***********************************************************************/ +/* VarSize: This function tells UpdateDB whether or not the block */ +/* optimization file must be redone if this column is updated, even */ +/* if it is not sorted or clustered. 
This applies to a blocked table, */ +/* because if it is updated using a temporary file, the block size */ +/* may be modified. */ +/***********************************************************************/ +bool CSVCOL::VarSize(void) + { + PTXF txfp = ((PTDBCSV)To_Tdb)->Txfp; + + if (txfp->IsBlocked() && txfp->GetUseTemp()) + // Blocked table using a temporary file + return true; + else + return false; + + } // end VarSize + +/***********************************************************************/ /* ReadColumn: call DOSCOL::ReadColumn after having set the offet */ /* and length of the field to read as calculated by TDBCSV::ReadDB. */ /***********************************************************************/ @@ -1408,7 +1463,7 @@ TDBCCL::TDBCCL(PCSVDEF tdp) : TDBCAT(tdp) Hdr = tdp->Header; Mxr = tdp->Maxerr; Qtd = tdp->Quoted; - Sep = tdp->Sep; + Sep = tdp->Sep; } // end of TDBCCL constructor /***********************************************************************/ @@ -1416,7 +1471,8 @@ TDBCCL::TDBCCL(PCSVDEF tdp) : TDBCAT(tdp) /***********************************************************************/ PQRYRES TDBCCL::GetResult(PGLOBAL g) { - return CSVColumns(g, Fn, Sep, Qtd, Hdr, Mxr, false); - } // end of GetResult + return CSVColumns(g, ((PTABDEF)To_Def)->GetPath(), + Fn, Sep, Qtd, Hdr, Mxr, false); + } // end of GetResult /* ------------------------ End of TabFmt ---------------------------- */ diff --git a/storage/connect/tabfmt.h b/storage/connect/tabfmt.h index 5efa824c2e2..1b39a47e7d9 100644 --- a/storage/connect/tabfmt.h +++ b/storage/connect/tabfmt.h @@ -1,7 +1,7 @@ /*************** TabFmt H Declares Source Code File (.H) ***************/ -/* Name: TABFMT.H Version 2.3 */ +/* Name: TABFMT.H Version 2.4 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2001-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2001-2014 */ /* */ /* This file contains the CSV and FMT classes declares. */ /***********************************************************************/ @@ -13,8 +13,8 @@ typedef class TDBFMT *PTDBFMT; /***********************************************************************/ /* Functions used externally. */ /***********************************************************************/ -PQRYRES CSVColumns(PGLOBAL g, const char *fn, char sep, char q, - int hdr, int mxr, bool info); +PQRYRES CSVColumns(PGLOBAL g, char *dp, const char *fn, char sep, + char q, int hdr, int mxr, bool info); /***********************************************************************/ /* CSV table. 
*/ @@ -80,6 +80,8 @@ class TDBCSV : public TDBDOS { virtual bool CheckErr(void); protected: + virtual bool PrepareWriting(PGLOBAL g); + // Members PSZ *Field; // Field to write to current line int *Offset; // Column offsets for current record @@ -111,6 +113,7 @@ class CSVCOL : public DOSCOL { virtual int GetAmType() {return TYPE_AM_CSV;} // Methods + virtual bool VarSize(void); virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); @@ -157,6 +160,9 @@ class TDBFMT : public TDBCSV { virtual int EstimatedLength(PGLOBAL g); protected: + virtual bool PrepareWriting(PGLOBAL g) + {strcpy(g->Message, "FMT is read only"); return true;} + // Members PSZ *FldFormat; // Field read format void *To_Fld; // To field test buffer @@ -173,15 +179,15 @@ class TDBCCL : public TDBCAT { TDBCCL(PCSVDEF tdp); protected: - // Specific routines - virtual PQRYRES GetResult(PGLOBAL g); + // Specific routines + virtual PQRYRES GetResult(PGLOBAL g); // Members char *Fn; // The CSV file (path) name bool Hdr; // true if first line contains headers int Mxr; // Maximum number of bad records int Qtd; // Quoting level for quoted fields - char Sep; // Separator for standard CSV files + char Sep; // Separator for standard CSV files }; // end of class TDBCCL /* ------------------------- End of TabFmt.H ------------------------- */ diff --git a/storage/connect/table.cpp b/storage/connect/table.cpp index 325e80945f7..5db50d44787 100644 --- a/storage/connect/table.cpp +++ b/storage/connect/table.cpp @@ -45,19 +45,22 @@ TDB::TDB(PTABDEF tdp) : Tdb_No(++Tnum) { Use = USE_NO; To_Orig = NULL; + To_Filter = NULL; To_CondFil = NULL; Next = NULL; Name = (tdp) ? tdp->GetName() : NULL; To_Table = NULL; Columns = NULL; Degree = (tdp) ? tdp->GetDegree() : 0; - Mode = MODE_READ; + Mode = MODE_ANY; + Cardinal = -1; } // end of TDB standard constructor TDB::TDB(PTDB tdbp) : Tdb_No(++Tnum) { Use = tdbp->Use; To_Orig = tdbp; + To_Filter = NULL; To_CondFil = NULL; Next = NULL; Name = tdbp->Name; @@ -65,6 +68,7 @@ TDB::TDB(PTDB tdbp) : Tdb_No(++Tnum) Columns = NULL; Degree = tdbp->Degree; Mode = tdbp->Mode; + Cardinal = tdbp->Cardinal; } // end of TDB copy constructor // Methods @@ -137,7 +141,9 @@ TDBASE::TDBASE(PTABDEF tdp) : TDB(tdp) To_Link = NULL; To_Key_Col = NULL; To_Kindex = NULL; + To_Xdp = NULL; To_SetCols = NULL; + Ftype = RECFM_NAF; MaxSize = -1; Knum = 0; Read_Only = (tdp) ? tdp->IsReadOnly() : false; @@ -147,8 +153,14 @@ TDBASE::TDBASE(PTABDEF tdp) : TDB(tdp) TDBASE::TDBASE(PTDBASE tdbp) : TDB(tdbp) { To_Def = tdbp->To_Def; + To_Link = tdbp->To_Link; + To_Key_Col = tdbp->To_Key_Col; + To_Kindex = tdbp->To_Kindex; + To_Xdp = tdbp->To_Xdp; To_SetCols = tdbp->To_SetCols; // ??? + Ftype = tdbp->Ftype; MaxSize = tdbp->MaxSize; + Knum = tdbp->Knum; Read_Only = tdbp->Read_Only; m_data_charset= tdbp->m_data_charset; } // end of TDBASE copy constructor @@ -167,7 +179,7 @@ PCATLG TDBASE::GetCat(void) CHARSET_INFO *TDBASE::data_charset(void) { // If no DATA_CHARSET is specified, we assume that character - // set of the remote data is the same with CHARACTER SET + // set of the remote data is the same with CHARACTER SET // definition of the SQL column. return m_data_charset ? m_data_charset : &my_charset_bin; } // end of data_charset @@ -204,10 +216,11 @@ PCOL TDBASE::ColDB(PGLOBAL g, PSZ name, int num) /* Also find where to insert the new block. 
*/ /*****************************************************************/ for (cp = Columns; cp; cp = cp->GetNext()) - if (cp->GetIndex() < i) + if ((num && cp->GetIndex() == i) || + (name && !stricmp(cp->GetName(), name))) + break; // Found + else if (cp->GetIndex() < i) cprec = cp; - else if (cp->GetIndex() == i) - break; if (trace) htrc("cdp(%d).Name=%s cp=%p\n", i, cdp->GetName(), cp); @@ -219,12 +232,12 @@ PCOL TDBASE::ColDB(PGLOBAL g, PSZ name, int num) colp = cp; else if (!(cdp->Flags & U_SPECIAL)) colp = MakeCol(g, cdp, cprec, i); - else if (Mode == MODE_READ) + else if (Mode != MODE_INSERT) colp = InsertSpcBlk(g, cdp); if (trace) htrc("colp=%p\n", colp); - + if (name || num) break; else if (colp && !colp->IsSpecial()) @@ -259,22 +272,38 @@ PCOL TDBASE::InsertSpcBlk(PGLOBAL g, PCOLDEF cdp) PCOL colp; cp= new(g) COLUMN(cdp->GetName()); - cp->SetTo_Table(To_Table); - if (!stricmp(name, "FILEID") || - !stricmp(name, "SERVID")) { + if (! To_Table) { + strcpy(g->Message, "Cannot make special column: To_Table is NULL"); + return NULL; + } else + cp->SetTo_Table(To_Table); + + if (!stricmp(name, "FILEID") || !stricmp(name, "FDISK") || + !stricmp(name, "FPATH") || !stricmp(name, "FNAME") || + !stricmp(name, "FTYPE") || !stricmp(name, "SERVID")) { if (!To_Def || !(To_Def->GetPseudo() & 2)) { sprintf(g->Message, MSG(BAD_SPEC_COLUMN)); return NULL; } // endif Pseudo if (!stricmp(name, "FILEID")) - colp = new(g) FIDBLK(cp); + colp = new(g) FIDBLK(cp, OP_XX); + else if (!stricmp(name, "FDISK")) + colp = new(g) FIDBLK(cp, OP_FDISK); + else if (!stricmp(name, "FPATH")) + colp = new(g) FIDBLK(cp, OP_FPATH); + else if (!stricmp(name, "FNAME")) + colp = new(g) FIDBLK(cp, OP_FNAME); + else if (!stricmp(name, "FTYPE")) + colp = new(g) FIDBLK(cp, OP_FTYPE); else colp = new(g) SIDBLK(cp); } else if (!stricmp(name, "TABID")) { colp = new(g) TIDBLK(cp); + } else if (!stricmp(name, "PARTID")) { + colp = new(g) PRTBLK(cp); //} else if (!stricmp(name, "CONID")) { // colp = new(g) CIDBLK(cp); } else if (!stricmp(name, "ROWID")) { @@ -297,7 +326,7 @@ PCOL TDBASE::InsertSpcBlk(PGLOBAL g, PCOLDEF cdp) /***********************************************************************/ /* ResetTableOpt: Wrong for this table type. */ /***********************************************************************/ -int TDBASE::ResetTableOpt(PGLOBAL g, bool dox) +int TDBASE::ResetTableOpt(PGLOBAL g, bool dop, bool dox) { strcpy(g->Message, "This table is not indexable"); return RC_INFO; @@ -324,7 +353,7 @@ void TDBASE::ResetKindex(PGLOBAL g, PKXBASE kxp) /***********************************************************************/ /* SetRecpos: Replace the table at the specified position. */ /***********************************************************************/ -bool TDBASE::SetRecpos(PGLOBAL g, int recpos) +bool TDBASE::SetRecpos(PGLOBAL g, int recpos) { strcpy(g->Message, MSG(SETRECPOS_NIY)); return true; @@ -389,8 +418,8 @@ PCOL TDBCAT::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /***********************************************************************/ bool TDBCAT::Initialize(PGLOBAL g) { - if (Init) - return false; + if (Init) + return false; if (!(Qrp = GetResult(g))) return true; @@ -405,9 +434,9 @@ bool TDBCAT::Initialize(PGLOBAL g) PushWarning(g, this); } // endif Badlines - Init = true; - return false; - } // end of Initialize + Init = true; + return false; + } // end of Initialize /***********************************************************************/ /* CAT: Get the number of properties. 
*/ @@ -487,7 +516,7 @@ bool TDBCAT::InitCol(PGLOBAL g) /***********************************************************************/ /* SetRecpos: Replace the table at the specified position. */ /***********************************************************************/ -bool TDBCAT::SetRecpos(PGLOBAL g, int recpos) +bool TDBCAT::SetRecpos(PGLOBAL g, int recpos) { N = recpos - 1; return false; diff --git a/storage/connect/tabmac.h b/storage/connect/tabmac.h index eb115b18049..5e6c98d68fb 100644 --- a/storage/connect/tabmac.h +++ b/storage/connect/tabmac.h @@ -58,6 +58,7 @@ class TDBMAC : public TDBASE { // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); + virtual int Cardinality(PGLOBAL g) {return GetMaxSize(g);} virtual int GetMaxSize(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp index 6e4a63d0f6d..4b40e6c5509 100755 --- a/storage/connect/tabmul.cpp +++ b/storage/connect/tabmul.cpp @@ -680,7 +680,7 @@ char* TDBDIR::Path(PGLOBAL g) #if defined(WIN32) if (!*Drive) { - PlugSetPath(Fpath, To_File, cat->GetDataPath()); + PlugSetPath(Fpath, To_File, ((PTABDEF)To_Def)->GetPath()); _splitpath(Fpath, Drive, Direc, Fname, Ftype); } else _makepath(Fpath, Drive, Direc, Fname, Ftype); // Usefull ??? @@ -688,7 +688,7 @@ char* TDBDIR::Path(PGLOBAL g) return Fpath; #else // !WIN32 if (!Done) { - PlugSetPath(Fpath, To_File, cat->GetDataPath()); + PlugSetPath(Fpath, To_File, ((PTABDEF)To_Def)->GetPath()); _splitpath(Fpath, NULL, Direc, Fname, Ftype); strcat(strcpy(Pattern, Fname), Ftype); Done = true; diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp index 37c72501840..6acdcbb3a8e 100644 --- a/storage/connect/tabmysql.cpp +++ b/storage/connect/tabmysql.cpp @@ -1,11 +1,11 @@ /************* TabMySQL C++ Program Source Code File (.CPP) *************/ /* PROGRAM NAME: TABMYSQL */ /* ------------- */ -/* Version 1.7 */ +/* Version 1.9 */ /* */ /* AUTHOR: */ /* ------- */ -/* Olivier BERTRAND 2007-2013 */ +/* Olivier BERTRAND 2007-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -68,6 +68,11 @@ void PrintResult(PGLOBAL, PSEM, PQRYRES); #endif // _CONSOLE extern "C" int trace; +extern bool xinfo; + +// Used to check whether a MYSQL table is created on itself +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, + const char *db, char *tab, const char *src, int port); /* -------------- Implementation of the MYSQLDEF class --------------- */ @@ -109,7 +114,7 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name) } // endif server_name // get_server_by_name() clones the server if exists and allocates - // copies of strings in the supplied mem_root + // copies of strings in the supplied mem_root if (!(server= get_server_by_name(mem, server_name, &server_buffer))) { DBUG_PRINT("info", ("get_server_by_name returned > 0 error condition!")); /* need to come up with error handling */ @@ -181,7 +186,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) // connection name of either "server" or "server/table" // ok, so we do a little parsing, but not completely! 
if ((Tabname= strchr(url, '/'))) { - // If there is a single '/' in the connection string, + // If there is a single '/' in the connection string, // this means the user is specifying a table name *Tabname++= '\0'; @@ -260,7 +265,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) } // endif / } // endif Tabname - + } // endif database if ((sport = strchr(Hostname, ':'))) @@ -311,7 +316,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) // Normal case of specific MYSQL table url = GetStringCatInfo(g, "Connect", NULL); - if (!url || !*url) { + if (!url || !*url) { // Not using the connection URL Hostname = GetStringCatInfo(g, "Host", "localhost"); Database = GetStringCatInfo(g, "Database", "*"); @@ -353,8 +358,12 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) Tabname = Name; } // endif am - if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) + if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) { + Read_Only = true; Isview = true; + } else if (CheckSelf(g, Hc->GetTable()->s, Hostname, Database, + Tabname, Srcdef, Portnumber)) + return true; // Used for Update and Delete Qrystr = GetStringCatInfo(g, "Query_String", "?"); @@ -436,7 +445,7 @@ TDBMYSQL::TDBMYSQL(PGLOBAL g, PTDBMY tdbp) : TDBASE(tdbp) Tabname = tdbp->Tabname; Srcdef = tdbp->Srcdef; User = tdbp->User; - Pwd = tdbp->Pwd; + Pwd = tdbp->Pwd; Qrystr = tdbp->Qrystr; Quoted = tdbp->Quoted; Port = tdbp->Port; @@ -603,9 +612,7 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g) else qlen += colp->GetLength(); - } // endif Prep - - if (Prep) + } else // Prep strcat(valist, "?"); } // endfor colp @@ -647,7 +654,7 @@ int TDBMYSQL::MakeCommand(PGLOBAL g) // Make a lower case copy of the originale query - qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1); + qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 5); strlwr(strcpy(qrystr, Qrystr)); // Check whether the table name is equal to a keyword @@ -667,6 +674,7 @@ int TDBMYSQL::MakeCommand(PGLOBAL g) strcat(Query, Tabname); strcat(Query, Qrystr + (p - qrystr) + strlen(name)); + strlwr(strcpy(qrystr, Query)); } else { sprintf(g->Message, "Cannot use this %s command", (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE"); @@ -740,33 +748,50 @@ int TDBMYSQL::MakeDelete(PGLOBAL g) #endif // 0 /***********************************************************************/ -/* XCV GetMaxSize: returns the maximum number of rows in the table. */ +/* MYSQL Cardinality: returns the number of rows in the table. */ /***********************************************************************/ -int TDBMYSQL::GetMaxSize(PGLOBAL g) - { - if (MaxSize < 0) { -#if 0 - if (MakeSelect(g)) - return -2; +int TDBMYSQL::Cardinality(PGLOBAL g) +{ + if (!g) + return (Mode == MODE_ANY && !Srcdef) ? 
1 : 0; - if (!Myc.Connected()) { - if (Myc.Open(g, Host, Database, User, Pwd, Port)) - return -1; + if (Cardinal < 0 && Mode == MODE_ANY && !Srcdef && xinfo) { + // Info command, we must return the exact table row number + char query[96]; + MYSQLC myc; - } // endif connected + if (myc.Open(g, Host, Database, User, Pwd, Port)) + return -1; - if ((MaxSize = Myc.GetResultSize(g, Query)) < 0) { - Myc.Close(); - return -3; - } // endif MaxSize + strcpy(query, "SELECT COUNT(*) FROM "); - // FIXME: Columns should be known when Info calls GetMaxSize - if (!Columns) - Query = NULL; // Must be remade when columns are known -#endif // 0 + if (Quoted > 0) + strcat(strcat(strcat(query, "`"), Tabname), "`"); + else + strcat(query, Tabname); + + Cardinal = myc.GetTableSize(g, query); + myc.Close(); + } else + Cardinal = 10; // To make MySQL happy + + return Cardinal; +} // end of Cardinality + +/***********************************************************************/ +/* MYSQL GetMaxSize: returns the maximum number of rows in the table. */ +/***********************************************************************/ +int TDBMYSQL::GetMaxSize(PGLOBAL g) + { + if (MaxSize < 0) { + if (Mode == MODE_DELETE) + // Return 0 in mode DELETE in case of delete all. + MaxSize = 0; + else if (!Cardinality(NULL)) + MaxSize = 10; // To make MySQL happy + else if ((MaxSize = Cardinality(g)) < 0) + MaxSize = 12; // So we can see an error occured - // Return 0 in mode DELETE in case of delete all. - MaxSize = (Mode == MODE_DELETE) ? 0 : 10; // To make MySQL happy } // endif MaxSize return MaxSize; @@ -881,11 +906,12 @@ bool TDBMYSQL::OpenDB(PGLOBAL g) } // endif MakeInsert if (m_Rc != RC_FX) { + int rc __attribute__((unused)); char cmd[64]; int w; sprintf(cmd, "ALTER TABLE `%s` DISABLE KEYS", Tabname); - m_Rc = Myc.ExecSQL(g, cmd, &w); + rc = Myc.ExecSQL(g, cmd, &w); // may fail for some engines } // endif m_Rc } else @@ -1012,7 +1038,8 @@ bool TDBMYSQL::ReadKey(PGLOBAL g, OPVAL op, const void *key, int len) { int oldlen = strlen(Query); - if (op == OP_NEXT) + if (!key || op == OP_NEXT || + Mode == MODE_UPDATE || Mode == MODE_DELETE) return false; else if (op == OP_FIRST) { if (To_CondFil) @@ -1031,7 +1058,7 @@ bool TDBMYSQL::ReadKey(PGLOBAL g, OPVAL op, const void *key, int len) m_Rc = Myc.ExecSQL(g, Query); Query[oldlen] = 0; - return false; + return (m_Rc == RC_FX) ? true : false; } // end of ReadKey /***********************************************************************/ @@ -1081,13 +1108,13 @@ int TDBMYSQL::WriteDB(PGLOBAL g) // Make the Insert command value list for (PCOL colp = Columns; colp; colp = colp->GetNext()) { if (!colp->GetValue()->IsNull()) { - if (colp->GetResultType() == TYPE_STRING || + if (colp->GetResultType() == TYPE_STRING || colp->GetResultType() == TYPE_DATE) strcat(Qbuf, "'"); strcat(Qbuf, colp->GetValue()->GetCharString(buf)); - if (colp->GetResultType() == TYPE_STRING || + if (colp->GetResultType() == TYPE_STRING || colp->GetResultType() == TYPE_DATE) strcat(Qbuf, "'"); @@ -1109,7 +1136,7 @@ int TDBMYSQL::DeleteDB(PGLOBAL g, int irc) { if (irc == RC_FX) // Send the DELETE (all) command to the remote table - return (SendCommand(g) == RC_FX) ? RC_FX : RC_OK; + return (SendCommand(g) == RC_FX) ? 
RC_FX : RC_OK; else return RC_OK; // Ignore @@ -1129,7 +1156,7 @@ void TDBMYSQL::CloseDB(PGLOBAL g) dup->Step = "Enabling indexes"; sprintf(cmd, "ALTER TABLE `%s` ENABLE KEYS", Tabname); Myc.m_Rows = -1; // To execute the query - m_Rc = Myc.ExecSQL(g, cmd, &w); + m_Rc = Myc.ExecSQL(g, cmd, &w); // May fail for some engines } // endif m_Rc Myc.Close(); @@ -1178,6 +1205,7 @@ MYSQLCOL::MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am) char v = (!strcmp(chset, "binary")) ? 'B' : 0; Name = fld->name; + Opt = 0; Precision = Long = fld->length; Buf_Type = MYSQLtoPLG(fld->type, &v); strcpy(Format.Type, GetFormatType(Buf_Type)); @@ -1376,7 +1404,7 @@ void MYSQLCOL::WriteColumn(PGLOBAL g) /***********************************************************************/ /* Implementation of the TDBMYEXC class. */ /***********************************************************************/ -TDBMYEXC::TDBMYEXC(PMYDEF tdp) : TDBMYSQL(tdp) +TDBMYEXC::TDBMYEXC(PMYDEF tdp) : TDBMYSQL(tdp) { Cmdlist = NULL; Cmdcol = NULL; @@ -1528,7 +1556,7 @@ int TDBMYEXC::ReadDB(PGLOBAL g) if (Cmdlist) { // Process query to send int rc; - + do { Query = Cmdlist->Cmd; @@ -1548,7 +1576,7 @@ int TDBMYEXC::ReadDB(PGLOBAL g) case RC_INFO: Shw = true; } // endswitch rc - + Cmdlist = (Nerr > Mxr) ? NULL : Cmdlist->Next; } while (rc == RC_INFO); @@ -1645,11 +1673,11 @@ void MYXCOL::WriteColumn(PGLOBAL g) /***********************************************************************/ TDBMCL::TDBMCL(PMYDEF tdp) : TDBCAT(tdp) { - Host = tdp->Hostname; - Db = tdp->Database; - Tab = tdp->Tabname; - User = tdp->Username; - Pwd = tdp->Password; + Host = tdp->Hostname; + Db = tdp->Database; + Tab = tdp->Tabname; + User = tdp->Username; + Pwd = tdp->Password; Port = tdp->Portnumber; } // end of TDBMCL constructor @@ -1659,4 +1687,4 @@ TDBMCL::TDBMCL(PMYDEF tdp) : TDBCAT(tdp) PQRYRES TDBMCL::GetResult(PGLOBAL g) { return MyColumns(g, NULL, Host, Db, User, Pwd, Tab, NULL, Port, false); - } // end of GetResult + } // end of GetResult diff --git a/storage/connect/tabmysql.h b/storage/connect/tabmysql.h index 96991fb14c1..68cf453a9e6 100644 --- a/storage/connect/tabmysql.h +++ b/storage/connect/tabmysql.h @@ -1,4 +1,4 @@ -// TDBMYSQL.H Olivier Bertrand 2007-2013 +// TDBMYSQL.H Olivier Bertrand 2007-2014 #include "myconn.h" // MySQL connection declares typedef class MYSQLDEF *PMYDEF; @@ -81,7 +81,7 @@ class TDBMYSQL : public TDBASE { // Methods virtual PTDB CopyOne(PTABS t); - virtual int GetAffectedRows(void) {return AftRows;} +//virtual int GetAffectedRows(void) {return AftRows;} virtual int GetRecpos(void) {return N;} virtual int GetProgMax(PGLOBAL g); virtual void ResetDB(void) {N = 0;} @@ -92,6 +92,7 @@ class TDBMYSQL : public TDBASE { // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); + virtual int Cardinality(PGLOBAL g); virtual int GetMaxSize(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp index 65226c9e36f..023d7efa708 100644 --- a/storage/connect/tabodbc.cpp +++ b/storage/connect/tabodbc.cpp @@ -1,11 +1,11 @@ /************* Tabodbc C++ Program Source Code File (.CPP) *************/ /* PROGRAM NAME: TABODBC */ /* ------------- */ -/* Version 2.7 */ +/* Version 2.8 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2000-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2000-2014 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -76,6 +76,7 @@ #include 
"sql_string.h" extern "C" int trace; +extern bool xinfo; /***********************************************************************/ /* DB static variables. */ @@ -100,7 +101,13 @@ ODBCDEF::ODBCDEF(void) /***********************************************************************/ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) { - Desc = Connect = GetStringCatInfo(g, "Connect", ""); + Desc = Connect = GetStringCatInfo(g, "Connect", NULL); + + if (!Connect && !Catfunc) { + sprintf(g->Message, "Missing connection for ODBC table %s", Name); + return true; + } // endif Connect + Tabname = GetStringCatInfo(g, "Name", (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name); Tabname = GetStringCatInfo(g, "Tabname", Tabname); @@ -108,7 +115,10 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) Tabschema = GetStringCatInfo(g, "Schema", Tabschema); Tabcat = GetStringCatInfo(g, "Qualifier", NULL); Tabcat = GetStringCatInfo(g, "Catalog", Tabcat); - Srcdef = GetStringCatInfo(g, "Srcdef", NULL); + + if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) + Read_Only = true; + Qrystr = GetStringCatInfo(g, "Query_String", "?"); Sep = GetStringCatInfo(g, "Separator", NULL); Catver = GetIntCatInfo("Catver", 2); @@ -655,40 +665,58 @@ void TDBODBC::ResetSize(void) } // end of ResetSize /***********************************************************************/ +/* ODBC Cardinality: returns table size in number of rows. */ +/***********************************************************************/ +int TDBODBC::Cardinality(PGLOBAL g) + { + if (!g) + return (Mode == MODE_ANY && !Srcdef) ? 1 : 0; + + if (Cardinal < 0 && Mode == MODE_ANY && !Srcdef && xinfo) { + // Info command, we must return the exact table row number + char qry[96], tbn[64]; + ODBConn *ocp = new(g) ODBConn(g, this); + + if (ocp->Open(Connect, Options) < 1) + return -1; + + // Table name can be encoded in UTF-8 + Decode(TableName, tbn, sizeof(tbn)); + strcpy(qry, "SELECT COUNT(*) FROM "); + + if (Quote) + strcat(strcat(strcat(qry, Quote), tbn), Quote); + else + strcat(qry, tbn); + + // Allocate a Count(*) column (must not use the default constructor) + Cnp = new(g) ODBCCOL; + Cnp->InitValue(g); + + if ((Cardinal = ocp->GetResultSize(qry, Cnp)) < 0) + return -3; + + ocp->Close(); + } else + Cardinal = 10; // To make MySQL happy + + return Cardinal; + } // end of Cardinality + +/***********************************************************************/ /* ODBC GetMaxSize: returns table size estimate in number of lines. */ /***********************************************************************/ int TDBODBC::GetMaxSize(PGLOBAL g) { if (MaxSize < 0) { - // Make MariaDB happy - MaxSize = (Mode == MODE_DELETE) ? 0 : 10; -#if 0 - // This is unuseful and takes time - if (Srcdef) { - // Return a reasonable guess - MaxSize = 100; - return MaxSize; - } // endif Srcdef - - if (!Ocp) - Ocp = new(g) ODBConn(g, this); - - if (!Ocp->IsOpen()) - if (Ocp->Open(Connect, Options) < 1) - return -1; - - if (!Count && !(Count = MakeSQL(g, true))) - return -2; - - if (!Cnp) { - // Allocate a Count(*) column (must not use the default constructor) - Cnp = new(g) ODBCCOL; - Cnp->InitValue(g); - } // endif Cnp - - if ((MaxSize = Ocp->GetResultSize(Count, Cnp)) < 0) - return -3; -#endif // 0 + if (Mode == MODE_DELETE) + // Return 0 in mode DELETE in case of delete all. 
+ MaxSize = 0; + else if (!Cardinality(NULL)) + MaxSize = 10; // To make MySQL happy + else if ((MaxSize = Cardinality(g)) < 0) + MaxSize = 12; // So we can see an error occured + } // endif MaxSize return MaxSize; diff --git a/storage/connect/tabodbc.h b/storage/connect/tabodbc.h index 5db8cbb8cff..360f52c9d21 100644 --- a/storage/connect/tabodbc.h +++ b/storage/connect/tabodbc.h @@ -88,14 +88,15 @@ class TDBODBC : public TDBASE { virtual PSZ GetFile(PGLOBAL g); virtual void SetFile(PGLOBAL g, PSZ fn); virtual void ResetSize(void); - virtual int GetAffectedRows(void) {return AftRows;} +//virtual int GetAffectedRows(void) {return AftRows;} virtual PSZ GetServer(void) {return "ODBC";} virtual int Indexable(void) {return 2;} // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); - virtual int GetProgMax(PGLOBAL g); + virtual int Cardinality(PGLOBAL g); virtual int GetMaxSize(PGLOBAL g); + virtual int GetProgMax(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); virtual int WriteDB(PGLOBAL g); diff --git a/storage/connect/tabpivot.h b/storage/connect/tabpivot.h index c7248ee2e1d..25d139e895f 100644 --- a/storage/connect/tabpivot.h +++ b/storage/connect/tabpivot.h @@ -105,6 +105,7 @@ class TDBPIVOT : public TDBPRX { // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); + virtual int Cardinality(PGLOBAL g) {return (g) ? 10 : 0;} virtual int GetMaxSize(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); diff --git a/storage/connect/tabsys.cpp b/storage/connect/tabsys.cpp index e8ea7f4e43a..ae92c0771b6 100644 --- a/storage/connect/tabsys.cpp +++ b/storage/connect/tabsys.cpp @@ -1,9 +1,9 @@ /************* TabSys C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABSYS */ /* ------------- */ -/* Version 2.2 */ +/* Version 2.3 */ /* */ -/* Author Olivier BERTRAND 2004-2013 */ +/* Author Olivier BERTRAND 2004-2014 */ /* */ /* This program are the INI/CFG tables classes. */ /***********************************************************************/ @@ -203,18 +203,35 @@ PCOL TDBINI::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) } // end of MakeCol /***********************************************************************/ -/* INI GetMaxSize: returns the number of sections in the INI file. */ +/* INI Cardinality: returns the number of sections in the INI file. */ /***********************************************************************/ -int TDBINI::GetMaxSize(PGLOBAL g) +int TDBINI::Cardinality(PGLOBAL g) { - if (MaxSize < 0 && GetSeclist(g)) { + if (!g) + return 1; + + if (Cardinal < 0) { // Count the number of sections from the section list - char *p; + char *p = GetSeclist(g); + + Cardinal = 0; + + if (p) + for (; *p; p += (strlen(p) + 1)) + Cardinal++; - for (MaxSize = 0, p = Seclist; *p; p += (strlen(p) + 1)) - MaxSize++; + } // endif Cardinal - } // endif MaxSize + return Cardinal; + } // end of Cardinality + +/***********************************************************************/ +/* INI GetMaxSize: returns the table cardinality. 
*/ +/***********************************************************************/ +int TDBINI::GetMaxSize(PGLOBAL g) + { + if (MaxSize < 0) + MaxSize = Cardinality(g); return MaxSize; } // end of GetMaxSize @@ -609,22 +626,28 @@ PCOL TDBXIN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) } // end of MakeCol /***********************************************************************/ -/* XIN GetMaxSize: returns the number of sections in the XIN file. */ +/* XIN Cardinality: returns the number of keys in the XIN file. */ /***********************************************************************/ -int TDBXIN::GetMaxSize(PGLOBAL g) +int TDBXIN::Cardinality(PGLOBAL g) { - if (MaxSize < 0 && GetSeclist(g)) { + if (!g) + return 1; + + if (Cardinal < 0) { // Count the number of keys from the section list - char *p, *k; + char *k, *p = GetSeclist(g); - for (MaxSize = 0, p = Seclist; *p; p += (strlen(p) + 1)) - for (k = GetKeylist(g, p); *k; k += (strlen(k) + 1)) - MaxSize++; + Cardinal = 0; - } // endif MaxSize + if (p) + for (; *p; p += (strlen(p) + 1)) + for (k = GetKeylist(g, p); *k; k += (strlen(k) + 1)) + Cardinal++; - return MaxSize; - } // end of GetMaxSize + } // endif Cardinal + + return Cardinal; + } // end of Cardinality /***********************************************************************/ /* Record position is Section+Key. */ @@ -633,7 +656,7 @@ int TDBXIN::GetRecpos(void) { union { short X[2]; // Section and Key offsets - int Xpos; // File position + int Xpos; // File position }; // end of union X[0] = (short)(Section - Seclist); @@ -648,7 +671,7 @@ bool TDBXIN::SetRecpos(PGLOBAL g, int recpos) { union { short X[2]; // Section and Key offsets - int Xpos; // File position + int Xpos; // File position }; // end of union Xpos = recpos; diff --git a/storage/connect/tabsys.h b/storage/connect/tabsys.h index 38b71d62ac4..aa45c260bc2 100644 --- a/storage/connect/tabsys.h +++ b/storage/connect/tabsys.h @@ -1,181 +1,182 @@ -/*************** TabSys H Declares Source Code File (.H) ***************/ -/* Name: TABSYS.H Version 2.2 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2013 */ -/* */ -/* This file contains the XDB system tables classes declares. */ -/***********************************************************************/ -typedef class INIDEF *PINIDEF; -typedef class TDBINI *PTDBINI; -typedef class INICOL *PINICOL; -typedef class TDBXIN *PTDBXIN; -typedef class XINCOL *PXINCOL; - -/* --------------------------- INI classes --------------------------- */ - -/***********************************************************************/ -/* INI, XDB and XCL tables. */ -/***********************************************************************/ -class DllExport INIDEF : public TABDEF { /* INI table description */ - friend class TDBINI; - friend class TDBXIN; - friend class TDBXTB; - friend class TDBRTB; - friend class TDBXCL; - public: - // Constructor - INIDEF(void); - - // Implementation - virtual const char *GetType(void) {return "INI";} - - // Methods - virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff); - virtual PTDB GetTable(PGLOBAL g, MODE m); - - protected: - // Members - char *Fn; /* Path/Name of corresponding file */ - char *Xname; /* The eventual table name */ - char Layout; /* R: Row, C: Column */ - int Ln; /* Length of section list buffer */ - }; // end of INIDEF - -/***********************************************************************/ -/* This is the class declaration for the INI tables. */ -/* These are tables represented by a INI like file. 
*/ -/***********************************************************************/ -class TDBINI : public TDBASE { - friend class INICOL; - public: - // Constructor - TDBINI(PINIDEF tdp); - TDBINI(PTDBINI tdbp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_INI;} - virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBINI(this);} - - // Methods - virtual PTDB CopyOne(PTABS t); - virtual int GetRecpos(void) {return N;} - virtual int GetProgCur(void) {return N;} - virtual int GetAffectedRows(void) {return 0;} - virtual PSZ GetFile(PGLOBAL g) {return Ifile;} - virtual void SetFile(PGLOBAL g, PSZ fn) {Ifile = fn;} - virtual void ResetDB(void) {Seclist = Section = NULL; N = 0;} - virtual void ResetSize(void) {MaxSize = -1; Seclist = NULL;} - virtual int RowNumber(PGLOBAL g, bool b = false) {return N;} - char *GetSeclist(PGLOBAL g); - - // Database routines - virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); - virtual int GetMaxSize(PGLOBAL g); - virtual bool OpenDB(PGLOBAL g); - virtual int ReadDB(PGLOBAL g); - virtual int WriteDB(PGLOBAL g); - virtual int DeleteDB(PGLOBAL g, int irc); - virtual void CloseDB(PGLOBAL g); - - protected: - // Members - char *Ifile; // The INI file - char *Seclist; // The section list - char *Section; // The current section - int Seclen; // Length of seclist buffer - int N; // The current section index - }; // end of class TDBINI - -/***********************************************************************/ -/* Class INICOL: XDB table access method column descriptor. */ -/***********************************************************************/ -class INICOL : public COLBLK { - public: - // Constructors - INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI"); - INICOL(INICOL *colp, PTDB tdbp); // Constructor used in copy process - - // Implementation - virtual int GetAmType(void) {return TYPE_AM_INI;} - virtual void SetTo_Val(PVAL valp) {To_Val = valp;} - - // Methods - virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); - virtual void ReadColumn(PGLOBAL g); - virtual void WriteColumn(PGLOBAL g); - virtual void AllocBuf(PGLOBAL g); - - protected: - // Default constructor not to be used - INICOL(void) {} - - // Members - char *Valbuf; // To the key value buffer - int Flag; // Tells what set in value - int Long; // Buffer length - PVAL To_Val; // To value used for Update/Insert - }; // end of class INICOL - -/* --------------------------- XINI class ---------------------------- */ - -/***********************************************************************/ -/* This is the class declaration for the XINI tables. */ -/* These are tables represented by a INI like file */ -/* having 3 columns Section, Key, and Value. 
*/ -/***********************************************************************/ -class TDBXIN : public TDBINI { - friend class XINCOL; - public: - // Constructor - TDBXIN(PINIDEF tdp); - TDBXIN(PTDBXIN tdbp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_INI;} - virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBXIN(this);} - - // Methods - virtual PTDB CopyOne(PTABS t); - virtual int GetRecpos(void); - virtual bool SetRecpos(PGLOBAL g, int recpos); - virtual void ResetDB(void) - {Seclist = Section = Keycur = NULL; N = 0; Oldsec = -1;} - char *GetKeylist(PGLOBAL g, char *sec); - - // Database routines - virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); - virtual int GetMaxSize(PGLOBAL g); - virtual bool OpenDB(PGLOBAL g); - virtual int ReadDB(PGLOBAL g); - virtual int WriteDB(PGLOBAL g); - virtual int DeleteDB(PGLOBAL g, int irc); - - protected: - // Members - char *Keylist; // The key list - char *Keycur; // The current key - int Keylen; // Length of keylist buffer - short Oldsec; // Last current section - }; // end of class TDBXIN - -/***********************************************************************/ -/* Class XINCOL: XIN table access method column descriptor. */ -/***********************************************************************/ -class XINCOL : public INICOL { - public: - // Constructors - XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI"); - XINCOL(XINCOL *colp, PTDB tdbp); // Constructor used in copy process - - // Implementation - - // Methods - virtual void ReadColumn(PGLOBAL g); - virtual void WriteColumn(PGLOBAL g); - - protected: - // Default constructor not to be used - XINCOL(void) {} - - // Members - }; // end of class XINICOL +/*************** TabSys H Declares Source Code File (.H) ***************/
+/* Name: TABSYS.H Version 2.3 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2004-2014 */
+/* */
+/* This file contains the XDB system tables classes declares. */
+/***********************************************************************/
+typedef class INIDEF *PINIDEF;
+typedef class TDBINI *PTDBINI;
+typedef class INICOL *PINICOL;
+typedef class TDBXIN *PTDBXIN;
+typedef class XINCOL *PXINCOL;
+
+/* --------------------------- INI classes --------------------------- */
+
+/***********************************************************************/
+/* INI, XDB and XCL tables. */
+/***********************************************************************/
+class DllExport INIDEF : public TABDEF { /* INI table description */
+ friend class TDBINI;
+ friend class TDBXIN;
+ friend class TDBXTB;
+ friend class TDBRTB;
+ friend class TDBXCL;
+ public:
+ // Constructor
+ INIDEF(void);
+
+ // Implementation
+ virtual const char *GetType(void) {return "INI";}
+
+ // Methods
+ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
+ virtual PTDB GetTable(PGLOBAL g, MODE m);
+
+ protected:
+ // Members
+ char *Fn; /* Path/Name of corresponding file */
+ char *Xname; /* The eventual table name */
+ char Layout; /* R: Row, C: Column */
+ int Ln; /* Length of section list buffer */
+ }; // end of INIDEF
+
+/***********************************************************************/
+/* This is the class declaration for the INI tables. */
+/* These are tables represented by an INI-like file.                 */
+/***********************************************************************/
+class TDBINI : public TDBASE {
+ friend class INICOL;
+ public:
+ // Constructor
+ TDBINI(PINIDEF tdp);
+ TDBINI(PTDBINI tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_INI;}
+ virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBINI(this);}
+
+ // Methods
+ virtual PTDB CopyOne(PTABS t);
+ virtual int GetRecpos(void) {return N;}
+ virtual int GetProgCur(void) {return N;}
+//virtual int GetAffectedRows(void) {return 0;}
+ virtual PSZ GetFile(PGLOBAL g) {return Ifile;}
+ virtual void SetFile(PGLOBAL g, PSZ fn) {Ifile = fn;}
+ virtual void ResetDB(void) {Seclist = Section = NULL; N = 0;}
+ virtual void ResetSize(void) {MaxSize = -1; Seclist = NULL;}
+ virtual int RowNumber(PGLOBAL g, bool b = false) {return N;}
+ char *GetSeclist(PGLOBAL g);
+
+ // Database routines
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+ virtual int Cardinality(PGLOBAL g);
+ virtual int GetMaxSize(PGLOBAL g);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual int WriteDB(PGLOBAL g);
+ virtual int DeleteDB(PGLOBAL g, int irc);
+ virtual void CloseDB(PGLOBAL g);
+
+ protected:
+ // Members
+ char *Ifile; // The INI file
+ char *Seclist; // The section list
+ char *Section; // The current section
+ int Seclen; // Length of seclist buffer
+ int N; // The current section index
+ }; // end of class TDBINI
+
+/***********************************************************************/
+/* Class INICOL: XDB table access method column descriptor. */
+/***********************************************************************/
+class INICOL : public COLBLK {
+ public:
+ // Constructors
+ INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI");
+ INICOL(INICOL *colp, PTDB tdbp); // Constructor used in copy process
+
+ // Implementation
+ virtual int GetAmType(void) {return TYPE_AM_INI;}
+ virtual void SetTo_Val(PVAL valp) {To_Val = valp;}
+
+ // Methods
+ virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
+ virtual void AllocBuf(PGLOBAL g);
+
+ protected:
+ // Default constructor not to be used
+ INICOL(void) {}
+
+ // Members
+ char *Valbuf; // To the key value buffer
+ int Flag; // Tells what set in value
+ int Long; // Buffer length
+ PVAL To_Val; // To value used for Update/Insert
+ }; // end of class INICOL
+
+/* --------------------------- XINI class ---------------------------- */
+
+/***********************************************************************/
+/* This is the class declaration for the XINI tables. */
+/* These are tables represented by an INI-like file                  */
+/* having 3 columns Section, Key, and Value. */
+/***********************************************************************/
+class TDBXIN : public TDBINI {
+ friend class XINCOL;
+ public:
+ // Constructor
+ TDBXIN(PINIDEF tdp);
+ TDBXIN(PTDBXIN tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_INI;}
+ virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBXIN(this);}
+
+ // Methods
+ virtual PTDB CopyOne(PTABS t);
+ virtual int GetRecpos(void);
+ virtual bool SetRecpos(PGLOBAL g, int recpos);
+ virtual void ResetDB(void)
+ {Seclist = Section = Keycur = NULL; N = 0; Oldsec = -1;}
+ char *GetKeylist(PGLOBAL g, char *sec);
+
+ // Database routines
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+ virtual int Cardinality(PGLOBAL g);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual int WriteDB(PGLOBAL g);
+ virtual int DeleteDB(PGLOBAL g, int irc);
+
+ protected:
+ // Members
+ char *Keylist; // The key list
+ char *Keycur; // The current key
+ int Keylen; // Length of keylist buffer
+ short Oldsec; // Last current section
+ }; // end of class TDBXIN
+
+/***********************************************************************/
+/* Class XINCOL: XIN table access method column descriptor. */
+/***********************************************************************/
+class XINCOL : public INICOL {
+ public:
+ // Constructors
+ XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI");
+ XINCOL(XINCOL *colp, PTDB tdbp); // Constructor used in copy process
+
+ // Implementation
+
+ // Methods
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
+
+ protected:
+ // Default constructor not to be used
+ XINCOL(void) {}
+
+ // Members
+ }; // end of class XINCOL
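The new TDBINI::Cardinality and TDBXIN::Cardinality shown above both walk the buffer returned by GetSeclist/GetKeylist, which packs the names back to back as NUL-terminated strings ending with an empty string. A minimal standalone sketch of that walk (plain C++, independent of the CONNECT PGLOBAL/TDB machinery; the helper name is illustrative, not part of the patch):

    #include <cstring>

    // Count the entries of a packed list such as "sec1\0sec2\0sec3\0\0",
    // the layout produced by GetSeclist()/GetKeylist() in tabsys.cpp.
    static int CountPackedList(const char *list)
    {
      int n = 0;

      if (list)
        for (const char *p = list; *p; p += strlen(p) + 1)
          n++;

      return n;
    }

Cardinality caches this count in the new Cardinal member, and GetMaxSize then simply returns the cached value instead of recomputing it.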
diff --git a/storage/connect/tabtbl.cpp b/storage/connect/tabtbl.cpp index 0aeeb0b9d8d..f5a516ad1d0 100644 --- a/storage/connect/tabtbl.cpp +++ b/storage/connect/tabtbl.cpp @@ -350,7 +350,34 @@ bool TDBTBL::TestFil(PGLOBAL g, PCFIL filp, PTABLE tabp) } // end of TestFil /***********************************************************************/ -/* Sum up the sizes of all sub-tables. */ +/* Sum up the cardinality of all sub-tables. */ +/***********************************************************************/ +int TDBTBL::Cardinality(PGLOBAL g) + { + if (Cardinal < 0) { + int tsz; + + if (!Tablist && InitTableList(g)) + return 0; // Cannot be calculated at this stage + + Cardinal = 0; + + for (PTABLE tabp = Tablist; tabp; tabp = tabp->GetNext()) { + if ((tsz = tabp->GetTo_Tdb()->Cardinality(g)) < 0) { + Cardinal = -1; + return tsz; + } // endif mxsz + + Cardinal += tsz; + } // endfor i + + } // endif Cardinal + + return Cardinal; + } // end of Cardinality + +/***********************************************************************/ +/* Sum up the maximum sizes of all sub-tables. */ /***********************************************************************/ int TDBTBL::GetMaxSize(PGLOBAL g) { @@ -435,7 +462,7 @@ bool TDBTBL::OpenDB(PGLOBAL g) if ((CurTable = Tablist)) { Tdbp = (PTDBASE)CurTable->GetTo_Tdb(); - Tdbp->SetMode(Mode); +// Tdbp->SetMode(Mode); // Tdbp->ResetDB(); // Tdbp->ResetSize(); @@ -685,7 +712,7 @@ bool TDBTBM::OpenDB(PGLOBAL g) /*********************************************************************/ if ((CurTable = Tablist)) { Tdbp = (PTDBASE)CurTable->GetTo_Tdb(); - Tdbp->SetMode(Mode); +// Tdbp->SetMode(Mode); // Check and initialize the subtable columns for (PCOL cp = Columns; cp; cp = cp->GetNext()) diff --git a/storage/connect/tabtbl.h b/storage/connect/tabtbl.h index fc35179f2ea..8bf440985ea 100644 --- a/storage/connect/tabtbl.h +++ b/storage/connect/tabtbl.h @@ -78,6 +78,7 @@ class DllExport TDBTBL : public TDBPRX { // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); + virtual int Cardinality(PGLOBAL g); virtual int GetMaxSize(PGLOBAL g); virtual int RowNumber(PGLOBAL g, bool b = FALSE); virtual PCOL InsertSpecialColumn(PGLOBAL g, PCOL scp); diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp index 4b9046e08d1..f4a8f2ee470 100644 --- a/storage/connect/tabutil.cpp +++ b/storage/connect/tabutil.cpp @@ -313,7 +313,7 @@ bool PRXDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) if (!(tab = GetStringCatInfo(g, "Tabname", NULL))) { if (!def) { strcpy(g->Message, "Missing object table definition"); - return TRUE; + return true; } else tab = "Noname"; @@ -327,7 +327,7 @@ bool PRXDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) Tablep = new(g) XTAB(tab, def); Tablep->SetQualifier(db); - return FALSE; + return false; } // end of DefineAM /***********************************************************************/ @@ -352,6 +352,28 @@ TDBPRX::TDBPRX(PPRXDEF tdp) : TDBASE(tdp) Tdbp = NULL; // The object table } // end of TDBPRX constructor +TDBPRX::TDBPRX(PGLOBAL g, PTDBPRX tdbp) : TDBASE(tdbp) + { + Tdbp = tdbp->Tdbp; + } // end of TDBPRX copy constructor + +// Method +PTDB TDBPRX::CopyOne(PTABS t) + { + PTDB tp; + PPRXCOL cp1, cp2; + PGLOBAL g = t->G; + + tp = new(g) TDBPRX(g, this); + + for (cp1 = (PPRXCOL)Columns; cp1; cp1 = (PPRXCOL)cp1->GetNext()) { + cp2 = new(g) PRXCOL(cp1, tp); // Make a copy + NewPointer(t, cp1, cp2); + } // endfor cp1 + + return tp; + } // end of CopyOne + 
/***********************************************************************/ /* Get the PTDB of the sub-table. */ /***********************************************************************/ @@ -403,7 +425,7 @@ PTDBASE TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b) if (mysql) { #if defined(MYSQL_SUPPORT) // Access sub-table via MySQL API - if (!(tdbp= cat->GetTable(g, tabp, MODE_READ, "MYPRX"))) { + if (!(tdbp= cat->GetTable(g, tabp, Mode, "MYPRX"))) { char buf[MAX_STR]; strcpy(buf, g->Message); @@ -415,6 +437,9 @@ PTDBASE TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b) if (db) ((PTDBMY)tdbp)->SetDatabase(tabp->GetQualifier()); + if (Mode == MODE_UPDATE || Mode == MODE_DELETE) + tdbp->SetName(Name); // For Make_Command + #else // !MYSQL_SUPPORT sprintf(g->Message, "%s.%s is not a CONNECT table", db, tblp->Name); @@ -423,7 +448,7 @@ PTDBASE TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b) } else { // Sub-table is a CONNECT table tabp->Next = To_Table; // For loop checking - tdbp = cat->GetTable(g, tabp); + tdbp = cat->GetTable(g, tabp, Mode); } // endif mysql if (s) { @@ -456,11 +481,12 @@ bool TDBPRX::InitTable(PGLOBAL g) if (!Tdbp) { // Get the table description block of this table if (!(Tdbp = GetSubTable(g, ((PPRXDEF)To_Def)->Tablep))) - return TRUE; + return true; +// Tdbp->SetMode(Mode); } // endif Tdbp - return FALSE; + return false; } // end of InitTable /***********************************************************************/ @@ -472,6 +498,21 @@ PCOL TDBPRX::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) } // end of MakeCol /***********************************************************************/ +/* PRX Cardinality: returns the number of rows in the table. */ +/***********************************************************************/ +int TDBPRX::Cardinality(PGLOBAL g) + { + if (Cardinal < 0) { + if (InitTable(g)) + return 0; + + Cardinal = Tdbp->Cardinality(g); + } // endif MaxSize + + return Cardinal; + } // end of GetMaxSize + +/***********************************************************************/ /* PRX GetMaxSize: returns the maximum number of rows in the table. */ /***********************************************************************/ int TDBPRX::GetMaxSize(PGLOBAL g) @@ -507,32 +548,49 @@ bool TDBPRX::OpenDB(PGLOBAL g) return Tdbp->OpenDB(g); } // endif use - if (Mode != MODE_READ) { - /*******************************************************************/ - /* Currently XCOL tables cannot be modified. */ - /*******************************************************************/ - strcpy(g->Message, "PROXY tables are read only"); - return TRUE; - } // endif Mode - if (InitTable(g)) - return TRUE; + return true; + else if (Mode != MODE_READ && (Read_Only || Tdbp->IsReadOnly())) { + strcpy(g->Message, "Cannot modify a read only table"); + return true; + } // endif tp /*********************************************************************/ /* Check and initialize the subtable columns. */ /*********************************************************************/ for (PCOL cp = Columns; cp; cp = cp->GetNext()) - if (((PPRXCOL)cp)->Init(g)) - return TRUE; + if (((PPRXCOL)cp)->Init(g, Tdbp)) + return true; + + /*********************************************************************/ + /* In Update mode, the updated column blocks must be distinct from */ + /* the read column blocks. So make a copy of the TDB and allocate */ + /* its column blocks in mode write (required by XML tables). 
*/ + /*********************************************************************/ + if (Mode == MODE_UPDATE) { + PTDBASE utp; + + if (!(utp= (PTDBASE)Tdbp->Duplicate(g))) { + sprintf(g->Message, MSG(INV_UPDT_TABLE), Tdbp->GetName()); + return true; + } // endif tp + + for (PCOL cp = To_SetCols; cp; cp = cp->GetNext()) + if (((PPRXCOL)cp)->Init(g, utp)) + return true; + + } else if (Mode == MODE_DELETE) + Tdbp->SetNext(Next); /*********************************************************************/ /* Physically open the object table. */ /*********************************************************************/ if (Tdbp->OpenDB(g)) - return TRUE; + return true; + Tdbp->SetNext(NULL); Use = USE_OPEN; - return FALSE; + return false; } // end of OpenDB /***********************************************************************/ @@ -551,8 +609,7 @@ int TDBPRX::ReadDB(PGLOBAL g) /***********************************************************************/ int TDBPRX::WriteDB(PGLOBAL g) { - sprintf(g->Message, "%s tables are read only", To_Def->GetType()); - return RC_FX; + return Tdbp->WriteDB(g); } // end of WriteDB /***********************************************************************/ @@ -560,9 +617,7 @@ int TDBPRX::WriteDB(PGLOBAL g) /***********************************************************************/ int TDBPRX::DeleteDB(PGLOBAL g, int irc) { - sprintf(g->Message, "Delete not enabled for %s tables", - To_Def->GetType()); - return RC_FX; + return Tdbp->DeleteDB(g, irc); } // end of DeleteDB /***********************************************************************/ @@ -594,7 +649,7 @@ PRXCOL::PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) //strcpy(F_Date, cdp->F_Date); Colp = NULL; To_Val = NULL; - Pseudo = FALSE; + Pseudo = false; Colnum = cdp->GetOffset(); // If columns are retrieved by number if (trace) @@ -603,29 +658,48 @@ PRXCOL::PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) } // end of PRXCOL constructor /***********************************************************************/ +/* PRXCOL constructor used for copying columns. */ +/* tdbp is the pointer to the new table descriptor. */ +/***********************************************************************/ +PRXCOL::PRXCOL(PRXCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp) + { + Colp = col1->Colp; + To_Val = col1->To_Val; + Pseudo = col1->Pseudo; + Colnum = col1->Colnum; + } // end of PRXCOL copy constructor + +/***********************************************************************/ /* PRXCOL initialization routine. */ /* Look for the matching column in the object table. */ /***********************************************************************/ -bool PRXCOL::Init(PGLOBAL g) +bool PRXCOL::Init(PGLOBAL g, PTDBASE tp) { - PTDBPRX tdbp = (PTDBPRX)To_Tdb; + if (!tp) + tp = ((PTDBPRX)To_Tdb)->Tdbp; - if (!(Colp = tdbp->Tdbp->ColDB(g, Name, 0)) && Colnum) - Colp = tdbp->Tdbp->ColDB(g, NULL, Colnum); + if (!(Colp = tp->ColDB(g, Name, 0)) && Colnum) + Colp = tp->ColDB(g, NULL, Colnum); if (Colp) { + MODE mode = To_Tdb->GetMode(); + // May not have been done elsewhere Colp->InitValue(g); To_Val = Colp->GetValue(); + if (mode == MODE_INSERT || mode == MODE_UPDATE) + if (Colp->SetBuffer(g, Colp->GetValue(), true, false)) + return true; + // this may be needed by some tables (which?) 
Colp->SetColUse(ColUse); } else { - sprintf(g->Message, MSG(NO_MATCHING_COL), Name, tdbp->Tdbp->GetName()); - return TRUE; + sprintf(g->Message, MSG(NO_MATCHING_COL), Name, tp->GetName()); + return true; } // endif Colp - return FALSE; + return false; } // end of Init /***********************************************************************/ @@ -659,6 +733,21 @@ void PRXCOL::ReadColumn(PGLOBAL g) } // end of ReadColumn +/***********************************************************************/ +/* WriteColumn: */ +/***********************************************************************/ +void PRXCOL::WriteColumn(PGLOBAL g) + { + if (trace > 1) + htrc("PRX WriteColumn: name=%s\n", Name); + + if (Colp) { + To_Val->SetValue_pval(Value); + Colp->WriteColumn(g); + } // endif Colp + + } // end of WriteColumn + /* ---------------------------TDBTBC class --------------------------- */ /***********************************************************************/ diff --git a/storage/connect/tabutil.h b/storage/connect/tabutil.h index c87065befba..11f18be074a 100644 --- a/storage/connect/tabutil.h +++ b/storage/connect/tabutil.h @@ -57,13 +57,17 @@ class DllExport TDBPRX : public TDBASE { friend class PRXDEF; friend class PRXCOL; public: - // Constructor + // Constructors TDBPRX(PPRXDEF tdp); + TDBPRX(PGLOBAL g, PTDBPRX tdbp); // Implementation virtual AMT GetAmType(void) {return TYPE_AM_PRX;} + virtual PTDB Duplicate(PGLOBAL g) + {return (PTDB)new(g) TDBPRX(g, this);} // Methods + virtual PTDB CopyOne(PTABS t); virtual int GetRecpos(void) {return Tdbp->GetRecpos();} virtual void ResetDB(void) {Tdbp->ResetDB();} virtual int RowNumber(PGLOBAL g, bool b = FALSE); @@ -72,6 +76,7 @@ class DllExport TDBPRX : public TDBASE { // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); virtual bool InitTable(PGLOBAL g); + virtual int Cardinality(PGLOBAL g); virtual int GetMaxSize(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); @@ -97,15 +102,19 @@ class DllExport PRXCOL : public COLBLK { public: // Constructors PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "PRX"); + PRXCOL(PRXCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation - virtual int GetAmType(void) {return TYPE_AM_PRX;} + virtual int GetAmType(void) {return TYPE_AM_PRX;} // Methods - virtual void Reset(void); - virtual bool IsSpecial(void) {return Pseudo;} - virtual void ReadColumn(PGLOBAL g); - bool Init(PGLOBAL g); + virtual void Reset(void); + virtual bool IsSpecial(void) {return Pseudo;} + virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) + {return false;} + virtual void ReadColumn(PGLOBAL g); + virtual void WriteColumn(PGLOBAL g); + bool Init(PGLOBAL g, PTDBASE tp = NULL); protected: // Default constructor not to be used diff --git a/storage/connect/tabvct.cpp b/storage/connect/tabvct.cpp index 73dfef6a4d6..6d7059e2306 100644 --- a/storage/connect/tabvct.cpp +++ b/storage/connect/tabvct.cpp @@ -32,7 +32,7 @@ /***********************************************************************/ /***********************************************************************/ -/* Include relevant MariaDB header file. */ +/* Include relevant MariaDB header file. 
*/ /***********************************************************************/ #include "my_global.h" #if defined(WIN32) @@ -76,7 +76,8 @@ char *strerror(int num); #endif // UNIX -extern "C" int trace; +extern "C" int trace; +extern "C" USETEMP Use_Temp; /***********************************************************************/ /* Char VCT column blocks are right filled with blanks (blank = true) */ @@ -95,7 +96,10 @@ bool VCTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) { DOSDEF::DefineAM(g, "BIN", poff); - Estimate = GetIntCatInfo("Estimate", 0); + if ((Estimate = GetIntCatInfo("Estimate", 0))) + Elemt = MY_MIN(Elemt, Estimate); + + // Split treated as INT to get default value Split = GetIntCatInfo("Split", (Estimate) ? 0 : 1); Header = GetIntCatInfo("Header", 0); @@ -103,7 +107,7 @@ bool VCTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) if (Estimate && !Split && !Header) { char *fn = GetStringCatInfo(g, "Filename", "?"); - // No separate header file fo urbi tables + // No separate header file for urbi tables Header = (*fn == '?') ? 3 : 2; } // endif Estimate @@ -205,7 +209,7 @@ PTDB VCTDEF::GetTable(PGLOBAL g, MODE mode) // Mapping not used for insert (except for true VEC not split tables) // or when UseTemp is forced bool map = Mapped && (Estimate || mode != MODE_INSERT) && - !(PlgGetUser(g)->UseTemp == TMP_FORCE && + !(Use_Temp == TMP_FORCE && (mode == MODE_UPDATE || mode == MODE_DELETE)); PTXF txfp; PTDB tdbp; @@ -282,6 +286,15 @@ PCOL TDBVCT::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) } // end of MakeCol /***********************************************************************/ +/* VEC tables are not ready yet to use temporary files. */ +/***********************************************************************/ +bool TDBVCT::IsUsingTemp(PGLOBAL g) + { + // For developpers + return (Use_Temp == TMP_TEST); + } // end of IsUsingTemp + +/***********************************************************************/ /* VCT Access Method opening routine. */ /* New method now that this routine is called recursively (last table */ /* first in reverse order): index blocks are immediately linked to */ @@ -302,14 +315,19 @@ bool TDBVCT::OpenDB(PGLOBAL g) To_Kindex->Reset(); Txfp->Rewind(); + ResetBlockFilter(g); return false; } // endif Use /*********************************************************************/ /* Delete all is not handled using file mapping. */ /*********************************************************************/ - if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() == TYPE_AM_MAP) { - Txfp = new(g) VCTFAM((PVCTDEF)To_Def); + if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() == TYPE_AM_VMP) { + if (IsSplit()) + Txfp = new(g) VECFAM((PVCTDEF)To_Def); + else + Txfp = new(g) VCTFAM((PVCTDEF)To_Def); + Txfp->SetTdbp(this); } // endif Mode @@ -324,6 +342,11 @@ bool TDBVCT::OpenDB(PGLOBAL g) Use = USE_OPEN; // Do it now in case we are recursively called /*********************************************************************/ + /* Allocate the block filter tree if evaluation is possible. */ + /*********************************************************************/ + To_BlkFil = InitBlockFilter(g, To_Filter); + + /*********************************************************************/ /* Reset buffer access according to indexing and to mode. 
*/ /*********************************************************************/ Txfp->ResetBuffer(g); @@ -382,7 +405,7 @@ void TDBVCT::CloseDB(PGLOBAL g) To_Kindex = NULL; } // endif - Txfp->CloseTableFile(g); + Txfp->CloseTableFile(g, false); } // end of CloseDB // ------------------------ VCTCOL functions ---------------------------- diff --git a/storage/connect/tabvct.h b/storage/connect/tabvct.h index 7dc416a5779..8ad3c8e21be 100644 --- a/storage/connect/tabvct.h +++ b/storage/connect/tabvct.h @@ -20,12 +20,13 @@ typedef class VCTCOL *PVCTCOL; /* VCT table. */ /***********************************************************************/ class DllExport VCTDEF : public DOSDEF { /* Logical table description */ + friend class TDBVCT; friend class VCTFAM; friend class VECFAM; friend class VMPFAM; public: // Constructor - VCTDEF(void) {Split = Estimate = Header = 0;} + VCTDEF(void) {Split = false; Estimate = Header = 0;} // Implementation virtual const char *GetType(void) {return "VCT";} @@ -39,9 +40,9 @@ class DllExport VCTDEF : public DOSDEF { /* Logical table description */ int MakeFnPattern(char *fpat); // Members - int Split; /* Columns in separate files */ + bool Split; /* Columns in separate files */ int Estimate; /* Estimated maximum size of table */ - int Header; /* 0: no, 1: separate, 2: in data file */ + int Header; /* 0: no, 1: separate, 2: in data file */ }; // end of VCTDEF /***********************************************************************/ @@ -64,9 +65,11 @@ class DllExport TDBVCT : public TDBFIX { virtual AMT GetAmType(void) {return TYPE_AM_VCT;} virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBVCT(g, this);} + bool IsSplit(void) {return ((VCTDEF*)To_Def)->Split;} // Methods virtual PTDB CopyOne(PTABS t); + virtual bool IsUsingTemp(PGLOBAL g); // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); @@ -101,7 +104,7 @@ class DllExport VCTCOL : public DOSCOL { virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); - virtual void SetOk(void); + virtual void SetOk(void); protected: virtual void ReadBlock(PGLOBAL g); diff --git a/storage/connect/tabwmi.h b/storage/connect/tabwmi.h index 8ff5262941e..6f25c0de258 100644 --- a/storage/connect/tabwmi.h +++ b/storage/connect/tabwmi.h @@ -1,150 +1,151 @@ -// TABWMI.H Olivier Bertrand 2012 -// WMI: Virtual table to Get WMI information -#define _WIN32_DCOM -#include <wbemidl.h> -# pragma comment(lib, "wbemuuid.lib") -#include <iostream> -using namespace std; -#include <comdef.h> - -/***********************************************************************/ -/* Definitions. */ -/***********************************************************************/ -typedef class WMIDEF *PWMIDEF; -typedef class TDBWMI *PTDBWMI; -typedef class WMICOL *PWMICOL; -typedef class TDBWCL *PTDBWCL; -typedef class WCLCOL *PWCLCOL; - -/***********************************************************************/ -/* Structure used by WMI column info functions. */ -/***********************************************************************/ -typedef struct _WMIutil { - IWbemServices *Svc; - IWbemClassObject *Cobj; -} WMIUTIL, *PWMIUT; - -/***********************************************************************/ -/* Functions used externally. 
*/ -/***********************************************************************/ -PQRYRES WMIColumns(PGLOBAL g, char *nsp, char *cls, bool info); - -/* -------------------------- WMI classes ---------------------------- */ - -/***********************************************************************/ -/* WMI: Virtual table to get the WMI information. */ -/***********************************************************************/ -class WMIDEF : public TABDEF { /* Logical table description */ - friend class TDBWMI; - friend class TDBWCL; - friend class TDBWCX; - public: - // Constructor - WMIDEF(void) {Pseudo = 3; Nspace = NULL; Wclass = NULL; Ems = 0;} - - // Implementation - virtual const char *GetType(void) {return "WMI";} - - // Methods - virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff); - virtual PTDB GetTable(PGLOBAL g, MODE m); - - protected: - // Members - char *Nspace; - char *Wclass; - int Ems; - }; // end of WMIDEF - -/***********************************************************************/ -/* This is the class declaration for the WMI table. */ -/***********************************************************************/ -class TDBWMI : public TDBASE { - friend class WMICOL; - public: - // Constructor - TDBWMI(PWMIDEF tdp); - - // Implementation - virtual AMT GetAmType(void) {return TYPE_AM_WMI;} - - // Methods - virtual int GetRecpos(void); - virtual int GetProgCur(void) {return N;} - virtual int RowNumber(PGLOBAL g, bool b = false) {return N + 1;} - - // Database routines - virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); - virtual int GetMaxSize(PGLOBAL g); - virtual bool OpenDB(PGLOBAL g); - virtual int ReadDB(PGLOBAL g); - virtual int WriteDB(PGLOBAL g); - virtual int DeleteDB(PGLOBAL g, int irc); - virtual void CloseDB(PGLOBAL g); - - protected: - // Specific routines - bool Initialize(PGLOBAL g); - char *MakeWQL(PGLOBAL g); - void DoubleSlash(PGLOBAL g); - bool GetWMIInfo(PGLOBAL g); - - // Members - IWbemServices *Svc; // IWbemServices pointer - IEnumWbemClassObject *Enumerator; - IWbemClassObject *ClsObj; - char *Nspace; // Namespace - char *Wclass; // Class name - char *ObjPath; // Used for direct access - char *Kvp; // Itou - int Ems; // Estimated max size - PCOL Kcol; // Key column - HRESULT Res; - PVBLK Vbp; - bool Init; - bool Done; - ULONG Rc; - int N; // Row number - }; // end of class TDBWMI - -/***********************************************************************/ -/* Class WMICOL: WMI Address column. */ -/***********************************************************************/ -class WMICOL : public COLBLK { - friend class TDBWMI; - public: - // Constructors - WMICOL(PCOLDEF cdp, PTDB tdbp, int n); - - // Implementation - virtual int GetAmType(void) {return TYPE_AM_WMI;} - - // Methods - virtual void ReadColumn(PGLOBAL g); - - protected: - WMICOL(void) {} // Default constructor not to be used - - // Members - PTDBWMI Tdbp; // Points to WMI table block - VARIANT Prop; // Property value - CIMTYPE Ctype; // CIM Type - HRESULT Res; - }; // end of class WMICOL - -/***********************************************************************/ -/* This is the class declaration for the WMI catalog table. */ -/***********************************************************************/ -class TDBWCL : public TDBCAT { - public: - // Constructor - TDBWCL(PWMIDEF tdp); - - protected: - // Specific routines - virtual PQRYRES GetResult(PGLOBAL g); - - // Members - char *Nsp; // Name space - char *Cls; // Class - }; // end of class TDBWCL +// TABWMI.H Olivier Bertrand 2012
+// WMI: Virtual table to Get WMI information
+#define _WIN32_DCOM
+#include <wbemidl.h>
+# pragma comment(lib, "wbemuuid.lib")
+#include <iostream>
+using namespace std;
+#include <comdef.h>
+
+/***********************************************************************/
+/* Definitions. */
+/***********************************************************************/
+typedef class WMIDEF *PWMIDEF;
+typedef class TDBWMI *PTDBWMI;
+typedef class WMICOL *PWMICOL;
+typedef class TDBWCL *PTDBWCL;
+typedef class WCLCOL *PWCLCOL;
+
+/***********************************************************************/
+/* Structure used by WMI column info functions. */
+/***********************************************************************/
+typedef struct _WMIutil {
+ IWbemServices *Svc;
+ IWbemClassObject *Cobj;
+} WMIUTIL, *PWMIUT;
+
+/***********************************************************************/
+/* Functions used externally. */
+/***********************************************************************/
+PQRYRES WMIColumns(PGLOBAL g, char *nsp, char *cls, bool info);
+
+/* -------------------------- WMI classes ---------------------------- */
+
+/***********************************************************************/
+/* WMI: Virtual table to get the WMI information. */
+/***********************************************************************/
+class WMIDEF : public TABDEF { /* Logical table description */
+ friend class TDBWMI;
+ friend class TDBWCL;
+ friend class TDBWCX;
+ public:
+ // Constructor
+ WMIDEF(void) {Pseudo = 3; Nspace = NULL; Wclass = NULL; Ems = 0;}
+
+ // Implementation
+ virtual const char *GetType(void) {return "WMI";}
+
+ // Methods
+ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
+ virtual PTDB GetTable(PGLOBAL g, MODE m);
+
+ protected:
+ // Members
+ char *Nspace;
+ char *Wclass;
+ int Ems;
+ }; // end of WMIDEF
+
+/***********************************************************************/
+/* This is the class declaration for the WMI table. */
+/***********************************************************************/
+class TDBWMI : public TDBASE {
+ friend class WMICOL;
+ public:
+ // Constructor
+ TDBWMI(PWMIDEF tdp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_WMI;}
+
+ // Methods
+ virtual int GetRecpos(void);
+ virtual int GetProgCur(void) {return N;}
+ virtual int RowNumber(PGLOBAL g, bool b = false) {return N + 1;}
+
+ // Database routines
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+ virtual int Cardinality(PGLOBAL g) {return GetMaxSize(g);}
+ virtual int GetMaxSize(PGLOBAL g);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual int WriteDB(PGLOBAL g);
+ virtual int DeleteDB(PGLOBAL g, int irc);
+ virtual void CloseDB(PGLOBAL g);
+
+ protected:
+ // Specific routines
+ bool Initialize(PGLOBAL g);
+ char *MakeWQL(PGLOBAL g);
+ void DoubleSlash(PGLOBAL g);
+ bool GetWMIInfo(PGLOBAL g);
+
+ // Members
+ IWbemServices *Svc; // IWbemServices pointer
+ IEnumWbemClassObject *Enumerator;
+ IWbemClassObject *ClsObj;
+ char *Nspace; // Namespace
+ char *Wclass; // Class name
+ char *ObjPath; // Used for direct access
+ char *Kvp; // Itou
+ int Ems; // Estimated max size
+ PCOL Kcol; // Key column
+ HRESULT Res;
+ PVBLK Vbp;
+ bool Init;
+ bool Done;
+ ULONG Rc;
+ int N; // Row number
+ }; // end of class TDBWMI
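// A minimal, self-contained sketch (not the engine's actual Initialize() or
// GetWMIInfo() code) of the standard WBEM call sequence behind the Svc,
// Enumerator and ClsObj members declared above; the namespace, WQL text and
// property name are illustrative, and CoInitializeSecurity/CoSetProxyBlanket
// are omitted for brevity.
#define _WIN32_DCOM
#include <comdef.h>
#include <wbemidl.h>
#pragma comment(lib, "wbemuuid.lib")

static HRESULT SampleWmiQuery(void)
{
  IWbemLocator *loc = NULL;
  IWbemServices *svc = NULL;          // plays the role of TDBWMI::Svc
  IEnumWbemClassObject *enu = NULL;   // plays the role of TDBWMI::Enumerator

  HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);

  if (FAILED(hr))
    return hr;

  hr = CoCreateInstance(CLSID_WbemLocator, NULL, CLSCTX_INPROC_SERVER,
                        IID_IWbemLocator, (LPVOID*)&loc);

  if (SUCCEEDED(hr))
    hr = loc->ConnectServer(_bstr_t(L"ROOT\\CIMV2"), NULL, NULL, 0,
                            NULL, 0, 0, &svc);

  if (SUCCEEDED(hr))
    hr = svc->ExecQuery(_bstr_t("WQL"),
                        _bstr_t("SELECT * FROM Win32_OperatingSystem"),
                        WBEM_FLAG_FORWARD_ONLY | WBEM_FLAG_RETURN_IMMEDIATELY,
                        NULL, &enu);

  if (SUCCEEDED(hr)) {
    IWbemClassObject *obj = NULL;     // plays the role of TDBWMI::ClsObj
    ULONG rc = 0;

    while (enu->Next(WBEM_INFINITE, 1, &obj, &rc) == S_OK && rc) {
      VARIANT v;                      // one property value, as in WMICOL::Prop
      VariantInit(&v);

      if (SUCCEEDED(obj->Get(L"Caption", 0, &v, NULL, NULL)))
        VariantClear(&v);             // a real reader would convert it first

      obj->Release();
      } // endwhile

    } // endif hr

  if (enu) enu->Release();
  if (svc) svc->Release();
  if (loc) loc->Release();

  CoUninitialize();
  return hr;
} // end of SampleWmiQuery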
+
+/***********************************************************************/
+/* Class WMICOL: WMI Address column. */
+/***********************************************************************/
+class WMICOL : public COLBLK {
+ friend class TDBWMI;
+ public:
+ // Constructors
+ WMICOL(PCOLDEF cdp, PTDB tdbp, int n);
+
+ // Implementation
+ virtual int GetAmType(void) {return TYPE_AM_WMI;}
+
+ // Methods
+ virtual void ReadColumn(PGLOBAL g);
+
+ protected:
+ WMICOL(void) {} // Default constructor not to be used
+
+ // Members
+ PTDBWMI Tdbp; // Points to WMI table block
+ VARIANT Prop; // Property value
+ CIMTYPE Ctype; // CIM Type
+ HRESULT Res;
+ }; // end of class WMICOL
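// Not the engine's ReadColumn() code: just a common way to turn a VARIANT
// property such as the Prop member above into a C string, whatever CIM type
// it arrived as (the helper name and signature are illustrative).
#include <comdef.h>
#include <cstring>

static bool VariantToString(const VARIANT &prop, char *buf, size_t len)
{
  VARIANT tmp;
  VariantInit(&tmp);

  // Ask OLE to convert whatever arrived (VT_I4, VT_BOOL, VT_BSTR...) to BSTR.
  if (FAILED(VariantChangeType(&tmp, const_cast<VARIANT*>(&prop), 0, VT_BSTR)))
    return true;                      // conversion error

  _bstr_t bs(tmp.bstrVal, true);      // copies; tmp keeps ownership of its BSTR
  strncpy(buf, (const char*)bs, len - 1);
  buf[len - 1] = '\0';
  VariantClear(&tmp);
  return false;
} // end of VariantToString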
+
+/***********************************************************************/
+/* This is the class declaration for the WMI catalog table. */
+/***********************************************************************/
+class TDBWCL : public TDBCAT {
+ public:
+ // Constructor
+ TDBWCL(PWMIDEF tdp);
+
+ protected:
+ // Specific routines
+ virtual PQRYRES GetResult(PGLOBAL g);
+
+ // Members
+ char *Nsp; // Name space
+ char *Cls; // Class
+ }; // end of class TDBWCL
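Further down in this diff, value.cpp and value.h gain an OpBmp() helper and a VALUE::TestValue() method that reduce filter evaluation to two small bitmaps (bit 1 = EQ, bit 2 = LT, bit 3 = GT): TestValue() sets exactly one bit describing how the current value compares with its operand, while OpBmp() returns the bits that must stay clear for a given operator to hold (OP_IN behaves like OP_EQ, OP_EXIST forbids nothing). The standalone sketch below mirrors that scheme with plain ints standing in for the engine's PGLOBAL/PVAL/OPVAL types; it illustrates the masks rather than reproducing the engine code.

#include <cassert>

// Simplified stand-in for the engine's OPVAL filter operators.
enum Op {OP_EQ, OP_NE, OP_GT, OP_GE, OP_LT, OP_LE};

// Bits that must NOT be set in the comparison result for the operator to
// hold (the masks used by the new OpBmp(): 0x01 = EQ, 0x02 = LT, 0x04 = GT).
static int opBmp(Op op)
{
  switch (op) {
    case OP_EQ: return 0x06;  // fails if less or greater
    case OP_NE: return 0x01;  // fails if equal
    case OP_GT: return 0x03;  // fails if equal or less
    case OP_GE: return 0x02;  // fails if less
    case OP_LT: return 0x05;  // fails if equal or greater
    default:    return 0x04;  // OP_LE: fails if greater
    } // endswitch op
}

// Like VALUE::TestValue(): exactly one bit telling how val compares to ref.
static int testValue(int val, int ref)
{
  return (val > ref) ? 0x04 : (val < ref) ? 0x02 : 0x01;
}

// "val op ref" holds when none of the forbidden bits is set.
static bool match(int val, Op op, int ref)
{
  return (testValue(val, ref) & opBmp(op)) == 0;
}

int main()
{
  assert( match(5, OP_EQ, 5) && !match(4, OP_EQ, 5));
  assert( match(6, OP_GT, 5) && !match(5, OP_GT, 5));
  assert( match(5, OP_LE, 5) && !match(6, OP_LE, 5));
  return 0;
}

Evaluating an operator then costs a single AND against the comparison result, which is what makes these masks convenient for per-value filtering.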
diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp index 1e9c172cdb3..88c029aefd2 100644 --- a/storage/connect/tabxml.cpp +++ b/storage/connect/tabxml.cpp @@ -366,7 +366,7 @@ int TDBXML::LoadTableFile(PGLOBAL g, char *filename) /*********************************************************************/ /* Firstly we check whether this file have been already loaded. */ /*********************************************************************/ - if (Mode == MODE_READ) + if (Mode == MODE_READ || Mode == MODE_ANY) for (fp = dup->Openlist; fp; fp = fp->Next) if (fp->Type == type && fp->Length && fp->Count) if (!stricmp(fp->Fname, filename)) @@ -522,8 +522,8 @@ bool TDBXML::Initialize(PGLOBAL g) To_Xb = Docp->LinkXblock(g, Mode, rc, filename); // Add a CONNECT comment node -// sprintf(buf, MSG(CREATED_PLUGDB), version); - sprintf(buf, " Created by CONNECT %s ", version); +// sprintf(buf, " Created by CONNECT %s ", version); + strcpy(buf, " Created by the MariaDB CONNECT Storage Engine"); Docp->AddComment(g, buf); if (XmlDB) { diff --git a/storage/connect/tabxml.h b/storage/connect/tabxml.h index 23c46c03a8b..a3dc0a2b54c 100644 --- a/storage/connect/tabxml.h +++ b/storage/connect/tabxml.h @@ -1,4 +1,3 @@ - /*************** Tabxml H Declares Source Code File (.H) ***************/ /* Name: TABXML.H Version 1.6 */ /* */ diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc index de32be8cdb3..b5f835c9cc9 100644 --- a/storage/connect/user_connect.cc +++ b/storage/connect/user_connect.cc @@ -1,4 +1,4 @@ -/* Copyright (C) Olivier Bertrand 2004 - 2012 +/* Copyright (C) Olivier Bertrand 2004 - 2014 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -68,7 +68,7 @@ user_connect::user_connect(THD *thd, const char *dbn) g= NULL; last_query_id= 0; count= 0; - + // Statistics nrd= fnd= nfd= 0; tb1= 0; @@ -95,9 +95,9 @@ bool user_connect::user_init() PDBUSER dup= NULL; // Areasize= 64M because of VEC tables. Should be parameterisable -//g= PlugInit(NULL, 67108864); -//g= PlugInit(NULL, 134217728); // 128M was because of old embedded tests - g= PlugInit(NULL, worksize); +//g= PlugInit(NULL, 67108864); +//g= PlugInit(NULL, 134217728); // 128M was because of old embedded tests + g= PlugInit(NULL, worksize); // Check whether the initialization is complete if (!g || !g->Sarea || PlugSubSet(g, g->Sarea, g->Sarea_Size) @@ -162,6 +162,7 @@ bool user_connect::CheckCleanup(void) g->Xchk = NULL; g->Createas = 0; g->Alchecked = 0; + g->Mrr = 0; last_query_id= thdp->query_id; if (trace) diff --git a/storage/connect/valblk.cpp b/storage/connect/valblk.cpp index ded7240cb2e..3827deec43d 100644 --- a/storage/connect/valblk.cpp +++ b/storage/connect/valblk.cpp @@ -101,6 +101,9 @@ PVBLK AllocValBlock(PGLOBAL g, void *mp, int type, int nval, int len, blkp = new(g) TYPBLK<char>(mp, nval, type); break; + case TYPE_PCHAR: + blkp = new(g) PTRBLK(g, mp, nval); + break; default: sprintf(g->Message, MSG(BAD_VALBLK_TYPE), type); return NULL; @@ -468,6 +471,36 @@ template <> uchar TYPBLK<uchar>::GetTypedValue(PVBLK blk, int n) {return blk->GetUTinyValue(n);} +/***********************************************************************/ +/* Set one value in a block if val is less than the current value. 
*/ +/***********************************************************************/ +template <class TYPE> +void TYPBLK<TYPE>::SetMin(PVAL valp, int n) + { + CheckParms(valp, n) + TYPE tval = GetTypedValue(valp); + TYPE& tmin = Typp[n]; + + if (tval < tmin) + tmin = tval; + + } // end of SetMin + +/***********************************************************************/ +/* Set one value in a block if val is greater than the current value. */ +/***********************************************************************/ +template <class TYPE> +void TYPBLK<TYPE>::SetMax(PVAL valp, int n) + { + CheckParms(valp, n) + TYPE tval = GetTypedValue(valp); + TYPE& tmin = Typp[n]; + + if (tval > tmin) + tmin = tval; + + } // end of SetMax + #if 0 /***********************************************************************/ /* Set many values in a block from values in another block. */ @@ -794,14 +827,44 @@ void CHRBLK::SetValue(PVBLK pv, int n1, int n2) longjmp(g->jumper[g->jump_level], Type); } // endif Type - if (!(b = pv->IsNull(n2) && Nullable)) + if (!(b = pv->IsNull(n2))) memcpy(Chrp + n1 * Long, ((CHRBLK*)pv)->Chrp + n2 * Long, Long); else Reset(n1); - SetNull(n1, b); + SetNull(n1, b && Nullable); } // end of SetValue +/***********************************************************************/ +/* Set one value in a block if val is less than the current value. */ +/***********************************************************************/ +void CHRBLK::SetMin(PVAL valp, int n) + { + CheckParms(valp, n) + CheckBlanks + char *vp = valp->GetCharValue(); + char *bp = Chrp + n * Long; + + if (((Ci) ? strnicmp(vp, bp, Long) : strncmp(vp, bp, Long)) < 0) + memcpy(bp, vp, Long); + + } // end of SetMin + +/***********************************************************************/ +/* Set one value in a block if val is greater than the current value. */ +/***********************************************************************/ +void CHRBLK::SetMax(PVAL valp, int n) + { + CheckParms(valp, n) + CheckBlanks + char *vp = valp->GetCharValue(); + char *bp = Chrp + n * Long; + + if (((Ci) ? strnicmp(vp, bp, Long) : strncmp(vp, bp, Long)) > 0) + memcpy(bp, vp, Long); + + } // end of SetMax + #if 0 /***********************************************************************/ /* Set many values in a block from values in another block. */ @@ -1127,6 +1190,34 @@ void STRBLK::SetValue(char *sp, uint len, int n) } // end of SetValue /***********************************************************************/ +/* Set one value in a block if val is less than the current value. */ +/***********************************************************************/ +void STRBLK::SetMin(PVAL valp, int n) + { + CheckParms(valp, n) + char *vp = valp->GetCharValue(); + char *bp = Strp[n]; + + if (strcmp(vp, bp) < 0) + SetValue(valp, n); + + } // end of SetMin + +/***********************************************************************/ +/* Set one value in a block if val is greater than the current value. */ +/***********************************************************************/ +void STRBLK::SetMax(PVAL valp, int n) + { + CheckParms(valp, n) + char *vp = valp->GetCharValue(); + char *bp = Strp[n]; + + if (strcmp(vp, bp) > 0) + SetValue(valp, n); + + } // end of SetMax + +/***********************************************************************/ /* Move one value from i to j. 
*/ /***********************************************************************/ void STRBLK::Move(int i, int j) @@ -1265,5 +1356,61 @@ void DATBLK::SetValue(PSZ p, int n) } // end of SetValue + +/* -------------------------- Class PTRBLK --------------------------- */ + +/***********************************************************************/ +/* Compare two values of the block. */ +/***********************************************************************/ +int PTRBLK::CompVal(int i1, int i2) + { + return (Strp[i1] > Strp[i2]) ? 1 : (Strp[i1] < Strp[i2]) ? (-1) : 0; + } // end of CompVal + + +/* -------------------------- Class MBVALS --------------------------- */ + +/***********************************************************************/ +/* Allocate a value block according to type,len, and nb of values. */ +/***********************************************************************/ +PVBLK MBVALS::Allocate(PGLOBAL g, int type, int len, int prec, + int n, bool sub) + { + Mblk.Sub = sub; + Mblk.Size = n * GetTypeSize(type, len); + + if (!PlgDBalloc(g, NULL, Mblk)) { + sprintf(g->Message, MSG(ALLOC_ERROR), "MBVALS::Allocate"); + return NULL; + } else + Vblk = AllocValBlock(g, Mblk.Memp, type, n, len, prec, + TRUE, TRUE, FALSE); + + return Vblk; + } // end of Allocate + +/***********************************************************************/ +/* Reallocate the value block according to the new size. */ +/***********************************************************************/ +bool MBVALS::ReAllocate(PGLOBAL g, int n) + { + if (!PlgDBrealloc(g, NULL, Mblk, n * Vblk->GetVlen())) { + sprintf(g->Message, MSG(ALLOC_ERROR), "MBVALS::ReAllocate"); + return TRUE; + } else + Vblk->ReAlloc(Mblk.Memp, n); + + return FALSE; + } // end of ReAllocate + +/***********************************************************************/ +/* Free the value block. */ +/***********************************************************************/ +void MBVALS::Free(void) + { + PlgDBfree(Mblk); + Vblk = NULL; + } // end of Free + /* ------------------------- End of Valblk --------------------------- */ diff --git a/storage/connect/valblk.h b/storage/connect/valblk.h index a9b1debe098..654db0b57b7 100644 --- a/storage/connect/valblk.h +++ b/storage/connect/valblk.h @@ -18,11 +18,41 @@ /***********************************************************************/ /* Utility used to allocate value blocks. */ /***********************************************************************/ -DllExport PVBLK AllocValBlock(PGLOBAL, void*, int, int, int, int, +DllExport PVBLK AllocValBlock(PGLOBAL, void*, int, int, int, int, bool, bool, bool); const char *GetFmt(int type, bool un = false); /***********************************************************************/ +/* DB static external variables. */ +/***********************************************************************/ +extern MBLOCK Nmblk; /* Used to initialize MBLOCK's */ + +/***********************************************************************/ +/* Class MBVALS is a utility class for (re)allocating VALBLK's. 
*/ +/***********************************************************************/ +class MBVALS : public BLOCK { +//friend class LSTBLK; + friend class ARRAY; + public: + // Constructors + MBVALS(void) {Vblk = NULL; Mblk = Nmblk;} + + // Methods + void *GetMemp(void) {return Mblk.Memp;} + PVBLK Allocate(PGLOBAL g, int type, int len, int prec, + int n, bool sub = FALSE); + bool ReAllocate(PGLOBAL g, int n); + void Free(void); + + protected: + // Members + PVBLK Vblk; // Pointer to VALBLK + MBLOCK Mblk; // The memory block + }; // end of class MBVALS + +typedef class MBVALS *PMBV; + +/***********************************************************************/ /* Class VALBLK represent a base class for variable blocks. */ /***********************************************************************/ class VALBLK : public BLOCK { @@ -78,6 +108,8 @@ class VALBLK : public BLOCK { virtual void SetValue(char *sp, uint len, int n) {assert(false);} virtual void SetValue(PVAL valp, int n) = 0; virtual void SetValue(PVBLK pv, int n1, int n2) = 0; + virtual void SetMin(PVAL valp, int n) = 0; + virtual void SetMax(PVAL valp, int n) = 0; virtual void Move(int i, int j) = 0; virtual int CompVal(PVAL vp, int n) = 0; virtual int CompVal(int i1, int i2) = 0; @@ -153,6 +185,8 @@ class TYPBLK : public VALBLK { {Typp[n] = (TYPE)cval; SetNull(n, false);} virtual void SetValue(PVAL valp, int n); virtual void SetValue(PVBLK pv, int n1, int n2); + virtual void SetMin(PVAL valp, int n); + virtual void SetMax(PVAL valp, int n); virtual void Move(int i, int j); virtual int CompVal(PVAL vp, int n); virtual int CompVal(int i1, int i2); @@ -203,6 +237,8 @@ class CHRBLK : public VALBLK { virtual void SetValue(char *sp, uint len, int n); virtual void SetValue(PVAL valp, int n); virtual void SetValue(PVBLK pv, int n1, int n2); + virtual void SetMin(PVAL valp, int n); + virtual void SetMax(PVAL valp, int n); virtual void Move(int i, int j); virtual int CompVal(PVAL vp, int n); virtual int CompVal(int i1, int i2); @@ -213,11 +249,11 @@ class CHRBLK : public VALBLK { protected: // Members - char* const &Chrp; // Pointer to char buffer - PSZ Valp; // Used to make a zero ended value - bool Blanks; // True for right filling with blanks - bool Ci; // True if case insensitive - int Long; // Length of each string + char* const &Chrp; // Pointer to char buffer + PSZ Valp; // Used to make a zero ended value + bool Blanks; // True for right filling with blanks + bool Ci; // True if case insensitive + int Long; // Length of each string }; // end of class CHRBLK /***********************************************************************/ @@ -254,6 +290,8 @@ class STRBLK : public VALBLK { virtual void SetValue(char *sp, uint len, int n); virtual void SetValue(PVAL valp, int n); virtual void SetValue(PVBLK pv, int n1, int n2); + virtual void SetMin(PVAL valp, int n); + virtual void SetMax(PVAL valp, int n); virtual void Move(int i, int j); virtual int CompVal(PVAL vp, int n); virtual int CompVal(int i1, int i2); @@ -291,5 +329,28 @@ class DATBLK : public TYPBLK<int> { PVAL Dvalp; // Date value used to convert string }; // end of class DATBLK +/***********************************************************************/ +/* Class PTRBLK: represent a block of char pointers. */ +/* Currently this class is used only by the ARRAY class to make and */ +/* sort a list of char pointers. 
*/ +/***********************************************************************/ +class PTRBLK : public STRBLK { + friend class ARRAY; + friend PVBLK AllocValBlock(PGLOBAL, void *, int, int, int, int, + bool, bool, bool); + protected: + // Constructors + PTRBLK(PGLOBAL g, void *mp, int size) : STRBLK(g, mp, size) {} + + // Implementation + + // Methods + virtual void SetValue(PSZ p, int n) {Strp[n] = p;} + virtual int CompVal(int i1, int i2); + + protected: + // Members + }; // end of class PTRBLK + #endif // __VALBLK__H__ diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index 12b6aced1cd..4c1c36369ef 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -92,6 +92,32 @@ PSZ strlwr(PSZ s); #endif // !WIN32 /***********************************************************************/ +/* Returns the bitmap representing the conditions that must not be */ +/* met when returning from TestValue for a given operator. */ +/* Bit one is EQ, bit 2 is LT, and bit 3 is GT. */ +/***********************************************************************/ +BYTE OpBmp(PGLOBAL g, OPVAL opc) + { + BYTE bt; + + switch (opc) { + case OP_IN: + case OP_EQ: bt = 0x06; break; + case OP_NE: bt = 0x01; break; + case OP_GT: bt = 0x03; break; + case OP_GE: bt = 0x02; break; + case OP_LT: bt = 0x05; break; + case OP_LE: bt = 0x04; break; + case OP_EXIST: bt = 0x00; break; + default: + sprintf(g->Message, MSG(BAD_FILTER_OP), opc); + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + } // endswitch opc + + return bt; + } // end of OpBmp + +/***********************************************************************/ /* Get a long long number from its character representation. */ /* IN p: Pointer to the numeric string */ /* IN n: The string length */ @@ -101,7 +127,7 @@ PSZ strlwr(PSZ s); /* OUT minus: Set to true if the number is negative */ /* Returned val: The resulting number */ /***********************************************************************/ -ulonglong CharToNumber(char *p, int n, ulonglong maxval, +ulonglong CharToNumber(char *p, int n, ulonglong maxval, bool un, bool *minus, bool *rc) { char *p2; @@ -110,7 +136,7 @@ ulonglong CharToNumber(char *p, int n, ulonglong maxval, if (minus) *minus = false; if (rc) *rc = false; - + // Eliminate leading blanks or 0 for (p2 = p + n; p < p2 && (*p == ' ' || *p == '0'); p++) ; @@ -158,6 +184,7 @@ PSZ GetTypeName(int type) case TYPE_TINY: name = "TINY"; break; case TYPE_DECIM: name = "DECIMAL"; break; case TYPE_BIN: name = "BINARY"; break; + case TYPE_PCHAR: name = "PCHAR"; break; default: name = "UNKNOWN"; break; } // endswitch type @@ -179,6 +206,7 @@ int GetTypeSize(int type, int len) case TYPE_DATE: len = sizeof(int); break; case TYPE_DOUBLE: len = sizeof(double); break; case TYPE_TINY: len = sizeof(char); break; + case TYPE_PCHAR: len = sizeof(char*); break; default: len = 0; } // endswitch type @@ -202,6 +230,7 @@ char *GetFormatType(int type) case TYPE_TINY: c = "T"; break; case TYPE_DECIM: c = "M"; break; case TYPE_BIN: c = "B"; break; + case TYPE_PCHAR: c = "P"; break; } // endswitch type return c; @@ -224,6 +253,7 @@ int GetFormatType(char c) case 'T': type = TYPE_TINY; break; case 'M': type = TYPE_DECIM; break; case 'B': type = TYPE_BIN; break; + case 'P': type = TYPE_PCHAR; break; } // endswitch type return type; @@ -284,6 +314,53 @@ const char *GetFmt(int type, bool un) } // end of GetFmt /***********************************************************************/ +/* ConvertType: what this function does is to determine the type to */ 
+/* which should be converted a value so no precision would be lost. */ +/* This can be a numeric type if num is true or non numeric if false. */ +/* Note: this is an ultra simplified version of this function that */ +/* should become more and more complex as new types are added. */ +/* Not evaluated types (TYPE_VOID or TYPE_UNDEF) return false from */ +/* IsType... functions so match does not prevent correct setting. */ +/***********************************************************************/ +int ConvertType(int target, int type, CONV kind, bool match) + { + switch (kind) { + case CNV_CHAR: + if (match && (!IsTypeChar(target) || !IsTypeChar(type))) + return TYPE_ERROR; + + return TYPE_STRING; + case CNV_NUM: + if (match && (!IsTypeNum(target) || !IsTypeNum(type))) + return TYPE_ERROR; + + return (target == TYPE_DOUBLE || type == TYPE_DOUBLE) ? TYPE_DOUBLE + : (target == TYPE_DATE || type == TYPE_DATE) ? TYPE_DATE + : (target == TYPE_BIGINT || type == TYPE_BIGINT) ? TYPE_BIGINT + : (target == TYPE_INT || type == TYPE_INT) ? TYPE_INT + : (target == TYPE_SHORT || type == TYPE_SHORT) ? TYPE_SHORT + : TYPE_TINY; + default: + if (target == TYPE_ERROR || target == type) + return type; + + if (match && ((IsTypeChar(target) && !IsTypeChar(type)) || + (IsTypeNum(target) && !IsTypeNum(type)))) + return TYPE_ERROR; + + return (target == TYPE_DOUBLE || type == TYPE_DOUBLE) ? TYPE_DOUBLE + : (target == TYPE_DATE || type == TYPE_DATE) ? TYPE_DATE + : (target == TYPE_BIGINT || type == TYPE_BIGINT) ? TYPE_BIGINT + : (target == TYPE_INT || type == TYPE_INT) ? TYPE_INT + : (target == TYPE_SHORT || type == TYPE_SHORT) ? TYPE_SHORT + : (target == TYPE_STRING || type == TYPE_STRING) ? TYPE_STRING + : (target == TYPE_TINY || type == TYPE_TINY) ? TYPE_TINY + : TYPE_ERROR; + } // endswitch kind + + } // end of ConvertType + +/***********************************************************************/ /* AllocateConstant: allocates a constant Value. */ /***********************************************************************/ PVAL AllocateValue(PGLOBAL g, void *value, short type) @@ -300,7 +377,7 @@ PVAL AllocateValue(PGLOBAL g, void *value, short type) case TYPE_SHORT: valp = new(g) TYPVAL<short>(*(short*)value, TYPE_SHORT); break; - case TYPE_INT: + case TYPE_INT: valp = new(g) TYPVAL<int>(*(int*)value, TYPE_INT); break; case TYPE_BIGINT: @@ -333,10 +410,10 @@ PVAL AllocateValue(PGLOBAL g, int type, int len, int prec, case TYPE_STRING: valp = new(g) TYPVAL<PSZ>(g, (PSZ)NULL, len, prec); break; - case TYPE_DATE: + case TYPE_DATE: valp = new(g) DTVAL(g, len, prec, fmt); break; - case TYPE_INT: + case TYPE_INT: if (uns) valp = new(g) TYPVAL<uint>((uint)0, TYPE_INT, 0, true); else @@ -382,6 +459,74 @@ PVAL AllocateValue(PGLOBAL g, int type, int len, int prec, return valp; } // end of AllocateValue +/***********************************************************************/ +/* Allocate a constant Value converted to newtype. */ +/* Can also be used to copy a Value eventually converted. */ +/***********************************************************************/ +PVAL AllocateValue(PGLOBAL g, PVAL valp, int newtype, int uns) + { + PSZ p, sp; + bool un = (uns < 0) ? false : (uns > 0) ? 
true : valp->IsUnsigned(); + + if (newtype == TYPE_VOID) // Means allocate a value of the same type + newtype = valp->GetType(); + + switch (newtype) { + case TYPE_STRING: + p = (PSZ)PlugSubAlloc(g, NULL, 1 + valp->GetValLen()); + + if ((sp = valp->GetCharString(p)) != p) + strcpy (p, sp); + + valp = new(g) TYPVAL<PSZ>(g, p, valp->GetValLen(), valp->GetValPrec()); + break; + case TYPE_SHORT: + if (un) + valp = new(g) TYPVAL<ushort>(valp->GetUShortValue(), + TYPE_SHORT, 0, true); + else + valp = new(g) TYPVAL<short>(valp->GetShortValue(), TYPE_SHORT); + + break; + case TYPE_INT: + if (un) + valp = new(g) TYPVAL<uint>(valp->GetUIntValue(), TYPE_INT, 0, true); + else + valp = new(g) TYPVAL<int>(valp->GetIntValue(), TYPE_INT); + + break; + case TYPE_BIGINT: + if (un) + valp = new(g) TYPVAL<ulonglong>(valp->GetUBigintValue(), + TYPE_BIGINT, 0, true); + else + valp = new(g) TYPVAL<longlong>(valp->GetBigintValue(), TYPE_BIGINT); + + break; + case TYPE_DATE: + valp = new(g) DTVAL(g, valp->GetIntValue()); + break; + case TYPE_DOUBLE: + valp = new(g) TYPVAL<double>(valp->GetFloatValue(), TYPE_DOUBLE, + valp->GetValPrec()); + break; + case TYPE_TINY: + if (un) + valp = new(g) TYPVAL<uchar>(valp->GetUTinyValue(), + TYPE_TINY, 0, true); + else + valp = new(g) TYPVAL<char>(valp->GetTinyValue(), TYPE_TINY); + + break; + default: + sprintf(g->Message, MSG(BAD_VALUE_TYPE), newtype); + return NULL; + } // endswitch type + + valp->SetGlobal(g); + return valp; + } // end of AllocateValue + /* -------------------------- Class VALUE ---------------------------- */ /***********************************************************************/ @@ -418,6 +563,18 @@ const char *VALUE::GetXfmt(void) return fmt; } // end of GetFmt +/***********************************************************************/ +/* Returns a BYTE indicating the comparison between two values. */ +/* Bit 1 indicates equality, Bit 2 less than, and Bit3 greater than. */ +/* More than 1 bit can be set only in the case of TYPE_LIST. */ +/***********************************************************************/ +BYTE VALUE::TestValue(PVAL vp) + { + int n = CompareValue(vp); + + return (n > 0) ? 0x04 : (n < 0) ? 0x02 : 0x01; + } // end of TestValue + /* -------------------------- Class TYPVAL ---------------------------- */ /***********************************************************************/ @@ -543,8 +700,8 @@ bool TYPVAL<TYPE>::SetValue_char(char *p, int n) { bool rc, minus; ulonglong maxval = MaxVal(); - ulonglong val = CharToNumber(p, n, maxval, Unsigned, &minus, &rc); - + ulonglong val = CharToNumber(p, n, maxval, Unsigned, &minus, &rc); + if (minus && val < maxval) Tval = (TYPE)(-(signed)val); else @@ -566,7 +723,7 @@ bool TYPVAL<double>::SetValue_char(char *p, int n) if (p) { char buf[64]; - for (; n > 0 && *p == ' '; p++) + for (; n > 0 && *p == ' '; p++) n--; memcpy(buf, p, MY_MIN(n, 31)); @@ -789,6 +946,24 @@ bool TYPVAL<TYPE>::IsEqual(PVAL vp, bool chktype) } // end of IsEqual /***********************************************************************/ +/* Compare values and returns 1, 0 or -1 according to comparison. */ +/* This function is used for evaluation of numeric filters. */ +/***********************************************************************/ +template <class TYPE> +int TYPVAL<TYPE>::CompareValue(PVAL vp) + { +//assert(vp->GetType() == Type); + + // Process filtering on numeric values. + TYPE n = GetTypedValue(vp); + +//if (trace) +// htrc(" Comparing: val=%d,%d\n", Tval, n); + + return (Tval > n) ? 1 : (Tval < n) ? 
(-1) : 0; + } // end of CompareValue + +/***********************************************************************/ /* FormatValue: This function set vp (a STRING value) to the string */ /* constructed from its own value formated using the fmt format. */ /* This function assumes that the format matches the value type. */ @@ -870,11 +1045,11 @@ TYPVAL<PSZ>::TYPVAL(PGLOBAL g, PSZ s, int n, int c) if (!s) { if (g) { - Strp = (char *)PlugSubAlloc(g, NULL, Len + 1); - Strp[Len] = '\0'; - } else - assert(false); - + Strp = (char *)PlugSubAlloc(g, NULL, Len + 1); + Strp[Len] = '\0'; + } else + assert(false); + } else Strp = s; @@ -888,8 +1063,8 @@ TYPVAL<PSZ>::TYPVAL(PGLOBAL g, PSZ s, int n, int c) char TYPVAL<PSZ>::GetTinyValue(void) { bool m; - ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX8, false, &m); - + ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX8, false, &m); + return (m && val < INT_MAX8) ? (char)(-(signed)val) : (char)val; } // end of GetTinyValue @@ -898,7 +1073,7 @@ char TYPVAL<PSZ>::GetTinyValue(void) /***********************************************************************/ uchar TYPVAL<PSZ>::GetUTinyValue(void) { - return (uchar)CharToNumber(Strp, strlen(Strp), UINT_MAX8, true); + return (uchar)CharToNumber(Strp, strlen(Strp), UINT_MAX8, true); } // end of GetUTinyValue /***********************************************************************/ @@ -907,8 +1082,8 @@ uchar TYPVAL<PSZ>::GetUTinyValue(void) short TYPVAL<PSZ>::GetShortValue(void) { bool m; - ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX16, false, &m); - + ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX16, false, &m); + return (m && val < INT_MAX16) ? (short)(-(signed)val) : (short)val; } // end of GetShortValue @@ -917,7 +1092,7 @@ short TYPVAL<PSZ>::GetShortValue(void) /***********************************************************************/ ushort TYPVAL<PSZ>::GetUShortValue(void) { - return (ushort)CharToNumber(Strp, strlen(Strp), UINT_MAX16, true); + return (ushort)CharToNumber(Strp, strlen(Strp), UINT_MAX16, true); } // end of GetUshortValue /***********************************************************************/ @@ -926,8 +1101,8 @@ ushort TYPVAL<PSZ>::GetUShortValue(void) int TYPVAL<PSZ>::GetIntValue(void) { bool m; - ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX32, false, &m); - + ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX32, false, &m); + return (m && val < INT_MAX32) ? (int)(-(signed)val) : (int)val; } // end of GetIntValue @@ -936,7 +1111,7 @@ int TYPVAL<PSZ>::GetIntValue(void) /***********************************************************************/ uint TYPVAL<PSZ>::GetUIntValue(void) { - return (uint)CharToNumber(Strp, strlen(Strp), UINT_MAX32, true); + return (uint)CharToNumber(Strp, strlen(Strp), UINT_MAX32, true); } // end of GetUintValue /***********************************************************************/ @@ -945,8 +1120,8 @@ uint TYPVAL<PSZ>::GetUIntValue(void) longlong TYPVAL<PSZ>::GetBigintValue(void) { bool m; - ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX64, false, &m); - + ulonglong val = CharToNumber(Strp, strlen(Strp), INT_MAX64, false, &m); + return (m && val < INT_MAX64) ? 
(-(signed)val) : (longlong)val; } // end of GetBigintValue @@ -955,7 +1130,7 @@ longlong TYPVAL<PSZ>::GetBigintValue(void) /***********************************************************************/ ulonglong TYPVAL<PSZ>::GetUBigintValue(void) { - return CharToNumber(Strp, strlen(Strp), ULONGLONG_MAX, true); + return CharToNumber(Strp, strlen(Strp), ULONGLONG_MAX, true); } // end of GetUBigintValue /***********************************************************************/ @@ -989,18 +1164,18 @@ bool TYPVAL<PSZ>::SetValue_char(char *p, int n) if ((n = MY_MIN(n, Len))) { strncpy(Strp, p, n); -// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; +// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; for (p = Strp + n - 1; p >= Strp; p--) if (*p && *p != ' ') break; - *(++p) = '\0'; + *(++p) = '\0'; - if (trace > 1) - htrc(" Setting string to: '%s'\n", Strp); - - } else - Reset(); + if (trace > 1) + htrc(" Setting string to: '%s'\n", Strp); + + } else + Reset(); Null = false; } else { @@ -1239,6 +1414,32 @@ bool TYPVAL<PSZ>::IsEqual(PVAL vp, bool chktype) } // end of IsEqual /***********************************************************************/ +/* Compare values and returns 1, 0 or -1 according to comparison. */ +/* This function is used for evaluation of numeric filters. */ +/***********************************************************************/ +int TYPVAL<PSZ>::CompareValue(PVAL vp) + { + int n; +//assert(vp->GetType() == Type); + + if (trace) + htrc(" Comparing: val='%s','%s'\n", Strp, vp->GetCharValue()); + + // Process filtering on character strings. + if (Ci || vp->IsCi()) + n = stricmp(Strp, vp->GetCharValue()); + else + n = strcmp(Strp, vp->GetCharValue()); + +#if defined(WIN32) + if (n == _NLSCMPERROR) + return n; // Here we should raise an error +#endif // WIN32 + + return (n > 0) ? 1 : (n < 0) ? -1 : 0; + } // end of CompareValue + +/***********************************************************************/ /* FormatValue: This function set vp (a STRING value) to the string */ /* constructed from its own value formated using the fmt format. */ /* This function assumes that the format matches the value type. */ @@ -1304,7 +1505,7 @@ bool DECVAL::IsZero(void) /***********************************************************************/ /* DECIMAL: Reset value to zero. */ /***********************************************************************/ -void DECVAL::Reset(void) +void DECVAL::Reset(void) { int i = 0; @@ -1383,18 +1584,18 @@ bool DECVAL::SetValue_char(char *p, int n) if ((n = MY_MIN(n, Len))) { strncpy(Strp, p, n); -// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; +// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; for (p = Strp + n - 1; p >= Strp; p--) if (*p && *p != ' ') break; - *(++p) = '\0'; + *(++p) = '\0'; - if (trace > 1) - htrc(" Setting string to: '%s'\n", Strp); - - } else - Reset(); + if (trace > 1) + htrc(" Setting string to: '%s'\n", Strp); + + } else + Reset(); Null = false; } else { @@ -1464,6 +1665,23 @@ bool DECVAL::IsEqual(PVAL vp, bool chktype) return !strcmp(Strp, vp->GetCharString(buf)); } // end of IsEqual +/***********************************************************************/ +/* Compare values and returns 1, 0 or -1 according to comparison. */ +/* This function is used for evaluation of numeric filters. 
*/ +/***********************************************************************/ +int DECVAL::CompareValue(PVAL vp) + { +//assert(vp->GetType() == Type); + + // Process filtering on numeric values. + double f = atof(Strp), n = vp->GetFloatValue(); + +//if (trace) +// htrc(" Comparing: val=%d,%d\n", f, n); + + return (f > n) ? 1 : (f < n) ? (-1) : 0; + } // end of CompareValue + #if 0 /***********************************************************************/ /* FormatValue: This function set vp (a STRING value) to the string */ @@ -2062,7 +2280,7 @@ bool DTVAL::MakeTime(struct tm *ptm) time_t t = mktime_mysql(ptm); if (trace > 1) - htrc("MakeTime from (%d,%d,%d,%d,%d,%d)\n", + htrc("MakeTime from (%d,%d,%d,%d,%d,%d)\n", ptm->tm_year, ptm->tm_mon, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec); @@ -2085,7 +2303,7 @@ bool DTVAL::MakeTime(struct tm *ptm) Tval= (int) t; if (trace > 1) - htrc("MakeTime Ival=%d\n", Tval); + htrc("MakeTime Ival=%d\n", Tval); return false; } // end of MakeTime @@ -2169,7 +2387,7 @@ bool DTVAL::MakeDate(PGLOBAL g, int *val, int nval) } // endfor i if (trace > 1) - htrc("MakeDate datm=(%d,%d,%d,%d,%d,%d)\n", + htrc("MakeDate datm=(%d,%d,%d,%d,%d,%d)\n", datm.tm_year, datm.tm_mon, datm.tm_mday, datm.tm_hour, datm.tm_min, datm.tm_sec); diff --git a/storage/connect/value.h b/storage/connect/value.h index 56992d5bc26..151ddacf509 100644 --- a/storage/connect/value.h +++ b/storage/connect/value.h @@ -46,10 +46,13 @@ DllExport char *GetFormatType(int); DllExport int GetFormatType(char); DllExport bool IsTypeChar(int type); DllExport bool IsTypeNum(int type); +DllExport int ConvertType(int, int, CONV, bool match = false); +DllExport PVAL AllocateValue(PGLOBAL, PVAL, int = TYPE_VOID, int = 0); DllExport PVAL AllocateValue(PGLOBAL, int, int len = 0, int prec = 0, bool uns = false, PSZ fmt = NULL); -DllExport ulonglong CharToNumber(char *, int, ulonglong, bool, +DllExport ulonglong CharToNumber(char *, int, ulonglong, bool, bool *minus = NULL, bool *rc = NULL); +DllExport BYTE OpBmp(PGLOBAL g, OPVAL opc); /***********************************************************************/ /* Class VALUE represents a constant or variable of any valid type. */ @@ -93,6 +96,9 @@ class DllExport VALUE : public BLOCK { virtual bool SetValue_pval(PVAL valp, bool chktype = false) = 0; virtual bool SetValue_char(char *p, int n) = 0; virtual void SetValue_psz(PSZ s) = 0; + virtual void SetValue_bool(bool b) {assert(FALSE);} + virtual int CompareValue(PVAL vp) = 0; + virtual BYTE TestValue(PVAL vp); virtual void SetValue(char c) {assert(false);} virtual void SetValue(uchar c) {assert(false);} virtual void SetValue(short i) {assert(false);} @@ -161,6 +167,8 @@ class DllExport TYPVAL : public VALUE { virtual bool SetValue_pval(PVAL valp, bool chktype); virtual bool SetValue_char(char *p, int n); virtual void SetValue_psz(PSZ s); + virtual void SetValue_bool(bool b) {Tval = (b) ? 1 : 0;} + virtual int CompareValue(PVAL vp); virtual void SetValue(char c) {Tval = (TYPE)c; Null = false;} virtual void SetValue(uchar c) {Tval = (TYPE)c; Null = false;} virtual void SetValue(short i) {Tval = (TYPE)i; Null = false;} @@ -199,7 +207,7 @@ class DllExport TYPVAL : public VALUE { /* Specific STRING class. 
*/ /***********************************************************************/ template <> -class DllExport TYPVAL<PSZ>: public VALUE { +class DllExport TYPVAL<PSZ>: public VALUE { public: // Constructors TYPVAL(PSZ s); @@ -240,6 +248,7 @@ class DllExport TYPVAL<PSZ>: public VALUE { virtual void SetValue(ulonglong n); virtual void SetValue(double f); virtual void SetBinValue(void *p); + virtual int CompareValue(PVAL vp); virtual bool GetBinValue(void *buf, int buflen, bool go); virtual char *ShowValue(char *buf, int); virtual char *GetCharString(char *p); @@ -256,7 +265,7 @@ class DllExport TYPVAL<PSZ>: public VALUE { /***********************************************************************/ /* Specific DECIMAL class. */ /***********************************************************************/ -class DllExport DECVAL: public TYPVAL<PSZ> { +class DllExport DECVAL: public TYPVAL<PSZ> { public: // Constructors DECVAL(PSZ s); @@ -272,6 +281,7 @@ class DllExport DECVAL: public TYPVAL<PSZ> { virtual bool GetBinValue(void *buf, int buflen, bool go); virtual char *ShowValue(char *buf, int); virtual bool IsEqual(PVAL vp, bool chktype); + virtual int CompareValue(PVAL vp); // Members }; // end of class DECVAL @@ -279,7 +289,7 @@ class DllExport DECVAL: public TYPVAL<PSZ> { /***********************************************************************/ /* Specific BINARY class. */ /***********************************************************************/ -class DllExport BINVAL: public VALUE { +class DllExport BINVAL: public VALUE { public: // Constructors //BINVAL(void *p); @@ -320,6 +330,7 @@ class DllExport BINVAL: public VALUE { virtual void SetValue(double f); virtual void SetBinValue(void *p); virtual bool GetBinValue(void *buf, int buflen, bool go); + virtual int CompareValue(PVAL vp) {assert(false); return 0;} virtual char *ShowValue(char *buf, int); virtual char *GetCharString(char *p); virtual bool IsEqual(PVAL vp, bool chktype); diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index fad7495fa82..7cc52580760 100755 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -1,7 +1,7 @@ /***************** Xindex C++ Class Xindex Code (.CPP) *****************/ -/* Name: XINDEX.CPP Version 2.8 */ +/* Name: XINDEX.CPP Version 2.9 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2013 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2014 */ /* */ /* This file contains the class XINDEX implementation code. */ /***********************************************************************/ @@ -45,11 +45,12 @@ //nclude "array.h" #include "filamtxt.h" #include "tabdos.h" +#include "tabvct.h" /***********************************************************************/ /* Macro or external routine definition */ /***********************************************************************/ -#define NZ 7 +#define NZ 8 #define NW 5 #define MAX_INDX 10 #ifndef INVALID_SET_FILE_POINTER @@ -112,6 +113,8 @@ INDEXDEF::INDEXDEF(char *name, bool uniq, int n) Unique = uniq; Invalid = false; AutoInc = false; + Dynamic = false; + Mapped = false; Nparts = 0; ID = n; //Offset = 0; @@ -165,6 +168,8 @@ XXBASE::XXBASE(PTDBDOS tbxp, bool b) : CSORT(b), Op = OP_EQ; To_KeyCol = NULL; Mul = false; + Srtd = false; + Dynamic = false; Val_K = -1; Nblk = Sblk = 0; Thresh = 7; @@ -237,25 +242,27 @@ void XINDEX::Reset(void) /***********************************************************************/ /* XINDEX Close: terminate index and free all allocated data. 
*/ -/* Do not reset other values that are used at return to make. */ +/* Do not reset values that are used at return to make. */ /***********************************************************************/ void XINDEX::Close(void) { // Close file or view of file - X->Close(); + if (X) + X->Close(); // De-allocate data PlgDBfree(Record); PlgDBfree(Index); PlgDBfree(Offset); - // De-allocate Key data - for (PXCOL kcp = To_KeyCol; kcp; kcp = kcp->Next) - kcp->FreeData(); + for (PXCOL kcp = To_KeyCol; kcp; kcp = kcp->Next) { + // Column values cannot be retrieved from key anymore + if (kcp->Colp) + kcp->Colp->SetKcol(NULL); - // Column values cannot be retrieved from key anymore - for (int k = 0; k < Nk; k++) - To_Cols[k]->SetKcol(NULL); + // De-allocate Key data + kcp->FreeData(); + } // endfor kcp } // end of Close @@ -276,6 +283,25 @@ int XINDEX::Qcompare(int *i1, int *i2) } // end of Qcompare /***********************************************************************/ +/* AddColumns: here we try to determine whether it is worthwhile to */ +/* add to the keys the values of the columns selected for this table. */ +/* Sure enough, it is done while records are read and permit to avoid */ +/* reading the table while doing the join (Dynamic index only) */ +/***********************************************************************/ +bool XINDEX::AddColumns(PIXDEF xdp) + { + if (!Dynamic) + return false; // Not applying to static index + else if (IsMul()) + return false; // Not done yet for multiple index + else if (Tbxp->GetAmType() == TYPE_AM_VCT && ((PTDBVCT)Tbxp)->IsSplit()) + return false; // This would require to read additional files + else + return true; + + } // end of AddColumns + +/***********************************************************************/ /* Make: Make and index on key column(s). */ /***********************************************************************/ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) @@ -283,13 +309,18 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) /*********************************************************************/ /* Table can be accessed through an index. 
*/ /*********************************************************************/ - int k, rc = RC_OK; + int k, nk = Nk, rc = RC_OK; int *bof, i, j, n, ndf, nkey; PKPDEF kdfp = Xdp->GetToKeyParts(); - bool brc = true; + bool brc = false; PCOL colp; - PXCOL kp, prev = NULL, kcp = NULL; - PDBUSER dup = (PDBUSER)g->Activityp->Aptr; + PFIL filp = Tdbp->GetFilter(); + PXCOL kp, addcolp, prev = NULL, kcp = NULL; +//PDBUSER dup = (PDBUSER)g->Activityp->Aptr; + +#if defined(_DEBUG) + assert(X || Nk == 1); +#endif // _DEBUG /*********************************************************************/ /* Allocate the storage that will contain the keys and the file */ @@ -347,6 +378,51 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) To_LastCol = prev; + if (AddColumns(sxp)) { + PCOL kolp = To_Cols[0]; // Temporary while imposing Nk = 1 + + i = 0; + + // Allocate the accompanying + for (colp = Tbxp->GetColumns(); colp; colp = colp->GetNext()) { + // Count how many columns to add +// for (k = 0; k < Nk; k++) +// if (colp == To_Cols[k]) +// break; + +// if (k == nk) + if (colp != kolp) + i++; + + } // endfor colp + + if (i && i < 10) // Should be a parameter + for (colp = Tbxp->GetColumns(); colp; colp = colp->GetNext()) { +// for (k = 0; k < Nk; k++) +// if (colp == To_Cols[k]) +// break; + +// if (k < nk) + if (colp == kolp) + continue; // This is a key column + + kcp = new(g) KXYCOL(this); + + if (kcp->Init(g, colp, n, true, 0)) + return true; + + if (trace) + htrc("Adding colp=%p Buf_Type=%d size=%d\n", + colp, colp->GetResultType(), n); + + nk++; + prev->Next = kcp; + prev = kcp; + } // endfor colp + + } // endif AddColumns + +#if 0 /*********************************************************************/ /* Get the starting information for progress. */ /*********************************************************************/ @@ -354,18 +430,19 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) sprintf((char*)dup->Step, MSG(BUILD_INDEX), Xdp->GetName(), Tdbp->Name); dup->ProgMax = Tdbp->GetProgMax(g); dup->ProgCur = 0; +#endif // 0 /*********************************************************************/ /* Standard init: read the file and construct the index table. */ /* Note: reading will be sequential as To_Kindex is not set. */ /*********************************************************************/ - for (i = nkey = 0; i < n && rc != RC_EF; i++) { -#if defined(THREAD) + for (i = nkey = 0; rc != RC_EF; i++) { +#if 0 if (!dup->Step) { strcpy(g->Message, MSG(QUERY_CANCELLED)); longjmp(g->jumper[g->jump_level], 99); } // endif Step -#endif // THREAD +#endif // 0 /*******************************************************************/ /* Read a valid record from table file. */ @@ -373,16 +450,19 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) rc = Tdbp->ReadDB(g); // Update progress information - dup->ProgCur = Tdbp->GetProgCur(); +// dup->ProgCur = Tdbp->GetProgCur(); // Check return code and do whatever must be done according to it switch (rc) { case RC_OK: - break; - case RC_EF: - goto end_of_file; + if (ApplyFilter(g, filp)) + break; + + // passthru case RC_NF: continue; + case RC_EF: + goto end_of_file; default: sprintf(g->Message, MSG(RC_READING), rc, Tdbp->Name); goto err; @@ -392,20 +472,25 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) /* Get and Store the file position of the last read record for */ /* future direct access. 
*/ /*******************************************************************/ - To_Rec[nkey] = Tdbp->GetRecpos(); + if (nkey == n) { + sprintf(g->Message, MSG(TOO_MANY_KEYS), nkey); + return true; + } else + To_Rec[nkey] = Tdbp->GetRecpos(); /*******************************************************************/ /* Get the keys and place them in the key blocks. */ /*******************************************************************/ for (k = 0, kcp = To_KeyCol; - k < Nk && kcp; + k < nk && kcp; k++, kcp = kcp->Next) { - colp = To_Cols[k]; - colp->Reset(); +// colp = To_Cols[k]; + colp = kcp->Colp; - colp->ReadColumn(g); -// if (colp->ReadColumn(g)) -// goto err; + if (!colp->GetStatus(BUF_READ)) + colp->ReadColumn(g); + else + colp->Reset(); kcp->SetValue(colp, nkey); } // endfor k @@ -416,7 +501,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) end_of_file: // Update progress information - dup->ProgCur = Tdbp->GetProgMax(g); +//dup->ProgCur = Tdbp->GetProgMax(g); /*********************************************************************/ /* Record the Index size and eventually resize memory allocation. */ @@ -451,18 +536,30 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) goto err; // Error } // endif alloc + // We must separate keys and added columns before sorting + addcolp = To_LastCol->Next; + To_LastCol->Next = NULL; + // Call the sort program, it returns the number of distinct values if ((Ndif = Qsort(g, Num_K)) < 0) goto err; // Error during sort + if (trace) + htrc("Make: Nk=%d n=%d Num_K=%d Ndif=%d addcolp=%p BlkFil=%p X=%p\n", + Nk, n, Num_K, Ndif, addcolp, Tdbp->To_BlkFil, X); + // Check whether the unique index is unique indeed if (!Mul) if (Ndif < Num_K) { strcpy(g->Message, MSG(INDEX_NOT_UNIQ)); + brc = true; goto err; } else PlgDBfree(Offset); // Not used anymore + // Restore kcp list + To_LastCol->Next = addcolp; + // Use the index to physically reorder the xindex Srtd = Reorder(g); @@ -487,7 +584,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) } else { Mul = false; // Current index is unique PlgDBfree(Offset); // Not used anymore - MaxSame = 1; // Reset it when remaking an index + MaxSame = 1; // Reset it when remaking an index } // endif Ndif /*********************************************************************/ @@ -502,7 +599,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) /* except if the subset originally contains unique values. */ /*********************************************************************/ // Update progress information - dup->Step = STEP(REDUCE_INDEX); +//dup->Step = STEP(REDUCE_INDEX); ndf = Ndif; To_LastCol->Mxs = MaxSame; @@ -550,9 +647,11 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) /*********************************************************************/ /* For sorted columns and fixed record size, file position can be */ /* calculated, so the Record array can be discarted. */ + /* Not true for DBF tables because of eventual soft deleted lines. */ /* Note: for Num_K = 1 any non null value is Ok. */ /*********************************************************************/ - if (Srtd && Tdbp->Ftype != RECFM_VAR) { + if (Srtd && !filp && Tdbp->Ftype != RECFM_VAR + && Tdbp->Txfp->GetAmType() != TYPE_AM_DBF) { Incr = (Num_K > 1) ? To_Rec[1] : Num_K; PlgDBfree(Record); } // endif Srtd @@ -579,14 +678,24 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) Cur_K = Num_K; /*********************************************************************/ - /* Save the index so it has not to be recalculated. */ + /* Save the xindex so it has not to be recalculated. 
*/ /*********************************************************************/ - if (!SaveIndex(g, sxp)) - brc = false; + if (X) { + if (SaveIndex(g, sxp)) + brc = true; + + } else { // Dynamic index + // Indicate that key column values can be found from KEYCOL's + for (kcp = To_KeyCol; kcp; kcp = kcp->Next) + kcp->Colp->SetKcol(kcp); + + Tdbp->SetFilter(NULL); // Not used anymore + } // endif X err: // We don't need the index anymore - Close(); + if (X || brc) + Close(); if (brc) printf("%s\n", g->Message); @@ -630,6 +739,7 @@ bool XINDEX::Reorder(PGLOBAL g) register int i, j, k, n; bool sorted = true; PXCOL kcp; +#if 0 PDBUSER dup = (PDBUSER)g->Activityp->Aptr; if (Num_K > 500000) { @@ -639,6 +749,7 @@ bool XINDEX::Reorder(PGLOBAL g) dup->ProgCur = 0; } else dup = NULL; +#endif // 0 if (!Pex) return Srtd; @@ -647,8 +758,8 @@ bool XINDEX::Reorder(PGLOBAL g) if (Pex[i] == Num_K) { // Already moved continue; } else if (Pex[i] == i) { // Already placed - if (dup) - dup->ProgCur++; +// if (dup) +// dup->ProgCur++; continue; } // endif's Pex @@ -677,8 +788,8 @@ bool XINDEX::Reorder(PGLOBAL g) To_Rec[j] = To_Rec[k]; } // endif k - if (dup) - dup->ProgCur++; +// if (dup) +// dup->ProgCur++; } // endfor j @@ -706,11 +817,11 @@ bool XINDEX::SaveIndex(PGLOBAL g, PIXDEF sxp) bool sep, rc = false; PXCOL kcp = To_KeyCol; PDOSDEF defp = (PDOSDEF)Tdbp->To_Def; - PDBUSER dup = PlgGetUser(g); +//PDBUSER dup = PlgGetUser(g); - dup->Step = STEP(SAVING_INDEX); - dup->ProgMax = 15 + 16 * Nk; - dup->ProgCur = 0; +//dup->Step = STEP(SAVING_INDEX); +//dup->ProgMax = 15 + 16 * Nk; +//dup->ProgCur = 0; switch (Tdbp->Ftype) { case RECFM_VAR: ftype = ".dnx"; break; @@ -755,31 +866,32 @@ bool XINDEX::SaveIndex(PGLOBAL g, PIXDEF sxp) /*********************************************************************/ /* Write the index values on the index file. */ /*********************************************************************/ - n[0] = ID; // To check validity + n[0] = ID + MAX_INDX; // To check validity n[1] = Nk; // The number of indexed columns n[2] = nof; // The offset array size or 0 n[3] = Num_K; // The index size n[4] = Incr; // Increment of record positions n[5] = Nblk; n[6] = Sblk; + n[7] = Srtd ? 
1 : 0; // Values are sorted in the file -#if defined(TRACE) - printf("Saving index %s\n", Xdp->GetName()); - printf("ID=%d Nk=%d nof=%d Num_K=%d Incr=%d Nblk=%d Sblk=%d\n", - ID, Nk, nof, Num_K, Incr, Nblk, Sblk); -#endif // TRACE + if (trace) { + htrc("Saving index %s\n", Xdp->GetName()); + htrc("ID=%d Nk=%d nof=%d Num_K=%d Incr=%d Nblk=%d Sblk=%d Srtd=%d\n", + ID, Nk, nof, Num_K, Incr, Nblk, Sblk, Srtd); + } // endif trace size = X->Write(g, n, NZ, sizeof(int), rc); - dup->ProgCur = 1; +//dup->ProgCur = 1; if (Mul) // Write the offset array size += X->Write(g, Pof, nof, sizeof(int), rc); - dup->ProgCur = 5; +//dup->ProgCur = 5; if (!Incr) // Write the record position array(s) size += X->Write(g, To_Rec, Num_K, sizeof(int), rc); - dup->ProgCur = 15; +//dup->ProgCur = 15; for (; kcp; kcp = kcp->Next) { n[0] = kcp->Ndf; // Number of distinct sub-values @@ -789,25 +901,24 @@ bool XINDEX::SaveIndex(PGLOBAL g, PIXDEF sxp) n[4] = kcp->Type; // To be checked later size += X->Write(g, n, NW, sizeof(int), rc); - dup->ProgCur += 1; +// dup->ProgCur += 1; if (n[2]) size += X->Write(g, kcp->To_Bkeys, Nblk, kcp->Klen, rc); - dup->ProgCur += 5; +// dup->ProgCur += 5; size += X->Write(g, kcp->To_Keys, n[0], kcp->Klen, rc); - dup->ProgCur += 5; +// dup->ProgCur += 5; if (n[1]) size += X->Write(g, kcp->Kof, n[1], sizeof(int), rc); - dup->ProgCur += 5; +// dup->ProgCur += 5; } // endfor kcp -#if defined(TRACE) - printf("Index %s saved, Size=%d\n", Xdp->GetName(), Size); -#endif // TRACE + if (trace) + htrc("Index %s saved, Size=%d\n", Xdp->GetName(), size); end: X->Close(fn, id); @@ -896,9 +1007,8 @@ bool XINDEX::Init(PGLOBAL g) PlugSetPath(fn, fn, Tdbp->GetPath()); -#if defined(TRACE) - printf("Index %s file: %s\n", Xdp->GetName(), fn); -#endif // TRACE + if (trace) + htrc("Index %s file: %s\n", Xdp->GetName(), fn); /*********************************************************************/ /* Open the index file and check its validity. */ @@ -907,21 +1017,31 @@ bool XINDEX::Init(PGLOBAL g) goto err; // No saved values // Now start the reading process. - if (X->Read(g, nv, NZ, sizeof(int))) + if (X->Read(g, nv, NZ - 1, sizeof(int))) goto err; -#if defined(TRACE) - printf("nv=%d %d %d %d %d %d %d\n", - nv[0], nv[1], nv[2], nv[3], nv[4], nv[5], nv[6]); -#endif // TRACE + if (nv[0] >= MAX_INDX) { + // New index format + if (X->Read(g, nv + 7, 1, sizeof(int))) + goto err; + + Srtd = nv[7] != 0; + nv[0] -= MAX_INDX; + } else + Srtd = false; + + if (trace) + htrc("nv=%d %d %d %d %d %d %d (%d)\n", + nv[0], nv[1], nv[2], nv[3], nv[4], nv[5], nv[6], Srtd); // The test on ID was suppressed because MariaDB can change an index ID - // when other indexes are added or deleted + // when other indexes are added or deleted if (/*nv[0] != ID ||*/ nv[1] != Nk) { sprintf(g->Message, MSG(BAD_INDEX_FILE), fn); -#if defined(TRACE) - printf("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); -#endif // TRACE + + if (trace) + htrc("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); + goto err; } // endif @@ -1140,9 +1260,8 @@ bool XINDEX::MapInit(PGLOBAL g) PlugSetPath(fn, fn, Tdbp->GetPath()); -#if defined(TRACE) - printf("Index %s file: %s\n", Xdp->GetName(), fn); -#endif // TRACE + if (trace) + htrc("Index %s file: %s\n", Xdp->GetName(), fn); /*********************************************************************/ /* Get a view on the part of the index file containing this index. 
*/ @@ -1157,24 +1276,33 @@ bool XINDEX::MapInit(PGLOBAL g) // Position the memory base at the offset of this index mbase += noff[id].Low; } // endif id - + // Now start the mapping process. nv = (int*)mbase; - mbase += NZ * sizeof(int); -#if defined(TRACE) - printf("nv=%d %d %d %d %d %d %d\n", - nv[0], nv[1], nv[2], nv[3], nv[4], nv[5], nv[6]); -#endif // TRACE + if (nv[0] >= MAX_INDX) { + // New index format + Srtd = nv[7] != 0; + nv[0] -= MAX_INDX; + mbase += NZ * sizeof(int); + } else { + Srtd = false; + mbase += (NZ - 1) * sizeof(int); + } // endif nv + + if (trace) + htrc("nv=%d %d %d %d %d %d %d %d\n", + nv[0], nv[1], nv[2], nv[3], nv[4], nv[5], nv[6], Srtd); // The test on ID was suppressed because MariaDB can change an index ID - // when other indexes are added or deleted + // when other indexes are added or deleted if (/*nv[0] != ID ||*/ nv[1] != Nk) { // Not this index sprintf(g->Message, MSG(BAD_INDEX_FILE), fn); -#if defined(TRACE) - printf("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); -#endif // TRACE + + if (trace) + htrc("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); + goto err; } // endif nv @@ -1272,16 +1400,19 @@ err: /***********************************************************************/ /* Get Ndif and Num_K from the index file. */ /***********************************************************************/ -bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) +bool XINDEX::GetAllSizes(PGLOBAL g,/* int &ndif,*/ int &numk) { char *ftype; char fn[_MAX_PATH]; - int n, nv[NZ], id = -1; - bool estim = false; + int nv[NZ], id = -1; // n +//bool estim = false; + bool rc = true; PDOSDEF defp = (PDOSDEF)Tdbp->To_Def; - ndif = numk = 0; +// ndif = numk = 0; + numk = 0; +#if 0 /*********************************************************************/ /* Get the estimated table size. */ /* Note: for fixed tables we must use cardinality to avoid the call */ @@ -1309,6 +1440,7 @@ bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) strcpy(g->Message, MSG(NO_KEY_COL)); return true; // Error } // endif Nk +#endif // 0 switch (Tdbp->Ftype) { case RECFM_VAR: ftype = ".dnx"; break; @@ -1341,9 +1473,8 @@ bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) PlugSetPath(fn, fn, Tdbp->GetPath()); -#if defined(TRACE) - printf("Index %s file: %s\n", Xdp->GetName(), fn); -#endif // TRACE + if (trace) + htrc("Index %s file: %s\n", Xdp->GetName(), fn); /*********************************************************************/ /* Open the index file and check its validity. 
*/ @@ -1359,20 +1490,21 @@ bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) if (X->Read(g, nv, NZ, sizeof(int))) goto err; -#if defined(TRACE) - printf("nv=%d %d %d %d\n", nv[0], nv[1], nv[2], nv[3]); -#endif // TRACE + if (trace) + htrc("nv=%d %d %d %d\n", nv[0], nv[1], nv[2], nv[3]); // The test on ID was suppressed because MariaDB can change an index ID - // when other indexes are added or deleted + // when other indexes are added or deleted if (/*nv[0] != ID ||*/ nv[1] != Nk) { sprintf(g->Message, MSG(BAD_INDEX_FILE), fn); -#if defined(TRACE) - printf("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); -#endif // TRACE + + if (trace) + htrc("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); + goto err; } // endif +#if 0 if (nv[2]) { Mul = true; Ndif = nv[2] - 1; // nv[2] is offset size, equal to Ndif + 1 @@ -1388,9 +1520,11 @@ bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) sprintf(g->Message, MSG(OPT_NOT_MATCH), fn); goto err; } // endif +#endif // 0 Num_K = nv[3]; +#if 0 if (Nk > 1) { if (nv[2] && X->Seek(g, nv[2] * sizeof(int), 0, SEEK_CUR)) goto err; @@ -1411,17 +1545,18 @@ bool XINDEX::GetAllSizes(PGLOBAL g, int &ndif, int &numk) Ndif = nv[0]; } // endif Nk +#endif // 0 /*********************************************************************/ /* Set size values. */ /*********************************************************************/ - ndif = Ndif; +//ndif = Ndif; numk = Num_K; - return false; + rc = false; err: X->Close(); - return true; + return rc; } // end of GetAllSizes /***********************************************************************/ @@ -1455,7 +1590,7 @@ int XINDEX::Range(PGLOBAL g, int limit, bool incl) n = k; // if (limit) // n = (Mul) ? k : kp->Val_K; -// else +// else // n = (Mul) ? Pof[kp->Val_K + 1] - k : 1; } else { @@ -1642,9 +1777,8 @@ int XINDEX::Fetch(PGLOBAL g) break; case OP_SAME: // Read next same // Logically the key values should be the same as before -#if defined(TRACE) - printf("looking for next same value\n"); -#endif // TRACE + if (trace > 1) + htrc("looking for next same value\n"); if (NextVal(true)) { Op = OP_EQ; @@ -1690,9 +1824,9 @@ int XINDEX::Fetch(PGLOBAL g) Nth++; -#if defined(TRACE) - printf("Fetch: Looking for new value\n"); -#endif // TRACE + if (trace > 1) + htrc("Fetch: Looking for new value\n"); + Cur_K = FastFind(Nval); if (Cur_K >= Num_K) @@ -1848,8 +1982,7 @@ int XINDEX::FastFind(int nv) XINDXS::XINDXS(PTDBDOS tdbp, PIXDEF xdp, PXLOAD pxp, PCOL *cp, PXOB *xp) : XINDEX(tdbp, xdp, pxp, cp, xp) { -//Srtd = To_Cols[0]->GetOpt() < 0; // ????? - Srtd = false; + Srtd = To_Cols[0]->GetOpt() == 2; } // end of XINDXS constructor /***********************************************************************/ @@ -1891,7 +2024,7 @@ int XINDXS::Range(PGLOBAL g, int limit, bool incl) if (k < Num_K || Op != OP_EQ) if (limit) n = (Mul) ? k : kp->Val_K; - else + else n = (Mul) ? Pof[kp->Val_K + 1] - k : 1; } else { @@ -1987,10 +2120,9 @@ int XINDXS::Fetch(PGLOBAL g) To_KeyCol->Val_K = Cur_K = 0; Op = OP_NEXT; break; - case OP_SAME: // Read next same -#if defined(TRACE) -// printf("looking for next same value\n"); -#endif // TRACE + case OP_SAME: // Read next same + if (trace > 1) + htrc("looking for next same value\n"); if (!Mul || NextVal(true)) { Op = OP_EQ; @@ -2023,18 +2155,17 @@ int XINDXS::Fetch(PGLOBAL g) /* Look for the first key equal to the link column values */ /* and return its rank whithin the index table. 
*/ /*****************************************************************/ - if (To_KeyCol->InitFind(g, To_Vals[0])) - return -1; // No more constant values - else - Nth++; - -#if defined(TRACE) - printf("Fetch: Looking for new value\n"); -#endif // TRACE - - Cur_K = FastFind(1); - - if (Cur_K >= Num_K) + if (To_KeyCol->InitFind(g, To_Vals[0])) + return -1; // No more constant values + else + Nth++; + + if (trace > 1) + htrc("Fetch: Looking for new value\n"); + + Cur_K = FastFind(1); + + if (Cur_K >= Num_K) // Rank not whithin index table, signal record not found return -2; else if (Mul) @@ -2119,7 +2250,10 @@ int XINDXS::FastFind(int nk) n = 0; } // endif sup - kcp->Val_K = i; // Used by FillValue + // Loop on kcp because of dynamic indexing + for (; kcp; kcp = kcp->Next) + kcp->Val_K = i; // Used by FillValue + return ((n) ? Num_K : (Mul) ? Pof[i] : i); } // end of FastFind @@ -2195,7 +2329,7 @@ bool XFILE::Open(PGLOBAL g, char *filename, int id, MODE mode) sprintf(g->Message, MSG(FUNC_ERRNO), errno, "Xseek"); return true; } // endif - + NewOff.Low = (int)ftell(Xfile); } else if (mode == MODE_WRITE) { if (id >= 0) { @@ -2218,7 +2352,7 @@ bool XFILE::Open(PGLOBAL g, char *filename, int id, MODE mode) sprintf(g->Message, MSG(FUNC_ERRNO), errno, "Xseek"); return true; } // endif - + } // endif mode return false; @@ -2336,7 +2470,7 @@ void *XFILE::FileView(PGLOBAL g, char *fn) /***********************************************************************/ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) { - IOFF noff[MAX_INDX]; + IOFF noff[MAX_INDX]; if (Hfile != INVALID_HANDLE_VALUE) { sprintf(g->Message, MSG(FILE_OPEN_YET), filename); @@ -2344,7 +2478,7 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) } // endif if (trace) - htrc(" Xopen: filename=%s mode=%d\n", filename, mode); + htrc(" Xopen: filename=%s id=%d mode=%d\n", filename, id, mode); #if defined(WIN32) LONG high = 0; @@ -2424,19 +2558,19 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) } // endif rc // Position the cursor at the offset of this index - rc = SetFilePointer(Hfile, noff[id].Low, + rc = SetFilePointer(Hfile, noff[id].Low, (PLONG)&noff[id].High, FILE_BEGIN); if (rc == INVALID_SET_FILE_POINTER) { sprintf(g->Message, MSG(FUNC_ERRNO), GetLastError(), "SetFilePointer"); return true; } // endif - + } // endif Mode #else // UNIX int oflag = O_LARGEFILE; // Enable file size > 2G - mode_t pmod = 0; + mode_t pmod = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; /*********************************************************************/ /* Create the file object according to access mode */ @@ -2447,7 +2581,7 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) break; case MODE_WRITE: oflag |= O_WRONLY | O_CREAT | O_TRUNC; - pmod = S_IREAD | S_IWRITE; +// pmod = S_IREAD | S_IWRITE; break; case MODE_INSERT: oflag |= (O_WRONLY | O_APPEND); @@ -2479,7 +2613,10 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) sprintf(g->Message, MSG(FUNC_ERRNO), errno, "Seek"); return true; } // endif - + + if (trace) + htrc("INSERT: NewOff=%lld\n", NewOff.Val); + } else if (mode == MODE_WRITE) { if (id >= 0) { // New not sep index file. Write the header. 
@@ -2487,19 +2624,27 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) NewOff.Low = write(Hfile, &noff, sizeof(noff)); } // endif id + if (trace) + htrc("WRITE: NewOff=%lld\n", NewOff.Val); + } else if (mode == MODE_READ && id >= 0) { // Get offset from the header if (read(Hfile, noff, sizeof(noff)) != sizeof(noff)) { sprintf(g->Message, MSG(READ_ERROR), "Index file", strerror(errno)); return true; - } // endif MAX_INDX + } // endif read + + if (trace) + htrc("noff[%d]=%lld\n", id, noff[id].Val); // Position the cursor at the offset of this index - if (!lseek64(Hfile, noff[id].Val, SEEK_SET)) { - sprintf(g->Message, MSG(FUNC_ERRNO), errno, "Hseek"); + if (lseek64(Hfile, noff[id].Val, SEEK_SET) < 0) { + sprintf(g->Message, "(XHUGE)lseek64: %s (%lld)", strerror(errno), noff[id].Val); + printf("%s\n", g->Message); +// sprintf(g->Message, MSG(FUNC_ERRNO), errno, "Hseek"); return true; - } // endif - + } // endif lseek64 + } // endif mode #endif // UNIX @@ -2526,15 +2671,15 @@ bool XHUGE::Seek(PGLOBAL g, int low, int high, int origin) if (lseek64(Hfile, pos, origin) < 0) { sprintf(g->Message, MSG(ERROR_IN_LSK), errno); -#if defined(TRACE) - printf("lseek64 error %d\n", errno); -#endif // TRACE + + if (trace) + htrc("lseek64 error %d\n", errno); + return true; } // endif lseek64 -#if defined(TRACE) - printf("Seek: low=%d high=%d\n", low, high); -#endif // TRACE + if (trace) + htrc("Seek: low=%d high=%d\n", low, high); #endif // UNIX return false; @@ -2632,12 +2777,15 @@ int XHUGE::Write(PGLOBAL g, void *buf, int n, int size, bool& rc) /***********************************************************************/ void XHUGE::Close(char *fn, int id) { + if (trace) + htrc("XHUGE::Close: fn=%s id=%d NewOff=%lld\n", fn, id, NewOff.Val); + #if defined(WIN32) if (id >= 0 && fn) { CloseFileHandle(Hfile); Hfile = CreateFile(fn, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); - + if (Hfile != INVALID_HANDLE_VALUE) if (SetFilePointer(Hfile, id * sizeof(IOFF), NULL, FILE_BEGIN) != INVALID_SET_FILE_POINTER) { @@ -2649,10 +2797,18 @@ void XHUGE::Close(char *fn, int id) } // endif id #else // !WIN32 if (id >= 0 && fn) { - fcntl(Hfile, F_SETFD, O_WRONLY); - - if (lseek(Hfile, id * sizeof(IOFF), SEEK_SET)) - write(Hfile, &NewOff, sizeof(IOFF)); + if (Hfile != INVALID_HANDLE_VALUE) { + if (lseek64(Hfile, id * sizeof(IOFF), SEEK_SET) >= 0) { + ssize_t nbw = write(Hfile, &NewOff, sizeof(IOFF)); + + if (nbw != (signed)sizeof(IOFF)) + htrc("Error writing index file header: %s\n", strerror(errno)); + + } else + htrc("(XHUGE::Close)lseek64: %s (%d)\n", strerror(errno), id); + + } else + htrc("(XHUGE)error reopening %s: %s\n", fn, strerror(errno)); } // endif id #endif // !WIN32 @@ -2678,6 +2834,7 @@ void *XHUGE::FileView(PGLOBAL g, char *fn) /***********************************************************************/ XXROW::XXROW(PTDBDOS tdbp) : XXBASE(tdbp, false) { + Srtd = true; Tdbp = tdbp; Valp = NULL; } // end of XXROW constructor @@ -2788,7 +2945,7 @@ int XXROW::FastFind(int nk) /***********************************************************************/ /* KXYCOL public constructor. 
*/ /***********************************************************************/ -KXYCOL::KXYCOL(PKXBASE kp) : To_Keys(Keys.Memp), +KXYCOL::KXYCOL(PKXBASE kp) : To_Keys(Keys.Memp), To_Bkeys(Bkeys.Memp), Kof((CPINT&)Koff.Memp) { Next = NULL; @@ -2821,7 +2978,7 @@ bool KXYCOL::Init(PGLOBAL g, PCOL colp, int n, bool sm, int kln) int len = colp->GetLength(), prec = colp->GetScale(); // Currently no indexing on NULL columns - if (colp->IsNullable()) { + if (colp->IsNullable() && kln) { sprintf(g->Message, "Cannot index nullable column %s", colp->GetName()); return true; } // endif nullable @@ -2860,8 +3017,7 @@ bool KXYCOL::Init(PGLOBAL g, PCOL colp, int n, bool sm, int kln) // Store this information to avoid sorting when already done if (Asc) -// IsSorted = colp->GetOpt() == 2; - IsSorted = false; + IsSorted = colp->GetOpt() == 2; //SetNulls(colp->IsNullable()); for when null columns will be indexable Colp = colp; @@ -2885,9 +3041,9 @@ BYTE* KXYCOL::MapInit(PGLOBAL g, PCOL colp, int *n, BYTE *m) Type = colp->GetResultType(); - if (trace) - htrc("MapInit(%p): colp=%p type=%d n=%d len=%d m=%p\n", - this, colp, Type, n[0], len, m); + if (trace) + htrc("MapInit(%p): colp=%p type=%d n=%d len=%d m=%p\n", + this, colp, Type, n[0], len, m); // Allocate the Value object used when moving items Valp = AllocateValue(g, Type, len, prec, colp->IsUnsigned()); diff --git a/storage/connect/xindex.h b/storage/connect/xindex.h index 62430ffa0ad..c702baeec83 100644 --- a/storage/connect/xindex.h +++ b/storage/connect/xindex.h @@ -87,7 +87,9 @@ class DllExport INDEXDEF : public BLOCK { /* Index description block */ void SetNext(PIXDEF pxdf) {Next = pxdf;} PSZ GetName(void) {return (PSZ)Name;} bool IsUnique(void) {return Unique;} + bool IsDynamic(void) {return Dynamic;} bool IsAuto(void) {return AutoInc;} + bool IsValid(void) {return !Invalid;} void SetAuto(bool b) {AutoInc = b;} void SetInvalid(bool b) {Invalid = b;} int GetNparts(void) {return Nparts;} @@ -115,6 +117,8 @@ class DllExport INDEXDEF : public BLOCK { /* Index description block */ bool Unique; /* true if defined as unique */ bool Invalid; /* true if marked as Invalid */ bool AutoInc; /* true if unique key in auto increment */ + bool Dynamic; /* KINDEX style */ + bool Mapped; /* Use file mapping */ int Nparts; /* Number of key parts */ int ID; /* Index ID number */ int MaxSame; /* Max number of same values */ @@ -174,6 +178,8 @@ class DllExport XXBASE : public CSORT, public BLOCK { virtual void Reset(void) = 0; virtual bool IsMul(void) {return false;} virtual bool IsRandom(void) {return true;} + virtual bool IsDynamic(void) {return Dynamic;} + virtual void SetDynamic(bool dyn) {Dynamic = dyn;} virtual bool HaveSame(void) {return false;} virtual int GetCurPos(void) {return Cur_K;} virtual void SetNval(int n) {assert(n == 1);} @@ -186,12 +192,14 @@ class DllExport XXBASE : public CSORT, public BLOCK { void SetNth(int n) {Nth = n;} int *GetPof(void) {return Pof;} int *GetPex(void) {return Pex;} + bool IsSorted(void) {return Srtd;} void FreeIndex(void) {PlgDBfree(Index);} // Methods virtual void Print(PGLOBAL g, FILE *f, uint n); virtual void Print(PGLOBAL g, char *ps, uint z); virtual bool Init(PGLOBAL g) = 0; + virtual bool Make(PGLOBAL g, PIXDEF sxp) = 0; #if defined(XMAP) virtual bool MapInit(PGLOBAL g) = 0; #endif // XMAP @@ -223,6 +231,7 @@ class DllExport XXBASE : public CSORT, public BLOCK { OPVAL Op; // Search operator bool Mul; // true if multiple bool Srtd; // true for sorted column + bool Dynamic; // true when dynamically made int Val_K; // Index 
of current value int Nblk; // Number of blocks int Sblk; // Block size @@ -268,9 +277,10 @@ class DllExport XINDEX : public XXBASE { virtual bool Make(PGLOBAL g, PIXDEF sxp); virtual bool SaveIndex(PGLOBAL g, PIXDEF sxp); virtual bool Reorder(PGLOBAL g); - bool GetAllSizes(PGLOBAL g, int &ndif, int &numk); + bool GetAllSizes(PGLOBAL g,/* int &ndif,*/ int &numk); protected: + bool AddColumns(PIXDEF xdp); bool NextValDif(void); // Members @@ -417,6 +427,7 @@ class DllExport XXROW : public XXBASE { virtual int MaxRange(void) {return 1;} virtual int Range(PGLOBAL g, int limit = 0, bool incl = true); virtual int Qcompare(int *, int *) {assert(false); return 0;} + virtual bool Make(PGLOBAL g, PIXDEF sxp) {return false;} virtual void Close(void) {} protected: diff --git a/storage/connect/xobject.cpp b/storage/connect/xobject.cpp index 05b408da2d2..cdc2ef9bf62 100644 --- a/storage/connect/xobject.cpp +++ b/storage/connect/xobject.cpp @@ -1,175 +1,186 @@ -/************ Xobject C++ Functions Source Code File (.CPP) ************/ -/* Name: XOBJECT.CPP Version 2.2 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2012 */ -/* */ -/* This file contains base XOBJECT class functions. */ -/* Also here is the implementation of the CONSTANT class. */ -/***********************************************************************/ - -/***********************************************************************/ -/* Include mariaDB header file. */ -/***********************************************************************/ -#include "my_global.h" - -/***********************************************************************/ -/* Include required application header files */ -/* global.h is header containing all global Plug declarations. */ -/* plgdbsem.h is header containing the DB applic. declarations. */ -/***********************************************************************/ -#include "global.h" -#include "plgdbsem.h" -#include "xobject.h" - -/***********************************************************************/ -/* Macro definitions. */ -/***********************************************************************/ -#if defined(_DEBUG) || defined(DEBTRACE) -#define ASSERT(B) assert(B); -#else -#define ASSERT(B) -#endif - -/***********************************************************************/ -/* The one and only needed void object. */ -/***********************************************************************/ -XVOID Xvoid; -PXOB const pXVOID = &Xvoid; // Pointer used by other classes - -/* ------------------------- Class XOBJECT --------------------------- */ - -/***********************************************************************/ -/* GetCharValue: returns the Result value as a char string. */ -/* Using GetCharValue provides no conversion from numeric types. */ -/***********************************************************************/ -PSZ XOBJECT::GetCharValue(void) - { - ASSERT(Value) - return Value->GetCharValue(); - } // end of GetCharValue() - -/***********************************************************************/ -/* GetShortValue: returns the Result value as a short integer. */ -/***********************************************************************/ -short XOBJECT::GetShortValue(void) - { - ASSERT(Value) - return Value->GetShortValue(); - } // end of GetShortValue - -/***********************************************************************/ -/* GetIntValue: returns the Result value as a int integer. 
*/ -/***********************************************************************/ -int XOBJECT::GetIntValue(void) - { - ASSERT(Value) - return Value->GetIntValue(); - } // end of GetIntValue - -/***********************************************************************/ -/* GetFloatValue: returns the Result value as a double float. */ -/***********************************************************************/ -double XOBJECT::GetFloatValue(void) - { - ASSERT(Value) - return Value->GetFloatValue(); - } // end of GetFloatValue - -/* ------------------------- Class CONSTANT -------------------------- */ - -/***********************************************************************/ -/* CONSTANT public constructor. */ -/***********************************************************************/ -CONSTANT::CONSTANT(PGLOBAL g, void *value, short type) - { - if (!(Value = AllocateValue(g, value, (int)type))) - longjmp(g->jumper[g->jump_level], TYPE_CONST); - - Constant = true; - } // end of CONSTANT constructor - -/***********************************************************************/ -/* CONSTANT public constructor. */ -/***********************************************************************/ -CONSTANT::CONSTANT(PGLOBAL g, int n) - { - if (!(Value = AllocateValue(g, &n, TYPE_INT))) - longjmp(g->jumper[g->jump_level], TYPE_CONST); - - Constant = true; - } // end of CONSTANT constructor - -/***********************************************************************/ -/* GetLengthEx: returns an evaluation of the constant string length. */ -/* Note: When converting from token to string, length has to be */ -/* specified but we need the domain length, not the value length. */ -/***********************************************************************/ -int CONSTANT::GetLengthEx(void) - { - return Value->GetValLen(); - } // end of GetLengthEx - -/***********************************************************************/ -/* Compare: returns true if this object is equivalent to xp. */ -/***********************************************************************/ -bool CONSTANT::Compare(PXOB xp) - { - if (this == xp) - return true; - else if (xp->GetType() != TYPE_CONST) - return false; - else - return Value->IsEqual(xp->GetValue(), true); - - } // end of Compare - -#if 0 -/***********************************************************************/ -/* Rephrase: temporary implementation used by PlugRephraseSQL. */ -/***********************************************************************/ -bool CONSTANT::Rephrase(PGLOBAL g, PSZ work) - { - switch (Value->GetType()) { - case TYPE_STRING: - sprintf(work + strlen(work), "'%s'", Value->GetCharValue()); - break; - case TYPE_SHORT: - sprintf(work + strlen(work), "%hd", Value->GetShortValue()); - break; - case TYPE_INT: - case TYPE_DATE: - sprintf(work + strlen(work), "%d", Value->GetIntValue()); - break; - case TYPE_DOUBLE: - sprintf(work + strlen(work), "%lf", Value->GetFloatValue()); - break; - case TYPE_BIGINT: - sprintf(work + strlen(work), "%lld", Value->GetBigintValue()); - break; - case TYPE_TINY: - sprintf(work + strlen(work), "%d", Value->GetTinyValue()); - break; - default: - sprintf(g->Message, MSG(BAD_CONST_TYPE), Value->GetType()); - return false; - } // endswitch - - return false; - } // end of Rephrase -#endif // 0 - -/***********************************************************************/ -/* Make file output of a constant object. 
*/ -/***********************************************************************/ -void CONSTANT::Print(PGLOBAL g, FILE *f, uint n) - { - Value->Print(g, f, n); - } /* end of Print */ - -/***********************************************************************/ -/* Make string output of a constant object. */ -/***********************************************************************/ -void CONSTANT::Print(PGLOBAL g, char *ps, uint z) - { - Value->Print(g, ps, z); - } /* end of Print */ +/************ Xobject C++ Functions Source Code File (.CPP) ************/
+/* Name: XOBJECT.CPP Version 2.3 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */
+/* */
+/* This file contains base XOBJECT class functions. */
+/* Also here is the implementation of the CONSTANT class. */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include MariaDB header file.                                       */
+/***********************************************************************/
+#include "my_global.h"
+
+/***********************************************************************/
+/* Include required application header files */
+/* global.h is the header containing all global Plug declarations.    */
+/* plgdbsem.h is the header containing the DB application declarations. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "xobject.h"
+
+/***********************************************************************/
+/* Macro definitions. */
+/***********************************************************************/
+#if defined(_DEBUG) || defined(DEBTRACE)
+#define ASSERT(B) assert(B);
+#else
+#define ASSERT(B)
+#endif
+
+/***********************************************************************/
+/* The one and only needed void object. */
+/***********************************************************************/
+XVOID Xvoid;
+PXOB const pXVOID = &Xvoid; // Pointer used by other classes
+
+/* ------------------------- Class XOBJECT --------------------------- */
+
+/***********************************************************************/
+/* GetCharValue: returns the Result value as a char string. */
+/* Using GetCharValue provides no conversion from numeric types. */
+/***********************************************************************/
+PSZ XOBJECT::GetCharValue(void)
+ {
+ ASSERT(Value)
+ return Value->GetCharValue();
+ } // end of GetCharValue()
+
+/***********************************************************************/
+/* GetShortValue: returns the Result value as a short integer. */
+/***********************************************************************/
+short XOBJECT::GetShortValue(void)
+ {
+ ASSERT(Value)
+ return Value->GetShortValue();
+ } // end of GetShortValue
+
+/***********************************************************************/
+/* GetIntValue: returns the Result value as an integer.               */
+/***********************************************************************/
+int XOBJECT::GetIntValue(void)
+ {
+ ASSERT(Value)
+ return Value->GetIntValue();
+ } // end of GetIntValue
+
+/***********************************************************************/
+/* GetFloatValue: returns the Result value as a double precision float. */
+/***********************************************************************/
+double XOBJECT::GetFloatValue(void)
+ {
+ ASSERT(Value)
+ return Value->GetFloatValue();
+ } // end of GetFloatValue
+
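
[Editorial note, not part of the patch] These four getters all assert that
Value is set and then delegate to the underlying VALUE object; as noted
above, only GetCharValue performs no conversion from numeric types. An
illustrative sketch of the intended use, through the CONSTANT class defined
below (assumes the CONNECT headers and an initialized PGLOBAL g whose jump
buffer is set, since allocation failures longjmp):

  CONSTANT num(g, 42);            // wraps a TYPE_INT VALUE (constructor below)
  int    i = num.GetIntValue();   // 42, via Value->GetIntValue()
  short  s = num.GetShortValue(); // 42, via Value->GetShortValue()
  double d = num.GetFloatValue(); // 42.0, via Value->GetFloatValue()
  // GetCharValue() does no numeric-to-string conversion, so it is only
  // meaningful when the wrapped VALUE already holds a character string.
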
+/* ------------------------- Class CONSTANT -------------------------- */
+
+/***********************************************************************/
+/* CONSTANT public constructor. */
+/***********************************************************************/
+CONSTANT::CONSTANT(PGLOBAL g, void *value, short type)
+ {
+ if (!(Value = AllocateValue(g, value, (int)type)))
+ longjmp(g->jumper[g->jump_level], TYPE_CONST);
+
+ Constant = true;
+ } // end of CONSTANT constructor
+
+/***********************************************************************/
+/* CONSTANT public constructor. */
+/***********************************************************************/
+CONSTANT::CONSTANT(PGLOBAL g, int n)
+ {
+ if (!(Value = AllocateValue(g, &n, TYPE_INT)))
+ longjmp(g->jumper[g->jump_level], TYPE_CONST);
+
+ Constant = true;
+ } // end of CONSTANT constructor
+
+/***********************************************************************/
+/* GetLengthEx: returns an evaluation of the constant string length. */
+/* Note: When converting from token to string, length has to be */
+/* specified but we need the domain length, not the value length. */
+/***********************************************************************/
+int CONSTANT::GetLengthEx(void)
+ {
+ return Value->GetValLen();
+ } // end of GetLengthEx
+
+/***********************************************************************/
+/* Convert a constant to the given type. */
+/***********************************************************************/
+void CONSTANT::Convert(PGLOBAL g, int newtype)
+ {
+ if (Value->GetType() != newtype)
+ if (!(Value = AllocateValue(g, Value, newtype)))
+ longjmp(g->jumper[g->jump_level], TYPE_CONST);
+
+ } // end of Convert
+
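
[Editorial note, not part of the patch] Convert is new in this version: when
the requested type differs from the current one it reallocates Value through
AllocateValue, and like the constructors it longjmps on allocation failure.
A minimal illustration (assumes a valid PGLOBAL g and the CONNECT type
codes, e.g. TYPE_DOUBLE):

  CONSTANT cst(g, 123);            // built as a TYPE_INT constant
  cst.Convert(g, TYPE_DOUBLE);     // Value is reallocated as a double
  double d = cst.GetFloatValue();  // 123.0 after the conversion
  cst.Convert(g, TYPE_DOUBLE);     // no-op: the type already matches
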
+/***********************************************************************/
+/* Compare: returns true if this object is equivalent to xp. */
+/***********************************************************************/
+bool CONSTANT::Compare(PXOB xp)
+ {
+ if (this == xp)
+ return true;
+ else if (xp->GetType() != TYPE_CONST)
+ return false;
+ else
+ return Value->IsEqual(xp->GetValue(), true);
+
+ } // end of Compare
+
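
[Editorial note, not part of the patch] Compare short-circuits on object
identity, rejects any operand that is not itself a constant, and otherwise
defers to VALUE::IsEqual. For example (same assumptions as the sketches
above):

  CONSTANT a(g, 10), b(g, 10);
  bool same_obj = a.Compare(&a);     // true immediately: same object
  bool equal    = a.Compare(&b);     // true if the two VALUEs compare equal
  bool other    = a.Compare(pXVOID); // false: pXVOID is not TYPE_CONST
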
+#if 0
+/***********************************************************************/
+/* Rephrase: temporary implementation used by PlugRephraseSQL. */
+/***********************************************************************/
+bool CONSTANT::Rephrase(PGLOBAL g, PSZ work)
+ {
+ switch (Value->GetType()) {
+ case TYPE_STRING:
+ sprintf(work + strlen(work), "'%s'", Value->GetCharValue());
+ break;
+ case TYPE_SHORT:
+ sprintf(work + strlen(work), "%hd", Value->GetShortValue());
+ break;
+ case TYPE_INT:
+ case TYPE_DATE:
+ sprintf(work + strlen(work), "%d", Value->GetIntValue());
+ break;
+ case TYPE_DOUBLE:
+ sprintf(work + strlen(work), "%lf", Value->GetFloatValue());
+ break;
+ case TYPE_BIGINT:
+ sprintf(work + strlen(work), "%lld", Value->GetBigintValue());
+ break;
+ case TYPE_TINY:
+ sprintf(work + strlen(work), "%d", Value->GetTinyValue());
+ break;
+ default:
+ sprintf(g->Message, MSG(BAD_CONST_TYPE), Value->GetType());
+ return false;
+ } // endswitch
+
+ return false;
+ } // end of Rephrase
+#endif // 0
+
+/***********************************************************************/
+/* Make file output of a constant object. */
+/***********************************************************************/
+void CONSTANT::Print(PGLOBAL g, FILE *f, uint n)
+ {
+ Value->Print(g, f, n);
+ } /* end of Print */
+
+/***********************************************************************/
+/* Make string output of a constant object. */
+/***********************************************************************/
+void CONSTANT::Print(PGLOBAL g, char *ps, uint z)
+ {
+ Value->Print(g, ps, z);
+ } /* end of Print */
diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h index 15ba6d99f33..1621b4e82ff 100644 --- a/storage/connect/xobject.h +++ b/storage/connect/xobject.h @@ -1,118 +1,119 @@ -/*************** Xobject H Declares Source Code File (.H) **************/ -/* Name: XOBJECT.H Version 2.3 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2012 */ -/* */ -/* This file contains the XOBJECT and derived classes declares. */ -/***********************************************************************/ - -#ifndef __XOBJECT__H -#define __XOBJECT__H - -/***********************************************************************/ -/* Include required application header files */ -/* block.h is header containing Block global declarations. */ -/***********************************************************************/ -#include "block.h" -#include "valblk.h" // includes value.h - -/***********************************************************************/ -/* Types used in some class definitions. */ -/***********************************************************************/ -//typedef struct _tabdesc *PTABD; // For friend setting - -/***********************************************************************/ -/* The pointer to the one and only needed void object. */ -/***********************************************************************/ -extern PXOB const pXVOID; - -/***********************************************************************/ -/* Class XOBJECT is the base class for all classes that can be used */ -/* in evaluation operations: FILTER, EXPRESSION, SCALF, FNC, COLBLK, */ -/* SELECT, FILTER as well as all the constant object types. */ -/***********************************************************************/ -class DllExport XOBJECT : public BLOCK { - public: - XOBJECT(void) {Value = NULL; Constant = false;} - - // Implementation - PVAL GetValue(void) {return Value;} - bool IsConstant(void) {return Constant;} - virtual int GetType(void) {return TYPE_XOBJECT;} - virtual int GetResultType(void) {return TYPE_VOID;} - virtual int GetKey(void) {return 0;} -#if defined(_DEBUG) - virtual void SetKey(int k) {assert(false);} -#else // !_DEBUG - virtual void SetKey(int k) {} // Only defined for COLBLK -#endif // !_DEBUG - virtual int GetLength(void) = 0; - virtual int GetLengthEx(void) = 0; - virtual PSZ GetCharValue(void); - virtual short GetShortValue(void); - virtual int GetIntValue(void); - virtual double GetFloatValue(void); - virtual int GetScale(void) = 0; - - // Methods - virtual void Reset(void) {} - virtual bool Compare(PXOB) = 0; - virtual bool Init(PGLOBAL) {return false;} - virtual bool Eval(PGLOBAL) {return false;} - virtual bool SetFormat(PGLOBAL, FORMAT&) = 0; - - protected: - PVAL Value; // The current value of the object. - bool Constant; // true for an object having a constant value. - }; // end of class XOBJECT - -/***********************************************************************/ -/* Class XVOID: represent a void (null) object. */ -/* Used to represent a void parameter for count(*) or for a filter. 
*/ -/***********************************************************************/ -class DllExport XVOID : public XOBJECT { - public: - XVOID(void) {Constant = true;} - - // Implementation - virtual int GetType(void) {return TYPE_VOID;} - virtual int GetLength(void) {return 0;} - virtual int GetLengthEx(void) {return 0;} - virtual PSZ GetCharValue(void) {return NULL;} - virtual int GetIntValue(void) {return 0;} - virtual double GetFloatValue(void) {return 0.0;} - virtual int GetScale() {return 0;} - - // Methods - virtual bool Compare(PXOB xp) {return xp->GetType() == TYPE_VOID;} - virtual bool SetFormat(PGLOBAL, FORMAT&) {return true;} - }; // end of class XVOID - - -/***********************************************************************/ -/* Class CONSTANT: represents a constant XOBJECT of any value type. */ -/* Note that the CONSTANT class is a friend of the VALUE class; */ -/***********************************************************************/ -class DllExport CONSTANT : public XOBJECT { - public: - CONSTANT(PGLOBAL g, void *value, short type); - CONSTANT(PGLOBAL g, int n); - CONSTANT(PVAL valp) {Value = valp; Constant = true;} - - // Implementation - virtual int GetType(void) {return TYPE_CONST;} - virtual int GetResultType(void) {return Value->Type;} - virtual int GetLength(void) {return Value->GetValLen();} - virtual int GetScale() {return Value->GetValPrec();} - virtual int GetLengthEx(void); - - // Methods - virtual bool Compare(PXOB xp); - virtual bool SetFormat(PGLOBAL g, FORMAT& fmt) - {return Value->SetConstFormat(g, fmt);} - void SetValue(PVAL vp) {Value = vp;} - virtual void Print(PGLOBAL g, FILE *, uint); - virtual void Print(PGLOBAL g, char *, uint); - }; // end of class CONSTANT - -#endif +/*************** Xobject H Declares Source Code File (.H) **************/
+/* Name: XOBJECT.H Version 2.4 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */
+/* */
+/* This file contains the XOBJECT and derived classes declares. */
+/***********************************************************************/
+
+#ifndef __XOBJECT__H
+#define __XOBJECT__H
+
+/***********************************************************************/
+/* Include required application header files */
+/* block.h is the header containing Block global declarations.        */
+/***********************************************************************/
+#include "block.h"
+#include "valblk.h" // includes value.h
+
+/***********************************************************************/
+/* Types used in some class definitions. */
+/***********************************************************************/
+//typedef struct _tabdesc *PTABD; // For friend setting
+
+/***********************************************************************/
+/* The pointer to the one and only needed void object. */
+/***********************************************************************/
+extern PXOB const pXVOID;
+
+/***********************************************************************/
+/* Class XOBJECT is the base class for all classes that can be used */
+/* in evaluation operations: FILTER, EXPRESSION, SCALF, FNC, COLBLK, */
+/* SELECT, as well as all the constant object types.                  */
+/***********************************************************************/
+class DllExport XOBJECT : public BLOCK {
+ public:
+ XOBJECT(void) {Value = NULL; Constant = false;}
+
+ // Implementation
+ PVAL GetValue(void) {return Value;}
+ bool IsConstant(void) {return Constant;}
+ virtual int GetType(void) {return TYPE_XOBJECT;}
+ virtual int GetResultType(void) {return TYPE_VOID;}
+ virtual int GetKey(void) {return 0;}
+#if defined(_DEBUG)
+ virtual void SetKey(int k) {assert(false);}
+#else // !_DEBUG
+ virtual void SetKey(int k) {} // Only defined for COLBLK
+#endif // !_DEBUG
+ virtual int GetLength(void) = 0;
+ virtual int GetLengthEx(void) = 0;
+ virtual PSZ GetCharValue(void);
+ virtual short GetShortValue(void);
+ virtual int GetIntValue(void);
+ virtual double GetFloatValue(void);
+ virtual int GetScale(void) = 0;
+
+ // Methods
+ virtual void Reset(void) {}
+ virtual bool Compare(PXOB) = 0;
+ virtual bool Init(PGLOBAL) {return false;}
+ virtual bool Eval(PGLOBAL) {return false;}
+ virtual bool SetFormat(PGLOBAL, FORMAT&) = 0;
+
+ protected:
+ PVAL Value; // The current value of the object.
+ bool Constant; // true for an object having a constant value.
+ }; // end of class XOBJECT
+
+/***********************************************************************/
+/* Class XVOID: represents a void (null) object.                      */
+/* Used to represent a void parameter for count(*) or for a filter. */
+/***********************************************************************/
+class DllExport XVOID : public XOBJECT {
+ public:
+ XVOID(void) {Constant = true;}
+
+ // Implementation
+ virtual int GetType(void) {return TYPE_VOID;}
+ virtual int GetLength(void) {return 0;}
+ virtual int GetLengthEx(void) {return 0;}
+ virtual PSZ GetCharValue(void) {return NULL;}
+ virtual int GetIntValue(void) {return 0;}
+ virtual double GetFloatValue(void) {return 0.0;}
+ virtual int GetScale() {return 0;}
+
+ // Methods
+ virtual bool Compare(PXOB xp) {return xp->GetType() == TYPE_VOID;}
+ virtual bool SetFormat(PGLOBAL, FORMAT&) {return true;}
+ }; // end of class XVOID
+
+
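[Editorial note, not part of the patch] XVOID is the null object of this
hierarchy: the single shared instance Xvoid/pXVOID (defined in xobject.cpp
above) stands in wherever an operand is required but none exists, such as
the parameter of count(*). A short sketch of the intent (assumes the CONNECT
headers):

  PXOB arg = pXVOID;                            // shared void operand
  bool is_void = (arg->GetType() == TYPE_VOID); // true
  bool matches = arg->Compare(pXVOID);          // true: both are TYPE_VOID
  // Length, scale and all the value getters return 0 or NULL for XVOID.
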
+/***********************************************************************/
+/* Class CONSTANT: represents a constant XOBJECT of any value type. */
+/* Note that the CONSTANT class is a friend of the VALUE class.       */
+/***********************************************************************/
+class DllExport CONSTANT : public XOBJECT {
+ public:
+ CONSTANT(PGLOBAL g, void *value, short type);
+ CONSTANT(PGLOBAL g, int n);
+ CONSTANT(PVAL valp) {Value = valp; Constant = true;}
+
+ // Implementation
+ virtual int GetType(void) {return TYPE_CONST;}
+ virtual int GetResultType(void) {return Value->Type;}
+ virtual int GetLength(void) {return Value->GetValLen();}
+ virtual int GetScale() {return Value->GetValPrec();}
+ virtual int GetLengthEx(void);
+
+ // Methods
+ virtual bool Compare(PXOB xp);
+ virtual bool SetFormat(PGLOBAL g, FORMAT& fmt)
+ {return Value->SetConstFormat(g, fmt);}
+ void Convert(PGLOBAL g, int newtype);
+ void SetValue(PVAL vp) {Value = vp;}
+ virtual void Print(PGLOBAL g, FILE *, uint);
+ virtual void Print(PGLOBAL g, char *, uint);
+ }; // end of class CONSTANT
+
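
[Editorial note, not part of the patch] CONSTANT can be built three ways:
from a raw value buffer plus a type code, from an int, or by wrapping an
existing PVAL without copying; SetValue rebinds the wrapped VALUE
afterwards. An illustrative sketch (valp and vp are assumed to be PVALs
obtained elsewhere, e.g. from AllocateValue):

  CONSTANT c(valp);            // wraps an existing VALUE, no copy is made
  int len  = c.GetLength();    // Value->GetValLen() of the bound VALUE
  int prec = c.GetScale();     // Value->GetValPrec()
  c.SetValue(vp);              // rebind the constant to another VALUE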
+#endif
diff --git a/storage/connect/xtable.h b/storage/connect/xtable.h index 1f937bba6c1..628ab96135d 100644 --- a/storage/connect/xtable.h +++ b/storage/connect/xtable.h @@ -62,6 +62,8 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. inline PCOL GetColumns(void) {return Columns;} inline int GetDegree(void) {return Degree;} inline MODE GetMode(void) {return Mode;} + inline PFIL GetFilter(void) {return To_Filter;} + inline void SetFilter(PFIL fp) {To_Filter = fp;} inline void SetOrig(PTDB txp) {To_Orig = txp;} inline void SetUse(TUSE n) {Use = n;} inline void SetCondFil(PCFIL cfp) {To_CondFil = cfp;} @@ -72,16 +74,17 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. inline void SetDegree(int degree) {Degree = degree;} inline void SetMode(MODE mode) {Mode = mode;} - //Properties + // Properties virtual AMT GetAmType(void) {return TYPE_AM_ERROR;} virtual int GetTdb_No(void) {return Tdb_No;} virtual PTDB GetNext(void) {return Next;} virtual PCATLG GetCat(void) {return NULL;} + virtual void SetAbort(bool b) {;} // Methods virtual bool IsSame(PTDB tp) {return tp == this;} virtual bool GetBlockValues(PGLOBAL g) {return false;} - virtual int Cardinality(PGLOBAL g) {return (g) ? -1 : 0;} + virtual int Cardinality(PGLOBAL g) {return 0;} virtual int GetMaxSize(PGLOBAL) = 0; virtual int GetProgMax(PGLOBAL) = 0; virtual int GetProgCur(void) = 0; @@ -91,7 +94,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. virtual PTDB Duplicate(PGLOBAL g) {return NULL;} virtual PTDB CopyOne(PTABS t) {return this;} virtual PTDB Copy(PTABS t); - virtual void PrintAM(FILE *f, char *m) + virtual void PrintAM(FILE *f, char *m) {fprintf(f, "%s AM(%d)\n", m, GetAmType());} virtual void Print(PGLOBAL g, FILE *f, uint n); virtual void Print(PGLOBAL g, char *ps, uint z); @@ -113,6 +116,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. // Members PTDB To_Orig; // Pointer to original if it is a copy TUSE Use; + PFIL To_Filter; PCFIL To_CondFil; // To condition filter structure static int Tnum; // Used to generate Tdb_no's const int Tdb_No; // GetTdb_No() is always 0 for OPJOIN @@ -122,6 +126,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. 
PCOL Columns; // Points to the first column of the table MODE Mode; // 10 Read, 30 Update, 40 Insert, 50 Delete int Degree; // Number of columns + int Cardinal; // Table number of rows }; // end of class TDB /***********************************************************************/ @@ -142,6 +147,8 @@ class DllExport TDBASE : public TDB { inline PKXBASE GetKindex(void) {return To_Kindex;} inline PCOL GetSetCols(void) {return To_SetCols;} inline void SetSetCols(PCOL colp) {To_SetCols = colp;} + inline void SetKey_Col(PCOL *cpp) {To_Key_Col = cpp;} + inline void SetXdp(PIXDEF xdp) {To_Xdp = xdp;} inline void SetKindex(PKXBASE kxp) {To_Kindex = kxp;} // Properties @@ -150,11 +157,12 @@ class DllExport TDBASE : public TDB { // Methods virtual bool IsUsingTemp(PGLOBAL g) {return false;} + virtual bool IsIndexed(void) {return false;} virtual PCATLG GetCat(void); virtual PSZ GetPath(void); virtual void PrintAM(FILE *f, char *m); virtual RECFM GetFtype(void) {return RECFM_NAF;} - virtual int GetAffectedRows(void) {return -1;} +//virtual int GetAffectedRows(void) {return -1;} virtual int GetRecpos(void) = 0; virtual bool SetRecpos(PGLOBAL g, int recpos); virtual bool IsReadOnly(void) {return Read_Only;} @@ -168,7 +176,7 @@ class DllExport TDBASE : public TDB { virtual void ResetDB(void) {} virtual void ResetSize(void) {MaxSize = -1;} virtual void RestoreNrec(void) {} - virtual int ResetTableOpt(PGLOBAL g, bool dox); + virtual int ResetTableOpt(PGLOBAL g, bool dop, bool dox); virtual PSZ GetServer(void) {return "Current";} // Database routines @@ -181,17 +189,21 @@ class DllExport TDBASE : public TDB { virtual int MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) {strcpy(g->Message, "Remote index"); return RC_INFO;} virtual bool ReadKey(PGLOBAL g, OPVAL op, const void *key, int len) - {assert(false); return true;} - + {assert(false); return true;} protected: + virtual bool PrepareWriting(PGLOBAL g) {strcpy(g->Message, + "This function should not be called for this table"); return true;} + // Members PTABDEF To_Def; // Points to catalog description block PXOB *To_Link; // Points to column of previous relations PCOL *To_Key_Col; // Points to key columns in current file PKXBASE To_Kindex; // Points to table key index + PIXDEF To_Xdp; // To the index definition block PCOL To_SetCols; // Points to updated columns - int MaxSize; // Max size in number of lines + RECFM Ftype; // File type: 0-var 1-fixed 2-binary (VCT) + int MaxSize; // Max size in number of lines int Knum; // Size of key arrays bool Read_Only; // True for read only tables const CHARSET_INFO *m_data_charset; @@ -213,7 +225,7 @@ class DllExport TDBCAT : public TDBASE { virtual int GetRecpos(void) {return N;} virtual int GetProgCur(void) {return N;} virtual int RowNumber(PGLOBAL g, bool b = false) {return N + 1;} - virtual bool SetRecpos(PGLOBAL g, int recpos); + virtual bool SetRecpos(PGLOBAL g, int recpos); // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); @@ -231,9 +243,9 @@ class DllExport TDBCAT : public TDBASE { bool InitCol(PGLOBAL g); // Members - PQRYRES Qrp; + PQRYRES Qrp; int N; // Row number - bool Init; + bool Init; }; // end of class TDBCAT /***********************************************************************/ diff --git a/storage/heap/hp_block.c b/storage/heap/hp_block.c index 1c40f982422..aa5343a0717 100644 --- a/storage/heap/hp_block.c +++ b/storage/heap/hp_block.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. 
+/* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -76,8 +76,8 @@ int hp_get_new_block(HP_SHARE *info, HP_BLOCK *block, size_t *alloc_length) When level 1 is full, we allocate data for HPTRS_IN_NODE at level 2 and 1 + X rows at level 0. */ - *alloc_length= (sizeof(HP_PTRS)* ((i == block->levels) ? i : i - 1) + - block->records_in_block* block->recbuffer); + *alloc_length= (sizeof(HP_PTRS) * ((i == block->levels) ? i : i - 1) + + (ulonglong)block->records_in_block * block->recbuffer); if (!(root=(HP_PTRS*) my_malloc(*alloc_length, MYF(MY_WME | (info->internal ? diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c index 30831f229ac..a68a35e63fb 100644 --- a/storage/heap/hp_create.c +++ b/storage/heap/hp_create.c @@ -1,4 +1,5 @@ -/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2010, 2014, SkySQL Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/storage/heap/hp_write.c b/storage/heap/hp_write.c index 6b96e1c31a3..c6205508a90 100644 --- a/storage/heap/hp_write.c +++ b/storage/heap/hp_write.c @@ -385,7 +385,8 @@ int hp_write_key(HP_INFO *info, HP_KEYDEF *keyinfo, pos=empty; do { - if (! hp_rec_key_cmp(keyinfo, record, pos->ptr_to_rec, 1)) + if (pos->hash_of_key == hash_of_key && + ! hp_rec_key_cmp(keyinfo, record, pos->ptr_to_rec, 1)) { DBUG_RETURN(my_errno=HA_ERR_FOUND_DUPP_KEY); } diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt index e783f3e6459..964294a962d 100644 --- a/storage/innobase/CMakeLists.txt +++ b/storage/innobase/CMakeLists.txt @@ -81,7 +81,6 @@ IF(NOT CMAKE_CROSSCOMPILING) long x; long y; long res; - char c; x = 10; y = 123; @@ -102,6 +101,16 @@ IF(NOT CMAKE_CROSSCOMPILING) if (res != 123 + 10 || x != 123 + 10) { return(1); } + return(0); + }" + HAVE_IB_GCC_ATOMIC_BUILTINS + ) + CHECK_C_SOURCE_RUNS( + " + int main() + { + long res; + char c; c = 10; res = __sync_lock_test_and_set(&c, 123); @@ -110,7 +119,7 @@ IF(NOT CMAKE_CROSSCOMPILING) } return(0); }" - HAVE_IB_GCC_ATOMIC_BUILTINS + HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE ) CHECK_C_SOURCE_RUNS( "#include<stdint.h> @@ -132,16 +141,47 @@ IF(NOT CMAKE_CROSSCOMPILING) }" HAVE_IB_GCC_ATOMIC_BUILTINS_64 ) + CHECK_C_SOURCE_RUNS( + "#include<stdint.h> + int main() + { + __sync_synchronize(); + return(0); + }" + HAVE_IB_GCC_SYNC_SYNCHRONISE + ) + CHECK_C_SOURCE_RUNS( + "#include<stdint.h> + int main() + { + __atomic_thread_fence(__ATOMIC_ACQUIRE); + __atomic_thread_fence(__ATOMIC_RELEASE); + return(0); + }" + HAVE_IB_GCC_ATOMIC_THREAD_FENCE + ) ENDIF() IF(HAVE_IB_GCC_ATOMIC_BUILTINS) ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS=1) ENDIF() +IF(HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE) + ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_BYTE=1) +ENDIF() + IF(HAVE_IB_GCC_ATOMIC_BUILTINS_64) ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_64=1) ENDIF() +IF(HAVE_IB_GCC_SYNC_SYNCHRONISE) + ADD_DEFINITIONS(-DHAVE_IB_GCC_SYNC_SYNCHRONISE=1) +ENDIF() + +IF(HAVE_IB_GCC_ATOMIC_THREAD_FENCE) + ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_THREAD_FENCE=1) +ENDIF() + # either define HAVE_IB_ATOMIC_PTHREAD_T_GCC or not IF(NOT CMAKE_CROSSCOMPILING) CHECK_C_SOURCE_RUNS( @@ -223,10 +263,21 @@ IF(CMAKE_SYSTEM_NAME STREQUAL "SunOS") return(0); } " 
HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS) + CHECK_C_SOURCE_COMPILES( + "#include <mbarrier.h> + int main() { + __machine_r_barrier(); + __machine_w_barrier(); + return(0); + }" + HAVE_IB_MACHINE_BARRIER_SOLARIS) ENDIF() IF(HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS) ADD_DEFINITIONS(-DHAVE_IB_ATOMIC_PTHREAD_T_SOLARIS=1) ENDIF() + IF(HAVE_IB_MACHINE_BARRIER_SOLARIS) + ADD_DEFINITIONS(-DHAVE_IB_MACHINE_BARRIER_SOLARIS=1) + ENDIF() ENDIF() @@ -244,6 +295,7 @@ ENDIF() IF(MSVC) ADD_DEFINITIONS(-DHAVE_WINDOWS_ATOMICS) + ADD_DEFINITIONS(-DHAVE_WINDOWS_MM_FENCE) ENDIF() diff --git a/storage/innobase/api/api0api.cc b/storage/innobase/api/api0api.cc index bb65dd82216..a060cbc7270 100644 --- a/storage/innobase/api/api0api.cc +++ b/storage/innobase/api/api0api.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2008, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2008, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2044,6 +2044,8 @@ ib_cursor_delete_row( const rec_t* rec; ib_bool_t page_format; mtr_t mtr; + rec_t* copy = NULL; + byte ptr[UNIV_PAGE_SIZE_MAX]; page_format = static_cast<ib_bool_t>( dict_table_is_comp(index->table)); @@ -2052,16 +2054,27 @@ ib_cursor_delete_row( if (btr_pcur_restore_position( BTR_SEARCH_LEAF, pcur, &mtr)) { + mem_heap_t* heap = NULL; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + + rec_offs_init(offsets_); rec = btr_pcur_get_rec(pcur); - } else { - rec = NULL; + + /* Since mtr will be commited, the rec + will not be protected. Make a copy of + the rec. */ + offsets = rec_get_offsets( + rec, index, offsets, ULINT_UNDEFINED, &heap); + ut_ad(rec_offs_size(offsets) < UNIV_PAGE_SIZE_MAX); + copy = rec_copy(ptr, rec, offsets); } mtr_commit(&mtr); - if (rec && !rec_get_deleted_flag(rec, page_format)) { - err = ib_delete_row(cursor, pcur, rec); + if (copy && !rec_get_deleted_flag(copy, page_format)) { + err = ib_delete_row(cursor, pcur, copy); } else { err = DB_RECORD_NOT_FOUND; } diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index e12e8af2eb8..9b24244f35e 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -202,15 +202,6 @@ btr_rec_free_externally_stored_fields( mtr_t* mtr); /*!< in: mini-transaction handle which contains an X-latch to record page and to the index tree */ -/***********************************************************//** -Gets the externally stored size of a record, in units of a database page. 
-@return externally stored part, in units of a database page */ -static -ulint -btr_rec_get_externally_stored_len( -/*==============================*/ - const rec_t* rec, /*!< in: record */ - const ulint* offsets);/*!< in: array returned by rec_get_offsets() */ #endif /* !UNIV_HOTBACKUP */ /******************************************************//** @@ -271,6 +262,7 @@ btr_cur_latch_leaves( case BTR_MODIFY_TREE: /* x-latch also brothers from left to right */ left_page_no = btr_page_get_prev(page, mtr); + mode = latch_mode; if (left_page_no != FIL_NULL) { get_block = btr_block_get( @@ -2569,6 +2561,31 @@ make_external: ut_ad(flags & BTR_KEEP_POS_FLAG); } + if (big_rec_vec) { + const ulint redo_10p = srv_log_file_size * UNIV_PAGE_SIZE / 10; + ulint total_blob_len = 0; + + /* Calculate the total number of bytes for blob data */ + for (ulint i = 0; i < big_rec_vec->n_fields; i++) { + total_blob_len += big_rec_vec->fields[i].len; + } + + if (total_blob_len > redo_10p) { + ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data" + " length (" ULINTPF ") is greater than" + " 10%% of the redo log file size (" UINT64PF + "). Please increase innodb_log_file_size.", + total_blob_len, srv_log_file_size); + if (n_reserved > 0) { + fil_space_release_free_extents( + index->space, n_reserved); + } + + err = DB_TOO_BIG_RECORD; + goto err_exit; + } + } + /* Store state of explicit locks on rec on the page infimum record, before deleting rec. The page infimum acts as a dummy carrier of the locks, taking care also of lock releases, before we can move the locks @@ -4047,15 +4064,15 @@ btr_rec_get_field_ref_offs( #define btr_rec_get_field_ref(rec, offsets, n) \ ((rec) + btr_rec_get_field_ref_offs(offsets, n)) -/***********************************************************//** -Gets the externally stored size of a record, in units of a database page. +/** Gets the externally stored size of a record, in units of a database page. +@param[in] rec record +@param[in] offsets array returned by rec_get_offsets() @return externally stored part, in units of a database page */ -static + ulint btr_rec_get_externally_stored_len( -/*==============================*/ - const rec_t* rec, /*!< in: record */ - const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ + const rec_t* rec, + const ulint* offsets) { ulint n_fields; ulint total_extern_len = 0; @@ -4390,6 +4407,7 @@ btr_store_big_rec_extern_fields( buf_block_t** freed_pages = NULL; ulint n_freed_pages = 0; dberr_t error = DB_SUCCESS; + ulint total_blob_len = 0; ut_ad(rec_offs_validate(rec, index, offsets)); ut_ad(rec_offs_any_extern(offsets)); @@ -4409,6 +4427,23 @@ btr_store_big_rec_extern_fields( rec_page_no = buf_block_get_page_no(rec_block); ut_a(fil_page_get_type(page_align(rec)) == FIL_PAGE_INDEX); + const ulint redo_10p = (srv_log_file_size * UNIV_PAGE_SIZE / 10); + + /* Calculate the total number of bytes for blob data */ + for (ulint i = 0; i < big_rec_vec->n_fields; i++) { + total_blob_len += big_rec_vec->fields[i].len; + } + + if (total_blob_len > redo_10p) { + ut_ad(op == BTR_STORE_INSERT); + ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data length" + " (" ULINTPF ") is greater than 10%% of the" + " redo log file size (" UINT64PF "). 
Please" + " increase innodb_log_file_size.", + total_blob_len, srv_log_file_size); + return(DB_TOO_BIG_RECORD); + } + if (page_zip) { int err; diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index b6b80341d8d..327051774e1 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -2888,12 +2888,6 @@ got_block: ut_ad(buf_block_get_state(fix_block) == BUF_BLOCK_FILE_PAGE); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no padding in buf_page_t. On - other systems, Valgrind could complain about uninitialized pad - bytes. */ - UNIV_MEM_ASSERT_RW(&fix_block->page, sizeof(fix_block->page)); -#endif #if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG if ((mode == BUF_GET_IF_IN_POOL || mode == BUF_GET_IF_IN_POOL_OR_WATCH) @@ -5408,7 +5402,7 @@ buf_get_free_list_len(void) #else /* !UNIV_HOTBACKUP */ /********************************************************************//** -Inits a page to the buffer buf_pool, for use in ibbackup --restore. */ +Inits a page to the buffer buf_pool, for use in mysqlbackup --restore. */ UNIV_INTERN void buf_page_init_for_backup_restore( diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index e5448bf6406..441fcfb4d94 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -2215,6 +2215,10 @@ af_get_pct_for_dirty() { ulint dirty_pct = buf_get_modified_ratio_pct(); + if (dirty_pct > 0 && srv_max_buf_pool_modified_pct == 0) { + return(100); + } + ut_a(srv_max_dirty_pages_pct_lwm <= srv_max_buf_pool_modified_pct); diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index ec30c063a72..8574a6101e7 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -1818,13 +1818,6 @@ buf_LRU_free_page( rw_lock_x_lock(hash_lock); mutex_enter(block_mutex); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no padding in buf_page_t. On - other systems, Valgrind could complain about uninitialized pad - bytes. */ - UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage); -#endif - if (!buf_page_can_relocate(bpage)) { /* Do not free buffer fixed or I/O-fixed blocks. */ @@ -1862,12 +1855,6 @@ func_exit: ut_ad(buf_page_in_file(bpage)); ut_ad(bpage->in_LRU_list); ut_ad(!bpage->in_flush_list == !bpage->oldest_modification); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no padding in buf_page_t. On - other systems, Valgrind could complain about uninitialized pad - bytes. */ - UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage); -#endif #ifdef UNIV_DEBUG if (buf_debug_prints) { @@ -1940,13 +1927,6 @@ func_exit: ut_ad(prev_b->in_LRU_list); ut_ad(buf_page_in_file(prev_b)); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no - padding in buf_page_t. On other - systems, Valgrind could complain about - uninitialized pad bytes. */ - UNIV_MEM_ASSERT_RW(prev_b, sizeof *prev_b); -#endif UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, prev_b, b); @@ -2172,13 +2152,6 @@ buf_LRU_block_remove_hashed( ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE); ut_a(bpage->buf_fix_count == 0); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no padding in - buf_page_t. On other systems, Valgrind could complain - about uninitialized pad bytes. 
*/ - UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage); -#endif - buf_LRU_remove_block(bpage); buf_pool->freed_page_clock += 1; @@ -2263,6 +2236,24 @@ buf_LRU_block_remove_hashed( " in the hash table\n", (ulong) bpage->space, (ulong) bpage->offset); +#ifdef UNIV_DEBUG + fprintf(stderr, + "InnoDB: in_page_hash %lu in_zip_hash %lu\n" + " in_free_list %lu in_flush_list %lu in_LRU_list %lu\n" + " zip.data %p zip_size %lu page_state %d\n", + bpage->in_page_hash, bpage->in_zip_hash, + bpage->in_free_list, bpage->in_flush_list, + bpage->in_LRU_list, bpage->zip.data, + buf_page_get_zip_size(bpage), + buf_page_get_state(bpage)); +#else + fprintf(stderr, + "InnoDB: zip.data %p zip_size %lu page_state %d\n", + bpage->zip.data, + buf_page_get_zip_size(bpage), + buf_page_get_state(bpage)); +#endif + if (hashed_bpage) { fprintf(stderr, "InnoDB: In hash table we find block" diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc index ff892749d4f..30523ff2af4 100644 --- a/storage/innobase/dict/dict0crea.cc +++ b/storage/innobase/dict/dict0crea.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1611,26 +1611,25 @@ dict_create_add_foreign_to_dictionary( return(error); } -/********************************************************************//** -Adds foreign key definitions to data dictionary tables in the database. -@return error code or DB_SUCCESS */ +/** Adds the given set of foreign key objects to the dictionary tables +in the database. This function does not modify the dictionary cache. The +caller must ensure that all foreign key objects contain a valid constraint +name in foreign->id. 
+@param[in] local_fk_set set of foreign key objects, to be added to +the dictionary tables +@param[in] table table to which the foreign key objects in +local_fk_set belong to +@param[in,out] trx transaction +@return error code or DB_SUCCESS */ UNIV_INTERN dberr_t dict_create_add_foreigns_to_dictionary( /*===================================*/ - ulint start_id,/*!< in: if we are actually doing ALTER TABLE - ADD CONSTRAINT, we want to generate constraint - numbers which are bigger than in the table so - far; we number the constraints from - start_id + 1 up; start_id should be set to 0 if - we are creating a new table, or if the table - so far has no constraints for which the name - was generated here */ - dict_table_t* table, /*!< in: table */ - trx_t* trx) /*!< in: transaction */ + const dict_foreign_set& local_fk_set, + const dict_table_t* table, + trx_t* trx) { dict_foreign_t* foreign; - ulint number = start_id + 1; dberr_t error; ut_ad(mutex_own(&(dict_sys->mutex))); @@ -1643,17 +1642,12 @@ dict_create_add_foreigns_to_dictionary( return(DB_ERROR); } - for (foreign = UT_LIST_GET_FIRST(table->foreign_list); - foreign; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::const_iterator it = local_fk_set.begin(); + it != local_fk_set.end(); + ++it) { - error = dict_create_add_foreign_id(&number, table->name, - foreign); - - if (error != DB_SUCCESS) { - - return(error); - } + foreign = *it; + ut_ad(foreign->id != NULL); error = dict_create_add_foreign_to_dictionary(table->name, foreign, trx); diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index ad5c9d42e81..b9345ef8055 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. Copyright (c) 2013, SkySQL Ab. All Rights Reserved. @@ -28,6 +28,7 @@ Created 1/8/1996 Heikki Tuuri #include "dict0dict.h" #include "fts0fts.h" #include "fil0fil.h" +#include <algorithm> #ifdef UNIV_NONINL #include "dict0dict.ic" @@ -51,6 +52,7 @@ UNIV_INTERN dict_index_t* dict_ind_compact; #include "btr0btr.h" #include "btr0cur.h" #include "btr0sea.h" +#include "os0once.h" #include "page0zip.h" #include "page0page.h" #include "pars0pars.h" @@ -103,7 +105,7 @@ UNIV_INTERN ulong zip_pad_max = 50; UNIV_INTERN mysql_pfs_key_t dict_operation_lock_key; UNIV_INTERN mysql_pfs_key_t index_tree_rw_lock_key; UNIV_INTERN mysql_pfs_key_t index_online_log_key; -UNIV_INTERN mysql_pfs_key_t dict_table_stats_latch_key; +UNIV_INTERN mysql_pfs_key_t dict_table_stats_key; #endif /* UNIV_PFS_RWLOCK */ #ifdef UNIV_PFS_MUTEX @@ -122,6 +124,11 @@ UNIV_INTERN mysql_pfs_key_t dict_foreign_err_mutex_key; /** Identifies generated InnoDB foreign key names */ static char dict_ibfk[] = "_ibfk_"; +bool innodb_table_stats_not_found = false; +bool innodb_index_stats_not_found = false; +static bool innodb_table_stats_not_found_reported = false; +static bool innodb_index_stats_not_found_reported = false; + /*******************************************************************//** Tries to find column names for the index and sets the col field of the index. @@ -320,6 +327,82 @@ dict_mutex_exit_for_mysql(void) mutex_exit(&(dict_sys->mutex)); } +/** Allocate and init a dict_table_t's stats latch. 
+This function must not be called concurrently on the same table object. +@param[in,out] table_void table whose stats latch to create */ +static +void +dict_table_stats_latch_alloc( + void* table_void) +{ + dict_table_t* table = static_cast<dict_table_t*>(table_void); + + table->stats_latch = new(std::nothrow) rw_lock_t; + + ut_a(table->stats_latch != NULL); + + rw_lock_create(dict_table_stats_key, table->stats_latch, + SYNC_INDEX_TREE); +} + +/** Deinit and free a dict_table_t's stats latch. +This function must not be called concurrently on the same table object. +@param[in,out] table table whose stats latch to free */ +static +void +dict_table_stats_latch_free( + dict_table_t* table) +{ + rw_lock_free(table->stats_latch); + delete table->stats_latch; +} + +/** Create a dict_table_t's stats latch or delay for lazy creation. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to create +@param[in] enabled if false then the latch is disabled +and dict_table_stats_lock()/unlock() become noop on this table. */ + +void +dict_table_stats_latch_create( + dict_table_t* table, + bool enabled) +{ + if (!enabled) { + table->stats_latch = NULL; + table->stats_latch_created = os_once::DONE; + return; + } + +#ifdef HAVE_ATOMIC_BUILTINS + /* We create this lazily the first time it is used. */ + table->stats_latch = NULL; + table->stats_latch_created = os_once::NEVER_DONE; +#else /* HAVE_ATOMIC_BUILTINS */ + + dict_table_stats_latch_alloc(table); + + table->stats_latch_created = os_once::DONE; +#endif /* HAVE_ATOMIC_BUILTINS */ +} + +/** Destroy a dict_table_t's stats latch. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to destroy */ + +void +dict_table_stats_latch_destroy( + dict_table_t* table) +{ + if (table->stats_latch_created == os_once::DONE + && table->stats_latch != NULL) { + + dict_table_stats_latch_free(table); + } +} + /**********************************************************************//** Lock the appropriate latch to protect a given table's statistics. */ UNIV_INTERN @@ -332,6 +415,14 @@ dict_table_stats_lock( ut_ad(table != NULL); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); +#ifdef HAVE_ATOMIC_BUILTINS + os_once::do_or_wait_for_done( + &table->stats_latch_created, + dict_table_stats_latch_alloc, table); +#else /* HAVE_ATOMIC_BUILTINS */ + ut_ad(table->stats_latch_created == os_once::DONE); +#endif /* HAVE_ATOMIC_BUILTINS */ + if (table->stats_latch == NULL) { /* This is a dummy table object that is private in the current thread and is not shared between multiple threads, thus we @@ -1163,8 +1254,8 @@ dict_table_can_be_evicted( #endif /* UNIV_SYNC_DEBUG */ ut_a(table->can_be_evicted); - ut_a(UT_LIST_GET_LEN(table->foreign_list) == 0); - ut_a(UT_LIST_GET_LEN(table->referenced_list) == 0); + ut_a(table->foreign_set.empty()); + ut_a(table->referenced_set.empty()); if (table->n_ref_count == 0) { dict_index_t* index; @@ -1380,6 +1471,22 @@ dict_index_find_on_id_low( return(NULL); } +/** Function object to remove a foreign key constraint from the +referenced_set of the referenced table. The foreign key object is +also removed from the dictionary cache. The foreign key constraint +is not removed from the foreign_set of the table containing the +constraint. 
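dict_table_stats_lock() above now creates the latch lazily: on builds with atomic builtins the latch pointer starts out NULL and os_once::do_or_wait_for_done() ensures that exactly one thread runs dict_table_stats_latch_alloc() while concurrent callers wait for it to finish. os_once is an InnoDB helper that is not shown in this hunk; a rough stand-alone analogue of the same once-only pattern using std::call_once, purely illustrative and with hypothetical names:

#include <cstdio>
#include <mutex>

struct table_t {
    std::once_flag latch_once;   /* plays the role of stats_latch_created */
    std::mutex*    stats_latch;  /* plays the role of the rw_lock_t*      */

    table_t() : stats_latch(0) {}
};

static void stats_latch_alloc(table_t* t)
{
    /* runs exactly once, no matter how many threads call stats_lock() */
    t->stats_latch = new std::mutex;
    std::printf("latch allocated\n");
}

static void stats_lock(table_t* t)
{
    std::call_once(t->latch_once, stats_latch_alloc, t);
    t->stats_latch->lock();
}

static void stats_unlock(table_t* t)
{
    t->stats_latch->unlock();
}

int main()
{
    table_t t;
    stats_lock(&t);     /* first caller allocates the latch */
    stats_unlock(&t);
    stats_lock(&t);     /* later callers reuse it */
    stats_unlock(&t);
    delete t.stats_latch;
    return 0;
}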
*/ +struct dict_foreign_remove_partial +{ + void operator()(dict_foreign_t* foreign) { + dict_table_t* table = foreign->referenced_table; + if (table != NULL) { + table->referenced_set.erase(foreign); + } + dict_foreign_free(foreign); + } +}; + /**********************************************************************//** Renames a table object. @return TRUE if success */ @@ -1554,27 +1661,25 @@ dict_table_rename_in_cache( system tables through a call of dict_load_foreigns. */ /* Remove the foreign constraints from the cache */ - foreign = UT_LIST_GET_LAST(table->foreign_list); - - while (foreign != NULL) { - dict_foreign_remove_from_cache(foreign); - foreign = UT_LIST_GET_LAST(table->foreign_list); - } + std::for_each(table->foreign_set.begin(), + table->foreign_set.end(), + dict_foreign_remove_partial()); + table->foreign_set.clear(); /* Reset table field in referencing constraints */ + for (dict_foreign_set::iterator it + = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { - foreign = UT_LIST_GET_FIRST(table->referenced_list); - - while (foreign != NULL) { + foreign = *it; foreign->referenced_table = NULL; foreign->referenced_index = NULL; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); } - /* Make the list of referencing constraints empty */ - - UT_LIST_INIT(table->referenced_list); + /* Make the set of referencing constraints empty */ + table->referenced_set.clear(); return(DB_SUCCESS); } @@ -1583,9 +1688,19 @@ dict_table_rename_in_cache( the constraint id of new format >= 4.0.18 constraints. Note that at this point we have already changed table->name to the new name. */ - foreign = UT_LIST_GET_FIRST(table->foreign_list); + dict_foreign_set fk_set; + + for (;;) { + + dict_foreign_set::iterator it + = table->foreign_set.begin(); + + if (it == table->foreign_set.end()) { + break; + } + + foreign = *it; - while (foreign != NULL) { if (ut_strlen(foreign->foreign_table_name) < ut_strlen(table->name)) { /* Allocate a longer name buffer; @@ -1735,12 +1850,18 @@ dict_table_rename_in_cache( mem_free(old_id); } - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); + table->foreign_set.erase(it); + fk_set.insert(foreign); } - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + ut_a(table->foreign_set.empty()); + table->foreign_set.swap(fk_set); + + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + + foreign = *it; if (ut_strlen(foreign->referenced_table_name) < ut_strlen(table->name)) { @@ -1810,27 +1931,17 @@ dict_table_remove_from_cache_low( ut_ad(mutex_own(&(dict_sys->mutex))); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); -#if 0 - fputs("Removing table ", stderr); - ut_print_name(stderr, table->name, ULINT_UNDEFINED); - fputs(" from dictionary cache\n", stderr); -#endif - /* Remove the foreign constraints from the cache */ - - for (foreign = UT_LIST_GET_LAST(table->foreign_list); - foreign != NULL; - foreign = UT_LIST_GET_LAST(table->foreign_list)) { - - dict_foreign_remove_from_cache(foreign); - } + std::for_each(table->foreign_set.begin(), table->foreign_set.end(), + dict_foreign_remove_partial()); + table->foreign_set.clear(); /* Reset table field in referencing constraints */ + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign != NULL; - foreign = 
UT_LIST_GET_NEXT(referenced_list, foreign)) { - + foreign = *it; foreign->referenced_table = NULL; foreign->referenced_index = NULL; } @@ -3053,7 +3164,7 @@ dict_table_is_referenced_by_foreign_key( /*====================================*/ const dict_table_t* table) /*!< in: InnoDB table */ { - return(UT_LIST_GET_LEN(table->referenced_list) > 0); + return(!table->referenced_set.empty()); } /*********************************************************************//** @@ -3073,9 +3184,11 @@ dict_table_get_referenced_constraint( ut_ad(index != NULL); ut_ad(table != NULL); - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + + foreign = *it; if (foreign->referenced_index == index) { @@ -3104,9 +3217,11 @@ dict_table_get_foreign_constraint( ut_ad(index != NULL); ut_ad(table != NULL); - for (foreign = UT_LIST_GET_FIRST(table->foreign_list); - foreign; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + + foreign = *it; if (foreign->foreign_index == index) { @@ -3117,17 +3232,6 @@ dict_table_get_foreign_constraint( return(NULL); } -/*********************************************************************//** -Frees a foreign key struct. */ -UNIV_INTERN -void -dict_foreign_free( -/*==============*/ - dict_foreign_t* foreign) /*!< in, own: foreign key struct */ -{ - mem_heap_free(foreign->heap); -} - /**********************************************************************//** Removes a foreign constraint struct from the dictionary cache. */ UNIV_INTERN @@ -3139,16 +3243,12 @@ dict_foreign_remove_from_cache( ut_ad(mutex_own(&(dict_sys->mutex))); ut_a(foreign); - if (foreign->referenced_table) { - UT_LIST_REMOVE(referenced_list, - foreign->referenced_table->referenced_list, - foreign); + if (foreign->referenced_table != NULL) { + foreign->referenced_table->referenced_set.erase(foreign); } - if (foreign->foreign_table) { - UT_LIST_REMOVE(foreign_list, - foreign->foreign_table->foreign_list, - foreign); + if (foreign->foreign_table != NULL) { + foreign->foreign_table->foreign_set.erase(foreign); } dict_foreign_free(foreign); @@ -3162,33 +3262,21 @@ static dict_foreign_t* dict_foreign_find( /*==============*/ - dict_table_t* table, /*!< in: table object */ - const char* id) /*!< in: foreign constraint id */ + dict_table_t* table, /*!< in: table object */ + dict_foreign_t* foreign) /*!< in: foreign constraint */ { - dict_foreign_t* foreign; - ut_ad(mutex_own(&(dict_sys->mutex))); - foreign = UT_LIST_GET_FIRST(table->foreign_list); + dict_foreign_set::iterator it = table->foreign_set.find(foreign); - while (foreign) { - if (ut_strcmp(id, foreign->id) == 0) { - - return(foreign); - } - - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); + if (it != table->foreign_set.end()) { + return(*it); } - foreign = UT_LIST_GET_FIRST(table->referenced_list); - - while (foreign) { - if (ut_strcmp(id, foreign->id) == 0) { - - return(foreign); - } + it = table->referenced_set.find(foreign); - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); + if (it != table->referenced_set.end()) { + return(*it); } return(NULL); @@ -3348,11 +3436,11 @@ dict_foreign_add_to_cache( ut_a(for_table || ref_table); if (for_table) { - for_in_cache = dict_foreign_find(for_table, foreign->id); + for_in_cache = dict_foreign_find(for_table, 
foreign); } if (!for_in_cache && ref_table) { - for_in_cache = dict_foreign_find(ref_table, foreign->id); + for_in_cache = dict_foreign_find(ref_table, foreign); } if (for_in_cache) { @@ -3389,9 +3477,12 @@ dict_foreign_add_to_cache( for_in_cache->referenced_table = ref_table; for_in_cache->referenced_index = index; - UT_LIST_ADD_LAST(referenced_list, - ref_table->referenced_list, - for_in_cache); + + std::pair<dict_foreign_set::iterator, bool> ret + = ref_table->referenced_set.insert(for_in_cache); + + ut_a(ret.second); /* second is true if the insertion + took place */ added_to_referenced_list = TRUE; } @@ -3420,10 +3511,13 @@ dict_foreign_add_to_cache( if (for_in_cache == foreign) { if (added_to_referenced_list) { - UT_LIST_REMOVE( - referenced_list, - ref_table->referenced_list, - for_in_cache); + const dict_foreign_set::size_type n + = ref_table->referenced_set + .erase(for_in_cache); + + ut_a(n == 1); /* the number of + elements removed must + be one */ } mem_heap_free(foreign->heap); @@ -3434,9 +3528,11 @@ dict_foreign_add_to_cache( for_in_cache->foreign_table = for_table; for_in_cache->foreign_index = index; - UT_LIST_ADD_LAST(foreign_list, - for_table->foreign_list, - for_in_cache); + std::pair<dict_foreign_set::iterator, bool> ret + = for_table->foreign_set.insert(for_in_cache); + + ut_a(ret.second); /* second is true if the insertion + took place */ } /* We need to move the table to the non-LRU end of the table LRU @@ -4014,9 +4110,12 @@ dict_table_get_highest_foreign_id( ut_a(table); len = ut_strlen(table->name); - foreign = UT_LIST_GET_FIRST(table->foreign_list); - while (foreign) { + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + foreign = *it; + if (ut_strlen(foreign->id) > ((sizeof dict_ibfk) - 1) + len && 0 == ut_memcmp(foreign->id, table->name, len) && 0 == ut_memcmp(foreign->id + len, @@ -4035,8 +4134,6 @@ dict_table_get_highest_foreign_id( } } } - - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); } return(biggest_id); @@ -4097,6 +4194,7 @@ dict_create_foreign_constraints_low( dict_table_t* referenced_table; dict_table_t* table_to_alter; ulint highest_id_so_far = 0; + ulint number = 1; dict_index_t* index; dict_foreign_t* foreign; const char* ptr = sql_string; @@ -4115,6 +4213,8 @@ dict_create_foreign_constraints_low( const dict_col_t*columns[500]; const char* column_names[500]; const char* referenced_table_name; + dict_foreign_set local_fk_set; + dict_foreign_set_free local_fk_set_free(local_fk_set); ut_ad(!srv_read_only_mode); ut_ad(mutex_own(&(dict_sys->mutex))); @@ -4179,6 +4279,7 @@ dict_create_foreign_constraints_low( table_to_alter); } + number = highest_id_so_far + 1; /* Scan for foreign key declarations in a loop */ loop: /* Scan either to "CONSTRAINT" or "FOREIGN", whichever is closer */ @@ -4223,7 +4324,7 @@ loop: command, determine if there are any foreign keys, and if so, immediately reject the command if the table is a temporary one. For now, this kludge will work. 
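The hunks above replace the UT_LIST of foreign key constraints with a dict_foreign_set on both the owning and the referenced table, so lookup becomes set::find() and duplicate detection falls out of the bool returned by set::insert(). The comparator of dict_foreign_set is defined elsewhere; assuming it orders constraints by their id string, a minimal sketch of the same pattern with hypothetical types:

#include <cstdio>
#include <cstring>
#include <set>

struct foreign_t { const char* id; };

/* order foreign keys by constraint name, as dict_foreign_set is assumed to do */
struct foreign_cmp {
    bool operator()(const foreign_t* a, const foreign_t* b) const {
        return std::strcmp(a->id, b->id) < 0;
    }
};

typedef std::set<foreign_t*, foreign_cmp> foreign_set;

int main()
{
    foreign_t fk1 = { "t1_ibfk_1" };
    foreign_t fk2 = { "t1_ibfk_2" };
    foreign_t dup = { "t1_ibfk_1" };   /* same id as fk1 */

    foreign_set s;
    s.insert(&fk1);
    s.insert(&fk2);

    /* ret.second is false when a constraint with the same id already exists,
       which is how duplicate names are detected above */
    std::pair<foreign_set::iterator, bool> ret = s.insert(&dup);
    std::printf("duplicate inserted: %s\n", ret.second ? "yes" : "no");

    /* lookup via a probe object, like dict_foreign_find() above */
    foreign_t probe = { "t1_ibfk_2" };
    std::printf("found: %s\n", s.find(&probe) != s.end() ? "yes" : "no");
    return 0;
}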
*/ - if (reject_fks && (UT_LIST_GET_LEN(table->foreign_list) > 0)) { + if (reject_fks && !local_fk_set.empty()) { return(DB_CANNOT_ADD_CONSTRAINT); } @@ -4233,7 +4334,17 @@ loop: to the data dictionary system tables on disk */ error = dict_create_add_foreigns_to_dictionary( - highest_id_so_far, table, trx); + local_fk_set, table, trx); + + if (error == DB_SUCCESS) { + + table->foreign_set.insert(local_fk_set.begin(), + local_fk_set.end()); + std::for_each(local_fk_set.begin(), + local_fk_set.end(), + dict_foreign_add_to_referenced_table()); + local_fk_set.clear(); + } return(error); } @@ -4392,6 +4503,24 @@ col_loop1: strcpy(foreign->id + db_len + 1, constraint_name); } + if (foreign->id == NULL) { + error = dict_create_add_foreign_id(&number, + table->name, foreign); + if (error != DB_SUCCESS) { + dict_foreign_free(foreign); + return(error); + } + } + + std::pair<dict_foreign_set::iterator, bool> ret + = local_fk_set.insert(foreign); + + if (!ret.second) { + /* A duplicate foreign key name has been found */ + dict_foreign_free(foreign); + return(DB_CANNOT_ADD_CONSTRAINT); + } + foreign->foreign_table = table; foreign->foreign_table_name = mem_heap_strdup( foreign->heap, table->name); @@ -4417,8 +4546,6 @@ col_loop1: checking of foreign key constraints! */ if (!success || (!referenced_table && trx->check_foreigns)) { - dict_foreign_free(foreign); - mutex_enter(&dict_foreign_err_mutex); dict_foreign_error_report_low(ef, name); fprintf(ef, "%s:\nCannot resolve table name close to:\n" @@ -4432,7 +4559,6 @@ col_loop1: ptr = dict_accept(cs, ptr, "(", &success); if (!success) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err(name, start_of_latest_foreign, ptr); return(DB_CANNOT_ADD_CONSTRAINT); @@ -4447,7 +4573,6 @@ col_loop2: i++; if (!success) { - dict_foreign_free(foreign); mutex_enter(&dict_foreign_err_mutex); dict_foreign_error_report_low(ef, name); @@ -4468,7 +4593,6 @@ col_loop2: ptr = dict_accept(cs, ptr, ")", &success); if (!success || foreign->n_fields != i) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err(name, start_of_latest_foreign, ptr); @@ -4494,7 +4618,6 @@ scan_on_conditions: ptr = dict_accept(cs, ptr, "UPDATE", &success); if (!success) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err( name, start_of_latest_foreign, ptr); @@ -4532,7 +4655,6 @@ scan_on_conditions: ptr = dict_accept(cs, ptr, "ACTION", &success); if (!success) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err( name, start_of_latest_foreign, ptr); @@ -4551,7 +4673,6 @@ scan_on_conditions: ptr = dict_accept(cs, ptr, "SET", &success); if (!success) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err(name, start_of_latest_foreign, ptr); return(DB_CANNOT_ADD_CONSTRAINT); @@ -4560,7 +4681,6 @@ scan_on_conditions: ptr = dict_accept(cs, ptr, "NULL", &success); if (!success) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err(name, start_of_latest_foreign, ptr); return(DB_CANNOT_ADD_CONSTRAINT); @@ -4573,8 +4693,6 @@ scan_on_conditions: /* It is not sensible to define SET NULL if the column is not allowed to be NULL! 
*/ - dict_foreign_free(foreign); - mutex_enter(&dict_foreign_err_mutex); dict_foreign_error_report_low(ef, name); fprintf(ef, "%s:\n" @@ -4600,8 +4718,6 @@ try_find_index: if (n_on_deletes > 1 || n_on_updates > 1) { /* It is an error to define more than 1 action */ - dict_foreign_free(foreign); - mutex_enter(&dict_foreign_err_mutex); dict_foreign_error_report_low(ef, name); fprintf(ef, "%s:\n" @@ -4623,7 +4739,6 @@ try_find_index: foreign->foreign_index, TRUE, FALSE); if (!index) { - dict_foreign_free(foreign); mutex_enter(&dict_foreign_err_mutex); dict_foreign_error_report_low(ef, name); fprintf(ef, "%s:\n" @@ -4667,16 +4782,6 @@ try_find_index: = mem_heap_strdup(foreign->heap, column_names[i]); } - /* We found an ok constraint definition: add to the lists */ - - UT_LIST_ADD_LAST(foreign_list, table->foreign_list, foreign); - - if (referenced_table) { - UT_LIST_ADD_LAST(referenced_list, - referenced_table->referenced_list, - foreign); - } - goto loop; } /************************************************************************** @@ -4762,7 +4867,6 @@ dict_foreign_parse_drop_constraints( const char*** constraints_to_drop) /*!< out: id's of the constraints to drop */ { - dict_foreign_t* foreign; ibool success; char* str; size_t len; @@ -4839,25 +4943,10 @@ loop: (*constraints_to_drop)[*n] = id; (*n)++; - /* Look for the given constraint id */ - - foreign = UT_LIST_GET_FIRST(table->foreign_list); - - while (foreign != NULL) { - if (0 == innobase_strcasecmp(foreign->id, id) - || (strchr(foreign->id, '/') - && 0 == innobase_strcasecmp( - id, - dict_remove_db_name(foreign->id)))) { - /* Found */ - break; - } - - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); - } - - - if (foreign == NULL) { + if (std::find_if(table->foreign_set.begin(), + table->foreign_set.end(), + dict_foreign_matches_id(id)) + == table->foreign_set.end()) { if (!srv_read_only_mode) { FILE* ef = dict_foreign_err_file; @@ -5184,7 +5273,6 @@ dict_table_print( dict_table_t* table) /*!< in: table */ { dict_index_t* index; - dict_foreign_t* foreign; ulint i; ut_ad(mutex_own(&(dict_sys->mutex))); @@ -5221,23 +5309,15 @@ dict_table_print( index = UT_LIST_GET_NEXT(indexes, index); } - table->stat_initialized = FALSE; - dict_table_stats_unlock(table, RW_X_LATCH); - foreign = UT_LIST_GET_FIRST(table->foreign_list); - - while (foreign != NULL) { - dict_foreign_print_low(foreign); - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); - } + std::for_each(table->foreign_set.begin(), + table->foreign_set.end(), + dict_foreign_print_low); - foreign = UT_LIST_GET_FIRST(table->referenced_list); - - while (foreign != NULL) { - dict_foreign_print_low(foreign); - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); - } + std::for_each(table->referenced_set.begin(), + table->referenced_set.end(), + dict_foreign_print_low); } /**********************************************************************//** @@ -5445,15 +5525,12 @@ dict_print_info_on_foreign_keys( mutex_enter(&(dict_sys->mutex)); - foreign = UT_LIST_GET_FIRST(table->foreign_list); - - if (foreign == NULL) { - mutex_exit(&(dict_sys->mutex)); + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { - return; - } + foreign = *it; - while (foreign != NULL) { if (create_table_format) { dict_print_info_on_foreign_key_in_create_format( file, trx, foreign, TRUE); @@ -5510,8 +5587,6 @@ dict_print_info_on_foreign_keys( fputs(" ON UPDATE NO ACTION", file); } } - - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); } 
mutex_exit(&(dict_sys->mutex)); @@ -5843,10 +5918,11 @@ dict_foreign_replace_index( ut_ad(index->to_be_dropped); ut_ad(index->table == table); - for (foreign = UT_LIST_GET_FIRST(table->foreign_list); - foreign; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + foreign = *it; if (foreign->foreign_index == index) { ut_ad(foreign->foreign_table == index->table); @@ -5866,10 +5942,11 @@ dict_foreign_replace_index( } } - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + foreign = *it; if (foreign->referenced_index == index) { ut_ad(foreign->referenced_table == index->table); @@ -6025,14 +6102,34 @@ dict_table_schema_check( table = dict_table_get_low(req_schema->table_name); if (table == NULL) { + bool should_print=true; /* no such table */ - ut_snprintf(errstr, errstr_sz, - "Table %s not found.", - ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf))); + if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_table_stats") == 0) { + if (innodb_table_stats_not_found_reported == false) { + innodb_table_stats_not_found = true; + innodb_table_stats_not_found_reported = true; + } else { + should_print = false; + } + } else if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_index_stats") == 0 ) { + if (innodb_index_stats_not_found_reported == false) { + innodb_index_stats_not_found = true; + innodb_index_stats_not_found_reported = true; + } else { + should_print = false; + } + } - return(DB_TABLE_NOT_FOUND); + if (should_print) { + ut_snprintf(errstr, errstr_sz, + "Table %s not found.", + ut_format_name(req_schema->table_name, + TRUE, buf, sizeof(buf))); + return(DB_TABLE_NOT_FOUND); + } else { + return(DB_STATS_DO_NOT_EXIST); + } } if (table->ibd_file_missing) { @@ -6169,24 +6266,24 @@ dict_table_schema_check( } } - if (req_schema->n_foreign != UT_LIST_GET_LEN(table->foreign_list)) { + if (req_schema->n_foreign != table->foreign_set.size()) { ut_snprintf( errstr, errstr_sz, - "Table %s has %lu foreign key(s) pointing to other " - "tables, but it must have %lu.", + "Table %s has " ULINTPF " foreign key(s) pointing" + " to other tables, but it must have %lu.", ut_format_name(req_schema->table_name, TRUE, buf, sizeof(buf)), - UT_LIST_GET_LEN(table->foreign_list), + static_cast<ulint>(table->foreign_set.size()), req_schema->n_foreign); return(DB_ERROR); } - if (req_schema->n_referenced != UT_LIST_GET_LEN(table->referenced_list)) { + if (req_schema->n_referenced != table->referenced_set.size()) { ut_snprintf( errstr, errstr_sz, - "There are %lu foreign key(s) pointing to %s, " + "There are " ULINTPF " foreign key(s) pointing to %s, " "but there must be %lu.", - UT_LIST_GET_LEN(table->referenced_list), + static_cast<ulint>(table->referenced_set.size()), ut_format_name(req_schema->table_name, TRUE, buf, sizeof(buf)), req_schema->n_referenced); diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc index 60daeea3a96..885627a61bc 100644 --- a/storage/innobase/dict/dict0mem.cc +++ b/storage/innobase/dict/dict0mem.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. 
All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -95,9 +95,9 @@ dict_mem_table_create( ut_d(table->magic_n = DICT_TABLE_MAGIC_N); - table->stats_latch = new rw_lock_t; - rw_lock_create(dict_table_stats_latch_key, table->stats_latch, - SYNC_INDEX_TREE); + /* true means that the stats latch will be enabled - + dict_table_stats_lock() will not be noop. */ + dict_table_stats_latch_create(table, true); #ifndef UNIV_HOTBACKUP table->autoinc_lock = static_cast<ib_lock_t*>( @@ -124,6 +124,9 @@ dict_mem_table_create( } #endif /* !UNIV_HOTBACKUP */ + new(&table->foreign_set) dict_foreign_set(); + new(&table->referenced_set) dict_foreign_set(); + return(table); } @@ -154,8 +157,10 @@ dict_mem_table_free( mutex_free(&(table->autoinc_mutex)); #endif /* UNIV_HOTBACKUP */ - rw_lock_free(table->stats_latch); - delete table->stats_latch; + dict_table_stats_latch_destroy(table); + + table->foreign_set.~dict_foreign_set(); + table->referenced_set.~dict_foreign_set(); ut_free(table->name); mem_heap_free(table->heap); @@ -327,10 +332,15 @@ dict_mem_table_col_rename_low( table->col_names = col_names; } + dict_foreign_t* foreign; + /* Replace the field names in every foreign key constraint. */ - for (dict_foreign_t* foreign = UT_LIST_GET_FIRST(table->foreign_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + + foreign = *it; + for (unsigned f = 0; f < foreign->n_fields; f++) { /* These can point straight to table->col_names, because the foreign key @@ -342,10 +352,12 @@ dict_mem_table_col_rename_low( } } - for (dict_foreign_t* foreign = UT_LIST_GET_FIRST( - table->referenced_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + + foreign = *it; + for (unsigned f = 0; f < foreign->n_fields; f++) { /* foreign->referenced_col_names[] need to be copies, because the constraint may become diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index bec0079942b..e0a2880e214 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -46,6 +46,7 @@ Created Jan 06, 2010 Vasil Dimov #include "ut0rnd.h" /* ut_rnd_interval() */ #include "ut0ut.h" /* ut_format_name(), ut_time() */ +#include <algorithm> #include <map> #include <vector> @@ -127,10 +128,11 @@ where n=1..n_uniq. #endif /* UNIV_STATS_DEBUG */ /* Gets the number of leaf pages to sample in persistent stats estimation */ -#define N_SAMPLE_PAGES(index) \ - ((index)->table->stats_sample_pages != 0 ? \ - (index)->table->stats_sample_pages : \ - srv_stats_persistent_sample_pages) +#define N_SAMPLE_PAGES(index) \ + static_cast<ib_uint64_t>( \ + (index)->table->stats_sample_pages != 0 \ + ? 
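dict_mem_table_create() above allocates dict_table_t from a mem_heap, so the C++ members that now live inside it (the two dict_foreign_set containers) never have their constructors run automatically; the code therefore constructs them with placement new and tears them down with explicit destructor calls in dict_mem_table_free() before the heap is released. A self-contained sketch of that idiom, using generic names rather than the InnoDB types:

#include <cstdlib>
#include <cstdio>
#include <new>
#include <set>

typedef std::set<int> id_set;

struct table_t {
    int    some_plain_field;
    id_set ids;                      /* non-trivial C++ member */
};

int main()
{
    /* raw allocation, as a mem_heap would hand out: no constructors run */
    void*    raw = std::malloc(sizeof(table_t));
    table_t* t   = static_cast<table_t*>(raw);

    t->some_plain_field = 42;
    new (&t->ids) id_set();          /* placement new: construct the member in place */

    t->ids.insert(1);
    t->ids.insert(2);
    std::printf("field=%d ids=%u\n", t->some_plain_field, (unsigned) t->ids.size());

    t->ids.~id_set();                /* explicit destructor before freeing raw memory */
    std::free(raw);
    return 0;
}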
(index)->table->stats_sample_pages \ + : srv_stats_persistent_sample_pages) /* number of distinct records on a given level that are required to stop descending to lower levels and fetch N_SAMPLE_PAGES(index) records @@ -268,10 +270,12 @@ dict_stats_persistent_storage_check( mutex_exit(&(dict_sys->mutex)); } - if (ret != DB_SUCCESS) { + if (ret != DB_SUCCESS && ret != DB_STATS_DO_NOT_EXIST) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: %s\n", errstr); return(false); + } else if (ret == DB_STATS_DO_NOT_EXIST) { + return false; } /* else */ @@ -430,9 +434,9 @@ dict_stats_table_clone_create( t->corrupted = table->corrupted; /* This private object "t" is not shared with other threads, so - we do not need the stats_latch. The lock/unlock routines will do - nothing if stats_latch is NULL. */ - t->stats_latch = NULL; + we do not need the stats_latch (thus we pass false below). The + dict_table_stats_lock()/unlock() routines will do nothing. */ + dict_table_stats_latch_create(t, false); UT_LIST_INIT(t->indexes); @@ -511,6 +515,7 @@ dict_stats_table_clone_free( /*========================*/ dict_table_t* t) /*!< in: dummy table object to free */ { + dict_table_stats_latch_destroy(t); mem_heap_free(t->heap); } @@ -1330,35 +1335,40 @@ enum page_scan_method_t { }; /* @} */ -/*********************************************************************//** -Scan a page, reading records from left to right and counting the number -of distinct records on that page (looking only at the first n_prefix -columns). If scan_method is QUIT_ON_FIRST_NON_BORING then the function +/** Scan a page, reading records from left to right and counting the number +of distinct records (looking only at the first n_prefix +columns) and the number of external pages pointed by records from this page. +If scan_method is QUIT_ON_FIRST_NON_BORING then the function will return as soon as it finds a record that does not match its neighbor to the right, which means that in the case of QUIT_ON_FIRST_NON_BORING the returned n_diff can either be 0 (empty page), 1 (the whole page has all keys equal) or 2 (the function found a non-boring record and returned). +@param[out] out_rec record, or NULL +@param[out] offsets1 rec_get_offsets() working space (must +be big enough) +@param[out] offsets2 rec_get_offsets() working space (must +be big enough) +@param[in] index index of the page +@param[in] page the page to scan +@param[in] n_prefix look at the first n_prefix columns +@param[in] scan_method scan to the end of the page or not +@param[out] n_diff number of distinct records encountered +@param[out] n_external_pages if this is non-NULL then it will be set +to the number of externally stored pages which were encountered @return offsets1 or offsets2 (the offsets of *out_rec), or NULL if the page is empty and does not contain user records. 
*/ -UNIV_INLINE __attribute__((nonnull)) +UNIV_INLINE ulint* dict_stats_scan_page( -/*=================*/ - const rec_t** out_rec, /*!< out: record, or NULL */ - ulint* offsets1, /*!< out: rec_get_offsets() - working space (must be big - enough) */ - ulint* offsets2, /*!< out: rec_get_offsets() - working space (must be big - enough) */ - dict_index_t* index, /*!< in: index of the page */ - const page_t* page, /*!< in: the page to scan */ - ulint n_prefix, /*!< in: look at the first - n_prefix columns */ - page_scan_method_t scan_method, /*!< in: scan to the end of - the page or not */ - ib_uint64_t* n_diff) /*!< out: number of distinct - records encountered */ + const rec_t** out_rec, + ulint* offsets1, + ulint* offsets2, + dict_index_t* index, + const page_t* page, + ulint n_prefix, + page_scan_method_t scan_method, + ib_uint64_t* n_diff, + ib_uint64_t* n_external_pages) { ulint* offsets_rec = offsets1; ulint* offsets_next_rec = offsets2; @@ -1376,6 +1386,12 @@ dict_stats_scan_page( get_next = page_rec_get_next_const; } + const bool should_count_external_pages = n_external_pages != NULL; + + if (should_count_external_pages) { + *n_external_pages = 0; + } + rec = get_next(page_get_infimum_rec(page)); if (page_rec_is_supremum(rec)) { @@ -1388,6 +1404,11 @@ dict_stats_scan_page( offsets_rec = rec_get_offsets(rec, index, offsets_rec, ULINT_UNDEFINED, &heap); + if (should_count_external_pages) { + *n_external_pages += btr_rec_get_externally_stored_len( + rec, offsets_rec); + } + next_rec = get_next(rec); *n_diff = 1; @@ -1438,6 +1459,11 @@ dict_stats_scan_page( offsets_next_rec = offsets_tmp; } + if (should_count_external_pages) { + *n_external_pages += btr_rec_get_externally_stored_len( + rec, offsets_rec); + } + next_rec = get_next(next_rec); } @@ -1448,19 +1474,25 @@ func_exit: return(offsets_rec); } -/*********************************************************************//** -Dive below the current position of a cursor and calculate the number of +/** Dive below the current position of a cursor and calculate the number of distinct records on the leaf page, when looking at the fist n_prefix -columns. +columns. Also calculate the number of external pages pointed by records +on the leaf page. 
+@param[in] cur cursor +@param[in] n_prefix look at the first n_prefix columns +when comparing records +@param[out] n_diff number of distinct records +@param[out] n_external_pages number of external pages +@param[in,out] mtr mini-transaction @return number of distinct records on the leaf page */ static -ib_uint64_t +void dict_stats_analyze_index_below_cur( -/*===============================*/ - const btr_cur_t*cur, /*!< in: cursor */ - ulint n_prefix, /*!< in: look at the first n_prefix - columns when comparing records */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + const btr_cur_t* cur, + ulint n_prefix, + ib_uint64_t* n_diff, + ib_uint64_t* n_external_pages, + mtr_t* mtr) { dict_index_t* index; ulint space; @@ -1473,7 +1505,6 @@ dict_stats_analyze_index_below_cur( ulint* offsets1; ulint* offsets2; ulint* offsets_rec; - ib_uint64_t n_diff; /* the result */ ulint size; index = btr_cur_get_index(cur); @@ -1509,6 +1540,10 @@ dict_stats_analyze_index_below_cur( page_no = btr_node_ptr_get_child_page_no(rec, offsets_rec); + /* assume no external pages by default - in case we quit from this + function without analyzing any leaf pages */ + *n_external_pages = 0; + /* descend to the leaf level on the B-tree */ for (;;) { @@ -1527,20 +1562,24 @@ dict_stats_analyze_index_below_cur( /* search for the first non-boring record on the page */ offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - QUIT_ON_FIRST_NON_BORING, &n_diff); + QUIT_ON_FIRST_NON_BORING, n_diff, NULL); /* pages on level > 0 are not allowed to be empty */ ut_a(offsets_rec != NULL); /* if page is not empty (offsets_rec != NULL) then n_diff must be > 0, otherwise there is a bug in dict_stats_scan_page() */ - ut_a(n_diff > 0); + ut_a(*n_diff > 0); - if (n_diff == 1) { + if (*n_diff == 1) { /* page has all keys equal and the end of the page was reached by dict_stats_scan_page(), no need to descend to the leaf level */ mem_heap_free(heap); - return(1); + /* can't get an estimate for n_external_pages here + because we do not dive to the leaf level, assume no + external pages (*n_external_pages was assigned to 0 + above). 
*/ + return; } /* else */ @@ -1548,7 +1587,7 @@ dict_stats_analyze_index_below_cur( first non-boring record it finds, then the returned n_diff can either be 0 (empty page), 1 (page has all keys equal) or 2 (non-boring record was found) */ - ut_a(n_diff == 2); + ut_a(*n_diff == 2); /* we have a non-boring record in rec, descend below it */ @@ -1559,11 +1598,14 @@ dict_stats_analyze_index_below_cur( ut_ad(btr_page_get_level(page, mtr) == 0); /* scan the leaf page and find the number of distinct keys, - when looking only at the first n_prefix columns */ + when looking only at the first n_prefix columns; also estimate + the number of externally stored pages pointed by records on this + page */ offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, &n_diff); + COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, n_diff, + n_external_pages); #if 0 DEBUG_PRINTF(" %s(): n_diff below page_no=%lu: " UINT64PF "\n", @@ -1571,133 +1613,146 @@ dict_stats_analyze_index_below_cur( #endif mem_heap_free(heap); - - return(n_diff); } -/*********************************************************************//** -For a given level in an index select N_SAMPLE_PAGES(index) -(or less) records from that level and dive below them to the corresponding -leaf pages, then scan those leaf pages and save the sampling results in -index->stat_n_diff_key_vals[n_prefix - 1] and the number of pages scanned in -index->stat_n_sample_sizes[n_prefix - 1]. */ +/** Input data that is used to calculate dict_index_t::stat_n_diff_key_vals[] +for each n-columns prefix (n from 1 to n_uniq). */ +struct n_diff_data_t { + /** Index of the level on which the descent through the btree + stopped. level 0 is the leaf level. This is >= 1 because we + avoid scanning the leaf level because it may contain too many + pages and doing so is useless when combined with the random dives - + if we are to scan the leaf level, this means a full scan and we can + simply do that instead of fiddling with picking random records higher + in the tree and to dive below them. At the start of the analyzing + we may decide to do full scan of the leaf level, but then this + structure is not used in that code path. */ + ulint level; + + /** Number of records on the level where the descend through the btree + stopped. When we scan the btree from the root, we stop at some mid + level, choose some records from it and dive below them towards a leaf + page to analyze. */ + ib_uint64_t n_recs_on_level; + + /** Number of different key values that were found on the mid level. */ + ib_uint64_t n_diff_on_level; + + /** Number of leaf pages that are analyzed. This is also the same as + the number of records that we pick from the mid level and dive below + them. */ + ib_uint64_t n_leaf_pages_to_analyze; + + /** Cumulative sum of the number of different key values that were + found on all analyzed pages. */ + ib_uint64_t n_diff_all_analyzed_pages; + + /** Cumulative sum of the number of external pages (stored outside of + the btree but in the same file segment). */ + ib_uint64_t n_external_pages_sum; +}; + +/** Estimate the number of different key values in an index when looking at +the first n_prefix columns. For a given level in an index select +n_diff_data->n_leaf_pages_to_analyze records from that level and dive below +them to the corresponding leaf pages, then scan those leaf pages and save the +sampling results in n_diff_data->n_diff_all_analyzed_pages. 
+@param[in] index index +@param[in] n_prefix look at first 'n_prefix' columns when +comparing records +@param[in] boundaries a vector that contains +n_diff_data->n_diff_on_level integers each of which represents the index (on +level 'level', counting from left/smallest to right/biggest from 0) of the +last record from each group of distinct keys +@param[in,out] n_diff_data n_diff_all_analyzed_pages and +n_external_pages_sum in this structure will be set by this function. The +members level, n_diff_on_level and n_leaf_pages_to_analyze must be set by the +caller in advance - they are used by some calculations inside this function +@param[in,out] mtr mini-transaction */ static void dict_stats_analyze_index_for_n_prefix( -/*==================================*/ - dict_index_t* index, /*!< in/out: index */ - ulint level, /*!< in: level, must be >= 1 */ - ib_uint64_t total_recs_on_level, - /*!< in: total number of - records on the given level */ - ulint n_prefix, /*!< in: look at first - n_prefix columns when - comparing records */ - ib_uint64_t n_diff_for_this_prefix, - /*!< in: number of distinct - records on the given level, - when looking at the first - n_prefix columns */ - boundaries_t* boundaries, /*!< in: array that contains - n_diff_for_this_prefix - integers each of which - represents the index (on the - level, counting from - left/smallest to right/biggest - from 0) of the last record - from each group of distinct - keys */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + dict_index_t* index, + ulint n_prefix, + const boundaries_t* boundaries, + n_diff_data_t* n_diff_data, + mtr_t* mtr) { btr_pcur_t pcur; const page_t* page; ib_uint64_t rec_idx; - ib_uint64_t last_idx_on_level; - ib_uint64_t n_recs_to_dive_below; - ib_uint64_t n_diff_sum_of_all_analyzed_pages; ib_uint64_t i; #if 0 DEBUG_PRINTF(" %s(table=%s, index=%s, level=%lu, n_prefix=%lu, " - "n_diff_for_this_prefix=" UINT64PF ")\n", + "n_diff_on_level=" UINT64PF ")\n", __func__, index->table->name, index->name, level, - n_prefix, n_diff_for_this_prefix); + n_prefix, n_diff_data->n_diff_on_level); #endif ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), MTR_MEMO_S_LOCK)); - /* if some of those is 0 then this means that there is exactly one - page in the B-tree and it is empty and we should have done full scan - and should not be here */ - ut_ad(total_recs_on_level > 0); - ut_ad(n_diff_for_this_prefix > 0); - - /* this must be at least 1 */ - ut_ad(N_SAMPLE_PAGES(index) > 0); - /* Position pcur on the leftmost record on the leftmost page on the desired level. */ btr_pcur_open_at_index_side( true, index, BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED, - &pcur, true, level, mtr); + &pcur, true, n_diff_data->level, mtr); btr_pcur_move_to_next_on_page(&pcur); page = btr_pcur_get_page(&pcur); + const rec_t* first_rec = btr_pcur_get_rec(&pcur); + + /* We shouldn't be scanning the leaf level. The caller of this function + should have stopped the descend on level 1 or higher. */ + ut_ad(n_diff_data->level > 0); + ut_ad(!page_is_leaf(page)); + /* The page must not be empty, except when it is the root page (and the whole index is empty). 
*/ - ut_ad(btr_pcur_is_on_user_rec(&pcur) || page_is_leaf(page)); - ut_ad(btr_pcur_get_rec(&pcur) - == page_rec_get_next_const(page_get_infimum_rec(page))); + ut_ad(btr_pcur_is_on_user_rec(&pcur)); + ut_ad(first_rec == page_rec_get_next_const(page_get_infimum_rec(page))); /* check that we are indeed on the desired level */ - ut_a(btr_page_get_level(page, mtr) == level); + ut_a(btr_page_get_level(page, mtr) == n_diff_data->level); /* there should not be any pages on the left */ ut_a(btr_page_get_prev(page, mtr) == FIL_NULL); /* check whether the first record on the leftmost page is marked - as such, if we are on a non-leaf level */ - ut_a((level == 0) - == !(REC_INFO_MIN_REC_FLAG & rec_get_info_bits( - btr_pcur_get_rec(&pcur), page_is_comp(page)))); + as such; we are on a non-leaf level */ + ut_a(rec_get_info_bits(first_rec, page_is_comp(page)) + & REC_INFO_MIN_REC_FLAG); - last_idx_on_level = boundaries->at( - static_cast<unsigned int>(n_diff_for_this_prefix - 1)); + const ib_uint64_t last_idx_on_level = boundaries->at( + static_cast<unsigned>(n_diff_data->n_diff_on_level - 1)); rec_idx = 0; - n_diff_sum_of_all_analyzed_pages = 0; - - n_recs_to_dive_below = ut_min(N_SAMPLE_PAGES(index), - n_diff_for_this_prefix); - - for (i = 0; i < n_recs_to_dive_below; i++) { - ib_uint64_t left; - ib_uint64_t right; - ib_uint64_t rnd; - ib_uint64_t dive_below_idx; + n_diff_data->n_diff_all_analyzed_pages = 0; + n_diff_data->n_external_pages_sum = 0; - /* there are n_diff_for_this_prefix elements + for (i = 0; i < n_diff_data->n_leaf_pages_to_analyze; i++) { + /* there are n_diff_on_level elements in 'boundaries' and we divide those elements - into n_recs_to_dive_below segments, for example: + into n_leaf_pages_to_analyze segments, for example: - let n_diff_for_this_prefix=100, n_recs_to_dive_below=4, then: + let n_diff_on_level=100, n_leaf_pages_to_analyze=4, then: segment i=0: [0, 24] segment i=1: [25, 49] segment i=2: [50, 74] segment i=3: [75, 99] or - let n_diff_for_this_prefix=1, n_recs_to_dive_below=1, then: + let n_diff_on_level=1, n_leaf_pages_to_analyze=1, then: segment i=0: [0, 0] or - let n_diff_for_this_prefix=2, n_recs_to_dive_below=2, then: + let n_diff_on_level=2, n_leaf_pages_to_analyze=2, then: segment i=0: [0, 0] segment i=1: [1, 1] or - let n_diff_for_this_prefix=13, n_recs_to_dive_below=7, then: + let n_diff_on_level=13, n_leaf_pages_to_analyze=7, then: segment i=0: [0, 0] segment i=1: [1, 2] segment i=2: [3, 4] @@ -1708,9 +1763,12 @@ dict_stats_analyze_index_for_n_prefix( then we select a random record from each segment and dive below it */ - left = n_diff_for_this_prefix * i / n_recs_to_dive_below; - right = n_diff_for_this_prefix * (i + 1) - / n_recs_to_dive_below - 1; + const ib_uint64_t n_diff = n_diff_data->n_diff_on_level; + const ib_uint64_t n_pick + = n_diff_data->n_leaf_pages_to_analyze; + + const ib_uint64_t left = n_diff * i / n_pick; + const ib_uint64_t right = n_diff * (i + 1) / n_pick - 1; ut_a(left <= right); ut_a(right <= last_idx_on_level); @@ -1718,11 +1776,11 @@ dict_stats_analyze_index_for_n_prefix( /* we do not pass (left, right) because we do not want to ask ut_rnd_interval() to work with too big numbers since ib_uint64_t could be bigger than ulint */ - rnd = static_cast<ib_uint64_t>( - ut_rnd_interval(0, static_cast<ulint>(right - left))); + const ulint rnd = ut_rnd_interval( + 0, static_cast<ulint>(right - left)); - dive_below_idx = boundaries->at( - static_cast<unsigned int>(left + rnd)); + const ib_uint64_t dive_below_idx + = 
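The sampling loop above splits the n_diff_on_level group boundaries into n_leaf_pages_to_analyze equal segments with left = n_diff * i / n_pick and right = n_diff * (i + 1) / n_pick - 1, then picks one random boundary per segment to dive below. A small check of those formulas against the n_diff_on_level=13, n_leaf_pages_to_analyze=7 example given in the comment:

#include <cstdio>

int main()
{
    typedef unsigned long long u64;

    const u64 n_diff = 13;   /* n_diff_on_level in the comment above      */
    const u64 n_pick = 7;    /* n_leaf_pages_to_analyze                   */

    for (u64 i = 0; i < n_pick; i++) {
        const u64 left  = n_diff * i / n_pick;
        const u64 right = n_diff * (i + 1) / n_pick - 1;
        std::printf("segment i=%llu: [%llu, %llu]\n", i, left, right);
    }
    /* prints [0,0] [1,2] [3,4] [5,6] [7,8] [9,10] [11,12] */
    return 0;
}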
boundaries->at(static_cast<unsigned>(left + rnd)); #if 0 DEBUG_PRINTF(" %s(): dive below record with index=" @@ -1758,9 +1816,13 @@ dict_stats_analyze_index_for_n_prefix( ut_a(rec_idx == dive_below_idx); ib_uint64_t n_diff_on_leaf_page; + ib_uint64_t n_external_pages; - n_diff_on_leaf_page = dict_stats_analyze_index_below_cur( - btr_pcur_get_btr_cur(&pcur), n_prefix, mtr); + dict_stats_analyze_index_below_cur(btr_pcur_get_btr_cur(&pcur), + n_prefix, + &n_diff_on_leaf_page, + &n_external_pages, + mtr); /* We adjust n_diff_on_leaf_page here to avoid counting one record twice - once as the last on some page and once @@ -1780,37 +1842,86 @@ dict_stats_analyze_index_for_n_prefix( n_diff_on_leaf_page--; } - n_diff_sum_of_all_analyzed_pages += n_diff_on_leaf_page; - } - - /* n_diff_sum_of_all_analyzed_pages can be 0 here if all the leaf - pages sampled contained only delete-marked records. In this case - we should assign 0 to index->stat_n_diff_key_vals[n_prefix - 1], which - the formula below does. */ + n_diff_data->n_diff_all_analyzed_pages += n_diff_on_leaf_page; - /* See REF01 for an explanation of the algorithm */ - index->stat_n_diff_key_vals[n_prefix - 1] - = index->stat_n_leaf_pages - - * n_diff_for_this_prefix - / total_recs_on_level - - * n_diff_sum_of_all_analyzed_pages - / n_recs_to_dive_below; + n_diff_data->n_external_pages_sum += n_external_pages; + } - index->stat_n_sample_sizes[n_prefix - 1] = n_recs_to_dive_below; + btr_pcur_close(&pcur); +} - DEBUG_PRINTF(" %s(): n_diff=" UINT64PF " for n_prefix=%lu " - "(%lu" - " * " UINT64PF " / " UINT64PF - " * " UINT64PF " / " UINT64PF ")\n", - __func__, index->stat_n_diff_key_vals[n_prefix - 1], - n_prefix, - index->stat_n_leaf_pages, - n_diff_for_this_prefix, total_recs_on_level, - n_diff_sum_of_all_analyzed_pages, n_recs_to_dive_below); +/** Set dict_index_t::stat_n_diff_key_vals[] and stat_n_sample_sizes[]. +@param[in] n_diff_data input data to use to derive the results +@param[in,out] index index whose stat_n_diff_key_vals[] to set */ +UNIV_INLINE +void +dict_stats_index_set_n_diff( + const n_diff_data_t* n_diff_data, + dict_index_t* index) +{ + for (ulint n_prefix = dict_index_get_n_unique(index); + n_prefix >= 1; + n_prefix--) { + /* n_diff_all_analyzed_pages can be 0 here if + all the leaf pages sampled contained only + delete-marked records. In this case we should assign + 0 to index->stat_n_diff_key_vals[n_prefix - 1], which + the formula below does. */ + + const n_diff_data_t* data = &n_diff_data[n_prefix - 1]; + + ut_ad(data->n_leaf_pages_to_analyze > 0); + ut_ad(data->n_recs_on_level > 0); + + ulint n_ordinary_leaf_pages; + + if (data->level == 1) { + /* If we know the number of records on level 1, then + this number is the same as the number of pages on + level 0 (leaf). */ + n_ordinary_leaf_pages = data->n_recs_on_level; + } else { + /* If we analyzed D ordinary leaf pages and found E + external pages in total linked from those D ordinary + leaf pages, then this means that the ratio + ordinary/external is D/E. Then the ratio ordinary/total + is D / (D + E). Knowing that the total number of pages + is T (including ordinary and external) then we estimate + that the total number of ordinary leaf pages is + T * D / (D + E). 
*/ + n_ordinary_leaf_pages + = index->stat_n_leaf_pages + * data->n_leaf_pages_to_analyze + / (data->n_leaf_pages_to_analyze + + data->n_external_pages_sum); + } - btr_pcur_close(&pcur); + /* See REF01 for an explanation of the algorithm */ + index->stat_n_diff_key_vals[n_prefix - 1] + = n_ordinary_leaf_pages + + * data->n_diff_on_level + / data->n_recs_on_level + + * data->n_diff_all_analyzed_pages + / data->n_leaf_pages_to_analyze; + + index->stat_n_sample_sizes[n_prefix - 1] + = data->n_leaf_pages_to_analyze; + + DEBUG_PRINTF(" %s(): n_diff=" UINT64PF " for n_prefix=%lu" + " (%lu" + " * " UINT64PF " / " UINT64PF + " * " UINT64PF " / " UINT64PF ")\n", + __func__, + index->stat_n_diff_key_vals[n_prefix - 1], + n_prefix, + index->stat_n_leaf_pages, + data->n_diff_on_level, + data->n_recs_on_level, + data->n_diff_all_analyzed_pages, + data->n_leaf_pages_to_analyze); + } } /*********************************************************************//** @@ -1828,10 +1939,8 @@ dict_stats_analyze_index( bool level_is_analyzed; ulint n_uniq; ulint n_prefix; - ib_uint64_t* n_diff_on_level; ib_uint64_t total_recs; ib_uint64_t total_pages; - boundaries_t* n_diff_boundaries; mtr_t mtr; ulint size; DBUG_ENTER("dict_stats_analyze_index"); @@ -1917,11 +2026,18 @@ dict_stats_analyze_index( DBUG_VOID_RETURN; } - /* set to zero */ - n_diff_on_level = reinterpret_cast<ib_uint64_t*> - (mem_zalloc(n_uniq * sizeof(ib_uint64_t))); + /* For each level that is being scanned in the btree, this contains the + number of different key values for all possible n-column prefixes. */ + ib_uint64_t* n_diff_on_level = new ib_uint64_t[n_uniq]; - n_diff_boundaries = new boundaries_t[n_uniq]; + /* For each level that is being scanned in the btree, this contains the + index of the last record from each group of equal records (when + comparing only the first n columns, n=1..n_uniq). */ + boundaries_t* n_diff_boundaries = new boundaries_t[n_uniq]; + + /* For each n-column prefix this array contains the input data that is + used to calculate dict_index_t::stat_n_diff_key_vals[]. 
*/ + n_diff_data_t* n_diff_data = new n_diff_data_t[n_uniq]; /* total_recs is also used to estimate the number of pages on one level below, so at the start we have 1 page (the root) */ @@ -2033,12 +2149,12 @@ dict_stats_analyze_index( level_is_analyzed = true; - if (n_diff_on_level[n_prefix - 1] - >= N_DIFF_REQUIRED(index) - || level == 1) { - /* we found a good level with many distinct - records or we have reached the last level we - could scan */ + if (level == 1 + || n_diff_on_level[n_prefix - 1] + >= N_DIFF_REQUIRED(index)) { + /* we have reached the last level we could scan + or we found a good level with many distinct + records */ break; } @@ -2051,7 +2167,6 @@ found_level: " distinct records for n_prefix=%lu\n", __func__, level, n_diff_on_level[n_prefix - 1], n_prefix); - /* here we are either on level 1 or the level that we are on contains >= N_DIFF_REQUIRED distinct keys or we did not scan deeper levels because they would contain too many pages */ @@ -2060,20 +2175,47 @@ found_level: ut_ad(level_is_analyzed); + /* if any of these is 0 then there is exactly one page in the + B-tree and it is empty and we should have done full scan and + should not be here */ + ut_ad(total_recs > 0); + ut_ad(n_diff_on_level[n_prefix - 1] > 0); + + ut_ad(N_SAMPLE_PAGES(index) > 0); + + n_diff_data_t* data = &n_diff_data[n_prefix - 1]; + + data->level = level; + + data->n_recs_on_level = total_recs; + + data->n_diff_on_level = n_diff_on_level[n_prefix - 1]; + + data->n_leaf_pages_to_analyze = std::min( + N_SAMPLE_PAGES(index), + n_diff_on_level[n_prefix - 1]); + /* pick some records from this level and dive below them for the given n_prefix */ dict_stats_analyze_index_for_n_prefix( - index, level, total_recs, n_prefix, - n_diff_on_level[n_prefix - 1], - &n_diff_boundaries[n_prefix - 1], &mtr); + index, n_prefix, &n_diff_boundaries[n_prefix - 1], + data, &mtr); } mtr_commit(&mtr); delete[] n_diff_boundaries; - mem_free(n_diff_on_level); + delete[] n_diff_on_level; + + /* n_prefix == 0 means that the above loop did not end up prematurely + due to tree being changed and so n_diff_data[] is set up. 
*/ + if (n_prefix == 0) { + dict_stats_index_set_n_diff(n_diff_data, index); + } + + delete[] n_diff_data; dict_stats_assert_initialized_index(index); DBUG_VOID_RETURN; @@ -2248,17 +2390,21 @@ dict_stats_save_index_stat( "END;", trx); if (ret != DB_SUCCESS) { - char buf_table[MAX_FULL_NAME_LEN]; - char buf_index[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Cannot save index statistics for table " - "%s, index %s, stat name \"%s\": %s\n", - ut_format_name(index->table->name, TRUE, - buf_table, sizeof(buf_table)), - ut_format_name(index->name, FALSE, - buf_index, sizeof(buf_index)), - stat_name, ut_strerr(ret)); + if (innodb_index_stats_not_found == false && + index->stats_error_printed == false) { + char buf_table[MAX_FULL_NAME_LEN]; + char buf_index[MAX_FULL_NAME_LEN]; + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Cannot save index statistics for table " + "%s, index %s, stat name \"%s\": %s\n", + ut_format_name(index->table->name, TRUE, + buf_table, sizeof(buf_table)), + ut_format_name(index->name, FALSE, + buf_index, sizeof(buf_index)), + stat_name, ut_strerr(ret)); + index->stats_error_printed = true; + } } return(ret); @@ -2973,20 +3119,24 @@ dict_stats_update_for_index( } /* else */ - /* Fall back to transient stats since the persistent - storage is not present or is corrupted */ - char buf_table[MAX_FULL_NAME_LEN]; - char buf_index[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Recalculation of persistent statistics " - "requested for table %s index %s but the required " - "persistent statistics storage is not present or is " - "corrupted. Using transient stats instead.\n", - ut_format_name(index->table->name, TRUE, - buf_table, sizeof(buf_table)), - ut_format_name(index->name, FALSE, - buf_index, sizeof(buf_index))); + if (innodb_index_stats_not_found == false && + index->stats_error_printed == false) { + /* Fall back to transient stats since the persistent + storage is not present or is corrupted */ + char buf_table[MAX_FULL_NAME_LEN]; + char buf_index[MAX_FULL_NAME_LEN]; + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Recalculation of persistent statistics " + "requested for table %s index %s but the required " + "persistent statistics storage is not present or is " + "corrupted. Using transient stats instead.\n", + ut_format_name(index->table->name, TRUE, + buf_table, sizeof(buf_table)), + ut_format_name(index->name, FALSE, + buf_index, sizeof(buf_index))); + index->stats_error_printed = false; + } } dict_table_stats_lock(index->table, RW_X_LATCH); @@ -3071,13 +3221,17 @@ dict_stats_update( /* Fall back to transient stats since the persistent storage is not present or is corrupted */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Recalculation of persistent statistics " - "requested for table %s but the required persistent " - "statistics storage is not present or is corrupted. " - "Using transient stats instead.\n", - ut_format_name(table->name, TRUE, buf, sizeof(buf))); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Recalculation of persistent statistics " + "requested for table %s but the required persistent " + "statistics storage is not present or is corrupted. 
" + "Using transient stats instead.\n", + ut_format_name(table->name, TRUE, buf, sizeof(buf))); + table->stats_error_printed = true; + } goto transient; @@ -3121,17 +3275,21 @@ dict_stats_update( /* persistent statistics storage does not exist or is corrupted, calculate the transient stats */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: Fetch of persistent " - "statistics requested for table %s but the " - "required system tables %s and %s are not " - "present or have unexpected structure. " - "Using transient stats instead.\n", - ut_format_name(table->name, TRUE, - buf, sizeof(buf)), - TABLE_STATS_NAME_PRINT, - INDEX_STATS_NAME_PRINT); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Error: Fetch of persistent " + "statistics requested for table %s but the " + "required system tables %s and %s are not " + "present or have unexpected structure. " + "Using transient stats instead.\n", + ut_format_name(table->name, TRUE, + buf, sizeof(buf)), + TABLE_STATS_NAME_PRINT, + INDEX_STATS_NAME_PRINT); + table->stats_error_printed = true; + } goto transient; } @@ -3202,16 +3360,19 @@ dict_stats_update( dict_stats_table_clone_free(t); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error fetching persistent statistics " - "for table %s from %s and %s: %s. " - "Using transient stats method instead.\n", - ut_format_name(table->name, TRUE, buf, - sizeof(buf)), - TABLE_STATS_NAME, - INDEX_STATS_NAME, - ut_strerr(err)); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Error fetching persistent statistics " + "for table %s from %s and %s: %s. " + "Using transient stats method instead.\n", + ut_format_name(table->name, TRUE, buf, + sizeof(buf)), + TABLE_STATS_NAME, + INDEX_STATS_NAME, + ut_strerr(err)); + } goto transient; } diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index e406d008705..8fd2b11e9f3 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -123,7 +123,7 @@ completes, we decrement the count and return the file node to the LRU-list if the count drops to zero. */ /** When mysqld is run, the default directory "." is the mysqld datadir, -but in the MySQL Embedded Server Library and ibbackup it is not the default +but in the MySQL Embedded Server Library and mysqlbackup it is not the default directory, and we must set the base file path explicitly */ UNIV_INTERN const char* fil_path_to_mysql_datadir = "."; @@ -792,7 +792,7 @@ fil_node_open_file( fprintf(stderr, "InnoDB: Error: the size of single-table" " tablespace file %s\n" - "InnoDB: is only "UINT64PF"," + "InnoDB: is only " UINT64PF "," " should be at least %lu!\n", node->name, size_bytes, @@ -2074,8 +2074,8 @@ fil_check_first_page( } /*******************************************************************//** -Reads the flushed lsn, arch no, and tablespace flag fields from a data -file at database startup. +Reads the flushed lsn, arch no, space_id and tablespace flag fields from +the first page of a data file at database startup. 
@retval NULL on success, or if innodb_force_recovery is set @return pointer to an error message string */ UNIV_INTERN @@ -2117,7 +2117,13 @@ fil_read_first_page( fil_space_is_page_compressed(orig_space_id) : FALSE); - *flags = fsp_header_get_flags(page); + /* The FSP_HEADER on page 0 is only valid for the first file + in a tablespace. So if this is not the first datafile, leave + *flags and *space_id as they were read from the first file and + do not validate the first page. */ + if (!one_read_already) { + *flags = fsp_header_get_flags(page); + } /* Page is page compressed page, need to decompress, before continue. */ @@ -2126,14 +2132,14 @@ fil_read_first_page( fil_decompress_page(NULL, page, UNIV_PAGE_SIZE, &write_size); } - *space_id = fsp_header_get_space_id(page); - - flushed_lsn = mach_read_from_8(page + FIL_PAGE_FILE_FLUSH_LSN); - if (!one_read_already) { + *space_id = fsp_header_get_space_id(page); + check_msg = fil_check_first_page(page); } + flushed_lsn = mach_read_from_8(page + FIL_PAGE_FILE_FLUSH_LSN); + ut_free(buf); if (check_msg) { @@ -2341,13 +2347,13 @@ exists and the space id in it matches. Replays the create operation if a file at that path does not exist yet. If the database directory for the file to be created does not exist, then we create the directory, too. -Note that ibbackup --apply-log sets fil_path_to_mysql_datadir to point to the -datadir that we should use in replaying the file operations. +Note that mysqlbackup --apply-log sets fil_path_to_mysql_datadir to point to +the datadir that we should use in replaying the file operations. InnoDB recovery does not replay these fully since it always sets the space id -to zero. But ibbackup does replay them. TODO: If remote tablespaces are used, -ibbackup will only create tables in the default directory since MLOG_FILE_CREATE -and MLOG_FILE_CREATE2 only know the tablename, not the path. +to zero. But mysqlbackup does replay them. TODO: If remote tablespaces are +used, mysqlbackup will only create tables in the default directory since +MLOG_FILE_CREATE and MLOG_FILE_CREATE2 only know the tablename, not the path. @return end of log record, or NULL if the record was not completely contained between ptr and end_ptr */ @@ -2434,11 +2440,11 @@ fil_op_log_parse_or_replay( } /* Let us try to perform the file operation, if sensible. Note that - ibbackup has at this stage already read in all space id info to the + mysqlbackup has at this stage already read in all space id info to the fil0fil.cc data structures. NOTE that our algorithm is not guaranteed to work correctly if there - were renames of tables during the backup. See ibbackup code for more + were renames of tables during the backup. See mysqlbackup code for more on the problem. */ switch (type) { @@ -2853,12 +2859,12 @@ fil_delete_tablespace( if (err == DB_SUCCESS) { #ifndef UNIV_HOTBACKUP /* Write a log record about the deletion of the .ibd - file, so that ibbackup can replay it in the + file, so that mysqlbackup can replay it in the --apply-log phase. We use a dummy mtr and the familiar log write mechanism. 
*/ mtr_t mtr; - /* When replaying the operation in ibbackup, do not try + /* When replaying the operation in mysqlbackup, do not try to write any log record */ mtr_start(&mtr); @@ -4550,7 +4556,7 @@ will_not_choose: " (< 4 pages 16 kB each),\n" "InnoDB: or the space id in the file header" " is not sensible.\n" - "InnoDB: This can happen in an ibbackup run," + "InnoDB: This can happen in an mysqlbackup run," " and is not dangerous.\n", fsp->filepath, fsp->id, fsp->filepath, size); os_file_close(fsp->file); @@ -4587,7 +4593,7 @@ will_not_choose: "InnoDB: because space %s with the same id\n" "InnoDB: was scanned earlier. This can happen" " if you have renamed tables\n" - "InnoDB: during an ibbackup run.\n", + "InnoDB: during an mysqlbackup run.\n", fsp->filepath, fsp->id, fsp->filepath, space->name); os_file_close(fsp->file); @@ -5303,9 +5309,9 @@ file_extended: #ifdef UNIV_HOTBACKUP /********************************************************************//** Extends all tablespaces to the size stored in the space header. During the -ibbackup --apply-log phase we extended the spaces on-demand so that log records -could be applied, but that may have left spaces still too small compared to -the size stored in the space header. */ +mysqlbackup --apply-log phase we extended the spaces on-demand so that log +records could be applied, but that may have left spaces still too small +compared to the size stored in the space header. */ UNIV_INTERN void fil_extend_tablespaces_to_stored_len(void) @@ -5817,7 +5823,7 @@ fil_io( page_compression_level = fsp_flags_get_page_compression_level(space->flags); #ifdef UNIV_HOTBACKUP - /* In ibbackup do normal i/o, not aio */ + /* In mysqlbackup do normal i/o, not aio */ if (type == OS_FILE_READ) { ret = os_file_read(node->handle, buf, offset, len); } else { @@ -5831,7 +5837,7 @@ fil_io( offset, len, node, message, write_size, page_compressed, page_compression_level); #endif /* UNIV_HOTBACKUP */ - ut_a(ret); + if (mode == OS_AIO_SYNC) { /* The i/o operation is already completed when we return from @@ -5846,7 +5852,10 @@ fil_io( ut_ad(fil_validate_skip()); } - return(DB_SUCCESS); + if (!ret) { + return(DB_OUT_OF_FILE_SPACE); + } else { + } return(DB_SUCCESS); } #ifndef UNIV_HOTBACKUP diff --git a/storage/innobase/fts/fts0ast.cc b/storage/innobase/fts/fts0ast.cc index d6c19c0050a..dd48ffee14d 100644 --- a/storage/innobase/fts/fts0ast.cc +++ b/storage/innobase/fts/fts0ast.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -83,11 +83,11 @@ UNIV_INTERN fts_ast_node_t* fts_ast_create_node_term( /*=====================*/ - void* arg, /*!< in: ast state instance */ - const char* ptr) /*!< in: ast term string */ + void* arg, /*!< in: ast state instance */ + const fts_ast_string_t* ptr) /*!< in: ast term string */ { fts_ast_state_t* state = static_cast<fts_ast_state_t*>(arg); - ulint len = strlen(ptr); + ulint len = ptr->len; ulint cur_pos = 0; fts_ast_node_t* node = NULL; fts_ast_node_t* node_list = NULL; @@ -101,8 +101,9 @@ fts_ast_create_node_term( cur_len = innobase_mysql_fts_get_token( state->charset, - reinterpret_cast<const byte*>(ptr) + cur_pos, - reinterpret_cast<const byte*>(ptr) + len, &str, &offset); + reinterpret_cast<const byte*>(ptr->str) + cur_pos, + reinterpret_cast<const byte*>(ptr->str) + len, + &str, &offset); if (cur_len == 0) { break; @@ -124,10 +125,8 @@ fts_ast_create_node_term( node->type = FTS_AST_TERM; - node->term.ptr = static_cast<byte*>(ut_malloc( - str.f_len + 1)); - memcpy(node->term.ptr, str.f_str, str.f_len); - node->term.ptr[str.f_len] = '\0'; + node->term.ptr = fts_ast_string_create( + str.f_str, str.f_len); fts_ast_state_add_node( static_cast<fts_ast_state_t*>(arg), node); @@ -160,25 +159,21 @@ UNIV_INTERN fts_ast_node_t* fts_ast_create_node_text( /*=====================*/ - void* arg, /*!< in: ast state instance */ - const char* ptr) /*!< in: ast text string */ + void* arg, /*!< in: ast state instance */ + const fts_ast_string_t* ptr) /*!< in: ast text string */ { - ulint len = strlen(ptr); + ulint len = ptr->len; fts_ast_node_t* node = NULL; + /* Once we come here, the string must have at least 2 quotes "" + around the query string, which could be empty. Also the query + string may contain 0x00 in it, we don't treat it as null-terminated. */ + ut_ad(len >= 2); + ut_ad(ptr->str[0] == '\"' && ptr->str[len - 1] == '\"'); - ut_ad(len >= 1); - - if (len <= 2) { - /* There is a way to directly supply null terminator - in the query string (by using 0x220022) and get here, - and certainly it would not make a valid query text */ - ut_ad(ptr[0] == '\"'); - - if (len == 2) { - ut_ad(ptr[1] == '\"'); - } - + if (len == 2) { + /* If the query string contains nothing except quotes, + it's obviously an invalid query. 
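The explicit-length fts_ast_string_t used throughout this hunk exists because a quoted FTS query string may legitimately contain 0x00 bytes, so strlen()-style handling of the token would truncate it. A minimal standalone illustration (plain C++, not InnoDB code) of that difference:

#include <cstdio>
#include <cstring>

/* A length-carrying string, analogous in spirit to fts_ast_string_t,
   survives an embedded 0x00 byte; strlen() stops at it. */
struct ast_string {
	const unsigned char*	str;
	size_t			len;
};

int main()
{
	/* a quoted token that contains an embedded 0x00 byte */
	const unsigned char raw[] = { '"', 'a', 0x00, 'b', '"' };

	ast_string s = { raw, sizeof(raw) };

	printf("strlen() sees %zu bytes, the parser needs %zu\n",
	       strlen(reinterpret_cast<const char*>(raw)), s.len);

	return 0;	/* prints: strlen() sees 2 bytes, the parser needs 5 */
}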
*/ return(NULL); } @@ -188,11 +183,9 @@ fts_ast_create_node_text( len -= 2; node->type = FTS_AST_TEXT; - node->text.ptr = static_cast<byte*>(ut_malloc(len + 1)); - /*!< Skip copying the first quote */ - memcpy(node->text.ptr, ptr + 1, len); - node->text.ptr[len] = 0; + node->text.ptr = fts_ast_string_create( + reinterpret_cast<const byte*>(ptr->str + 1), len); node->text.distance = ULINT_UNDEFINED; fts_ast_state_add_node((fts_ast_state_t*) arg, node); @@ -275,14 +268,14 @@ fts_ast_free_node( switch (node->type) { case FTS_AST_TEXT: if (node->text.ptr) { - ut_free(node->text.ptr); + fts_ast_string_free(node->text.ptr); node->text.ptr = NULL; } break; case FTS_AST_TERM: if (node->term.ptr) { - ut_free(node->term.ptr); + fts_ast_string_free(node->term.ptr); node->term.ptr = NULL; } break; @@ -421,10 +414,10 @@ fts_ast_state_free( fts_ast_node_t* next = node->next_alloc; if (node->type == FTS_AST_TEXT && node->text.ptr) { - ut_free(node->text.ptr); + fts_ast_string_free(node->text.ptr); node->text.ptr = NULL; } else if (node->type == FTS_AST_TERM && node->term.ptr) { - ut_free(node->term.ptr); + fts_ast_string_free(node->term.ptr); node->term.ptr = NULL; } @@ -445,11 +438,13 @@ fts_ast_node_print( { switch (node->type) { case FTS_AST_TEXT: - printf("TEXT: %s\n", node->text.ptr); + printf("TEXT: "); + fts_ast_string_print(node->text.ptr); break; case FTS_AST_TERM: - printf("TERM: %s\n", node->term.ptr); + printf("TERM: "); + fts_ast_string_print(node->term.ptr); break; case FTS_AST_LIST: @@ -628,3 +623,74 @@ fts_ast_visit( return(error); } + +/** +Create an ast string object, with NUL-terminator, so the string +has one more byte than len +@param[in] str pointer to string +@param[in] len length of the string +@return ast string with NUL-terminator */ +UNIV_INTERN +fts_ast_string_t* +fts_ast_string_create( + const byte* str, + ulint len) +{ + fts_ast_string_t* ast_str; + + ut_ad(len > 0); + + ast_str = static_cast<fts_ast_string_t*> + (ut_malloc(sizeof(fts_ast_string_t))); + ast_str->str = static_cast<byte*>(ut_malloc(len + 1)); + + ast_str->len = len; + memcpy(ast_str->str, str, len); + ast_str->str[len] = '\0'; + + return(ast_str); +} + +/** +Free an ast string instance +@param[in,out] ast_str string to free */ +UNIV_INTERN +void +fts_ast_string_free( + fts_ast_string_t* ast_str) +{ + if (ast_str != NULL) { + ut_free(ast_str->str); + ut_free(ast_str); + } +} + +/** +Translate ast string of type FTS_AST_NUMB to unsigned long by strtoul +@param[in] str string to translate +@param[in] base the base +@return translated number */ +UNIV_INTERN +ulint +fts_ast_string_to_ul( + const fts_ast_string_t* ast_str, + int base) +{ + return(strtoul(reinterpret_cast<const char*>(ast_str->str), + NULL, base)); +} + +/** +Print the ast string +@param[in] str string to print */ +UNIV_INTERN +void +fts_ast_string_print( + const fts_ast_string_t* ast_str) +{ + for (ulint i = 0; i < ast_str->len; ++i) { + printf("%c", ast_str->str[i]); + } + + printf("\n"); +} diff --git a/storage/innobase/fts/fts0blex.cc b/storage/innobase/fts/fts0blex.cc index f83523825d2..7d0acb00a3b 100644 --- a/storage/innobase/fts/fts0blex.cc +++ b/storage/innobase/fts/fts0blex.cc @@ -451,7 +451,7 @@ static yyconst flex_int16_t yy_chk[32] = #line 1 "fts0blex.l" /***************************************************************************** -Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -806,7 +806,7 @@ case 3: YY_RULE_SETUP #line 53 "fts0blex.l" { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_NUMB); } @@ -815,7 +815,7 @@ case 4: YY_RULE_SETUP #line 59 "fts0blex.l" { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_TERM); } @@ -824,7 +824,7 @@ case 5: YY_RULE_SETUP #line 65 "fts0blex.l" { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_TEXT); } diff --git a/storage/innobase/fts/fts0blex.l b/storage/innobase/fts/fts0blex.l index 6193f0df187..ae6e8ffaa48 100644 --- a/storage/innobase/fts/fts0blex.l +++ b/storage/innobase/fts/fts0blex.l @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -51,19 +51,19 @@ this program; if not, write to the Free Software Foundation, Inc., } [0-9]+ { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_NUMB); } [^" \n*()+\-<>~@%]* { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_TERM); } \"[^\"\n]*\" { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_TEXT); } diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 4a667686795..848d60f6e3f 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2014, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -44,6 +44,13 @@ Full Text Search interface /** Column name from the FTS config table */ #define FTS_MAX_CACHE_SIZE_IN_MB "cache_size_in_mb" +/** Verify if a aux table name is a obsolete table +by looking up the key word in the obsolete table names */ +#define FTS_IS_OBSOLETE_AUX_TABLE(table_name) \ + (strstr((table_name), "DOC_ID") != NULL \ + || strstr((table_name), "ADDED") != NULL \ + || strstr((table_name), "STOPWORDS") != NULL) + /** This is maximum FTS cache for each table and would be a configurable variable */ UNIV_INTERN ulong fts_max_cache_size; @@ -601,8 +608,10 @@ fts_cache_init( cache->total_size = 0; + mutex_enter((ib_mutex_t*) &cache->deleted_lock); cache->deleted_doc_ids = ib_vector_create( cache->sync_heap, sizeof(fts_update_t), 4); + mutex_exit((ib_mutex_t*) &cache->deleted_lock); /* Reset the cache data for all the FTS indexes. */ for (i = 0; i < ib_vector_size(cache->indexes); ++i) { @@ -1130,7 +1139,10 @@ fts_cache_clear( cache->sync_heap->arg = NULL; cache->total_size = 0; + + mutex_enter((ib_mutex_t*) &cache->deleted_lock); cache->deleted_doc_ids = NULL; + mutex_exit((ib_mutex_t*) &cache->deleted_lock); } /*********************************************************************//** @@ -1947,10 +1959,15 @@ fts_create_one_index_table( char* table_name = fts_get_table_name(fts_table); dberr_t error; CHARSET_INFO* charset; + ulint flags2 = 0; ut_ad(index->type & DICT_FTS); - new_table = dict_mem_table_create(table_name, 0, 5, 1, 0); + if (srv_file_per_table) { + flags2 = DICT_TF2_USE_TABLESPACE; + } + + new_table = dict_mem_table_create(table_name, 0, 5, 1, flags2); field = dict_index_get_nth_field(index, 0); charset = innobase_get_fts_charset( @@ -1979,7 +1996,7 @@ fts_create_one_index_table( dict_mem_table_add_col(new_table, heap, "ilist", DATA_BLOB, 4130048, 0); - error = row_create_table_for_mysql(new_table, trx, true); + error = row_create_table_for_mysql(new_table, trx, false); if (error != DB_SUCCESS) { trx->error_state = error; @@ -2244,11 +2261,15 @@ static fts_trx_t* fts_trx_create( /*===========*/ - trx_t* trx) /*!< in: InnoDB transaction */ + trx_t* trx) /*!< in/out: InnoDB + transaction */ { - fts_trx_t* ftt; - ib_alloc_t* heap_alloc; - mem_heap_t* heap = mem_heap_create(1024); + fts_trx_t* ftt; + ib_alloc_t* heap_alloc; + mem_heap_t* heap = mem_heap_create(1024); + trx_named_savept_t* savep; + + ut_a(trx->fts_trx == NULL); ftt = static_cast<fts_trx_t*>(mem_heap_alloc(heap, sizeof(fts_trx_t))); ftt->trx = trx; @@ -2266,6 +2287,14 @@ fts_trx_create( fts_savepoint_create(ftt->savepoints, NULL, NULL); fts_savepoint_create(ftt->last_stmt, NULL, NULL); + /* Copy savepoints that already set before. */ + for (savep = UT_LIST_GET_FIRST(trx->trx_savepoints); + savep != NULL; + savep = UT_LIST_GET_NEXT(trx_savepoints, savep)) { + + fts_savepoint_take(trx, ftt, savep->name); + } + return(ftt); } @@ -4359,6 +4388,7 @@ fts_sync_commit( /* We need to do this within the deleted lock since fts_delete() can attempt to add a deleted doc id to the cache deleted id array. 
*/ fts_cache_clear(cache); + DEBUG_SYNC_C("fts_deleted_doc_ids_clear"); fts_cache_init(cache); rw_lock_x_unlock(&cache->lock); @@ -5160,6 +5190,12 @@ fts_cache_append_deleted_doc_ids( mutex_enter((ib_mutex_t*) &cache->deleted_lock); + if (cache->deleted_doc_ids == NULL) { + mutex_exit((ib_mutex_t*) &cache->deleted_lock); + return; + } + + for (i = 0; i < ib_vector_size(cache->deleted_doc_ids); ++i) { fts_update_t* update; @@ -5445,16 +5481,15 @@ void fts_savepoint_take( /*===============*/ trx_t* trx, /*!< in: transaction */ + fts_trx_t* fts_trx, /*!< in: fts transaction */ const char* name) /*!< in: savepoint name */ { mem_heap_t* heap; - fts_trx_t* fts_trx; fts_savepoint_t* savepoint; fts_savepoint_t* last_savepoint; ut_a(name != NULL); - fts_trx = trx->fts_trx; heap = fts_trx->heap; /* The implied savepoint must exist. */ @@ -5771,7 +5806,7 @@ fts_savepoint_rollback( ut_a(ib_vector_size(savepoints) > 0); /* Restore the savepoint. */ - fts_savepoint_take(trx, name); + fts_savepoint_take(trx, trx->fts_trx, name); } } @@ -5837,6 +5872,12 @@ fts_is_aux_table_name( } } + /* Could be obsolete common tables. */ + if (strncmp(ptr, "ADDED", len) == 0 + || strncmp(ptr, "STOPWORDS", len) == 0) { + return(true); + } + /* Try and read the index id. */ if (!fts_read_object_id(&table->index_id, ptr)) { return(FALSE); @@ -6433,6 +6474,56 @@ fts_check_and_drop_orphaned_tables( mem_free(path); } + } else { + if (FTS_IS_OBSOLETE_AUX_TABLE(aux_table->name)) { + + /* Current table could be one of the three + obsolete tables, in this case, we should + always try to drop it but not rename it. + This could happen when we try to upgrade + from older server to later one, which doesn't + contain these obsolete tables. */ + drop = true; + + dberr_t err; + trx_t* trx_drop = + trx_allocate_for_background(); + + trx_drop->op_info = "Drop obsolete aux tables"; + trx_drop->dict_operation_lock_mode = RW_X_LATCH; + + trx_start_for_ddl(trx_drop, TRX_DICT_OP_TABLE); + + err = row_drop_table_for_mysql( + aux_table->name, trx_drop, false, true); + + trx_drop->dict_operation_lock_mode = 0; + + if (err != DB_SUCCESS) { + /* We don't need to worry about the + failure, since server would try to + drop it on next restart, even if + the table was broken. */ + + ib_logf(IB_LOG_LEVEL_WARN, + "Fail to drop obsolete aux" + " table '%s', which is" + " harmless. will try to drop" + " it on next restart.", + aux_table->name); + + fts_sql_rollback(trx_drop); + } else { + ib_logf(IB_LOG_LEVEL_INFO, + "Dropped obsolete aux" + " table '%s'.", + aux_table->name); + + fts_sql_commit(trx_drop); + } + + trx_free_for_background(trx_drop); + } } #ifdef _WIN32 if (!drop && rename) { diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc index a9f3a25530d..910a00cd521 100644 --- a/storage/innobase/fts/fts0opt.cc +++ b/storage/innobase/fts/fts0opt.cc @@ -95,7 +95,7 @@ enum fts_msg_type_t { /** Compressed list of words that have been read from FTS INDEX that needs to be optimized. 
*/ struct fts_zip_t { - ulint status; /*!< Status of (un)/zip operation */ + lint status; /*!< Status of (un)/zip operation */ ulint n_words; /*!< Number of words compressed */ diff --git a/storage/innobase/fts/fts0pars.cc b/storage/innobase/fts/fts0pars.cc index 83d465b0988..7f0ba4e0c1b 100644 --- a/storage/innobase/fts/fts0pars.cc +++ b/storage/innobase/fts/fts0pars.cc @@ -100,6 +100,8 @@ extern int ftserror(const char* p); #define YYPARSE_PARAM state #define YYLEX_PARAM ((fts_ast_state_t*) state)->lexer +#define YYTOKENFREE(token) fts_ast_string_free((token)) + typedef int (*fts_scanner_alt)(YYSTYPE* val, yyscan_t yyscanner); typedef int (*fts_scanner)(); @@ -154,9 +156,9 @@ typedef union YYSTYPE /* Line 293 of yacc.c */ #line 61 "fts0pars.y" - int oper; - char* token; - fts_ast_node_t* node; + int oper; + fts_ast_string_t* token; + fts_ast_node_t* node; @@ -632,6 +634,19 @@ while (YYID (0)) #define YYTERROR 1 #define YYERRCODE 256 +#define YYERRCLEANUP \ +do \ + switch (yylastchar) \ + { \ + case FTS_NUMB: \ + case FTS_TEXT: \ + case FTS_TERM: \ + YYTOKENFREE(yylval.token); \ + break; \ + default: \ + break; \ + } \ +while (YYID (0)) /* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N]. If N is 0, then set CURRENT to the empty location which ends @@ -1169,6 +1184,8 @@ yyparse () { /* The lookahead symbol. */ int yychar; +/* The backup of yychar when there is an error and we're in yyerrlab. */ +int yylastchar; /* The semantic value of the lookahead symbol. */ YYSTYPE yylval; @@ -1524,8 +1541,8 @@ yyreduce: /* Line 1806 of yacc.c */ #line 141 "fts0pars.y" { - fts_ast_term_set_distance((yyvsp[(1) - (3)].node), strtoul((yyvsp[(3) - (3)].token), NULL, 10)); - free((yyvsp[(3) - (3)].token)); + fts_ast_term_set_distance((yyvsp[(1) - (3)].node), fts_ast_string_to_ul((yyvsp[(3) - (3)].token), 10)); + fts_ast_string_free((yyvsp[(3) - (3)].token)); } break; @@ -1557,8 +1574,8 @@ yyreduce: { (yyval.node) = fts_ast_create_node_list(state, (yyvsp[(1) - (4)].node)); fts_ast_add_node((yyval.node), (yyvsp[(2) - (4)].node)); - fts_ast_term_set_distance((yyvsp[(2) - (4)].node), strtoul((yyvsp[(4) - (4)].token), NULL, 10)); - free((yyvsp[(4) - (4)].token)); + fts_ast_term_set_distance((yyvsp[(2) - (4)].node), fts_ast_string_to_ul((yyvsp[(4) - (4)].token), 10)); + fts_ast_string_free((yyvsp[(4) - (4)].token)); } break; @@ -1623,7 +1640,7 @@ yyreduce: #line 191 "fts0pars.y" { (yyval.node) = fts_ast_create_node_term(state, (yyvsp[(1) - (1)].token)); - free((yyvsp[(1) - (1)].token)); + fts_ast_string_free((yyvsp[(1) - (1)].token)); } break; @@ -1633,7 +1650,7 @@ yyreduce: #line 196 "fts0pars.y" { (yyval.node) = fts_ast_create_node_term(state, (yyvsp[(1) - (1)].token)); - free((yyvsp[(1) - (1)].token)); + fts_ast_string_free((yyvsp[(1) - (1)].token)); } break; @@ -1652,7 +1669,7 @@ yyreduce: #line 207 "fts0pars.y" { (yyval.node) = fts_ast_create_node_text(state, (yyvsp[(1) - (1)].token)); - free((yyvsp[(1) - (1)].token)); + fts_ast_string_free((yyvsp[(1) - (1)].token)); } break; @@ -1700,6 +1717,8 @@ yyreduce: | yyerrlab -- here on detecting error | `------------------------------------*/ yyerrlab: + /* Backup yychar, in case we would change it. */ + yylastchar = yychar; /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); @@ -1755,7 +1774,11 @@ yyerrlab: { /* Return failure if at end of input. 
*/ if (yychar == YYEOF) - YYABORT; + { + /* Since we don't need the token, we have to free it first. */ + YYERRCLEANUP; + YYABORT; + } } else { @@ -1812,7 +1835,11 @@ yyerrlab1: /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) - YYABORT; + { + /* Since we don't need the error token, we have to free it first. */ + YYERRCLEANUP; + YYABORT; + } yydestruct ("Error: popping", diff --git a/storage/innobase/fts/fts0pars.y b/storage/innobase/fts/fts0pars.y index ff22e9a9873..e48036e82fe 100644 --- a/storage/innobase/fts/fts0pars.y +++ b/storage/innobase/fts/fts0pars.y @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -59,9 +59,9 @@ struct fts_lexer_struct { %} %union { - int oper; - char* token; - fts_ast_node_t* node; + int oper; + fts_ast_string_t* token; + fts_ast_node_t* node; }; /* Enable re-entrant parser */ @@ -139,8 +139,8 @@ expr : term { } | text '@' FTS_NUMB { - fts_ast_term_set_distance($1, strtoul($3, NULL, 10)); - free($3); + fts_ast_term_set_distance($1, fts_ast_string_to_ul($3, 10)); + fts_ast_string_free($3); } | prefix term '*' { @@ -157,8 +157,8 @@ expr : term { | prefix text '@' FTS_NUMB { $$ = fts_ast_create_node_list(state, $1); fts_ast_add_node($$, $2); - fts_ast_term_set_distance($2, strtoul($4, NULL, 10)); - free($4); + fts_ast_term_set_distance($2, fts_ast_string_to_ul($4, 10)); + fts_ast_string_free($4); } | prefix text { @@ -190,12 +190,12 @@ prefix : '-' { term : FTS_TERM { $$ = fts_ast_create_node_term(state, $1); - free($1); + fts_ast_string_free($1); } | FTS_NUMB { $$ = fts_ast_create_node_term(state, $1); - free($1); + fts_ast_string_free($1); } /* Ignore leading '*' */ @@ -206,7 +206,7 @@ term : FTS_TERM { text : FTS_TEXT { $$ = fts_ast_create_node_text(state, $1); - free($1); + fts_ast_string_free($1); } ; %% diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc index adbf433e608..f26fd89ac76 100644 --- a/storage/innobase/fts/fts0que.cc +++ b/storage/innobase/fts/fts0que.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2800,20 +2800,19 @@ fts_query_get_token( ulint str_len; byte* new_ptr = NULL; - str_len = ut_strlen((char*) node->term.ptr); + str_len = node->term.ptr->len; ut_a(node->type == FTS_AST_TERM); token->f_len = str_len; - token->f_str = node->term.ptr; + token->f_str = node->term.ptr->str; if (node->term.wildcard) { token->f_str = static_cast<byte*>(ut_malloc(str_len + 2)); token->f_len = str_len + 1; - /* Need to copy the NUL character too. 
*/ - memcpy(token->f_str, node->term.ptr, str_len + 1); + memcpy(token->f_str, node->term.ptr->str, str_len); token->f_str[str_len] = '%'; token->f_str[token->f_len] = 0; @@ -2848,8 +2847,8 @@ fts_query_visitor( switch (node->type) { case FTS_AST_TEXT: - token.f_str = node->text.ptr; - token.f_len = ut_strlen((char*) token.f_str); + token.f_str = node->text.ptr->str; + token.f_len = node->text.ptr->len; if (query->oper == FTS_EXIST) { ut_ad(query->intersection == NULL); @@ -2878,8 +2877,8 @@ fts_query_visitor( break; case FTS_AST_TERM: - token.f_str = node->term.ptr; - token.f_len = ut_strlen(reinterpret_cast<char*>(token.f_str)); + token.f_str = node->term.ptr->str; + token.f_len = node->term.ptr->len; /* Add the word to our RB tree that will be used to calculate this terms per document frequency. */ @@ -3191,13 +3190,9 @@ fts_query_read_node( to assign the frequency on search string behalf. */ if (query->cur_node->type == FTS_AST_TERM && query->cur_node->term.wildcard) { - - /* These cast are safe since we only care about the - terminating NUL character as an end of string marker. */ - term.f_len = ut_strlen(reinterpret_cast<char*> - (query->cur_node->term.ptr)); + term.f_len = query->cur_node->term.ptr->len; ut_ad(FTS_MAX_WORD_LEN >= term.f_len); - memcpy(term.f_str, query->cur_node->term.ptr, term.f_len); + memcpy(term.f_str, query->cur_node->term.ptr->str, term.f_len); } else { term.f_len = word->f_len; ut_ad(FTS_MAX_WORD_LEN >= word->f_len); @@ -3507,14 +3502,15 @@ fts_query_prepare_result( doc_freq = rbt_value(fts_doc_freq_t, node); /* Don't put deleted docs into result */ - if (fts_bsearch(array, 0, static_cast<int>(size), doc_freq->doc_id) - >= 0) { + if (fts_bsearch(array, 0, static_cast<int>(size), + doc_freq->doc_id) >= 0) { + /* one less matching doc count */ + --word_freq->doc_count; continue; } ranking.doc_id = doc_freq->doc_id; - ranking.rank = static_cast<fts_rank_t>( - doc_freq->freq * word_freq->idf * word_freq->idf); + ranking.rank = static_cast<fts_rank_t>(doc_freq->freq); ranking.words = NULL; fts_query_add_ranking(query, result->rankings_by_id, @@ -3527,6 +3523,25 @@ fts_query_prepare_result( } } + /* Calculate IDF only after we exclude the deleted items */ + fts_query_calculate_idf(query); + + node = rbt_first(query->word_freqs); + word_freq = rbt_value(fts_word_freq_t, node); + + /* Calculate the ranking for each doc */ + for (node = rbt_first(result->rankings_by_id); + node != NULL; + node = rbt_next(result->rankings_by_id, node)) { + + fts_ranking_t* ranking; + + ranking = rbt_value(fts_ranking_t, node); + + ranking->rank = static_cast<fts_rank_t>( + ranking->rank * word_freq->idf * word_freq->idf); + } + return(result); } @@ -3900,6 +3915,7 @@ fts_query( /* Get the deleted doc ids that are in the cache. */ fts_cache_append_deleted_doc_ids( index->table->fts->cache, query.deleted->doc_ids); + DEBUG_SYNC_C("fts_deleted_doc_ids_append"); /* Sort the vector so that we can do a binary search over the ids. */ ib_vector_sort(query.deleted->doc_ids, fts_update_doc_id_cmp); @@ -3956,7 +3972,8 @@ fts_query( } /* Calculate the inverse document frequency of the terms. 
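The reworked result preparation above assigns the raw term frequency as the initial rank, skips deleted documents while decrementing the per-word doc_count, and only afterwards derives the IDF and multiplies every surviving rank by idf * idf. A standalone sketch of that two-pass shape follows; the log10(total_docs / doc_count) form of the IDF is an illustrative assumption here, not a statement about fts_query_calculate_idf().

#include <cmath>
#include <cstdio>

int main()
{
	const double total_docs = 1000.0;

	/* per-document term frequencies for one query word */
	int    doc_id[]  = { 1, 2, 7 };
	double freq[]    = { 3.0, 1.0, 5.0 };
	bool   deleted[] = { false, true, false };

	double rank[3];
	double doc_count = 3.0;

	/* pass 1: raw frequencies, excluding deleted documents */
	for (int i = 0; i < 3; i++) {
		if (deleted[i]) {
			--doc_count;		/* one less matching doc */
			rank[i] = -1.0;		/* excluded from the result */
			continue;
		}
		rank[i] = freq[i];
	}

	/* IDF is computed only after deleted documents were excluded */
	const double idf = log10(total_docs / doc_count);

	/* pass 2: scale each surviving rank by idf * idf */
	for (int i = 0; i < 3; i++) {
		if (rank[i] >= 0.0) {
			rank[i] *= idf * idf;
			printf("doc %d rank %.3f\n", doc_id[i], rank[i]);
		}
	}

	return 0;
}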
*/ - if (query.error == DB_SUCCESS) { + if (query.error == DB_SUCCESS + && query.flags != FTS_OPT_RANKING) { fts_query_calculate_idf(&query); } diff --git a/storage/innobase/fts/fts0tlex.cc b/storage/innobase/fts/fts0tlex.cc index ef17ab1acf2..b744fbf0763 100644 --- a/storage/innobase/fts/fts0tlex.cc +++ b/storage/innobase/fts/fts0tlex.cc @@ -447,7 +447,7 @@ static yyconst flex_int16_t yy_chk[29] = #line 1 "fts0tlex.l" /***************************************************************************** -Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -802,7 +802,7 @@ case 3: YY_RULE_SETUP #line 54 "fts0tlex.l" { - val->token = strdup(fts0tget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner)); return(FTS_TEXT); } @@ -811,7 +811,7 @@ case 4: YY_RULE_SETUP #line 60 "fts0tlex.l" { - val->token = strdup(fts0tget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner)); return(FTS_TERM); } diff --git a/storage/innobase/fts/fts0tlex.l b/storage/innobase/fts/fts0tlex.l index a18c2a55081..4f55a83afe5 100644 --- a/storage/innobase/fts/fts0tlex.l +++ b/storage/innobase/fts/fts0tlex.l @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -52,13 +52,13 @@ this program; if not, write to the Free Software Foundation, Inc., } \"[^\"\n]*\" { - val->token = strdup(fts0tget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner)); return(FTS_TEXT); } [^" \n\%]* { - val->token = strdup(fts0tget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner)); return(FTS_TERM); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index d866d6b7764..84d7b1a12c8 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -426,7 +426,7 @@ static PSI_rwlock_info all_innodb_rwlocks[] = { {&trx_purge_latch_key, "trx_purge_latch", 0}, {&index_tree_rw_lock_key, "index_tree_rw_lock", 0}, {&index_online_log_key, "index_online_log", 0}, - {&dict_table_stats_latch_key, "dict_table_stats", 0}, + {&dict_table_stats_key, "dict_table_stats", 0}, {&hash_table_rw_lock_key, "hash_table_locks", 0} }; # endif /* UNIV_PFS_RWLOCK */ @@ -3640,6 +3640,14 @@ innobase_end( if (innodb_inited) { + THD *thd= current_thd; + if (thd) { // may be UNINSTALL PLUGIN statement + trx_t* trx = thd_to_trx(thd); + if (trx) { + trx_free_for_mysql(trx); + } + } + srv_fast_shutdown = (ulint) innobase_fast_shutdown; innodb_inited = 0; @@ -3784,7 +3792,7 @@ innobase_commit_ordered_2( { DBUG_ENTER("innobase_commit_ordered_2"); - /* We need current binlog position for ibbackup to work. + /* We need current binlog position for mysqlbackup to work. 
Note, the position is current because commit_ordered is guaranteed to be called in same sequenece as writing to binlog. */ @@ -4300,6 +4308,7 @@ innobase_release_savepoint( DBUG_ASSERT(hton == innodb_hton_ptr); trx = check_trx_exists(thd); + trx_start_if_not_started(trx); /* TODO: use provided savepoint data area to store savepoint data */ @@ -4355,7 +4364,7 @@ innobase_savepoint( error = trx_savepoint_for_mysql(trx, name, (ib_int64_t)0); if (error == DB_SUCCESS && trx->fts_trx != NULL) { - fts_savepoint_take(trx, name); + fts_savepoint_take(trx, trx->fts_trx, name); } DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL)); @@ -4390,7 +4399,7 @@ innobase_close_connection( sql_print_warning( "MySQL is closing a connection that has an active " - "InnoDB transaction. "TRX_ID_FMT" row modifications " + "InnoDB transaction. " TRX_ID_FMT " row modifications " "will roll back.", trx->undo_no); } @@ -4453,16 +4462,23 @@ innobase_kill_query( #endif /* WITH_WSREP */ trx = thd_to_trx(thd); - if (trx) - { - /* Cancel a pending lock request. */ - lock_mutex_enter(); - trx_mutex_enter(trx); - if (trx->lock.wait_lock) - lock_cancel_waiting_and_release(trx->lock.wait_lock); - trx_mutex_exit(trx); - lock_mutex_exit(); - } + if (trx) { + THD *cur = current_thd; + THD *owner = trx->current_lock_mutex_owner; + + /* Cancel a pending lock request. */ + if (owner != cur) { + lock_mutex_enter(); + } + trx_mutex_enter(trx); + if (trx->lock.wait_lock) { + lock_cancel_waiting_and_release(trx->lock.wait_lock); + } + trx_mutex_exit(trx); + if (owner != cur) { + lock_mutex_exit(); + } + } DBUG_VOID_RETURN; } @@ -4509,14 +4525,11 @@ handler::Table_flags ha_innobase::table_flags() const /*============================*/ { - THD *thd = ha_thd(); /* Need to use tx_isolation here since table flags is (also) called before prebuilt is inited. */ - ulong const tx_isolation = thd_tx_isolation(thd); + ulong const tx_isolation = thd_tx_isolation(ha_thd()); - if (tx_isolation <= ISO_READ_COMMITTED && - !(tx_isolation == ISO_READ_COMMITTED && - thd_rpl_is_parallel(thd))) { + if (tx_isolation <= ISO_READ_COMMITTED) { return(int_table_flags); } @@ -8059,7 +8072,7 @@ calc_row_difference( if (doc_id < prebuilt->table->fts->cache->next_doc_id) { fprintf(stderr, "InnoDB: FTS Doc ID must be larger than" - " "IB_ID_FMT" for table", + " " IB_ID_FMT " for table", innodb_table->fts->cache->next_doc_id - 1); ut_print_name(stderr, trx, @@ -8071,9 +8084,9 @@ calc_row_difference( - prebuilt->table->fts->cache->next_doc_id) >= FTS_DOC_ID_MAX_STEP) { fprintf(stderr, - "InnoDB: Doc ID "UINT64PF" is too" + "InnoDB: Doc ID " UINT64PF " is too" " big. Its difference with largest" - " Doc ID used "UINT64PF" cannot" + " Doc ID used " UINT64PF " cannot" " exceed or equal to %d\n", doc_id, prebuilt->table->fts->cache->next_doc_id - 1, @@ -8814,6 +8827,29 @@ ha_innobase::innobase_get_index( index = innobase_index_lookup(share, keynr); if (index) { + if (!key || ut_strcmp(index->name, key->name) != 0) { + fprintf(stderr, "InnoDB: [Error] Index for key no %u" + " mysql name %s , InnoDB name %s for table %s\n", + keynr, key ? key->name : "NULL", + index->name, + prebuilt->table->name); + + for(ulint i=0; i < table->s->keys; i++) { + index = innobase_index_lookup(share, i); + key = table->key_info + keynr; + + if (index) { + + fprintf(stderr, "InnoDB: [Note] Index for key no %u" + " mysql name %s , InnoDB name %s for table %s\n", + keynr, key ? 
key->name : "NULL", + index->name, + prebuilt->table->name); + } + } + + } + ut_a(ut_strcmp(index->name, key->name) == 0); } else { /* Can't find index with keynr in the translation @@ -12881,6 +12917,34 @@ ha_innobase::info_low( break; } + DBUG_EXECUTE_IF("ib_ha_innodb_stat_not_initialized", + index->table->stat_initialized = FALSE;); + + if (!ib_table->stat_initialized || + (index->table != ib_table || + !index->table->stat_initialized)) { + fprintf(stderr, + "InnoDB: Warning: Index %s points to table %s" " and ib_table %s statistics is initialized %d " + " but index table %s initialized %d " + " mysql table is %s. Have you mixed " + "up .frm files from different " + "installations? " + "See " REFMAN + "innodb-troubleshooting.html\n", + index->name, + index->table->name, + ib_table->name, + ib_table->stat_initialized, + index->table->name, + index->table->stat_initialized, + table->s->table_name.str + ); + + /* This is better than + assert on below function */ + dict_stats_init(index->table); + } + rec_per_key = innodb_rec_per_key( index, j, stats.records); @@ -13562,9 +13626,13 @@ ha_innobase::get_foreign_key_list( mutex_enter(&(dict_sys->mutex)); - for (foreign = UT_LIST_GET_FIRST(prebuilt->table->foreign_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::iterator it + = prebuilt->table->foreign_set.begin(); + it != prebuilt->table->foreign_set.end(); + ++it) { + + foreign = *it; + pf_key_info = get_foreign_key_info(thd, foreign); if (pf_key_info) { f_key_list->push_back(pf_key_info); @@ -13600,9 +13668,13 @@ ha_innobase::get_parent_foreign_key_list( mutex_enter(&(dict_sys->mutex)); - for (foreign = UT_LIST_GET_FIRST(prebuilt->table->referenced_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it + = prebuilt->table->referenced_set.begin(); + it != prebuilt->table->referenced_set.end(); + ++it) { + + foreign = *it; + pf_key_info = get_foreign_key_info(thd, foreign); if (pf_key_info) { f_key_list->push_back(pf_key_info); @@ -13635,8 +13707,8 @@ ha_innobase::can_switch_engines(void) "determining if there are foreign key constraints"; row_mysql_freeze_data_dictionary(prebuilt->trx); - can_switch = !UT_LIST_GET_FIRST(prebuilt->table->referenced_list) - && !UT_LIST_GET_FIRST(prebuilt->table->foreign_list); + can_switch = prebuilt->table->referenced_set.empty() + && prebuilt->table->foreign_set.empty(); row_mysql_unfreeze_data_dictionary(prebuilt->trx); prebuilt->trx->op_info = ""; @@ -18670,6 +18742,11 @@ static MYSQL_SYSVAR_ULONG(saved_page_number_debug, NULL, innodb_save_page_no, 0, 0, UINT_MAX32, 0); #endif /* UNIV_DEBUG */ +static MYSQL_SYSVAR_UINT(simulate_comp_failures, srv_simulate_comp_failures, + PLUGIN_VAR_NOCMDARG, + "Simulate compression failures.", + NULL, NULL, 0, 0, 99, 0); + static MYSQL_SYSVAR_BOOL(force_primary_key, srv_force_primary_key, PLUGIN_VAR_OPCMDARG, @@ -18877,6 +18954,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(fil_make_page_dirty_debug), MYSQL_SYSVAR(saved_page_number_debug), #endif /* UNIV_DEBUG */ + MYSQL_SYSVAR(simulate_comp_failures), MYSQL_SYSVAR(force_primary_key), MYSQL_SYSVAR(use_trim), MYSQL_SYSVAR(compression_algorithm), @@ -19211,7 +19289,7 @@ ib_senderrf( va_start(args, code); - myf l; + myf l=0; switch(level) { case IB_LOG_LEVEL_INFO: diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 123dbdf254d..329eae81854 100644 --- 
a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -607,15 +607,9 @@ innobase_init_foreign( /* Check if any existing foreign key has the same id, this is needed only if user supplies the constraint name */ - for (const dict_foreign_t* existing_foreign - = UT_LIST_GET_FIRST(table->foreign_list); - existing_foreign != 0; - existing_foreign = UT_LIST_GET_NEXT( - foreign_list, existing_foreign)) { - - if (ut_strcmp(existing_foreign->id, foreign->id) == 0) { - return(false); - } + if (table->foreign_set.find(foreign) + != table->foreign_set.end()) { + return(false); } } @@ -2253,14 +2247,18 @@ innobase_check_foreigns_low( const char* col_name, bool drop) { + dict_foreign_t* foreign; ut_ad(mutex_own(&dict_sys->mutex)); /* Check if any FOREIGN KEY constraints are defined on this column. */ - for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST( - user_table->foreign_list); - foreign; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + + for (dict_foreign_set::iterator it = user_table->foreign_set.begin(); + it != user_table->foreign_set.end(); + ++it) { + + foreign = *it; + if (!drop && !(foreign->type & (DICT_FOREIGN_ON_DELETE_SET_NULL | DICT_FOREIGN_ON_UPDATE_SET_NULL))) { @@ -2292,10 +2290,13 @@ innobase_check_foreigns_low( /* Check if any FOREIGN KEY constraints in other tables are referring to the column that is being dropped. */ - for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST( - user_table->referenced_list); - foreign; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it + = user_table->referenced_set.begin(); + it != user_table->referenced_set.end(); + ++it) { + + foreign = *it; + if (innobase_dropping_foreign(foreign, drop_fk, n_drop_fk)) { continue; } @@ -3631,11 +3632,12 @@ check_if_ok_to_rename: continue; } - for (dict_foreign_t* foreign = UT_LIST_GET_FIRST( - prebuilt->table->foreign_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT( - foreign_list, foreign)) { + for (dict_foreign_set::iterator it + = prebuilt->table->foreign_set.begin(); + it != prebuilt->table->foreign_set.end(); + ++it) { + + dict_foreign_t* foreign = *it; const char* fid = strchr(foreign->id, '/'); DBUG_ASSERT(fid); @@ -4481,10 +4483,12 @@ err_exit: rename_foreign: trx->op_info = "renaming column in SYS_FOREIGN_COLS"; - for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST( - user_table->foreign_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::iterator it = user_table->foreign_set.begin(); + it != user_table->foreign_set.end(); + ++it) { + + dict_foreign_t* foreign = *it; + for (unsigned i = 0; i < foreign->n_fields; i++) { if (strcmp(foreign->foreign_col_names[i], from)) { continue; @@ -4514,10 +4518,12 @@ rename_foreign: } } - for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST( - user_table->referenced_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it + = user_table->referenced_set.begin(); + it != user_table->referenced_set.end(); + ++it) { + + dict_foreign_t* foreign = *it; for (unsigned i = 0; i < foreign->n_fields; i++) { if (strcmp(foreign->referenced_col_names[i], from)) { continue; @@ -4841,8 +4847,8 @@ innobase_update_foreign_cache( column names. No need to pass col_names or to drop constraints from the data dictionary cache. 
*/ DBUG_ASSERT(!ctx->col_names); - DBUG_ASSERT(UT_LIST_GET_LEN(user_table->foreign_list) == 0); - DBUG_ASSERT(UT_LIST_GET_LEN(user_table->referenced_list) == 0); + DBUG_ASSERT(user_table->foreign_set.empty()); + DBUG_ASSERT(user_table->referenced_set.empty()); user_table = ctx->new_table; } else { /* Drop the foreign key constraints if the diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h index 833166e783c..f1e4406fcf7 100644 --- a/storage/innobase/include/btr0cur.h +++ b/storage/innobase/include/btr0cur.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -576,6 +576,17 @@ void btr_estimate_number_of_different_key_vals( /*======================================*/ dict_index_t* index); /*!< in: index */ + +/** Gets the externally stored size of a record, in units of a database page. +@param[in] rec record +@param[in] offsets array returned by rec_get_offsets() +@return externally stored part, in units of a database page */ + +ulint +btr_rec_get_externally_stored_len( + const rec_t* rec, + const ulint* offsets); + /*******************************************************************//** Marks non-updated off-page fields as disowned by this record. The ownership must be transferred to the updated record which is inserted elsewhere in the diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 351bf5a26db..bd2a4924cc9 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -448,7 +448,7 @@ buf_page_create( mtr_t* mtr); /*!< in: mini-transaction handle */ #else /* !UNIV_HOTBACKUP */ /********************************************************************//** -Inits a page to the buffer buf_pool, for use in ibbackup --restore. */ +Inits a page to the buffer buf_pool, for use in mysqlbackup --restore. */ UNIV_INTERN void buf_page_init_for_backup_restore( diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic index b15a7fb11b6..6e419674f98 100644 --- a/storage/innobase/include/buf0buf.ic +++ b/storage/innobase/include/buf0buf.ic @@ -1160,12 +1160,6 @@ buf_page_hash_get_low( ut_a(buf_page_in_file(bpage)); ut_ad(bpage->in_page_hash); ut_ad(!bpage->in_zip_hash); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no padding in - buf_page_t. On other systems, Valgrind could complain - about uninitialized pad bytes. */ - UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage); -#endif } return(bpage); diff --git a/storage/innobase/include/dict0crea.h b/storage/innobase/include/dict0crea.h index 6ec1079957b..67eab9058da 100644 --- a/storage/innobase/include/dict0crea.h +++ b/storage/innobase/include/dict0crea.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -124,28 +124,24 @@ dict_create_add_foreign_id( const char* name, /*!< in: table name */ dict_foreign_t* foreign)/*!< in/out: foreign key */ __attribute__((nonnull)); -/********************************************************************//** -Adds foreign key definitions to data dictionary tables in the database. We -look at table->foreign_list, and also generate names to constraints that were -not named by the user. A generated constraint has a name of the format -databasename/tablename_ibfk_NUMBER, where the numbers start from 1, and are -given locally for this table, that is, the number is not global, as in the -old format constraints < 4.0.18 it used to be. -@return error code or DB_SUCCESS */ + +/** Adds the given set of foreign key objects to the dictionary tables +in the database. This function does not modify the dictionary cache. The +caller must ensure that all foreign key objects contain a valid constraint +name in foreign->id. +@param[in] local_fk_set set of foreign key objects, to be added to +the dictionary tables +@param[in] table table to which the foreign key objects in +local_fk_set belong to +@param[in,out] trx transaction +@return error code or DB_SUCCESS */ UNIV_INTERN dberr_t dict_create_add_foreigns_to_dictionary( /*===================================*/ - ulint start_id,/*!< in: if we are actually doing ALTER TABLE - ADD CONSTRAINT, we want to generate constraint - numbers which are bigger than in the table so - far; we number the constraints from - start_id + 1 up; start_id should be set to 0 if - we are creating a new table, or if the table - so far has no constraints for which the name - was generated here */ - dict_table_t* table, /*!< in: table */ - trx_t* trx) /*!< in: transaction */ + const dict_foreign_set& local_fk_set, + const dict_table_t* table, + trx_t* trx) __attribute__((nonnull, warn_unused_result)); /****************************************************************//** Creates the tablespaces and datafiles system tables inside InnoDB diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h index 7d14df09cb2..b1a82b4b60f 100644 --- a/storage/innobase/include/dict0dict.h +++ b/storage/innobase/include/dict0dict.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. Copyright (c) 2013, SkySQL Ab. All Rights Reserved. @@ -46,6 +46,9 @@ Created 1/8/1996 Heikki Tuuri #include "fsp0fsp.h" #include "dict0pagecompress.h" +extern bool innodb_table_stats_not_found; +extern bool innodb_index_stats_not_found; + #ifndef UNIV_HOTBACKUP # include "sync0sync.h" # include "sync0rw.h" @@ -1448,6 +1451,28 @@ UNIV_INTERN void dict_mutex_exit_for_mysql(void); /*===========================*/ + +/** Create a dict_table_t's stats latch or delay for lazy creation. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to create +@param[in] enabled if false then the latch is disabled +and dict_table_stats_lock()/unlock() become noop on this table. 
*/ + +void +dict_table_stats_latch_create( + dict_table_t* table, + bool enabled); + +/** Destroy a dict_table_t's stats latch. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to destroy */ + +void +dict_table_stats_latch_destroy( + dict_table_t* table); + /**********************************************************************//** Lock the appropriate latch to protect a given table's statistics. table->id is used to pick the corresponding latch from a global array of diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index 46f8690fd5f..6528ee39acc 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. Copyright (c) 2013, SkySQL Ab. All Rights Reserved. @@ -47,6 +47,9 @@ Created 1/8/1996 Heikki Tuuri #include "hash0hash.h" #include "trx0types.h" #include "fts0fts.h" +#include "os0once.h" +#include <set> +#include <algorithm> /* Forward declaration. */ struct ib_rbt_t; @@ -682,6 +685,9 @@ struct dict_index_t{ ulint stat_n_leaf_pages; /*!< approximate number of leaf pages in the index tree */ + bool stats_error_printed; + /*!< has persistent statistics error printed + for this index ? */ /* @} */ /** Statistics for defragmentation, these numbers are estimations and could be very inaccurate at certain times, e.g. right after restart, @@ -777,12 +783,106 @@ struct dict_foreign_t{ does not generate new indexes implicitly */ dict_index_t* referenced_index;/*!< referenced index */ - UT_LIST_NODE_T(dict_foreign_t) - foreign_list; /*!< list node for foreign keys of the - table */ - UT_LIST_NODE_T(dict_foreign_t) - referenced_list;/*!< list node for referenced - keys of the table */ +}; + +/** Compare two dict_foreign_t objects using their ids. Used in the ordering +of dict_table_t::foreign_set and dict_table_t::referenced_set. It returns +true if the first argument is considered to go before the second in the +strict weak ordering it defines, and false otherwise. */ +struct dict_foreign_compare { + + bool operator()( + const dict_foreign_t* lhs, + const dict_foreign_t* rhs) const + { + return(ut_strcmp(lhs->id, rhs->id) < 0); + } +}; + +/** A function object to find a foreign key with the given index as the +referenced index. Return the foreign key with matching criteria or NULL */ +struct dict_foreign_with_index { + + dict_foreign_with_index(const dict_index_t* index) + : m_index(index) + {} + + bool operator()(const dict_foreign_t* foreign) const + { + return(foreign->referenced_index == m_index); + } + + const dict_index_t* m_index; +}; + +/* A function object to check if the foreign constraint is between different +tables. Returns true if foreign key constraint is between different tables, +false otherwise. */ +struct dict_foreign_different_tables { + + bool operator()(const dict_foreign_t* foreign) const + { + return(foreign->foreign_table != foreign->referenced_table); + } +}; + +/** A function object to check if the foreign key constraint has the same +name as given. 
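Function objects such as dict_foreign_with_index above are written to be plugged into <algorithm> routines over the new sets. A hedged, self-contained sketch of the typical std::find_if usage (simplified types, not the actual dictionary code):

    #include <algorithm>
    #include <cstring>
    #include <iostream>
    #include <set>

    struct Index { const char* name; };

    struct Foreign {
        const char*  id;
        const Index* referenced_index;
    };

    struct ForeignCompare {
        bool operator()(const Foreign* l, const Foreign* r) const {
            return std::strcmp(l->id, r->id) < 0;
        }
    };

    // Predicate: does this constraint use the given index on the referenced side?
    struct ForeignWithIndex {
        explicit ForeignWithIndex(const Index* index) : m_index(index) {}
        bool operator()(const Foreign* f) const {
            return f->referenced_index == m_index;
        }
        const Index* m_index;
    };

    typedef std::set<Foreign*, ForeignCompare> ForeignSet;

    int main() {
        Index   idx = {"PRIMARY"};
        Foreign f1  = {"db/child_ibfk_1", &idx};
        Foreign f2  = {"db/child_ibfk_2", 0};

        ForeignSet referenced_set;
        referenced_set.insert(&f1);
        referenced_set.insert(&f2);

        ForeignSet::iterator it = std::find_if(referenced_set.begin(),
                                               referenced_set.end(),
                                               ForeignWithIndex(&idx));
        std::cout << (it != referenced_set.end() ? (*it)->id : "no match") << "\n";
        return 0;
    }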
If the full name of the foreign key constraint doesn't match, +then, check if removing the database name from the foreign key constraint +matches. Return true if it matches, false otherwise. */ +struct dict_foreign_matches_id { + + dict_foreign_matches_id(const char* id) + : m_id(id) + {} + + bool operator()(const dict_foreign_t* foreign) const + { + if (0 == innobase_strcasecmp(foreign->id, m_id)) { + return(true); + } + if (const char* pos = strchr(foreign->id, '/')) { + if (0 == innobase_strcasecmp(m_id, pos + 1)) { + return(true); + } + } + return(false); + } + + const char* m_id; +}; + +typedef std::set<dict_foreign_t*, dict_foreign_compare> dict_foreign_set; + +/*********************************************************************//** +Frees a foreign key struct. */ +inline +void +dict_foreign_free( +/*==============*/ + dict_foreign_t* foreign) /*!< in, own: foreign key struct */ +{ + mem_heap_free(foreign->heap); +} + +/** The destructor will free all the foreign key constraints in the set +by calling dict_foreign_free() on each of the foreign key constraints. +This is used to free the allocated memory when a local set goes out +of scope. */ +struct dict_foreign_set_free { + + dict_foreign_set_free(const dict_foreign_set& foreign_set) + : m_foreign_set(foreign_set) + {} + + ~dict_foreign_set_free() + { + std::for_each(m_foreign_set.begin(), + m_foreign_set.end(), + dict_foreign_free); + } + + const dict_foreign_set& m_foreign_set; }; /** The flags for ON_UPDATE and ON_DELETE can be ORed; the default is that @@ -804,6 +904,8 @@ the table, DML from memcached will be blocked. */ /** Data structure for a database table. Most fields will be initialized to 0, NULL or FALSE in dict_mem_table_create(). */ struct dict_table_t{ + + table_id_t id; /*!< id of the table */ mem_heap_t* heap; /*!< memory heap */ char* name; /*!< table name */ @@ -858,13 +960,16 @@ struct dict_table_t{ hash_node_t id_hash; /*!< hash chain node */ UT_LIST_BASE_NODE_T(dict_index_t) indexes; /*!< list of indexes of the table */ - UT_LIST_BASE_NODE_T(dict_foreign_t) - foreign_list;/*!< list of foreign key constraints + + dict_foreign_set foreign_set; + /*!< set of foreign key constraints in the table; these refer to columns in other tables */ - UT_LIST_BASE_NODE_T(dict_foreign_t) - referenced_list;/*!< list of foreign key constraints + + dict_foreign_set referenced_set; + /*!< list of foreign key constraints which refer to this table */ + UT_LIST_NODE_T(dict_table_t) table_LRU; /*!< node of the LRU list of tables */ unsigned fk_max_recusive_level:8; @@ -914,6 +1019,10 @@ struct dict_table_t{ initialized in dict_table_add_to_cache() */ /** Statistics for query optimization */ /* @{ */ + + volatile os_once::state_t stats_latch_created; + /*!< Creation state of 'stats_latch'. */ + rw_lock_t* stats_latch; /*!< this latch protects: dict_table_t::stat_initialized dict_table_t::stat_n_rows (*) @@ -1022,6 +1131,9 @@ struct dict_table_t{ /*!< see BG_STAT_* above. Writes are covered by dict_sys->mutex. Dirty reads are possible. */ + bool stats_error_printed; + /*!< Has persistent stats error beein + already printed for this table ? */ /* @} */ /*----------------------*/ /**!< The following fields are used by the @@ -1099,6 +1211,19 @@ struct dict_table_t{ #endif /* UNIV_DEBUG */ }; +/** A function object to add the foreign key constraint to the referenced set +of the referenced table, if it exists in the dictionary cache. 
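dict_foreign_set_free above is a small RAII guard: whatever is still owned by a local set when it goes out of scope is released through dict_foreign_free(). A self-contained sketch of the same pattern, with plain malloc/free standing in for the mem_heap allocator:

    #include <algorithm>
    #include <cstdio>
    #include <cstdlib>
    #include <set>

    struct Foreign { int n_fields; };

    // Stand-in for dict_foreign_free(): releases one constraint object.
    static void foreign_free(Foreign* f) {
        std::free(f);
    }

    typedef std::set<Foreign*> ForeignSet;

    // RAII guard: frees everything left in the set at scope exit.
    struct ForeignSetFree {
        explicit ForeignSetFree(const ForeignSet& s) : m_set(s) {}
        ~ForeignSetFree() {
            std::for_each(m_set.begin(), m_set.end(), foreign_free);
        }
        const ForeignSet& m_set;
    };

    int main() {
        ForeignSet     local_fk_set;
        ForeignSetFree guard(local_fk_set);

        local_fk_set.insert(static_cast<Foreign*>(std::malloc(sizeof(Foreign))));
        local_fk_set.insert(static_cast<Foreign*>(std::malloc(sizeof(Foreign))));

        std::printf("%d constraints pending\n", (int) local_fk_set.size());
        return 0;  // the guard's destructor frees both objects here
    }

Presumably the success path hands the objects over to the dictionary cache and clears the local set, leaving the guard to clean up only on error paths.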
*/ +struct dict_foreign_add_to_referenced_table { + void operator()(dict_foreign_t* foreign) const + { + if (dict_table_t* table = foreign->referenced_table) { + std::pair<dict_foreign_set::iterator, bool> ret + = table->referenced_set.insert(foreign); + ut_a(ret.second); + } + } +}; + #ifndef UNIV_NONINL #include "dict0mem.ic" #endif diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index bb4cfe8fe92..5148773d95c 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -49,7 +49,7 @@ struct fil_space_t; typedef std::list<const char*> space_name_list_t; /** When mysqld is run, the default directory "." is the mysqld datadir, -but in the MySQL Embedded Server Library and ibbackup it is not the default +but in the MySQL Embedded Server Library and mysqlbackup it is not the default directory, and we must set the base file path explicitly */ extern const char* fil_path_to_mysql_datadir; @@ -454,8 +454,8 @@ exists and the space id in it matches. Replays the create operation if a file at that path does not exist yet. If the database directory for the file to be created does not exist, then we create the directory, too. -Note that ibbackup --apply-log sets fil_path_to_mysql_datadir to point to the -datadir that we should use in replaying the file operations. +Note that mysqlbackup --apply-log sets fil_path_to_mysql_datadir to point to +the datadir that we should use in replaying the file operations. @return end of log record, or NULL if the record was not completely contained between ptr and end_ptr */ UNIV_INTERN @@ -708,9 +708,9 @@ fil_space_for_table_exists_in_mem( #else /* !UNIV_HOTBACKUP */ /********************************************************************//** Extends all tablespaces to the size stored in the space header. During the -ibbackup --apply-log phase we extended the spaces on-demand so that log records -could be appllied, but that may have left spaces still too small compared to -the size stored in the space header. */ +mysqlbackup --apply-log phase we extended the spaces on-demand so that log +records could be appllied, but that may have left spaces still too small +compared to the size stored in the space header. */ UNIV_INTERN void fil_extend_tablespaces_to_stored_len(void); diff --git a/storage/innobase/include/fts0ast.h b/storage/innobase/include/fts0ast.h index c0aac6d8e4c..50ee587e282 100644 --- a/storage/innobase/include/fts0ast.h +++ b/storage/innobase/include/fts0ast.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -76,6 +76,7 @@ enum fts_ast_oper_t { struct fts_lexer_t; struct fts_ast_node_t; struct fts_ast_state_t; +struct fts_ast_string_t; typedef dberr_t (*fts_ast_callback)(fts_ast_oper_t, fts_ast_node_t*, void*); @@ -101,16 +102,16 @@ extern fts_ast_node_t* fts_ast_create_node_term( /*=====================*/ - void* arg, /*!< in: ast state */ - const char* ptr); /*!< in: term string */ + void* arg, /*!< in: ast state */ + const fts_ast_string_t* ptr); /*!< in: term string */ /******************************************************************** Create an AST text node */ extern fts_ast_node_t* fts_ast_create_node_text( /*=====================*/ - void* arg, /*!< in: ast state */ - const char* ptr); /*!< in: text string */ + void* arg, /*!< in: ast state */ + const fts_ast_string_t* ptr); /*!< in: text string */ /******************************************************************** Create an AST expr list node */ extern @@ -233,16 +234,66 @@ fts_lexer_free( free */ __attribute__((nonnull)); +/** +Create an ast string object, with NUL-terminator, so the string +has one more byte than len +@param[in] str pointer to string +@param[in] len length of the string +@return ast string with NUL-terminator */ +UNIV_INTERN +fts_ast_string_t* +fts_ast_string_create( + const byte* str, + ulint len); + +/** +Free an ast string instance +@param[in,out] ast_str string to free */ +UNIV_INTERN +void +fts_ast_string_free( + fts_ast_string_t* ast_str); + +/** +Translate ast string of type FTS_AST_NUMB to unsigned long by strtoul +@param[in] str string to translate +@param[in] base the base +@return translated number */ +UNIV_INTERN +ulint +fts_ast_string_to_ul( + const fts_ast_string_t* ast_str, + int base); + +/** +Print the ast string +@param[in] str string to print */ +UNIV_INTERN +void +fts_ast_string_print( + const fts_ast_string_t* ast_str); + +/* String of length len. +We always store the string of length len with a terminating '\0', +regardless of there is any 0x00 in the string itself */ +struct fts_ast_string_t { + /*!< Pointer to string. */ + byte* str; + + /*!< Length of the string. 
*/ + ulint len; +}; + /* Query term type */ struct fts_ast_term_t { - byte* ptr; /*!< Pointer to term string.*/ - ibool wildcard; /*!< TRUE if wild card set.*/ + fts_ast_string_t* ptr; /*!< Pointer to term string.*/ + ibool wildcard; /*!< TRUE if wild card set.*/ }; /* Query text type */ struct fts_ast_text_t { - byte* ptr; /*!< Pointer to term string.*/ - ulint distance; /*!< > 0 if proximity distance + fts_ast_string_t* ptr; /*!< Pointer to text string.*/ + ulint distance; /*!< > 0 if proximity distance set */ }; diff --git a/storage/innobase/include/fts0fts.h b/storage/innobase/include/fts0fts.h index 5bea5bc0e97..a2996ecacc8 100644 --- a/storage/innobase/include/fts0fts.h +++ b/storage/innobase/include/fts0fts.h @@ -745,6 +745,7 @@ void fts_savepoint_take( /*===============*/ trx_t* trx, /*!< in: transaction */ + fts_trx_t* fts_trx, /*!< in: fts transaction */ const char* name) /*!< in: savepoint name */ __attribute__((nonnull)); /**********************************************************************//** diff --git a/storage/innobase/include/fts0pars.h b/storage/innobase/include/fts0pars.h index 50f636944e5..8108e811599 100644 --- a/storage/innobase/include/fts0pars.h +++ b/storage/innobase/include/fts0pars.h @@ -53,9 +53,9 @@ typedef union YYSTYPE /* Line 2068 of yacc.c */ #line 61 "fts0pars.y" - int oper; - char* token; - fts_ast_node_t* node; + int oper; + fts_ast_string_t* token; + fts_ast_node_t* node; diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index b62453de15f..d96fdfa9d89 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -299,7 +299,7 @@ lock_rec_insert_check_and_lock( inserted record maybe should inherit LOCK_GAP type locks from the successor record */ - __attribute__((nonnull, warn_unused_result)); + __attribute__((nonnull(2,3,4,6,7), warn_unused_result)); /*********************************************************************//** Checks if locks of other transactions prevent an immediate modify (update, delete mark, or delete unmark) of a clustered index record. If they do, diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h index 61318e34561..ad9710b1870 100644 --- a/storage/innobase/include/log0log.h +++ b/storage/innobase/include/log0log.h @@ -147,6 +147,13 @@ lsn_t log_get_lsn(void); /*=============*/ /************************************************************//** +Gets the current lsn. +@return current lsn */ +UNIV_INLINE +lsn_t +log_get_lsn_nowait(void); +/*=============*/ +/************************************************************//** Gets the last lsn that is fully flushed to disk. @return last flushed lsn */ UNIV_INLINE @@ -649,13 +656,13 @@ extern log_t* log_sys; megabyte. This information might have been used - since ibbackup version 0.35 but + since mysqlbackup version 0.35 but before 1.41 to decide if unused ends of non-auto-extending data files in space 0 can be truncated. This information was made obsolete - by ibbackup --compress. */ + by mysqlbackup --compress. 
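fts_ast_term_t and fts_ast_text_t above now carry an fts_ast_string_t, a byte buffer with an explicit length plus a trailing NUL, so FTS query terms may contain 0x00 bytes and still be handed to C string APIs. A rough standalone sketch of the create/free pair, assuming plain malloc in place of the InnoDB allocator:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Length-counted byte string that is also NUL-terminated, so embedded
    // 0x00 bytes survive but the buffer can still be printed as a C string.
    struct AstString {
        unsigned char* str;
        size_t         len;
    };

    static AstString* ast_string_create(const unsigned char* src, size_t len) {
        AstString* s = static_cast<AstString*>(std::malloc(sizeof(AstString)));
        s->str = static_cast<unsigned char*>(std::malloc(len + 1));  // +1 for '\0'
        std::memcpy(s->str, src, len);
        s->str[len] = '\0';
        s->len = len;
        return s;
    }

    static void ast_string_free(AstString* s) {
        std::free(s->str);
        std::free(s);
    }

    int main() {
        const unsigned char term[] = {'a', 'b', 0x00, 'c'};
        AstString* s = ast_string_create(term, sizeof(term));
        std::printf("stored %d bytes, first byte '%c'\n", (int) s->len, s->str[0]);
        ast_string_free(s);
        return 0;
    }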
*/ #define LOG_CHECKPOINT_FSP_MAGIC_N (12 + LOG_CHECKPOINT_ARRAY_END) /*!< Not used (0); This magic number tells if the @@ -684,7 +691,7 @@ extern log_t* log_sys; /* a 32-byte field which contains the string 'ibbackup' and the creation time if the log file was - created by ibbackup --restore; + created by mysqlbackup --restore; when mysqld is first time started on the restored database, it can print helpful info for the user */ diff --git a/storage/innobase/include/log0log.ic b/storage/innobase/include/log0log.ic index 67792395ac9..7c79eb96ca9 100644 --- a/storage/innobase/include/log0log.ic +++ b/storage/innobase/include/log0log.ic @@ -434,6 +434,26 @@ log_get_flush_lsn(void) return(lsn); } +/************************************************************//** +Gets the current lsn with a trylock +@return current lsn or 0 if false*/ +UNIV_INLINE +lsn_t +log_get_lsn_nowait(void) +/*=============*/ +{ + lsn_t lsn; + + if (mutex_enter_nowait(&(log_sys->mutex))) + return 0; + + lsn = log_sys->lsn; + + mutex_exit(&(log_sys->mutex)); + + return(lsn); +} + /**************************************************************** Gets the log group capacity. It is OK to read the value without holding log_sys->mutex because it is constant. diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index 8f19d396c92..15f4f0235b5 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -127,7 +127,7 @@ enum os_file_create_t { #define OS_FILE_READ_ONLY 333 #define OS_FILE_READ_WRITE 444 -#define OS_FILE_READ_ALLOW_DELETE 555 /* for ibbackup */ +#define OS_FILE_READ_ALLOW_DELETE 555 /* for mysqlbackup */ /* Options for file_create */ #define OS_FILE_AIO 61 diff --git a/storage/innobase/include/os0once.h b/storage/innobase/include/os0once.h new file mode 100644 index 00000000000..a8bbaf1d2d4 --- /dev/null +++ b/storage/innobase/include/os0once.h @@ -0,0 +1,125 @@ +/***************************************************************************** + +Copyright (c) 2014, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file include/os0once.h +A class that aids executing a given function exactly once in a multi-threaded +environment. + +Created Feb 20, 2014 Vasil Dimov +*******************************************************/ + +#ifndef os0once_h +#define os0once_h + +#include "univ.i" + +#include "os0sync.h" +#include "ut0ut.h" + +/** Execute a given function exactly once in a multi-threaded environment +or wait for the function to be executed by another thread. + +Example usage: +First the user must create a control variable of type os_once::state_t and +assign it os_once::NEVER_DONE. 
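log_get_lsn_nowait() in the log0log.ic hunk above returns 0 instead of blocking when the log mutex cannot be taken immediately, so a caller that only needs a best-effort value does not stall. A hedged pthreads analogue of that try-lock pattern (illustrative only; the real code uses mutex_enter_nowait on log_sys->mutex):

    #include <cstdio>
    #include <pthread.h>

    typedef unsigned long long lsn_t;

    static pthread_mutex_t log_mutex   = PTHREAD_MUTEX_INITIALIZER;
    static lsn_t           current_lsn = 123456789;

    // Return the current LSN, or 0 if the mutex is currently contended.
    static lsn_t get_lsn_nowait() {
        if (pthread_mutex_trylock(&log_mutex) != 0) {
            return 0;  // caller must treat 0 as "value not available right now"
        }
        lsn_t lsn = current_lsn;
        pthread_mutex_unlock(&log_mutex);
        return lsn;
    }

    int main() {
        std::printf("lsn=%llu\n", get_lsn_nowait());
        return 0;
    }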
+Then the user must pass this variable, together with a function to be +executed to os_once::do_or_wait_for_done(). + +Multiple threads can call os_once::do_or_wait_for_done() simultaneously with +the same (os_once::state_t) control variable. The provided function will be +called exactly once and when os_once::do_or_wait_for_done() returns then this +function has completed execution, by this or another thread. In other words +os_once::do_or_wait_for_done() will either execute the provided function or +will wait for its execution to complete if it is already called by another +thread or will do nothing if the function has already completed its execution +earlier. + +This mimics pthread_once(3), but unfortunatelly pthread_once(3) does not +support passing arguments to the init_routine() function. We should use +std::call_once() when we start compiling with C++11 enabled. */ +class os_once { +public: + /** Control variables' state type */ + typedef ib_uint32_t state_t; + + /** Not yet executed. */ + static const state_t NEVER_DONE = 0; + + /** Currently being executed by this or another thread. */ + static const state_t IN_PROGRESS = 1; + + /** Finished execution. */ + static const state_t DONE = 2; + +#ifdef HAVE_ATOMIC_BUILTINS + /** Call a given function or wait its execution to complete if it is + already called by another thread. + @param[in,out] state control variable + @param[in] do_func function to call + @param[in,out] do_func_arg an argument to pass to do_func(). */ + static + void + do_or_wait_for_done( + volatile state_t* state, + void (*do_func)(void*), + void* do_func_arg) + { + /* Avoid calling os_compare_and_swap_uint32() in the most + common case. */ + if (*state == DONE) { + return; + } + + if (os_compare_and_swap_uint32(state, + NEVER_DONE, IN_PROGRESS)) { + /* We are the first. Call the function. */ + + do_func(do_func_arg); + + const bool swapped = os_compare_and_swap_uint32( + state, IN_PROGRESS, DONE); + + ut_a(swapped); + } else { + /* The state is not NEVER_DONE, so either it is + IN_PROGRESS (somebody is calling the function right + now or DONE (it has already been called and completed). + Wait for it to become DONE. */ + for (;;) { + const state_t s = *state; + + switch (s) { + case DONE: + return; + case IN_PROGRESS: + break; + case NEVER_DONE: + /* fall through */ + default: + ut_error; + } + + UT_RELAX_CPU(); + } + } + } +#endif /* HAVE_ATOMIC_BUILTINS */ +}; + +#endif /* os0once_h */ diff --git a/storage/innobase/include/os0sync.h b/storage/innobase/include/os0sync.h index 9b4ce2343c5..8bf57677ecf 100644 --- a/storage/innobase/include/os0sync.h +++ b/storage/innobase/include/os0sync.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -357,6 +357,10 @@ Atomic compare-and-swap and increment for InnoDB. 
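os_once::do_or_wait_for_done() is the compare-and-swap "run exactly once" helper that later lets dict_table_t create its stats latch lazily. A self-contained approximation using the GCC __sync builtins (illustrative; the real class uses os_compare_and_swap_uint32 and UT_RELAX_CPU in the wait loop):

    #include <cstdio>

    // States, mirroring os_once: NEVER_DONE -> IN_PROGRESS -> DONE.
    enum { NEVER_DONE = 0, IN_PROGRESS = 1, DONE = 2 };

    // Run init(arg) exactly once, even if several threads race to this call.
    static void do_or_wait_for_done(volatile unsigned* state,
                                    void (*init)(void*), void* arg) {
        if (*state == DONE) {
            return;                               // common fast path
        }
        if (__sync_bool_compare_and_swap(state, NEVER_DONE, IN_PROGRESS)) {
            init(arg);                            // we won the race
            __sync_bool_compare_and_swap(state, IN_PROGRESS, DONE);
        } else {
            while (*state != DONE) {
                // another thread is running init(); spin until it finishes
            }
        }
    }

    static void create_latch(void* arg) {
        *static_cast<int*>(arg) = 42;             // stands in for rw_lock_create()
    }

    int main() {
        volatile unsigned state = NEVER_DONE;
        int latch = 0;
        do_or_wait_for_done(&state, create_latch, &latch);
        std::printf("latch initialised to %d\n", latch);
        return 0;
    }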
*/ # define HAVE_ATOMIC_BUILTINS +# ifdef HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE +# define HAVE_ATOMIC_BUILTINS_BYTE +# endif + # ifdef HAVE_IB_GCC_ATOMIC_BUILTINS_64 # define HAVE_ATOMIC_BUILTINS_64 # endif @@ -434,9 +438,13 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */ # define os_atomic_test_and_set_ulint(ptr, new_val) \ __sync_lock_test_and_set(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + __sync_lock_release(ptr) + #elif defined(HAVE_IB_SOLARIS_ATOMICS) # define HAVE_ATOMIC_BUILTINS +# define HAVE_ATOMIC_BUILTINS_BYTE # define HAVE_ATOMIC_BUILTINS_64 /* If not compiling with GCC or GCC doesn't support the atomic @@ -515,9 +523,13 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */ # define os_atomic_test_and_set_ulint(ptr, new_val) \ atomic_swap_ulong(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + (void) atomic_swap_uchar(ptr, 0) + #elif defined(HAVE_WINDOWS_ATOMICS) # define HAVE_ATOMIC_BUILTINS +# define HAVE_ATOMIC_BUILTINS_BYTE # ifndef _WIN32 # define HAVE_ATOMIC_BUILTINS_64 @@ -574,7 +586,8 @@ Returns true if swapped, ptr is pointer to target, old_val is value to compare to, new_val is the value to swap in. */ # define os_compare_and_swap_uint32(ptr, old_val, new_val) \ - (win_cmp_and_xchg_dword(ptr, new_val, old_val) == old_val) + (InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), \ + new_val, old_val) == old_val) # define os_compare_and_swap_ulint(ptr, old_val, new_val) \ (win_cmp_and_xchg_ulint(ptr, new_val, old_val) == old_val) @@ -637,6 +650,9 @@ clobbered */ # define os_atomic_test_and_set_ulong(ptr, new_val) \ InterlockedExchange(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + (void) InterlockedExchange(ptr, 0) + #else # define IB_ATOMICS_STARTUP_MSG \ "Mutexes and rw_locks use InnoDB's own implementation" @@ -684,6 +700,65 @@ for synchronization */ os_decrement_counter_by_amount(mutex, counter, 1);\ } while (0); +/** barrier definitions for memory ordering */ +#if defined __i386__ || defined __x86_64__ || defined _M_IX86 || defined _M_X64 || defined __WIN__ +/* Performance regression was observed at some conditions for Intel +architecture. Disable memory barrier for Intel architecture for now. 
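The new os_atomic_lock_release_byte() maps to __sync_lock_release() on GCC, giving mutex_reset_lock_word() (further down, in sync0sync.ic) a proper release store instead of the old "test-and-set to 0" workaround. A minimal spinlock-style sketch of the acquire/release pair:

    #include <cstdio>

    static volatile unsigned char lock_word = 0;

    // Acquire: __sync_lock_test_and_set returns the previous value,
    // so 0 means we took the lock.
    static bool try_lock() {
        return __sync_lock_test_and_set(&lock_word, 1) == 0;
    }

    // Release: __sync_lock_release stores 0 with release semantics.
    static void unlock() {
        __sync_lock_release(&lock_word);
    }

    int main() {
        if (try_lock()) {
            std::printf("lock acquired\n");
            unlock();
        }
        return 0;
    }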
*/ +# define os_rmb do { } while(0) +# define os_wmb do { } while(0) +# define os_isync do { } while(0) +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "Memory barrier is not used" +#elif defined(HAVE_IB_GCC_ATOMIC_THREAD_FENCE) +# define HAVE_MEMORY_BARRIER +# define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE) +# define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE) +#ifdef __powerpc__ +# define os_isync __asm __volatile ("isync":::"memory") +#else +#define os_isync do { } while(0) +#endif + +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "GCC builtin __atomic_thread_fence() is used for memory barrier" + +#elif defined(HAVE_IB_GCC_SYNC_SYNCHRONISE) +# define HAVE_MEMORY_BARRIER +# define os_rmb __sync_synchronize() +# define os_wmb __sync_synchronize() +# define os_isync __sync_synchronize() +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "GCC builtin __sync_synchronize() is used for memory barrier" + +#elif defined(HAVE_IB_MACHINE_BARRIER_SOLARIS) +# define HAVE_MEMORY_BARRIER +# include <mbarrier.h> +# define os_rmb __machine_r_barrier() +# define os_wmb __machine_w_barrier() +# define os_isync os_rmb; os_wmb +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "Solaris memory ordering functions are used for memory barrier" + +#elif defined(HAVE_WINDOWS_MM_FENCE) +# define HAVE_MEMORY_BARRIER +# include <intrin.h> +# define os_rmb _mm_lfence() +# define os_wmb _mm_sfence() +# define os_isync os_rmb; os_wmb +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "_mm_lfence() and _mm_sfence() are used for memory barrier" + +# define os_atomic_lock_release_byte(ptr) \ + (void) InterlockedExchange(ptr, 0) + +#else +# define os_rmb do { } while(0) +# define os_wmb do { } while(0) +# define os_isync do { } while(0) +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "Memory barrier is not used" +#endif + #ifndef UNIV_NONINL #include "os0sync.ic" #endif diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 06d5f61f638..6844d0fd0c6 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -506,6 +506,9 @@ extern struct export_var_t export_vars; /** Global counters */ extern srv_stats_t srv_stats; +/** Simulate compression failures. */ +extern uint srv_simulate_comp_failures; + # ifdef UNIV_PFS_THREAD /* Keys to register InnoDB threads with performance schema */ extern mysql_pfs_key_t buf_page_cleaner_thread_key; diff --git a/storage/innobase/include/sync0rw.h b/storage/innobase/include/sync0rw.h index 34cd8ef4bd6..b36e04f2810 100644 --- a/storage/innobase/include/sync0rw.h +++ b/storage/innobase/include/sync0rw.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -108,14 +108,8 @@ extern ib_mutex_t rw_lock_list_mutex; #ifdef UNIV_SYNC_DEBUG /* The global mutex which protects debug info lists of all rw-locks. To modify the debug info list of an rw-lock, this mutex has to be - acquired in addition to the mutex protecting the lock. 
*/ -extern ib_mutex_t rw_lock_debug_mutex; -extern os_event_t rw_lock_debug_event; /*!< If deadlock detection does - not get immediately the mutex it - may wait for this event */ -extern ibool rw_lock_debug_waiters; /*!< This is set to TRUE, if - there may be waiters for the event */ +extern os_fast_mutex_t rw_lock_debug_mutex; #endif /* UNIV_SYNC_DEBUG */ /** Counters for RW locks. */ @@ -141,7 +135,7 @@ extern mysql_pfs_key_t trx_i_s_cache_lock_key; extern mysql_pfs_key_t trx_purge_latch_key; extern mysql_pfs_key_t index_tree_rw_lock_key; extern mysql_pfs_key_t index_online_log_key; -extern mysql_pfs_key_t dict_table_stats_latch_key; +extern mysql_pfs_key_t dict_table_stats_key; extern mysql_pfs_key_t trx_sys_rw_lock_key; extern mysql_pfs_key_t hash_table_rw_lock_key; #endif /* UNIV_PFS_RWLOCK */ diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic index 467a8b4eb44..bb05ae7daf1 100644 --- a/storage/innobase/include/sync0rw.ic +++ b/storage/innobase/include/sync0rw.ic @@ -93,6 +93,7 @@ rw_lock_set_waiter_flag( (void) os_compare_and_swap_ulint(&lock->waiters, 0, 1); #else /* INNODB_RW_LOCKS_USE_ATOMICS */ lock->waiters = 1; + os_wmb; #endif /* INNODB_RW_LOCKS_USE_ATOMICS */ } @@ -110,6 +111,7 @@ rw_lock_reset_waiter_flag( (void) os_compare_and_swap_ulint(&lock->waiters, 1, 0); #else /* INNODB_RW_LOCKS_USE_ATOMICS */ lock->waiters = 0; + os_wmb; #endif /* INNODB_RW_LOCKS_USE_ATOMICS */ } @@ -199,7 +201,10 @@ rw_lock_lock_word_decr( ulint amount) /*!< in: amount to decrement */ { #ifdef INNODB_RW_LOCKS_USE_ATOMICS - lint local_lock_word = lock->lock_word; + lint local_lock_word; + + os_rmb; + local_lock_word = lock->lock_word; while (local_lock_word > 0) { if (os_compare_and_swap_lint(&lock->lock_word, local_lock_word, diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h index f26e66f1a87..b1d99f7244c 100644 --- a/storage/innobase/include/sync0sync.h +++ b/storage/innobase/include/sync0sync.h @@ -49,6 +49,8 @@ extern "C" my_bool timed_mutexes; #ifdef HAVE_WINDOWS_ATOMICS typedef LONG lock_word_t; /*!< On Windows, InterlockedExchange operates on LONG variable */ +#elif defined(HAVE_ATOMIC_BUILTINS) && !defined(HAVE_ATOMIC_BUILTINS_BYTE) +typedef ulint lock_word_t; #else typedef byte lock_word_t; #endif diff --git a/storage/innobase/include/sync0sync.ic b/storage/innobase/include/sync0sync.ic index f34f3f90b63..f9017230497 100644 --- a/storage/innobase/include/sync0sync.ic +++ b/storage/innobase/include/sync0sync.ic @@ -80,7 +80,11 @@ ib_mutex_test_and_set( ib_mutex_t* mutex) /*!< in: mutex */ { #if defined(HAVE_ATOMIC_BUILTINS) +# if defined(HAVE_ATOMIC_BUILTINS_BYTE) return(os_atomic_test_and_set_byte(&mutex->lock_word, 1)); +# else + return(os_atomic_test_and_set_ulint(&mutex->lock_word, 1)); +# endif #else ibool ret; @@ -92,6 +96,7 @@ ib_mutex_test_and_set( ut_a(mutex->lock_word == 0); mutex->lock_word = 1; + os_wmb; } return((byte) ret); @@ -108,10 +113,7 @@ mutex_reset_lock_word( ib_mutex_t* mutex) /*!< in: mutex */ { #if defined(HAVE_ATOMIC_BUILTINS) - /* In theory __sync_lock_release should be used to release the lock. - Unfortunately, it does not work properly alone. The workaround is - that more conservative __sync_lock_test_and_set is used instead. 
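The sync0rw.ic and sync0sync.ic hunks pair the plain stores and loads of the waiters flag with the new os_wmb/os_rmb barriers. A hedged sketch of that pairing with GCC's __atomic_thread_fence, placed where the patch places the macros (illustrative; per the os0sync.h hunk the macros expand to no-ops on x86):

    #include <cstdio>

    static volatile unsigned waiters = 0;

    // Mirrors rw_lock_set_waiter_flag(): plain store followed by os_wmb.
    static void set_waiter_flag() {
        waiters = 1;
        __atomic_thread_fence(__ATOMIC_RELEASE);   // os_wmb in the patch
    }

    // Mirrors mutex_get_waiters(): os_rmb before reading the flag.
    static unsigned get_waiters() {
        __atomic_thread_fence(__ATOMIC_ACQUIRE);   // os_rmb in the patch
        return waiters;
    }

    int main() {
        set_waiter_flag();
        std::printf("waiters=%u\n", get_waiters());
        return 0;
    }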
*/ - os_atomic_test_and_set_byte(&mutex->lock_word, 0); + os_atomic_lock_release_byte(&mutex->lock_word); #else mutex->lock_word = 0; @@ -147,6 +149,7 @@ mutex_get_waiters( ptr = &(mutex->waiters); + os_rmb; return(*ptr); /* Here we assume that the read of a single word from memory is atomic */ } @@ -181,6 +184,7 @@ mutex_exit_func( to wake up possible hanging threads if they are missed in mutex_signal_object. */ + os_isync; if (mutex_get_waiters(mutex) != 0) { mutex_signal_object(mutex); diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index a30bbdbebb2..7c92445b796 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -992,6 +992,11 @@ struct trx_t{ count of tables being flushed. */ /*------------------------------*/ + THD* current_lock_mutex_owner; + /*!< If this is equal to current_thd, + then in innobase_kill_query() we know we + already hold the lock_sys->mutex. */ + /*------------------------------*/ #ifdef UNIV_DEBUG ulint start_line; /*!< Track where it was started from */ const char* start_file; /*!< Filename where it was started */ diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index 6c299aadb0f..685d89dc854 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -45,7 +45,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 6 -#define INNODB_VERSION_BUGFIX 17 +#define INNODB_VERSION_BUGFIX 20 /* The following is the InnoDB version as shown in SELECT plugin_version FROM information_schema.plugins; @@ -466,10 +466,10 @@ typedef unsigned __int64 ib_uint64_t; typedef unsigned __int32 ib_uint32_t; #else /* Use the integer types and formatting strings defined in the C99 standard. */ -# define UINT32PF "%"PRIu32 -# define INT64PF "%"PRId64 -# define UINT64PF "%"PRIu64 -# define UINT64PFx "%016"PRIx64 +# define UINT32PF "%" PRIu32 +# define INT64PF "%" PRId64 +# define UINT64PF "%" PRIu64 +# define UINT64PFx "%016" PRIx64 # define DBUG_LSN_PF UINT64PF typedef int64_t ib_int64_t; typedef uint64_t ib_uint64_t; diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 61da49d1345..bd88e7e94de 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -49,6 +49,7 @@ Created 5/7/1996 Heikki Tuuri #include "btr0btr.h" #include "dict0boot.h" #include <set> +#include "mysql/plugin.h" #include <mysql/service_wsrep.h> @@ -375,6 +376,11 @@ struct lock_stack_t { ulint heap_no; /*!< heap number if rec lock */ }; +extern "C" void thd_report_wait_for(const MYSQL_THD thd, MYSQL_THD other_thd); +extern "C" int thd_need_wait_for(const MYSQL_THD thd); +extern "C" +int thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd); + /** Stack to use during DFS search. Currently only a single stack is required because there is no parallel deadlock check. This stack is protected by the lock_sys_t::mutex. */ @@ -390,6 +396,14 @@ UNIV_INTERN mysql_pfs_key_t lock_sys_mutex_key; UNIV_INTERN mysql_pfs_key_t lock_sys_wait_mutex_key; #endif /* UNIV_PFS_MUTEX */ +/* Buffer to collect THDs to report waits for. 
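The univ.i change inserts a space between the string literal and the <inttypes.h> macro because C++11 would otherwise parse "%"PRIu64 as a user-defined literal suffix rather than string concatenation. A short illustration of the portable spelling (assumes a C++11 compiler):

    #include <cinttypes>
    #include <cstdio>

    int main() {
        uint64_t lsn = 123456789012345ULL;
        // Note the spaces: "%" PRIu64, never "%"PRIu64.
        std::printf("lsn=%" PRIu64 "\n", lsn);
        std::printf("lsn=%016" PRIx64 "\n", lsn);   // zero-padded hex, as in UINT64PFx
        return 0;
    }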
*/ +struct thd_wait_reports { + struct thd_wait_reports *next; /*!< List link */ + ulint used; /*!< How many elements in waitees[] */ + trx_t *waitees[64]; /*!< Trxs for thd_report_wait_for() */ +}; + + #ifdef UNIV_DEBUG UNIV_INTERN ibool lock_print_waits = FALSE; @@ -1020,6 +1034,32 @@ lock_rec_has_to_wait( return(FALSE); } + if ((type_mode & LOCK_GAP || lock_rec_get_gap(lock2)) && + !thd_need_ordering_with(trx->mysql_thd, + lock2->trx->mysql_thd)) { + /* If the upper server layer has already decided on the + commit order between the transaction requesting the + lock and the transaction owning the lock, we do not + need to wait for gap locks. Such ordeering by the upper + server layer happens in parallel replication, where the + commit order is fixed to match the original order on the + master. + + Such gap locks are mainly needed to get serialisability + between transactions so that they will be binlogged in + the correct order so that statement-based replication + will give the correct results. Since the right order + was already determined on the master, we do not need + to enforce it again here. + + Skipping the locks is not essential for correctness, + since in case of deadlock we will just kill the later + transaction and retry it. But it can save some + unnecessary rollbacks and retries. */ + + return (FALSE); + } + #ifdef WITH_WSREP /* if BF thread is locking and has conflict with another BF thread, we need to look at trx ordering and lock types */ @@ -4143,7 +4183,8 @@ static trx_id_t lock_deadlock_search( /*=================*/ - lock_deadlock_ctx_t* ctx) /*!< in/out: deadlock context */ + lock_deadlock_ctx_t* ctx, /*!< in/out: deadlock context */ + struct thd_wait_reports*waitee_ptr) /*!< in/out: list of waitees */ { const lock_t* lock; ulint heap_no; @@ -4224,38 +4265,64 @@ lock_deadlock_search( /* Select the joining transaction as the victim. */ return(ctx->start->id); - } else if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + } else { + /* We do not need to report autoinc locks to the upper + layer. These locks are released before commit, so they + can not cause deadlocks with binlog-fixed commit + order. */ + if (waitee_ptr && + (lock_get_type_low(lock) != LOCK_TABLE || + lock_get_mode(lock) != LOCK_AUTO_INC)) { + if (waitee_ptr->used == + sizeof(waitee_ptr->waitees) / + sizeof(waitee_ptr->waitees[0])) { + waitee_ptr->next = + (struct thd_wait_reports *) + mem_alloc(sizeof(*waitee_ptr)); + waitee_ptr = waitee_ptr->next; + if (!waitee_ptr) { + ctx->too_deep = TRUE; + return(ctx->start->id); + } + waitee_ptr->next = NULL; + waitee_ptr->used = 0; + } + waitee_ptr->waitees[waitee_ptr->used++] = lock->trx; + } - /* Another trx ahead has requested a lock in an - incompatible mode, and is itself waiting for a lock. */ + if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { - ++ctx->cost; + /* Another trx ahead has requested a lock in an + incompatible mode, and is itself waiting for a lock. */ - /* Save current search state. */ - if (!lock_deadlock_push(ctx, lock, heap_no)) { + ++ctx->cost; - /* Unable to save current search state, stack - size not big enough. */ + /* Save current search state. */ + if (!lock_deadlock_push(ctx, lock, heap_no)) { - ctx->too_deep = TRUE; + /* Unable to save current search state, stack + size not big enough. 
*/ + ctx->too_deep = TRUE; #ifdef WITH_WSREP if (wsrep_thd_is_BF(ctx->start->mysql_thd, TRUE)) return(lock->trx->id); else #endif /* WITH_WSREP */ + return(ctx->start->id); - } + } - ctx->wait_lock = lock->trx->lock.wait_lock; - lock = lock_get_first_lock(ctx, &heap_no); + ctx->wait_lock = lock->trx->lock.wait_lock; + lock = lock_get_first_lock(ctx, &heap_no); - if (lock->trx->lock.deadlock_mark > ctx->mark_start) { + if (lock->trx->lock.deadlock_mark > ctx->mark_start) { + lock = lock_get_next_lock(ctx, lock, heap_no); + } + + } else { lock = lock_get_next_lock(ctx, lock, heap_no); } - - } else { - lock = lock_get_next_lock(ctx, lock, heap_no); } } @@ -4320,6 +4387,48 @@ lock_deadlock_trx_rollback( trx_mutex_exit(trx); } +static +void +lock_report_waiters_to_mysql( +/*=======================*/ + struct thd_wait_reports* waitee_buf_ptr, /*!< in: set of trxs */ + THD* mysql_thd, /*!< in: THD */ + trx_id_t victim_trx_id) /*!< in: Trx selected + as deadlock victim, if + any */ +{ + struct thd_wait_reports* p; + struct thd_wait_reports* q; + ulint i; + + p = waitee_buf_ptr; + while (p) { + i = 0; + while (i < p->used) { + trx_t *w_trx = p->waitees[i]; + /* There is no need to report waits to a trx already + selected as a victim. */ + if (w_trx->id != victim_trx_id) { + /* If thd_report_wait_for() decides to kill the + transaction, then we will get a call back into + innobase_kill_query. We mark this by setting + current_lock_mutex_owner, so we can avoid trying + to recursively take lock_sys->mutex. */ + w_trx->current_lock_mutex_owner = mysql_thd; + thd_report_wait_for(mysql_thd, w_trx->mysql_thd); + w_trx->current_lock_mutex_owner = NULL; + } + ++i; + } + q = p->next; + if (p != waitee_buf_ptr) { + mem_free(p); + } + p = q; + } +} + + /********************************************************************//** Checks if a joining lock request results in a deadlock. If a deadlock is found this function will resolve the dadlock by choosing a victim transaction @@ -4335,13 +4444,23 @@ lock_deadlock_check_and_resolve( const lock_t* lock, /*!< in: lock the transaction is requesting */ const trx_t* trx) /*!< in: transaction */ { - trx_id_t victim_trx_id; + trx_id_t victim_trx_id; + struct thd_wait_reports waitee_buf; + struct thd_wait_reports*waitee_buf_ptr; + THD* start_mysql_thd; ut_ad(trx != NULL); ut_ad(lock != NULL); ut_ad(lock_mutex_own()); assert_trx_in_list(trx); + start_mysql_thd = trx->mysql_thd; + if (start_mysql_thd && thd_need_wait_for(start_mysql_thd)) { + waitee_buf_ptr = &waitee_buf; + } else { + waitee_buf_ptr = NULL; + } + /* Try and resolve as many deadlocks as possible. */ do { lock_deadlock_ctx_t ctx; @@ -4354,7 +4473,19 @@ lock_deadlock_check_and_resolve( ctx.wait_lock = lock; ctx.mark_start = lock_mark_counter; - victim_trx_id = lock_deadlock_search(&ctx); + if (waitee_buf_ptr) { + waitee_buf_ptr->next = NULL; + waitee_buf_ptr->used = 0; + } + + victim_trx_id = lock_deadlock_search(&ctx, waitee_buf_ptr); + + /* Report waits to upper layer, as needed. */ + if (waitee_buf_ptr) { + lock_report_waiters_to_mysql(waitee_buf_ptr, + start_mysql_thd, + victim_trx_id); + } /* Search too deep, we rollback the joining transaction. 
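lock_deadlock_search() now records the transactions being waited for in fixed-size thd_wait_reports chunks: the first chunk lives on the caller's stack and extra chunks are heap-allocated only when 64 entries are not enough, after which lock_report_waiters_to_mysql() walks the list, reports each waitee, and frees the overflow chunks. A standalone sketch of that chunked buffer (illustrative types, ints in place of trx_t pointers):

    #include <cstdio>
    #include <cstdlib>

    struct WaitReports {
        WaitReports* next;
        size_t       used;
        int          waitees[64];
    };

    static void report_and_free(WaitReports* head) {
        for (WaitReports* p = head; p != NULL; ) {
            for (size_t i = 0; i < p->used; i++) {
                std::printf("waiting for trx %d\n", p->waitees[i]);
            }
            WaitReports* q = p->next;
            if (p != head) {
                std::free(p);          // only overflow chunks were heap-allocated
            }
            p = q;
        }
    }

    int main() {
        WaitReports buf;               // first chunk on the stack, as in the patch
        buf.next = NULL;
        buf.used = 0;

        WaitReports* tail = &buf;
        for (int trx = 1; trx <= 70; trx++) {      // 70 waitees force one overflow chunk
            if (tail->used == sizeof(tail->waitees) / sizeof(tail->waitees[0])) {
                tail->next = static_cast<WaitReports*>(std::malloc(sizeof(*tail)));
                tail       = tail->next;
                tail->next = NULL;
                tail->used = 0;
            }
            tail->waitees[tail->used++] = trx;
        }
        report_and_free(&buf);
        return 0;
    }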
*/ if (ctx.too_deep) { diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index b7a46dafbd2..d1418dcaab5 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -1250,7 +1250,7 @@ log_group_file_header_flush( mach_write_to_4(buf + LOG_GROUP_ID, group->id); mach_write_to_8(buf + LOG_FILE_START_LSN, start_lsn); - /* Wipe over possible label of ibbackup --restore */ + /* Wipe over possible label of mysqlbackup --restore */ memcpy(buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP, " ", 4); dest_offset = nth_file * group->file_size; @@ -1997,7 +1997,7 @@ log_reset_first_header_and_checkpoint( lsn = start + LOG_BLOCK_HDR_SIZE; - /* Write the label of ibbackup --restore */ + /* Write the label of mysqlbackup --restore */ strcpy((char*) hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP, "ibbackup "); ut_sprintf_timestamp((char*) hdr_buf diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index abfdda1b73e..0143ecf1c1e 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -60,7 +60,7 @@ Created 9/20/1997 Heikki Tuuri /** This is set to FALSE if the backup was originally taken with the -ibbackup --include regexp option: then we do not want to create tables in +mysqlbackup --include regexp option: then we do not want to create tables in directories which were not included */ UNIV_INTERN ibool recv_replay_file_ops = TRUE; #endif /* !UNIV_HOTBACKUP */ @@ -2057,7 +2057,7 @@ recv_apply_log_recs_for_backup(void) /* Extend the tablespace's last file if the page_no does not fall inside its bounds; we assume the last - file is auto-extending, and ibbackup copied the file + file is auto-extending, and mysqlbackup copied the file when it still was smaller */ success = fil_extend_space_to_desired_size( @@ -2428,10 +2428,10 @@ loop: #ifdef UNIV_HOTBACKUP if (recv_replay_file_ops) { - /* In ibbackup --apply-log, replay an .ibd file - operation, if possible; note that - fil_path_to_mysql_datadir is set in ibbackup to - point to the datadir we should use there */ + /* In mysqlbackup --apply-log, replay an .ibd + file operation, if possible; note that + fil_path_to_mysql_datadir is set in mysqlbackup + to point to the datadir we should use there */ if (NULL == fil_op_log_parse_or_replay( body, end_ptr, type, @@ -3091,17 +3091,17 @@ recv_recovery_from_checkpoint_start_func( if (srv_read_only_mode) { ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot restore from ibbackup, InnoDB running " - "in read-only mode!"); + "Cannot restore from mysqlbackup, InnoDB " + "running in read-only mode!"); return(DB_ERROR); } - /* This log file was created by ibbackup --restore: print + /* This log file was created by mysqlbackup --restore: print a note to the user about it */ ib_logf(IB_LOG_LEVEL_INFO, - "The log file was created by ibbackup --apply-log " + "The log file was created by mysqlbackup --apply-log " "at %s. 
The following crash recovery is part of a " "normal restore.", log_hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP); diff --git a/storage/innobase/mysql-test/storage_engine/col_opt_not_null.opt b/storage/innobase/mysql-test/storage_engine/col_opt_not_null.opt new file mode 100644 index 00000000000..40445305fc6 --- /dev/null +++ b/storage/innobase/mysql-test/storage_engine/col_opt_not_null.opt @@ -0,0 +1 @@ +--innodb_log_file_size=100M diff --git a/storage/innobase/mysql-test/storage_engine/col_opt_null.opt b/storage/innobase/mysql-test/storage_engine/col_opt_null.opt new file mode 100644 index 00000000000..40445305fc6 --- /dev/null +++ b/storage/innobase/mysql-test/storage_engine/col_opt_null.opt @@ -0,0 +1 @@ +--innodb_log_file_size=100M diff --git a/storage/innobase/mysql-test/storage_engine/define_engine.inc b/storage/innobase/mysql-test/storage_engine/define_engine.inc index 7d7b0c7407a..77e384d2351 100644 --- a/storage/innobase/mysql-test/storage_engine/define_engine.inc +++ b/storage/innobase/mysql-test/storage_engine/define_engine.inc @@ -41,5 +41,9 @@ let $default_char_type = CHAR(8); # e.g. creation of an additional schema or table, etc. # The cleanup part should be defined in cleanup_engine.inc +CALL mtr.add_suppression("InnoDB: Resizing redo log from .* to .* pages, LSN=.*"); +CALL mtr.add_suppression("InnoDB: Starting to delete and rewrite log files."); +CALL mtr.add_suppression("InnoDB: New log files created, LSN=.*"); + --enable_query_log --enable_result_log diff --git a/storage/innobase/mysql-test/storage_engine/suite.opt b/storage/innobase/mysql-test/storage_engine/suite.opt index 66f581b56d0..034b58f2628 100644 --- a/storage/innobase/mysql-test/storage_engine/suite.opt +++ b/storage/innobase/mysql-test/storage_engine/suite.opt @@ -1,2 +1 @@ ---innodb - +--innodb --ignore-builtin-innodb --plugin-load=ha_innodb diff --git a/storage/innobase/mysql-test/storage_engine/type_blob.opt b/storage/innobase/mysql-test/storage_engine/type_blob.opt new file mode 100644 index 00000000000..40445305fc6 --- /dev/null +++ b/storage/innobase/mysql-test/storage_engine/type_blob.opt @@ -0,0 +1 @@ +--innodb_log_file_size=100M diff --git a/storage/innobase/mysql-test/storage_engine/type_text.opt b/storage/innobase/mysql-test/storage_engine/type_text.opt new file mode 100644 index 00000000000..40445305fc6 --- /dev/null +++ b/storage/innobase/mysql-test/storage_engine/type_text.opt @@ -0,0 +1 @@ +--innodb_log_file_size=100M diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index ea232444af6..0fc5d1c047b 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -2072,7 +2072,7 @@ os_file_delete_if_exists_func( bool ret; ulint count = 0; loop: - /* In Windows, deleting an .ibd file may fail if ibbackup is copying + /* In Windows, deleting an .ibd file may fail if mysqlbackup is copying it */ ret = DeleteFile((LPCTSTR) name); @@ -2097,7 +2097,7 @@ loop: ib_logf(IB_LOG_LEVEL_WARN, "Delete of file %s failed.", name); } - os_thread_sleep(1000000); /* sleep for a second */ + os_thread_sleep(500000); /* sleep for 0.5 second */ if (count > 2000) { @@ -2135,7 +2135,7 @@ os_file_delete_func( BOOL ret; ulint count = 0; loop: - /* In Windows, deleting an .ibd file may fail if ibbackup is copying + /* In Windows, deleting an .ibd file may fail if mysqlbackup is copying it */ ret = DeleteFile((LPCTSTR) name); @@ -2158,7 +2158,7 @@ loop: fprintf(stderr, "InnoDB: Warning: cannot delete file %s\n" - "InnoDB: Are you running ibbackup" + "InnoDB: Are you running 
mysqlbackup" " to back up the file?\n", name); } @@ -2938,7 +2938,7 @@ try_again: } ib_logf(IB_LOG_LEVEL_ERROR, - "Tried to read "ULINTPF" bytes at offset " UINT64PF". " + "Tried to read " ULINTPF " bytes at offset " UINT64PF ". " "Was only able to read %ld.", n, offset, (lint) ret); #endif /* __WIN__ */ #ifdef __WIN__ @@ -3143,6 +3143,7 @@ os_file_write_func( DWORD high; ulint n_retries = 0; ulint err; + DWORD saved_error = 0; #ifndef UNIV_HOTBACKUP ulint i; #endif /* !UNIV_HOTBACKUP */ @@ -3233,8 +3234,10 @@ retry: } if (!os_has_said_disk_full) { + char *winmsg = NULL; - err = (ulint) GetLastError(); + saved_error = GetLastError(); + err = (ulint) saved_error; ut_print_timestamp(stderr); @@ -3251,6 +3254,23 @@ retry: name, offset, (ulong) n, (ulong) len, (ulong) err); + /* Ask Windows to prepare a standard message for a + GetLastError() */ + + FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, saved_error, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPSTR)&winmsg, 0, NULL); + + if (winmsg) { + fprintf(stderr, + "InnoDB: FormatMessage: Error number %lu means '%s'.\n", + (ulong) saved_error, winmsg); + LocalFree(winmsg); + } + if (strerror((int) err) != NULL) { fprintf(stderr, "InnoDB: Error number %lu means '%s'.\n", @@ -3279,12 +3299,11 @@ retry: } if (!os_has_said_disk_full) { - ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: Write to file %s failed" - " at offset "UINT64PF".\n" + " at offset " UINT64PF ".\n" "InnoDB: %lu bytes should have been written," " only %ld were written.\n" "InnoDB: Operating system error number %lu.\n" @@ -4954,11 +4973,16 @@ os_aio_func( wake_later = mode & OS_AIO_SIMULATED_WAKE_LATER; mode = mode & (~OS_AIO_SIMULATED_WAKE_LATER); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + mode = OS_AIO_SYNC;); + if (mode == OS_AIO_SYNC #ifdef WIN_ASYNC_IO && !srv_use_native_aio #endif /* WIN_ASYNC_IO */ ) { + ibool ret; + /* This is actually an ordinary synchronous read or write: no need to use an i/o-handler thread. 
NOTE that if we use Windows async i/o, Windows does not allow us to use @@ -4973,14 +4997,24 @@ os_aio_func( and os_file_write_func() */ if (type == OS_FILE_READ) { - return(os_file_read_func(file, buf, offset, n, - page_compression)); + ret = os_file_read_func(file, buf, offset, n, + page_compression); + } else { + + ut_ad(!srv_read_only_mode); + ut_a(type == OS_FILE_WRITE); + + ret = os_file_write_func(name, file, buf, offset, n); } - ut_ad(!srv_read_only_mode); - ut_a(type == OS_FILE_WRITE); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + os_has_said_disk_full = FALSE;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + ret = 0;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + errno = 28;); - return(os_file_write_func(name, file, buf, offset, n)); + return ret; } try_again: @@ -5876,7 +5910,13 @@ consecutive_loop: aio_slot->page_compression); } - ut_a(ret); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + os_has_said_disk_full = FALSE;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + ret = 0;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + errno = 28;); + srv_set_io_thread_op_info(global_segment, "file i/o done"); if (aio_slot->type == OS_FILE_READ && n_consecutive > 1) { diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc index 46fa9fea078..71660dfc1ae 100644 --- a/storage/innobase/page/page0zip.cc +++ b/storage/innobase/page/page0zip.cc @@ -1309,6 +1309,30 @@ page_zip_compress( MONITOR_INC(MONITOR_PAGE_COMPRESS); + /* Simulate a compression failure with a probability determined by + innodb_simulate_comp_failures, only if the page has 2 or more + records. */ + + if (srv_simulate_comp_failures + && !dict_index_is_ibuf(index) + && page_get_n_recs(page) >= 2 + && ((ulint)(rand() % 100) < srv_simulate_comp_failures) + && strcasecmp(index->table_name, "IBUF_DUMMY") != 0) { + +#ifdef UNIV_DEBUG + fprintf(stderr, + "InnoDB: Simulating a compression failure" + " for table %s, index %s, page %lu (%s)\n", + index->table_name, + index->name, + page_get_page_no(page), + page_is_leaf(page) ? "leaf" : "non-leaf"); + +#endif + + goto err_exit; + } + heap = mem_heap_create(page_zip_get_size(page_zip) + n_fields * (2 + sizeof(ulint)) + REC_OFFS_HEADER_SIZE @@ -3254,24 +3278,8 @@ page_zip_validate_low( temp_page_buf = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE)); temp_page = static_cast<byte*>(ut_align(temp_page_buf, UNIV_PAGE_SIZE)); -#ifdef UNIV_DEBUG_VALGRIND - /* Get detailed information on the valid bits in case the - UNIV_MEM_ASSERT_RW() checks fail. The v-bits of page[], - page_zip->data[] or page_zip could be viewed at temp_page[] or - temp_page_zip in a debugger when running valgrind --db-attach. */ - (void) VALGRIND_GET_VBITS(page, temp_page, UNIV_PAGE_SIZE); UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE); -# if UNIV_WORD_SIZE == 4 - VALGRIND_GET_VBITS(page_zip, &temp_page_zip, sizeof temp_page_zip); - /* On 32-bit systems, there is no padding in page_zip_des_t. - On other systems, Valgrind could complain about uninitialized - pad bytes. 
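The srv_simulate_comp_failures check above and the ib_os_aio_func_io_failure_28 debug points are both fault-injection hooks: the first fails page compression with a configured probability, the second makes a synchronous write report errno 28 (ENOSPC). A minimal sketch of the percentage-based injection idea (standalone illustration, not the DBUG machinery itself):

    #include <cstdio>
    #include <cstdlib>

    // Fail roughly this percentage of calls, in the spirit of
    // innodb_simulate_comp_failures.
    static unsigned simulate_failures = 25;

    static bool maybe_fail() {
        return simulate_failures != 0
            && static_cast<unsigned>(std::rand() % 100) < simulate_failures;
    }

    int main() {
        std::srand(42);
        int failed = 0;
        for (int i = 0; i < 1000; i++) {
            if (maybe_fail()) {
                failed++;              // the real code jumps to err_exit here
            }
        }
        std::printf("injected %d failures out of 1000 attempts\n", failed);
        return 0;
    }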
*/ - UNIV_MEM_ASSERT_RW(page_zip, sizeof *page_zip); -# endif - (void) VALGRIND_GET_VBITS(page_zip->data, temp_page, - page_zip_get_size(page_zip)); UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip)); -#endif /* UNIV_DEBUG_VALGRIND */ temp_page_zip = *page_zip; valid = page_zip_decompress(&temp_page_zip, temp_page, TRUE); diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index e6487730a77..c45d6554627 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -151,35 +151,37 @@ row_ins_alloc_sys_fields( ut_ad(row && table && heap); ut_ad(dtuple_get_n_fields(row) == dict_table_get_n_cols(table)); - /* 1. Allocate buffer for row id */ + /* allocate buffer to hold the needed system created hidden columns. */ + uint len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN; + ptr = static_cast<byte*>(mem_heap_zalloc(heap, len)); + /* 1. Populate row-id */ col = dict_table_get_sys_col(table, DATA_ROW_ID); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_ROW_ID_LEN)); - dfield_set_data(dfield, ptr, DATA_ROW_ID_LEN); node->row_id_buf = ptr; - /* 3. Allocate buffer for trx id */ + ptr += DATA_ROW_ID_LEN; + /* 2. Populate trx id */ col = dict_table_get_sys_col(table, DATA_TRX_ID); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_TRX_ID_LEN)); dfield_set_data(dfield, ptr, DATA_TRX_ID_LEN); node->trx_id_buf = ptr; - /* 4. Allocate buffer for roll ptr */ + ptr += DATA_TRX_ID_LEN; + + /* 3. Populate roll ptr */ col = dict_table_get_sys_col(table, DATA_ROLL_PTR); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_ROLL_PTR_LEN)); dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN); } @@ -1736,12 +1738,11 @@ do_possible_lock_wait: table case (check_ref == 0), since MDL lock will prevent concurrent DDL and DML on the same table */ if (!check_ref) { - for (const dict_foreign_t* check_foreign - = UT_LIST_GET_FIRST( table->referenced_list); - check_foreign; - check_foreign = UT_LIST_GET_NEXT( - referenced_list, check_foreign)) { - if (check_foreign == foreign) { + for (dict_foreign_set::iterator it + = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + if (*it == foreign) { verified = true; break; } @@ -1789,12 +1790,15 @@ row_ins_check_foreign_constraints( trx = thr_get_trx(thr); - foreign = UT_LIST_GET_FIRST(table->foreign_list); - DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd, "foreign_constraint_check_for_ins"); - while (foreign) { + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + + foreign = *it; + if (foreign->foreign_index == index) { dict_table_t* ref_table = NULL; dict_table_t* foreign_table = foreign->foreign_table; @@ -1850,8 +1854,6 @@ row_ins_check_foreign_constraints( return(err); } } - - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); } return(DB_SUCCESS); @@ -2883,7 +2885,7 @@ row_ins_clust_index_entry( dberr_t err; ulint n_uniq; - if (UT_LIST_GET_FIRST(index->table->foreign_list)) { + if (!index->table->foreign_set.empty()) { err = row_ins_check_foreign_constraints( index->table, index, entry, thr); if (err != DB_SUCCESS) { @@ -2941,7 +2943,7 @@ row_ins_sec_index_entry( mem_heap_t* offsets_heap; mem_heap_t* heap; - if (UT_LIST_GET_FIRST(index->table->foreign_list)) { + if (!index->table->foreign_set.empty()) { err = 
row_ins_check_foreign_constraints(index->table, index, entry, thr); if (err != DB_SUCCESS) { diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index a5e4c492ac9..c0ddc2dda06 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -795,7 +795,7 @@ row_merge_read( if (UNIV_UNLIKELY(!success)) { ut_print_timestamp(stderr); fprintf(stderr, - " InnoDB: failed to read merge block at "UINT64PF"\n", + " InnoDB: failed to read merge block at " UINT64PF "\n", ofs); } diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 8def475e1f9..c5f4680f4ff 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2000, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2000, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -64,6 +64,7 @@ Created 9/17/2000 Heikki Tuuri #include "m_string.h" #include "my_sys.h" #include "ha_prototypes.h" +#include <algorithm> /** Provide optional 4.x backwards compatibility for 5.0 and above */ UNIV_INTERN ibool row_rollback_on_timeout = FALSE; @@ -1360,7 +1361,7 @@ error_exit: if (doc_id < next_doc_id) { fprintf(stderr, "InnoDB: FTS Doc ID must be large than" - " "UINT64PF" for table", + " " UINT64PF " for table", next_doc_id - 1); ut_print_name(stderr, trx, TRUE, table->name); putc('\n', stderr); @@ -1375,9 +1376,9 @@ error_exit: if (doc_id - next_doc_id >= FTS_DOC_ID_MAX_STEP) { fprintf(stderr, - "InnoDB: Doc ID "UINT64PF" is too" + "InnoDB: Doc ID " UINT64PF " is too" " big. 
Its difference with largest" - " used Doc ID "UINT64PF" cannot" + " used Doc ID " UINT64PF " cannot" " exceed or equal to %d\n", doc_id, next_doc_id - 1, FTS_DOC_ID_MAX_STEP); @@ -1574,8 +1575,6 @@ init_fts_doc_id_for_ref( { dict_foreign_t* foreign; - foreign = UT_LIST_GET_FIRST(table->referenced_list); - table->fk_max_recusive_level = 0; (*depth)++; @@ -1587,17 +1586,25 @@ init_fts_doc_id_for_ref( /* Loop through this table's referenced list and also recursively traverse each table's foreign table list */ - while (foreign && foreign->foreign_table) { - if (foreign->foreign_table->fts) { - fts_init_doc_id(foreign->foreign_table); + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + + foreign = *it; + + if (foreign->foreign_table == NULL) { + break; } - if (UT_LIST_GET_LEN(foreign->foreign_table->referenced_list) - > 0 && foreign->foreign_table != table) { - init_fts_doc_id_for_ref(foreign->foreign_table, depth); + if (foreign->foreign_table->fts != NULL) { + fts_init_doc_id(foreign->foreign_table); } - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); + if (!foreign->foreign_table->referenced_set.empty() + && foreign->foreign_table != table) { + init_fts_doc_id_for_ref( + foreign->foreign_table, depth); + } } } @@ -2826,43 +2833,47 @@ row_discard_tablespace_foreign_key_checks( const trx_t* trx, /*!< in: transaction handle */ const dict_table_t* table) /*!< in: table to be discarded */ { - const dict_foreign_t* foreign; + + if (srv_read_only_mode || !trx->check_foreigns) { + return(DB_SUCCESS); + } /* Check if the table is referenced by foreign key constraints from some other table (not the table itself) */ + dict_foreign_set::iterator it + = std::find_if(table->referenced_set.begin(), + table->referenced_set.end(), + dict_foreign_different_tables()); - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign && foreign->foreign_table == table; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { - + if (it == table->referenced_set.end()) { + return(DB_SUCCESS); } - if (!srv_read_only_mode && foreign && trx->check_foreigns) { + const dict_foreign_t* foreign = *it; + FILE* ef = dict_foreign_err_file; - FILE* ef = dict_foreign_err_file; + ut_ad(foreign->foreign_table != table); + ut_ad(foreign->referenced_table == table); - /* We only allow discarding a referenced table if - FOREIGN_KEY_CHECKS is set to 0 */ + /* We only allow discarding a referenced table if + FOREIGN_KEY_CHECKS is set to 0 */ - mutex_enter(&dict_foreign_err_mutex); + mutex_enter(&dict_foreign_err_mutex); - rewind(ef); + rewind(ef); - ut_print_timestamp(ef); + ut_print_timestamp(ef); - fputs(" Cannot DISCARD table ", ef); - ut_print_name(stderr, trx, TRUE, table->name); - fputs("\n" - "because it is referenced by ", ef); - ut_print_name(stderr, trx, TRUE, foreign->foreign_table_name); - putc('\n', ef); + fputs(" Cannot DISCARD table ", ef); + ut_print_name(stderr, trx, TRUE, table->name); + fputs("\n" + "because it is referenced by ", ef); + ut_print_name(stderr, trx, TRUE, foreign->foreign_table_name); + putc('\n', ef); - mutex_exit(&dict_foreign_err_mutex); - - return(DB_CANNOT_DROP_CONSTRAINT); - } + mutex_exit(&dict_foreign_err_mutex); - return(DB_SUCCESS); + return(DB_CANNOT_DROP_CONSTRAINT); } /*********************************************************************//** @@ -3165,7 +3176,6 @@ row_truncate_table_for_mysql( dict_table_t* table, /*!< in: table handle */ trx_t* trx) /*!< in: transaction handle */ { - dict_foreign_t* foreign; 
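The row0ins.cc and row0mysql.cc hunks above are part of a broader refactoring in this merge: hand-written UT_LIST walks over table->foreign_list / referenced_list become iteration over the std::set based dict_foreign_set members, and linear searches become std::find_if with small predicate functors such as dict_foreign_different_tables. A simplified, self-contained sketch of that pattern follows; foreign_desc, references_other_table and referenced_by_other_table are illustrative stand-ins, not the real dictionary types.

    #include <algorithm>
    #include <set>

    // Simplified stand-in for dict_foreign_t; the real struct has many more fields.
    struct foreign_desc {
        const void* foreign_table;     // table that owns the FK
        const void* referenced_table;  // table the FK points at
    };

    typedef std::set<foreign_desc*> foreign_set;

    // Predicate in the spirit of dict_foreign_different_tables(): true when the
    // constraint comes from some table other than the one being checked.
    struct references_other_table {
        const void* table;
        explicit references_other_table(const void* t) : table(t) {}
        bool operator()(const foreign_desc* f) const
        { return f->foreign_table != table; }
    };

    // True if 'table' is still referenced by some other table and therefore must
    // not be discarded/truncated/dropped while FOREIGN_KEY_CHECKS is on.
    bool referenced_by_other_table(const foreign_set& referenced, const void* table)
    {
        foreign_set::const_iterator it =
            std::find_if(referenced.begin(), referenced.end(),
                         references_other_table(table));
        return it != referenced.end();
    }

    int main()
    {
        foreign_desc self_ref = { 0, 0 };
        self_ref.foreign_table = &self_ref;      // FK from the table onto itself
        self_ref.referenced_table = &self_ref;
        foreign_set refs;
        refs.insert(&self_ref);
        return referenced_by_other_table(refs, &self_ref) ? 1 : 0;  // self-reference: exits 0
    }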
dberr_t err; mem_heap_t* heap; byte* buf; @@ -3257,18 +3267,17 @@ row_truncate_table_for_mysql( /* Check if the table is referenced by foreign key constraints from some other table (not the table itself) */ - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign != 0 && foreign->foreign_table == table; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { - - /* Do nothing. */ - } + dict_foreign_set::iterator it + = std::find_if(table->referenced_set.begin(), + table->referenced_set.end(), + dict_foreign_different_tables()); if (!srv_read_only_mode - && foreign + && it != table->referenced_set.end() && trx->check_foreigns) { - FILE* ef = dict_foreign_err_file; + FILE* ef = dict_foreign_err_file; + dict_foreign_t* foreign = *it; /* We only allow truncating a referenced table if FOREIGN_KEY_CHECKS is set to 0 */ @@ -3871,42 +3880,45 @@ row_drop_table_for_mysql( /* Check if the table is referenced by foreign key constraints from some other table (not the table itself) */ - foreign = UT_LIST_GET_FIRST(table->referenced_list); + if (!srv_read_only_mode && trx->check_foreigns) { - while (foreign && foreign->foreign_table == table) { -check_next_foreign: - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); - } + for (dict_foreign_set::iterator it + = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { - if (!srv_read_only_mode - && foreign - && trx->check_foreigns - && !(drop_db && dict_tables_have_same_db( - name, foreign->foreign_table_name_lookup))) { - FILE* ef = dict_foreign_err_file; + foreign = *it; - /* We only allow dropping a referenced table if - FOREIGN_KEY_CHECKS is set to 0 */ + const bool ref_ok = drop_db + && dict_tables_have_same_db( + name, + foreign->foreign_table_name_lookup); - err = DB_CANNOT_DROP_CONSTRAINT; + if (foreign->foreign_table != table && !ref_ok) { - mutex_enter(&dict_foreign_err_mutex); - rewind(ef); - ut_print_timestamp(ef); + FILE* ef = dict_foreign_err_file; - fputs(" Cannot drop table ", ef); - ut_print_name(ef, trx, TRUE, name); - fputs("\n" - "because it is referenced by ", ef); - ut_print_name(ef, trx, TRUE, foreign->foreign_table_name); - putc('\n', ef); - mutex_exit(&dict_foreign_err_mutex); + /* We only allow dropping a referenced table + if FOREIGN_KEY_CHECKS is set to 0 */ - goto funct_exit; - } + err = DB_CANNOT_DROP_CONSTRAINT; + + mutex_enter(&dict_foreign_err_mutex); + rewind(ef); + ut_print_timestamp(ef); - if (foreign && trx->check_foreigns) { - goto check_next_foreign; + fputs(" Cannot drop table ", ef); + ut_print_name(ef, trx, TRUE, name); + fputs("\n" + "because it is referenced by ", ef); + ut_print_name(ef, trx, TRUE, + foreign->foreign_table_name); + putc('\n', ef); + mutex_exit(&dict_foreign_err_mutex); + + goto funct_exit; + } + } } /* TODO: could we replace the counter n_foreign_key_checks_running diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 359ae3f2c21..e5a7694cb93 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -877,16 +877,15 @@ row_sel_get_clust_rec( if (!node->read_view) { /* Try to place a lock on the index record */ - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation level - we lock only the record, i.e., next-key locking is - not used. 
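In the rewritten row_drop_table_for_mysql() loop above, the whole check is skipped when FOREIGN_KEY_CHECKS is off (trx->check_foreigns is false); otherwise a referencing constraint blocks the DROP only when it comes from a different table and we are not dropping the entire database that also contains the referencing table. A small sketch of that decision, with invented names (fk_ref, same_db, drop_allowed_for) and a simplified "db/table" name format:

    #include <cstring>

    // A self-reference never blocks the drop; a reference from another table
    // blocks it unless the whole database is being dropped and the referencing
    // table lives in that same database.
    struct fk_ref {
        const char* foreign_table_name;  // e.g. "mydb/child"
        bool        self_reference;      // FK points back at the dropped table
    };

    static bool same_db(const char* a, const char* b)
    {
        const char* sa = std::strchr(a, '/');
        const char* sb = std::strchr(b, '/');
        if (!sa || !sb || (sa - a) != (sb - b)) return false;
        return std::strncmp(a, b, sa - a) == 0;
    }

    bool drop_allowed_for(const fk_ref& ref, const char* dropped_name, bool drop_db)
    {
        if (ref.self_reference) return true;
        return drop_db && same_db(dropped_name, ref.foreign_table_name);
    }

    int main()
    {
        fk_ref r = { "mydb/child", false };
        return drop_allowed_for(r, "mydb/parent", /*drop_db=*/true) ? 0 : 1;
    }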
*/ ulint lock_type; trx_t* trx; trx = thr_get_trx(thr); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation level + we lock only the record, i.e., next-key locking is + not used. */ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { lock_type = LOCK_REC_NOT_GAP; @@ -1502,12 +1501,6 @@ rec_loop: search result set, resulting in the phantom problem. */ if (!consistent_read) { - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation - level, we lock only the record, i.e., next-key - locking is not used. */ - rec_t* next_rec = page_rec_get_next(rec); ulint lock_type; trx_t* trx; @@ -1517,6 +1510,10 @@ rec_loop: offsets = rec_get_offsets(next_rec, index, offsets, ULINT_UNDEFINED, &heap); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation + level, we lock only the record, i.e., next-key + locking is not used. */ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { @@ -1565,12 +1562,6 @@ skip_lock: if (!consistent_read) { /* Try to place a lock on the index record */ - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation level, - we lock only the record, i.e., next-key locking is - not used. */ - ulint lock_type; trx_t* trx; @@ -1579,6 +1570,10 @@ skip_lock: trx = thr_get_trx(thr); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation level, + we lock only the record, i.e., next-key locking is + not used. */ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { @@ -4227,7 +4222,7 @@ rec_loop: /* Try to place a lock on the index record */ /* If innodb_locks_unsafe_for_binlog option is used - or this session is using a READ COMMITTED isolation + or this session is using a READ COMMITTED or lower isolation level we do not lock gaps. Supremum record is really a gap and therefore we do not set locks there. */ @@ -4369,7 +4364,7 @@ wrong_offs: /* Try to place a gap lock on the index record only if innodb_locks_unsafe_for_binlog option is not set or this session is not - using a READ COMMITTED isolation level. */ + using a READ COMMITTED or lower isolation level. */ err = sel_set_rec_lock( btr_pcur_get_block(pcur), @@ -4418,7 +4413,7 @@ wrong_offs: /* Try to place a gap lock on the index record only if innodb_locks_unsafe_for_binlog option is not set or this session is not - using a READ COMMITTED isolation level. */ + using a READ COMMITTED or lower isolation level. 
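The comment moves in row0sel.cc above all restate the same locking rule: with innodb_locks_unsafe_for_binlog set, or at READ COMMITTED or a lower isolation level, only the index record itself is locked (LOCK_REC_NOT_GAP), while higher isolation levels take next-key locks (LOCK_ORDINARY) to keep phantoms out of the result set. A compact sketch of that selection, using stand-in enums rather than the InnoDB constants:

    // Sketch of the lock-type selection described in the comments above.
    // REC_NOT_GAP / NEXT_KEY stand in for LOCK_REC_NOT_GAP / LOCK_ORDINARY.
    enum rec_lock_type { REC_NOT_GAP, NEXT_KEY };

    enum isolation { READ_UNCOMMITTED = 0, READ_COMMITTED = 1,
                     REPEATABLE_READ = 2, SERIALIZABLE = 3 };

    rec_lock_type choose_lock_type(bool locks_unsafe_for_binlog, isolation level)
    {
        // READ COMMITTED "or lower": no gap locks, so phantom protection is
        // deliberately traded away for less blocking.
        if (locks_unsafe_for_binlog || level <= READ_COMMITTED)
            return REC_NOT_GAP;
        return NEXT_KEY;   // next-key lock: the record plus the gap before it
    }

    int main()
    {
        return choose_lock_type(false, REPEATABLE_READ) == NEXT_KEY ? 0 : 1;
    }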
*/ err = sel_set_rec_lock( btr_pcur_get_block(pcur), diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc index b0581945835..8a27f325218 100644 --- a/storage/innobase/row/row0upd.cc +++ b/storage/innobase/row/row0upd.cc @@ -51,6 +51,7 @@ Created 12/27/1996 Heikki Tuuri #include "pars0sym.h" #include "eval0eval.h" #include "buf0lru.h" +#include <algorithm> #include <mysql/plugin.h> #include <mysql/service_wsrep.h> @@ -138,12 +139,10 @@ row_upd_index_is_referenced( trx_t* trx) /*!< in: transaction */ { dict_table_t* table = index->table; - dict_foreign_t* foreign; ibool froze_data_dict = FALSE; ibool is_referenced = FALSE; - if (!UT_LIST_GET_FIRST(table->referenced_list)) { - + if (table->referenced_set.empty()) { return(FALSE); } @@ -152,19 +151,13 @@ row_upd_index_is_referenced( froze_data_dict = TRUE; } - foreign = UT_LIST_GET_FIRST(table->referenced_list); - - while (foreign) { - if (foreign->referenced_index == index) { - - is_referenced = TRUE; - goto func_exit; - } + dict_foreign_set::iterator it + = std::find_if(table->referenced_set.begin(), + table->referenced_set.end(), + dict_foreign_with_index(index)); - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); - } + is_referenced = (it != table->referenced_set.end()); -func_exit: if (froze_data_dict) { row_mysql_unfreeze_data_dictionary(trx); } @@ -185,7 +178,7 @@ wsrep_row_upd_index_is_foreign( ibool froze_data_dict = FALSE; ibool is_referenced = FALSE; - if (!UT_LIST_GET_FIRST(table->foreign_list)) { + if (table->foreign_set.empty()) { return(FALSE); } @@ -195,16 +188,18 @@ wsrep_row_upd_index_is_foreign( froze_data_dict = TRUE; } - foreign = UT_LIST_GET_FIRST(table->foreign_list); + for (dict_foreign_set::iterator it= table->foreign_set.begin(); + it != table->foreign_set.end(); + ++ it) + { + foreign= *it; - while (foreign) { if (foreign->foreign_index == index) { is_referenced = TRUE; goto func_exit; } - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); } func_exit: @@ -246,7 +241,7 @@ row_upd_check_references_constraints( dberr_t err; ibool got_s_lock = FALSE; - if (UT_LIST_GET_FIRST(table->referenced_list) == NULL) { + if (table->referenced_set.empty()) { return(DB_SUCCESS); } @@ -273,9 +268,13 @@ row_upd_check_references_constraints( } run_again: - foreign = UT_LIST_GET_FIRST(table->referenced_list); - while (foreign) { + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + + foreign = *it; + /* Note that we may have an update which updates the index record, but does NOT update the first fields which are referenced in a foreign key constraint. Then the update does @@ -328,8 +327,6 @@ run_again: goto func_exit; } } - - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); } err = DB_SUCCESS; @@ -366,7 +363,7 @@ wsrep_row_upd_check_foreign_constraints( ibool got_s_lock = FALSE; ibool opened = FALSE; - if (UT_LIST_GET_FIRST(table->foreign_list) == NULL) { + if (table->foreign_set.empty()) { return(DB_SUCCESS); } @@ -393,9 +390,12 @@ wsrep_row_upd_check_foreign_constraints( row_mysql_freeze_data_dictionary(trx); } - foreign = UT_LIST_GET_FIRST(table->foreign_list); + for (dict_foreign_set::iterator it= table->foreign_set.begin(); + it != table->foreign_set.end(); + ++ it) + { + foreign= *it; - while (foreign) { /* Note that we may have an update which updates the index record, but does NOT update the first fields which are referenced in a foreign key constraint. 
Then the update does @@ -446,7 +446,6 @@ wsrep_row_upd_check_foreign_constraints( } } - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); } err = DB_SUCCESS; diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc index f276efdc021..5880e03073e 100644 --- a/storage/innobase/srv/srv0mon.cc +++ b/storage/innobase/srv/srv0mon.cc @@ -41,8 +41,8 @@ Created 12/9/2009 Jimmy Yang /* Macro to standardize the counter names for counters in the "monitor_buf_page" module as they have very structured defines */ #define MONITOR_BUF_PAGE(name, description, code, op, op_code) \ - {"buffer_page_"op"_"name, "buffer_page_io", \ - "Number of "description" Pages "op, \ + {"buffer_page_" op "_" name, "buffer_page_io", \ + "Number of " description " Pages " op, \ MONITOR_GROUP_MODULE, MONITOR_DEFAULT_START, \ MONITOR_##code##_##op_code} diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 0c0075c98f1..ca5c22d8ee4 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -376,7 +376,12 @@ UNIV_INTERN ulong srv_doublewrite_batch_size = 120; UNIV_INTERN ulong srv_replication_delay = 0; /*-------------------------------------------*/ +#ifdef HAVE_MEMORY_BARRIER +/* No idea to wait long with memory barriers */ +UNIV_INTERN ulong srv_n_spin_wait_rounds = 15; +#else UNIV_INTERN ulong srv_n_spin_wait_rounds = 30; +#endif UNIV_INTERN ulong srv_spin_wait_delay = 6; UNIV_INTERN ibool srv_priority_boost = TRUE; @@ -509,6 +514,9 @@ current_time % 5 != 0. */ #endif /* MEM_PERIODIC_CHECK */ # define SRV_MASTER_DICT_LRU_INTERVAL (47) +/** Simulate compression failures. */ +UNIV_INTERN uint srv_simulate_comp_failures = 0; + /** Acquire the system_mutex. */ #define srv_sys_mutex_enter() do { \ mutex_enter(&srv_sys->mutex); \ @@ -1775,9 +1783,10 @@ loop: /* Try to track a strange bug reported by Harald Fuchs and others, where the lsn seems to decrease at times */ - new_lsn = log_get_lsn(); + /* We have to use nowait to ensure we don't block */ + new_lsn= log_get_lsn_nowait(); - if (new_lsn < old_lsn) { + if (new_lsn && new_lsn < old_lsn) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: old log sequence number " LSN_PF @@ -1789,7 +1798,8 @@ loop: ut_ad(0); } - old_lsn = new_lsn; + if (new_lsn) + old_lsn = new_lsn; if (difftime(time(NULL), srv_last_monitor_time) > 60) { /* We referesh InnoDB Monitor values so that averages are diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index bfdcbfaeee0..2692636dcb5 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -1533,6 +1533,7 @@ innobase_start_or_create_for_mysql(void) char logfilename[10000]; char* logfile0 = NULL; size_t dirnamelen; + bool sys_datafiles_created = false; /* This should be initialized early */ ut_init_timer(); @@ -1661,6 +1662,19 @@ innobase_start_or_create_for_mysql(void) "" IB_ATOMICS_STARTUP_MSG ""); ib_logf(IB_LOG_LEVEL_INFO, + "" IB_MEMORY_BARRIER_STARTUP_MSG ""); + +#ifndef HAVE_MEMORY_BARRIER +#if defined __i386__ || defined __x86_64__ || defined _M_IX86 || defined _M_X64 || defined __WIN__ +#else + ib_logf(IB_LOG_LEVEL_WARN, + "MySQL was built without a memory barrier capability on this" + " architecture, which might allow a mutex/rw_lock violation" + " under high thread concurrency. 
This may cause a hang."); +#endif /* IA32 or AMD64 */ +#endif /* HAVE_MEMORY_BARRIER */ + + ib_logf(IB_LOG_LEVEL_INFO, "Compressed tables use zlib " ZLIB_VERSION #ifdef UNIV_ZIP_DEBUG " with validation" @@ -2211,9 +2225,9 @@ innobase_start_or_create_for_mysql(void) } else if (size != srv_log_file_size) { ib_logf(IB_LOG_LEVEL_ERROR, "Log file %s is" - " of different size "UINT64PF" bytes" + " of different size " UINT64PF " bytes" " than other log" - " files "UINT64PF" bytes!", + " files " UINT64PF " bytes!", logfilename, size << UNIV_PAGE_SIZE_SHIFT, (os_offset_t) srv_log_file_size @@ -2462,6 +2476,15 @@ files_checked: dict_check = DICT_CHECK_NONE_LOADED; } + /* Create the SYS_TABLESPACES and SYS_DATAFILES system table */ + err = dict_create_or_check_sys_tablespace(); + if (err != DB_SUCCESS) { + return(err); + } + + sys_datafiles_created = true; + + /* This function assumes that SYS_DATAFILES exists */ dict_check_tablespaces_and_store_max_id(dict_check); } @@ -2635,13 +2658,6 @@ files_checked: srv_undo_logs = ULONG_UNDEFINED; } - /* Flush the changes made to TRX_SYS_PAGE by trx_sys_create_rsegs()*/ - if (!srv_force_recovery && !srv_read_only_mode) { - bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL); - ut_a(success); - buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); - } - if (!srv_read_only_mode) { /* Create the thread which watches the timeouts for lock waits */ @@ -2666,10 +2682,13 @@ files_checked: return(err); } - /* Create the SYS_TABLESPACES system table */ - err = dict_create_or_check_sys_tablespace(); - if (err != DB_SUCCESS) { - return(err); + /* Create the SYS_TABLESPACES and SYS_DATAFILES system tables if we + have not done that already on crash recovery. */ + if (sys_datafiles_created == false) { + err = dict_create_or_check_sys_tablespace(); + if (err != DB_SUCCESS) { + return(err); + } } srv_is_being_started = FALSE; diff --git a/storage/innobase/sync/sync0arr.cc b/storage/innobase/sync/sync0arr.cc index 2cfb693f8ba..f643e5b794f 100644 --- a/storage/innobase/sync/sync0arr.cc +++ b/storage/innobase/sync/sync0arr.cc @@ -182,6 +182,33 @@ sync_array_get_nth_cell( } /******************************************************************//** +Looks for a cell with the given thread id. +@return pointer to cell or NULL if not found */ +static +sync_cell_t* +sync_array_find_thread( +/*===================*/ + sync_array_t* arr, /*!< in: wait array */ + os_thread_id_t thread) /*!< in: thread id */ +{ + ulint i; + sync_cell_t* cell; + + for (i = 0; i < arr->n_cells; i++) { + + cell = sync_array_get_nth_cell(arr, i); + + if (cell->wait_object != NULL + && os_thread_eq(cell->thread, thread)) { + + return(cell); /* Found */ + } + } + + return(NULL); /* Not found */ +} + +/******************************************************************//** Reserves the mutex semaphore protecting a sync array. 
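In the srv0srv.cc monitor-loop hunk above, the thread now calls log_get_lsn_nowait() so that it never blocks on the log mutex, and it treats a returned 0 as "could not sample this round": both the backwards-LSN check and the update of old_lsn happen only when a real value was obtained. A hedged sketch of that pattern with a POSIX trylock standing in for the log mutex; get_lsn_nowait() and monitor_tick() are illustrative names.

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t log_mutex = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t current_lsn = 0;   // stand-in for the real log system state

    // Returns the current LSN, or 0 if the mutex could not be taken right now,
    // mirroring the contract of log_get_lsn_nowait() as used above.
    static uint64_t get_lsn_nowait()
    {
        if (pthread_mutex_trylock(&log_mutex) != 0)
            return 0;                           // busy: caller skips this round
        uint64_t lsn = current_lsn;
        pthread_mutex_unlock(&log_mutex);
        return lsn;
    }

    void monitor_tick(uint64_t& old_lsn)
    {
        uint64_t new_lsn = get_lsn_nowait();
        if (new_lsn && new_lsn < old_lsn) {
            // would report the "lsn went backwards" anomaly here
        }
        if (new_lsn)
            old_lsn = new_lsn;                  // only advance on a real reading
    }

    int main()
    {
        uint64_t old_lsn = 0;
        monitor_tick(old_lsn);
        return 0;
    }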
*/ static void @@ -432,8 +459,10 @@ static void sync_array_cell_print( /*==================*/ - FILE* file, /*!< in: file where to print */ - sync_cell_t* cell) /*!< in: sync cell */ + FILE* file, /*!< in: file where to print */ + sync_cell_t* cell, /*!< in: sync cell */ + os_thread_id_t* reserver) /*!< out: write reserver or + 0 */ { ib_mutex_t* mutex; rw_lock_t* rwlock; @@ -454,19 +483,21 @@ sync_array_cell_print( been freed meanwhile */ mutex = cell->old_wait_mutex; - fprintf(file, - "Mutex at %p created file %s line %lu, lock var %lu\n" + if (mutex) { + fprintf(file, + "Mutex at %p created file %s line %lu, lock var %lu\n" #ifdef UNIV_SYNC_DEBUG - "Last time reserved in file %s line %lu, " + "Last time reserved in file %s line %lu, " #endif /* UNIV_SYNC_DEBUG */ - "waiters flag %lu\n", - (void*) mutex, innobase_basename(mutex->cfile_name), - (ulong) mutex->cline, - (ulong) mutex->lock_word, + "waiters flag %lu\n", + (void*) mutex, innobase_basename(mutex->cfile_name), + (ulong) mutex->cline, + (ulong) mutex->lock_word, #ifdef UNIV_SYNC_DEBUG - mutex->file_name, (ulong) mutex->line, + mutex->file_name, (ulong) mutex->line, #endif /* UNIV_SYNC_DEBUG */ - (ulong) mutex->waiters); + (ulong) mutex->waiters); + } } else if (type == RW_LOCK_EX || type == RW_LOCK_WAIT_EX @@ -478,33 +509,36 @@ sync_array_cell_print( rwlock = cell->old_wait_rw_lock; - fprintf(file, - " RW-latch at %p created in file %s line %lu\n", - (void*) rwlock, innobase_basename(rwlock->cfile_name), - (ulong) rwlock->cline); - writer = rw_lock_get_writer(rwlock); - if (writer != RW_LOCK_NOT_LOCKED) { + if (rwlock) { fprintf(file, - "a writer (thread id %lu) has" - " reserved it in mode %s", - (ulong) os_thread_pf(rwlock->writer_thread), - writer == RW_LOCK_EX - ? " exclusive\n" - : " wait exclusive\n"); - } + " RW-latch at %p created in file %s line %lu\n", + (void*) rwlock, innobase_basename(rwlock->cfile_name), + (ulong) rwlock->cline); + writer = rw_lock_get_writer(rwlock); + if (writer != RW_LOCK_NOT_LOCKED) { + fprintf(file, + "a writer (thread id %lu) has" + " reserved it in mode %s", + (ulong) os_thread_pf(rwlock->writer_thread), + writer == RW_LOCK_EX + ? " exclusive\n" + : " wait exclusive\n"); + *reserver = rwlock->writer_thread; + } - fprintf(file, - "number of readers %lu, waiters flag %lu, " - "lock_word: %lx\n" - "Last time read locked in file %s line %lu\n" - "Last time write locked in file %s line %lu\n", - (ulong) rw_lock_get_reader_count(rwlock), - (ulong) rwlock->waiters, - rwlock->lock_word, - innobase_basename(rwlock->last_s_file_name), - (ulong) rwlock->last_s_line, - rwlock->last_x_file_name, - (ulong) rwlock->last_x_line); + fprintf(file, + "number of readers %lu, waiters flag %lu, " + "lock_word: %lx\n" + "Last time read locked in file %s line %lu\n" + "Last time write locked in file %s line %lu\n", + (ulong) rw_lock_get_reader_count(rwlock), + (ulong) rwlock->waiters, + rwlock->lock_word, + innobase_basename(rwlock->last_s_file_name), + (ulong) rwlock->last_s_line, + rwlock->last_x_file_name, + (ulong) rwlock->last_x_line); + } } else { ut_error; } @@ -515,32 +549,6 @@ sync_array_cell_print( } #ifdef UNIV_SYNC_DEBUG -/******************************************************************//** -Looks for a cell with the given thread id. 
-@return pointer to cell or NULL if not found */ -static -sync_cell_t* -sync_array_find_thread( -/*===================*/ - sync_array_t* arr, /*!< in: wait array */ - os_thread_id_t thread) /*!< in: thread id */ -{ - ulint i; - sync_cell_t* cell; - - for (i = 0; i < arr->n_cells; i++) { - - cell = sync_array_get_nth_cell(arr, i); - - if (cell->wait_object != NULL - && os_thread_eq(cell->thread, thread)) { - - return(cell); /* Found */ - } - } - - return(NULL); /* Not found */ -} /******************************************************************//** Recursion step for deadlock detection. @@ -602,6 +610,7 @@ sync_array_detect_deadlock( os_thread_id_t thread; ibool ret; rw_lock_debug_t*debug; + os_thread_id_t reserver=0; ut_a(arr); ut_a(start); @@ -637,10 +646,10 @@ sync_array_detect_deadlock( depth); if (ret) { fprintf(stderr, - "Mutex %p owned by thread %lu file %s line %lu\n", + "Mutex %p owned by thread %lu file %s line %lu\n", mutex, (ulong) os_thread_pf(mutex->thread_id), mutex->file_name, (ulong) mutex->line); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &reserver); return(TRUE); } @@ -678,7 +687,7 @@ sync_array_detect_deadlock( print: fprintf(stderr, "rw-lock %p ", (void*) lock); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &reserver); rw_lock_debug_print(stderr, debug); return(TRUE); } @@ -740,6 +749,7 @@ sync_arr_cell_can_wake_up( mutex = static_cast<ib_mutex_t*>(cell->wait_object); + os_rmb; if (mutex_get_lock_word(mutex) == 0) { return(TRUE); @@ -749,6 +759,7 @@ sync_arr_cell_can_wake_up( lock = static_cast<rw_lock_t*>(cell->wait_object); + os_rmb; if (lock->lock_word > 0) { /* Either unlocked or only read locked. */ @@ -760,6 +771,7 @@ sync_arr_cell_can_wake_up( lock = static_cast<rw_lock_t*>(cell->wait_object); /* lock_word == 0 means all readers have left */ + os_rmb; if (lock->lock_word == 0) { return(TRUE); @@ -768,6 +780,7 @@ sync_arr_cell_can_wake_up( lock = static_cast<rw_lock_t*>(cell->wait_object); /* lock_word > 0 means no writer or reserved writer */ + os_rmb; if (lock->lock_word > 0) { return(TRUE); @@ -921,6 +934,7 @@ sync_array_print_long_waits_low( double diff; sync_cell_t* cell; void* wait_object; + os_thread_id_t reserver=0; cell = sync_array_get_nth_cell(arr, i); @@ -936,7 +950,7 @@ sync_array_print_long_waits_low( if (diff > SYNC_ARRAY_TIMEOUT) { fputs("InnoDB: Warning: a long semaphore wait:\n", stderr); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &reserver); *noticed = TRUE; } @@ -951,6 +965,60 @@ sync_array_print_long_waits_low( } } + /* We found a long semaphore wait, wait all threads that are + waiting for a semaphore. 
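sync_array_cell_print() above gains an os_thread_id_t* reserver out-parameter that is filled in when the cell waits on an rw-lock currently held by a writer, and sync_array_find_thread() (moved out of the UNIV_SYNC_DEBUG-only section) lets the caller look up that writer's own wait cell. A simplified sketch of the lookup; wait_cell, wait_array and find_cell_of_thread are illustrative stand-ins for the InnoDB types.

    #include <cstddef>

    struct wait_cell {
        const void*   wait_object;  // NULL when the cell is free
        unsigned long thread;       // id of the thread waiting in this cell
    };

    struct wait_array {
        wait_cell*  cells;
        std::size_t n_cells;
    };

    // Same linear scan as sync_array_find_thread(): find the cell, if any,
    // in which 'thread' is itself blocked.
    wait_cell* find_cell_of_thread(wait_array& arr, unsigned long thread)
    {
        for (std::size_t i = 0; i < arr.n_cells; i++) {
            wait_cell* c = &arr.cells[i];
            if (c->wait_object != NULL && c->thread == thread)
                return c;
        }
        return NULL;
    }

    int main()
    {
        wait_cell cells[2] = { { NULL, 1 }, { &cells[0], 42 } };
        wait_array arr = { cells, 2 };
        return find_cell_of_thread(arr, 42) == &cells[1] ? 0 : 1;
    }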
*/ + if (*noticed) { + for (i = 0; i < arr->n_cells; i++) { + void* wait_object; + os_thread_id_t reserver=(os_thread_id_t)ULINT_UNDEFINED; + sync_cell_t* cell; + ulint loop = 0; + + cell = sync_array_get_nth_cell(arr, i); + + wait_object = cell->wait_object; + + if (wait_object == NULL || !cell->waiting) { + + continue; + } + + fputs("InnoDB: Warning: semaphore wait:\n", + stderr); + sync_array_cell_print(stderr, cell, &reserver); + + /* Try to output cell information for writer recursive way */ + while (reserver != (os_thread_id_t)ULINT_UNDEFINED) { + sync_cell_t* reserver_wait; + + reserver_wait = sync_array_find_thread(arr, reserver); + + if (reserver_wait && + reserver_wait->wait_object != NULL && + reserver_wait->waiting) { + fputs("InnoDB: Warning: Writer thread is waiting this semaphore:\n", + stderr); + reserver = (os_thread_id_t)ULINT_UNDEFINED; + sync_array_cell_print(stderr, reserver_wait, &reserver); + loop++; + + if (reserver_wait->thread == reserver) { + reserver = (os_thread_id_t)ULINT_UNDEFINED; + } + } else { + reserver = (os_thread_id_t)ULINT_UNDEFINED; + } + + /* This is protection against loop */ + if (loop > 100) { + fputs("InnoDB: Warning: Too many waiting threads.\n", stderr); + break; + } + + } + } + } + #undef SYNC_ARRAY_TIMEOUT return(fatal); @@ -1030,6 +1098,7 @@ sync_array_print_info_low( { ulint i; ulint count = 0; + os_thread_id_t r = 0; fprintf(file, "OS WAIT ARRAY INFO: reservation count %ld\n", @@ -1042,7 +1111,7 @@ sync_array_print_info_low( if (cell->wait_object != NULL) { count++; - sync_array_cell_print(file, cell); + sync_array_cell_print(file, cell, &r); } } } diff --git a/storage/innobase/sync/sync0rw.cc b/storage/innobase/sync/sync0rw.cc index ebf73917702..8a211d81af5 100644 --- a/storage/innobase/sync/sync0rw.cc +++ b/storage/innobase/sync/sync0rw.cc @@ -41,6 +41,7 @@ Created 9/11/1995 Heikki Tuuri #include "srv0srv.h" #include "os0sync.h" /* for INNODB_RW_LOCKS_USE_ATOMICS */ #include "ha_prototypes.h" +#include "my_cpu.h" /* IMPLEMENTATION OF THE RW_LOCK @@ -151,18 +152,12 @@ UNIV_INTERN mysql_pfs_key_t rw_lock_mutex_key; To modify the debug info list of an rw-lock, this mutex has to be acquired in addition to the mutex protecting the lock. */ -UNIV_INTERN ib_mutex_t rw_lock_debug_mutex; +UNIV_INTERN os_fast_mutex_t rw_lock_debug_mutex; # ifdef UNIV_PFS_MUTEX UNIV_INTERN mysql_pfs_key_t rw_lock_debug_mutex_key; # endif -/* If deadlock detection does not get immediately the mutex, -it may wait for this event */ -UNIV_INTERN os_event_t rw_lock_debug_event; -/* This is set to TRUE, if there may be waiters for the event */ -UNIV_INTERN ibool rw_lock_debug_waiters; - /******************************************************************//** Creates a debug info struct. 
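The long-semaphore-wait report above then follows the ownership chain: if a waiting cell's rw-lock is held by a writer, that writer's own wait cell is printed, and so on, with a loop counter capped at 100 as protection against cycles. The sketch below shows the shape of that bounded walk over a map from waiting thread to lock-holding thread; waiter_of and report_wait_chain are invented names.

    #include <cstdio>
    #include <map>

    typedef unsigned long thread_id;

    // waiter_of maps a blocked thread to the thread holding the lock it waits
    // for (0 = the holder is running, not waiting on anything).
    void report_wait_chain(const std::map<thread_id, thread_id>& waiter_of,
                           thread_id start)
    {
        thread_id current = start;
        for (int depth = 0; depth < 100; depth++) {   // guard against cycles
            std::map<thread_id, thread_id>::const_iterator it =
                waiter_of.find(current);
            if (it == waiter_of.end() || it->second == 0)
                return;                               // holder is not waiting: done
            std::printf("thread %lu waits on a lock held by thread %lu\n",
                        current, it->second);
            current = it->second;
        }
        std::printf("too many waiting threads, stopping\n");
    }

    int main()
    {
        std::map<thread_id, thread_id> waiter_of;
        waiter_of[10] = 20;   // thread 10 waits on a lock held by thread 20
        waiter_of[20] = 0;    // thread 20 is running
        report_wait_chain(waiter_of, 10);
        return 0;
    }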
*/ static @@ -381,15 +376,19 @@ rw_lock_s_lock_spin( lock_loop: /* Spin waiting for the writer field to become free */ + os_rmb; + HMT_low(); while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) { if (srv_spin_wait_delay) { ut_delay(ut_rnd_interval(0, srv_spin_wait_delay)); } i++; + os_rmb; } - if (i == SYNC_SPIN_ROUNDS) { + HMT_medium(); + if (i >= SYNC_SPIN_ROUNDS) { os_thread_yield(); } @@ -476,16 +475,20 @@ rw_lock_x_lock_wait( counter_index = (size_t) os_thread_get_curr_id(); + os_rmb; ut_ad(lock->lock_word <= 0); + HMT_low(); while (lock->lock_word < 0) { if (srv_spin_wait_delay) { ut_delay(ut_rnd_interval(0, srv_spin_wait_delay)); } if(i < SYNC_SPIN_ROUNDS) { i++; + os_rmb; continue; } + HMT_medium(); /* If there is still a reader, then go to sleep.*/ rw_lock_stats.rw_x_spin_round_count.add(counter_index, i); @@ -522,7 +525,9 @@ rw_lock_x_lock_wait( } else { sync_array_free_cell(sync_arr, index); } + HMT_low(); } + HMT_medium(); rw_lock_stats.rw_x_spin_round_count.add(counter_index, i); } @@ -560,6 +565,10 @@ rw_lock_x_lock_low( } else { os_thread_id_t thread_id = os_thread_get_curr_id(); + if (!pass) { + os_rmb; + } + /* Decrement failed: relock or failed lock */ if (!pass && lock->recursive && os_thread_eq(lock->writer_thread, thread_id)) { @@ -638,6 +647,8 @@ lock_loop: } /* Spin waiting for the lock_word to become free */ + os_rmb; + HMT_low(); while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) { if (srv_spin_wait_delay) { @@ -646,8 +657,10 @@ lock_loop: } i++; + os_rmb; } - if (i == SYNC_SPIN_ROUNDS) { + HMT_medium(); + if (i >= SYNC_SPIN_ROUNDS) { os_thread_yield(); } else { goto lock_loop; @@ -690,22 +703,7 @@ void rw_lock_debug_mutex_enter(void) /*===========================*/ { -loop: - if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { - return; - } - - os_event_reset(rw_lock_debug_event); - - rw_lock_debug_waiters = TRUE; - - if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { - return; - } - - os_event_wait(rw_lock_debug_event); - - goto loop; + os_fast_mutex_lock(&rw_lock_debug_mutex); } /******************************************************************//** @@ -715,12 +713,7 @@ void rw_lock_debug_mutex_exit(void) /*==========================*/ { - mutex_exit(&rw_lock_debug_mutex); - - if (rw_lock_debug_waiters) { - rw_lock_debug_waiters = FALSE; - os_event_set(rw_lock_debug_event); - } + os_fast_mutex_unlock(&rw_lock_debug_mutex); } /******************************************************************//** diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc index 3532f513646..9fc21005d47 100644 --- a/storage/innobase/sync/sync0sync.cc +++ b/storage/innobase/sync/sync0sync.cc @@ -45,6 +45,7 @@ Created 9/5/1995 Heikki Tuuri # include "srv0start.h" /* srv_is_being_started */ #endif /* UNIV_SYNC_DEBUG */ #include "ha_prototypes.h" +#include "my_cpu.h" /* REASONS FOR IMPLEMENTING THE SPIN LOCK MUTEX @@ -126,7 +127,7 @@ it and did not see the waiters byte set to 1, a case which would lead the other thread to an infinite wait. LEMMA 1: After a thread resets the event of a mutex (or rw_lock), some -======= +====== thread will eventually call os_event_set() on that particular event. Thus no infinite wait is possible in this case. @@ -139,7 +140,7 @@ os_event_set() with the mutex as an argument. Q.E.D. LEMMA 2: If an os_event_set() call is made after some thread has called -======= +====== the os_event_reset() and before it starts wait on that event, the call will not be lost to the second thread. 
This is true even if there is an intervening call to os_event_reset() by another thread. @@ -456,6 +457,8 @@ mutex_set_waiters( ptr = &(mutex->waiters); + os_wmb; + *ptr = n; /* Here we assume that the write of a single word in memory is atomic */ } @@ -500,15 +503,17 @@ mutex_loop: spin_loop: + HMT_low(); + os_rmb; while (mutex_get_lock_word(mutex) != 0 && i < SYNC_SPIN_ROUNDS) { if (srv_spin_wait_delay) { ut_delay(ut_rnd_interval(0, srv_spin_wait_delay)); } - i++; } + HMT_medium(); - if (i == SYNC_SPIN_ROUNDS) { + if (i >= SYNC_SPIN_ROUNDS) { os_thread_yield(); } @@ -1473,11 +1478,7 @@ sync_init(void) SYNC_NO_ORDER_CHECK); #ifdef UNIV_SYNC_DEBUG - mutex_create(rw_lock_debug_mutex_key, &rw_lock_debug_mutex, - SYNC_NO_ORDER_CHECK); - - rw_lock_debug_event = os_event_create(); - rw_lock_debug_waiters = FALSE; + os_fast_mutex_init(rw_lock_debug_mutex_key, &rw_lock_debug_mutex); #endif /* UNIV_SYNC_DEBUG */ } @@ -1545,6 +1546,7 @@ sync_close(void) sync_order_checks_on = FALSE; sync_thread_level_arrays_free(); + os_fast_mutex_free(&rw_lock_debug_mutex); #endif /* UNIV_SYNC_DEBUG */ sync_initialized = FALSE; @@ -1559,12 +1561,12 @@ sync_print_wait_info( FILE* file) /*!< in: file where to print */ { fprintf(file, - "Mutex spin waits "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n" - "RW-shared spins "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n" - "RW-excl spins "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n", + "Mutex spin waits " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n" + "RW-shared spins " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n" + "RW-excl spins " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n", (ib_uint64_t) mutex_spin_wait_count, (ib_uint64_t) mutex_spin_round_count, (ib_uint64_t) mutex_os_wait_count, diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc index f6360562ae7..01ccfb8a6d0 100644 --- a/storage/innobase/trx/trx0i_s.cc +++ b/storage/innobase/trx/trx0i_s.cc @@ -1639,7 +1639,7 @@ trx_i_s_create_lock_id( } else { /* table lock */ res_len = ut_snprintf(lock_id, lock_id_size, - TRX_ID_FMT":"UINT64PF, + TRX_ID_FMT":" UINT64PF, row->lock_trx_id, row->lock_table_id); } diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc index 9dc0ef96460..2c31af9442c 100644 --- a/storage/innobase/trx/trx0sys.cc +++ b/storage/innobase/trx/trx0sys.cc @@ -1084,7 +1084,7 @@ trx_sys_print_mysql_binlog_offset_from_page( == TRX_SYS_MYSQL_LOG_MAGIC_N) { fprintf(stderr, - "ibbackup: Last MySQL binlog file position %lu %lu," + "mysqlbackup: Last MySQL binlog file position %lu %lu," " file name %s\n", (ulong) mach_read_from_4( sys_header + TRX_SYS_MYSQL_LOG_INFO @@ -1135,9 +1135,9 @@ trx_sys_read_file_format_id( ut_print_timestamp(stderr); fprintf(stderr, - " ibbackup: Error: trying to read system tablespace " - "file format,\n" - " ibbackup: but could not open the tablespace " + " mysqlbackup: Error: trying to read system " + "tablespace file format,\n" + " mysqlbackup: but could not open the tablespace " "file %s!\n", pathname); return(FALSE); } @@ -1154,9 +1154,9 @@ trx_sys_read_file_format_id( ut_print_timestamp(stderr); fprintf(stderr, - " ibbackup: Error: trying to read system tablespace " - "file format,\n" - " ibbackup: but failed to read the tablespace " + " mysqlbackup: Error: trying to read system " + "tablespace file format,\n" + " mysqlbackup: but failed to read the tablespace " "file %s!\n", pathname); os_file_close(file); @@ -1215,9 +1215,9 @@ 
trx_sys_read_pertable_file_format_id( ut_print_timestamp(stderr); fprintf(stderr, - " ibbackup: Error: trying to read per-table " + " mysqlbackup: Error: trying to read per-table " "tablespace format,\n" - " ibbackup: but could not open the tablespace " + " mysqlbackup: but could not open the tablespace " "file %s!\n", pathname); return(FALSE); @@ -1234,9 +1234,9 @@ trx_sys_read_pertable_file_format_id( ut_print_timestamp(stderr); fprintf(stderr, - " ibbackup: Error: trying to per-table data file " + " mysqlbackup: Error: trying to per-table data file " "format,\n" - " ibbackup: but failed to read the tablespace " + " mysqlbackup: but failed to read the tablespace " "file %s!\n", pathname); os_file_close(file); diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 96e843647ea..a8f1a7424fd 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -52,6 +52,9 @@ Created 3/26/1996 Heikki Tuuri #include<set> +extern "C" +int thd_deadlock_victim_preference(const MYSQL_THD thd1, const MYSQL_THD thd2); + /** Set of table_id */ typedef std::set<table_id_t> table_id_set; @@ -1835,7 +1838,7 @@ state_ok: if (trx->undo_no != 0) { newline = TRUE; - fprintf(f, ", undo log entries "TRX_ID_FMT, trx->undo_no); + fprintf(f, ", undo log entries " TRX_ID_FMT, trx->undo_no); } if (newline) { @@ -1938,9 +1941,8 @@ trx_assert_started( #endif /* UNIV_DEBUG */ /*******************************************************************//** -Compares the "weight" (or size) of two transactions. Transactions that -have edited non-transactional tables are considered heavier than ones -that have not. +Compares the "weight" (or size) of two transactions. The heavier the weight, +the more reluctant we will be to choose the transaction as a deadlock victim. @return TRUE if weight(a) >= weight(b) */ UNIV_INTERN ibool @@ -1949,26 +1951,19 @@ trx_weight_ge( const trx_t* a, /*!< in: the first transaction to be compared */ const trx_t* b) /*!< in: the second transaction to be compared */ { - ibool a_notrans_edit; - ibool b_notrans_edit; - - /* If mysql_thd is NULL for a transaction we assume that it has - not edited non-transactional tables. */ - - a_notrans_edit = a->mysql_thd != NULL - && thd_has_edited_nontrans_tables(a->mysql_thd); - - b_notrans_edit = b->mysql_thd != NULL - && thd_has_edited_nontrans_tables(b->mysql_thd); - - if (a_notrans_edit != b_notrans_edit) { + int pref; - return(a_notrans_edit); + /* First ask the upper server layer if it has any preference for which + to prefer as a deadlock victim. */ + pref= thd_deadlock_victim_preference(a->mysql_thd, b->mysql_thd); + if (pref < 0) { + return FALSE; + } else if (pref > 0) { + return TRUE; } - /* Either both had edited non-transactional tables or both had - not, we fall back to comparing the number of altered/locked - rows. */ + /* Upper server layer had no preference, we fall back to comparing the + number of altered/locked rows. 
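trx_weight_ge() above now first asks the server layer through thd_deadlock_victim_preference(); only when that call returns 0 does InnoDB fall back to comparing transaction sizes, replacing the old "has edited non-transactional tables" heuristic. A sketch of the same three-way dispatch; txn, upper_layer_preference() and weight_ge() are illustrative, and the sign handling simply mirrors the hunk above.

    // Two-stage "is transaction a at least as heavy as b" comparison.
    struct txn { unsigned long rows_modified; unsigned long rows_locked; };

    // Stand-in for thd_deadlock_victim_preference(): <0 and >0 express an
    // upper-layer preference (mapped below exactly as in the hunk), 0 means
    // no opinion.
    int upper_layer_preference(const txn&, const txn&) { return 0; }

    bool weight_ge(const txn& a, const txn& b)
    {
        int pref = upper_layer_preference(a, b);
        if (pref < 0) return false;   // upper layer says a is "lighter"
        if (pref > 0) return true;    // upper layer says a is "heavier"

        // No preference from above: fall back to altered/locked row counts.
        return (a.rows_modified + a.rows_locked)
            >= (b.rows_modified + b.rows_locked);
    }

    int main()
    {
        txn a = { 10, 5 }, b = { 3, 1 };
        return weight_ge(a, b) ? 0 : 1;
    }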
*/ #if 0 fprintf(stderr, @@ -2135,7 +2130,7 @@ trx_recover_for_mysql( ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Transaction contains changes" - " to "TRX_ID_FMT" rows\n", + " to " TRX_ID_FMT " rows\n", trx->undo_no); count++; diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index 713a31b2ddf..1538bd2c63b 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -57,7 +57,7 @@ C_MODE_END #endif #define THD_TRN (*(TRN **)thd_ha_data(thd, maria_hton)) -ulong pagecache_division_limit, pagecache_age_threshold; +ulong pagecache_division_limit, pagecache_age_threshold, pagecache_file_hash_size; ulonglong pagecache_buffer_size; const char *zerofill_error_msg= "Table is from another system and must be zerofilled or repaired to be " @@ -248,6 +248,13 @@ static MYSQL_SYSVAR_ULONG(pagecache_division_limit, pagecache_division_limit, "The minimum percentage of warm blocks in key cache", 0, 0, 100, 1, 100, 1); +static MYSQL_SYSVAR_ULONG(pagecache_file_hash_size, pagecache_file_hash_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Number of hash buckets for open and changed files. If you have a lot of Aria " + "files open you should increase this for faster flush of changes. A good " + "value is probably 1/10 of number of possible open Aria files.", 0,0, + 512, 128, 16384, 1); + static MYSQL_SYSVAR_SET(recover, maria_recover_options, PLUGIN_VAR_OPCMDARG, "Specifies how corrupted tables should be automatically repaired", NULL, NULL, HA_RECOVER_DEFAULT, &maria_recover_typelib); @@ -1230,6 +1237,14 @@ int ha_maria::open(const char *name, int mode, uint test_if_locked) table->key_info[i].block_size= file->s->keyinfo[i].block_length; } my_errno= 0; + + /* Count statistics of usage for newly open normal files */ + if (file->s->reopen == 1 && ! (test_if_locked & HA_OPEN_TMP_TABLE)) + { + if (file->s->delay_key_write) + feature_files_opened_with_delayed_keys++; + } + return my_errno; } @@ -2803,7 +2818,8 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) TRN *trn; int error; uint locked_tables; - TABLE *table; + DYNAMIC_ARRAY used_tables; + DBUG_ENTER("ha_maria::implicit_commit"); if (!maria_hton || !(trn= THD_TRN)) DBUG_RETURN(0); @@ -2819,7 +2835,38 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) DBUG_PRINT("info", ("locked_tables, skipping")); DBUG_RETURN(0); } + locked_tables= trnman_has_locked_tables(trn); + + if (new_trn && trn && trn->used_tables) + { + MARIA_USED_TABLES *tables; + /* + Save locked tables so that we can move them to another transaction + We are using a dynamic array as locked_tables in some cases can be + smaller than the used_tables list (for example when the server does + early unlock of tables. 
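The comment above explains why the used tables are snapshotted before ma_commit(): the locked-tables count can be smaller than trn->used_tables, so the transactional shares are first copied into a DYNAMIC_ARRAY and re-attached to the new transaction afterwards. A standalone analogue of that collect-then-reattach flow, using std::vector in place of the mysys DYNAMIC_ARRAY and simplified share/trx stand-ins:

    #include <cstddef>
    #include <vector>

    struct share { bool transactional; };              // stand-in for MARIA_SHARE
    struct trx   { std::vector<share*> used_tables; }; // stand-in for TRN

    // 1) snapshot: remember the transactional shares of the old transaction
    std::vector<share*> snapshot_used_tables(const trx& old_trn)
    {
        std::vector<share*> used;
        for (std::size_t i = 0; i < old_trn.used_tables.size(); i++)
            if (old_trn.used_tables[i]->transactional)
                used.push_back(old_trn.used_tables[i]);
        return used;
    }

    // 2) after commit + new transaction: re-attach each remembered share
    // (the real code does this via _ma_set_trn_for_table())
    void reattach(trx& new_trn, const std::vector<share*>& used)
    {
        for (std::size_t i = 0; i < used.size(); i++)
            new_trn.used_tables.push_back(used[i]);
    }

    int main()
    {
        share s1 = { true }, s2 = { false };
        trx old_trn;
        old_trn.used_tables.push_back(&s1);
        old_trn.used_tables.push_back(&s2);

        std::vector<share*> used = snapshot_used_tables(old_trn);  // keeps only s1
        trx new_trn;
        reattach(new_trn, used);
        return new_trn.used_tables.size() == 1 ? 0 : 1;
    }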
+ */ + + my_init_dynamic_array2(&used_tables, sizeof(MARIA_SHARE*), (void*) 0, + locked_tables, 8, MYF(MY_THREAD_SPECIFIC)); + for (tables= (MARIA_USED_TABLES*) trn->used_tables; + tables; + tables= tables->next) + { + if (tables->share->base.born_transactional) + { + if (insert_dynamic(&used_tables, (uchar*) &tables->share)) + { + error= HA_ERR_OUT_OF_MEM; + goto end_and_free; + } + } + } + } + else + bzero(&used_tables, sizeof(used_tables)); + error= 0; if (unlikely(ma_commit(trn))) error= 1; @@ -2843,7 +2890,7 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) if (unlikely(trn == NULL)) { error= HA_ERR_OUT_OF_MEM; - goto end; + goto end_and_free; } /* Move all locked tables to the new transaction @@ -2852,13 +2899,21 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) when we should call _ma_setup_live_state() and in some cases, like in check table, we use the table without calling start_stmt(). */ - for (table=thd->open_tables; table ; table=table->next) + + uint i; + for (i= 0 ; i < used_tables.elements ; i++) { - if (table->db_stat && table->file->ht == maria_hton) + MARIA_SHARE *share; + LIST *handlers; + + share= *(dynamic_element(&used_tables, i, MARIA_SHARE**)); + /* Find table instances that was used in this transaction */ + for (handlers= share->open_list; handlers; handlers= handlers->next) { - MARIA_HA *handler= ((ha_maria*) table->file)->file; - if (handler->s->base.born_transactional) - { + MARIA_HA *handler= (MARIA_HA*) handlers->data; + if (handler->external_ref && + ((TABLE*) handler->external_ref)->in_use == thd) + { _ma_set_trn_for_table(handler, trn); /* If handler uses versioning */ if (handler->s->lock_key_trees) @@ -2872,6 +2927,8 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn) /* This is just a commit, tables stay locked if they were: */ trnman_reset_locked_tables(trn, locked_tables); +end_and_free: + delete_dynamic(&used_tables); end: DBUG_RETURN(error); } @@ -3503,10 +3560,11 @@ static int ha_maria_init(void *p) mark_recovery_start(log_dir)) || !init_pagecache(maria_pagecache, (size_t) pagecache_buffer_size, pagecache_division_limit, - pagecache_age_threshold, maria_block_size, 0) || + pagecache_age_threshold, maria_block_size, pagecache_file_hash_size, + 0) || !init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, 0) || + TRANSLOG_PAGE_SIZE, 0, 0) || translog_init(maria_data_root, log_file_size, MYSQL_VERSION_ID, server_id, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, 0) || @@ -3622,6 +3680,7 @@ struct st_mysql_sys_var* system_variables[]= { MYSQL_SYSVAR(pagecache_age_threshold), MYSQL_SYSVAR(pagecache_buffer_size), MYSQL_SYSVAR(pagecache_division_limit), + MYSQL_SYSVAR(pagecache_file_hash_size), MYSQL_SYSVAR(recover), MYSQL_SYSVAR(repair_threads), MYSQL_SYSVAR(sort_buffer_size), @@ -3853,6 +3912,6 @@ maria_declare_plugin(aria) status_variables, /* status variables */ system_variables, /* system variables */ "1.5", /* string version */ - MariaDB_PLUGIN_MATURITY_GAMMA /* maturity */ + MariaDB_PLUGIN_MATURITY_STABLE /* maturity */ } maria_declare_plugin_end; diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index 4ac267ffb8a..0a89babb205 100644 --- a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -5515,6 +5515,8 @@ static int sort_key_write(MARIA_SORT_PARAM *sort_param, const uchar *a) } if ((sort_param->keyinfo->flag & HA_NOSAME) && cmp == 0) { + DBUG_EXECUTE("key", _ma_print_keydata(DBUG_FILE, sort_param->seg, a, + USE_WHOLE_KEY);); sort_info->dupp++; 
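The re-attach loop above no longer walks thd->open_tables; each remembered MARIA_SHARE instead exposes the list of its open handler instances (share->open_list, maintained by the ma_open.c/ma_close.c hunks further down), and only handlers whose external_ref TABLE belongs to the current THD are moved to the new transaction. A small sketch of filtering a share's handlers by owning session; session, handler_instance, table_share and reattach_handlers() are illustrative types, and set_trn stands in for _ma_set_trn_for_table().

    #include <cstddef>
    #include <vector>

    struct session {};                                   // stand-in for THD
    struct handler_instance {
        session* owner;                                  // stand-in for external_ref->in_use
    };
    struct table_share {
        std::vector<handler_instance*> open_handlers;    // stand-in for share->open_list
    };

    // Move only those handler instances of 'share' that belong to 'thd' to the
    // new transaction; instances owned by other sessions are left alone.
    void reattach_handlers(table_share& share, session* thd,
                           void (*set_trn)(handler_instance*))
    {
        for (std::size_t i = 0; i < share.open_handlers.size(); i++) {
            handler_instance* h = share.open_handlers[i];
            if (h->owner == thd)
                set_trn(h);
        }
    }

    static int moved = 0;
    static void set_trn_stub(handler_instance*) { moved++; }

    int main()
    {
        session thd, other;
        handler_instance h1 = { &thd }, h2 = { &other };
        table_share share;
        share.open_handlers.push_back(&h1);
        share.open_handlers.push_back(&h2);
        reattach_handlers(share, &thd, set_trn_stub);
        return moved == 1 ? 0 : 1;
    }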
sort_info->info->cur_row.lastpos= get_record_for_key(sort_param->keyinfo, a); diff --git a/storage/maria/ma_close.c b/storage/maria/ma_close.c index dd3a034425a..4532b029126 100644 --- a/storage/maria/ma_close.c +++ b/storage/maria/ma_close.c @@ -80,7 +80,10 @@ int maria_close(register MARIA_HA *info) } flag= !--share->reopen; if (!internal_table) - maria_open_list=list_delete(maria_open_list,&info->open_list); + { + maria_open_list= list_delete(maria_open_list,&info->open_list); + share->open_list= list_delete(share->open_list, &info->share_list); + } my_free(info->rec_buff); (*share->end)(info); @@ -91,6 +94,7 @@ int maria_close(register MARIA_HA *info) /* Check that we don't have any dangling pointers from the transaction */ DBUG_ASSERT(share->in_trans == 0); + DBUG_ASSERT(share->open_list == 0); if (share->kfile.file >= 0) { diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c index e06084cef07..1d274d796be 100644 --- a/storage/maria/ma_open.c +++ b/storage/maria/ma_open.c @@ -211,8 +211,9 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, const char *name, if (!internal_table) { - m_info->open_list.data=(void*) m_info; - maria_open_list=list_add(maria_open_list,&m_info->open_list); + m_info->open_list.data= m_info->share_list.data= (void*) m_info; + maria_open_list= list_add(maria_open_list, &m_info->open_list); + share->open_list= list_add(share->open_list, &m_info->share_list); } else { diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c index 8e8ecf945f0..bb085bbdc7a 100644 --- a/storage/maria/ma_pagecache.c +++ b/storage/maria/ma_pagecache.c @@ -502,7 +502,7 @@ static void test_key_cache(PAGECACHE *pagecache, #define PAGECACHE_HASH(p, f, pos) (((ulong) (pos) + \ (ulong) (f).file) & (p->hash_entries-1)) -#define FILE_HASH(f) ((uint) (f).file & (PAGECACHE_CHANGED_BLOCKS_HASH - 1)) +#define FILE_HASH(f,cache) ((uint) (f).file & (cache->changed_blocks_hash_size-1)) #define DEFAULT_PAGECACHE_DEBUG_LOG "pagecache_debug.log" @@ -743,7 +743,8 @@ static inline uint next_power(uint value) ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, uint division_limit, uint age_threshold, - uint block_size, myf my_readwrite_flags) + uint block_size, uint changed_blocks_hash_size, + myf my_readwrite_flags) { ulong blocks, hash_links, length; int error; @@ -786,6 +787,10 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, 2 * sizeof(PAGECACHE_HASH_LINK) + sizeof(PAGECACHE_HASH_LINK*) * 5/4 + block_size)); + /* Changed blocks hash needs to be a power of 2 */ + changed_blocks_hash_size= my_round_up_to_next_power(MY_MAX(changed_blocks_hash_size, + MIN_PAGECACHE_CHANGED_BLOCKS_HASH_SIZE)); + /* We need to support page cache with just one block to be able to do scanning of rows-in-block files @@ -809,10 +814,11 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, hash_links= MAX_THREADS + blocks - 1; #endif while ((length= (ALIGN_SIZE(blocks * sizeof(PAGECACHE_BLOCK_LINK)) + - ALIGN_SIZE(hash_links * sizeof(PAGECACHE_HASH_LINK)) + ALIGN_SIZE(sizeof(PAGECACHE_HASH_LINK*) * - pagecache->hash_entries))) + - (blocks << pagecache->shift) > use_mem) + pagecache->hash_entries) + + ALIGN_SIZE(hash_links * sizeof(PAGECACHE_HASH_LINK)) + + sizeof(PAGECACHE_BLOCK_LINK*)* (changed_blocks_hash_size*2))) + + (blocks << pagecache->shift) > use_mem && blocks > 8) blocks--; /* Allocate memory for cache page buffers */ if ((pagecache->block_mem= @@ -823,8 +829,17 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, Allocate memory for blocks, 
hash_links and hash entries; For each block 2 hash links are allocated */ - if ((pagecache->block_root= - (PAGECACHE_BLOCK_LINK*) my_malloc((size_t) length, MYF(0)))) + if (my_multi_malloc(MYF(MY_ZEROFILL), + &pagecache->block_root, blocks * sizeof(PAGECACHE_BLOCK_LINK), + &pagecache->hash_root, + sizeof(PAGECACHE_HASH_LINK*) * pagecache->hash_entries, + &pagecache->hash_link_root, + hash_links * sizeof(PAGECACHE_HASH_LINK), + &pagecache->changed_blocks, + sizeof(PAGECACHE_BLOCK_LINK*) * changed_blocks_hash_size, + &pagecache->file_blocks, + sizeof(PAGECACHE_BLOCK_LINK*) * changed_blocks_hash_size, + NullS)) break; my_large_free(pagecache->block_mem); pagecache->block_mem= 0; @@ -834,19 +849,6 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, pagecache->blocks_unused= blocks; pagecache->disk_blocks= (long) blocks; pagecache->hash_links= hash_links; - pagecache->hash_root= - (PAGECACHE_HASH_LINK**) ((char*) pagecache->block_root + - ALIGN_SIZE(blocks*sizeof(PAGECACHE_BLOCK_LINK))); - pagecache->hash_link_root= - (PAGECACHE_HASH_LINK*) ((char*) pagecache->hash_root + - ALIGN_SIZE((sizeof(PAGECACHE_HASH_LINK*) * - pagecache->hash_entries))); - bzero((uchar*) pagecache->block_root, - pagecache->disk_blocks * sizeof(PAGECACHE_BLOCK_LINK)); - bzero((uchar*) pagecache->hash_root, - pagecache->hash_entries * sizeof(PAGECACHE_HASH_LINK*)); - bzero((uchar*) pagecache->hash_link_root, - pagecache->hash_links * sizeof(PAGECACHE_HASH_LINK)); pagecache->hash_links_used= 0; pagecache->free_hash_list= NULL; pagecache->blocks_used= pagecache->blocks_changed= 0; @@ -866,6 +868,7 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, pagecache->age_threshold= (age_threshold ? blocks * age_threshold / 100 : blocks); + pagecache->changed_blocks_hash_size= changed_blocks_hash_size; pagecache->cnt_for_resize_op= 0; pagecache->resize_in_flush= 0; @@ -879,12 +882,6 @@ ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, pagecache->disk_blocks, (long) pagecache->block_root, pagecache->hash_entries, (long) pagecache->hash_root, pagecache->hash_links, (long) pagecache->hash_link_root)); - bzero((uchar*) pagecache->changed_blocks, - sizeof(pagecache->changed_blocks[0]) * - PAGECACHE_CHANGED_BLOCKS_HASH); - bzero((uchar*) pagecache->file_blocks, - sizeof(pagecache->file_blocks[0]) * - PAGECACHE_CHANGED_BLOCKS_HASH); pagecache->blocks= pagecache->disk_blocks > 0 ? pagecache->disk_blocks : 0; DBUG_RETURN((ulong) pagecache->disk_blocks); @@ -980,12 +977,11 @@ static int flush_all_key_blocks(PAGECACHE *pagecache) #if NOT_USED /* keep disabled until code is fixed see above !! 
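init_pagecache() above now folds the two changed/file block hash arrays into the same memory budget and allocates block_root, hash_root, hash_link_root, changed_blocks and file_blocks with one zero-filled my_multi_malloc() call, replacing the old single malloc plus manual pointer arithmetic and bzero() calls. A standalone sketch of the "one allocation, several arrays" idea; multi_alloc2() is an invented helper, not the mysys API, and the real code additionally ALIGN_SIZEs each chunk.

    #include <cstdlib>
    #include <cstring>

    // Allocate one zero-filled block large enough for two arrays and hand out
    // pointers into it; freeing the first pointer frees everything.
    static void* multi_alloc2(void** p1, std::size_t n1, void** p2, std::size_t n2)
    {
        char* base = static_cast<char*>(std::calloc(1, n1 + n2));
        if (base == NULL) return NULL;
        *p1 = base;
        *p2 = base + n1;    // n1 chosen here so the second array stays aligned
        return base;
    }

    int main()
    {
        void* blocks;
        void* changed_hash;
        if (!multi_alloc2(&blocks, 4096, &changed_hash, 512 * sizeof(void*)))
            return 1;
        // ... use the two arrays ...
        std::free(blocks);   // a single free releases both
        return 0;
    }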
*/ ulong resize_pagecache(PAGECACHE *pagecache, size_t use_mem, uint division_limit, - uint age_threshold) + uint age_threshold, uint changed_blocks_hash_size) { ulong blocks; struct st_my_thread_var *thread; WQUEUE *wqueue; - DBUG_ENTER("resize_pagecache"); if (!pagecache->inited) @@ -1028,7 +1024,7 @@ ulong resize_pagecache(PAGECACHE *pagecache, end_pagecache(pagecache, 0); /* Don't free mutex */ /* The following will work even if use_mem is 0 */ blocks= init_pagecache(pagecache, pagecache->block_size, use_mem, - division_limit, age_threshold, + division_limit, age_threshold, changed_blocks_hash_size, pagecache->readwrite_flags); finish: @@ -1237,7 +1233,7 @@ static void link_to_file_list(PAGECACHE *pagecache, { if (unlink_flag) unlink_changed(block); - link_changed(block, &pagecache->file_blocks[FILE_HASH(*file)]); + link_changed(block, &pagecache->file_blocks[FILE_HASH(*file, pagecache)]); if (block->status & PCBLOCK_CHANGED) { block->status&= ~(PCBLOCK_CHANGED | PCBLOCK_DEL_WRITE); @@ -1258,7 +1254,7 @@ static inline void link_to_changed_list(PAGECACHE *pagecache, { unlink_changed(block); link_changed(block, - &pagecache->changed_blocks[FILE_HASH(block->hash_link->file)]); + &pagecache->changed_blocks[FILE_HASH(block->hash_link->file, pagecache)]); block->status|=PCBLOCK_CHANGED; pagecache->blocks_changed++; pagecache->global_blocks_changed++; @@ -4578,7 +4574,7 @@ static int flush_pagecache_blocks_int(PAGECACHE *pagecache, Count how many key blocks we have to cache to be able to flush all dirty pages with minimum seek moves. */ - for (block= pagecache->changed_blocks[FILE_HASH(*file)] ; + for (block= pagecache->changed_blocks[FILE_HASH(*file, pagecache)] ; block; block= block->next_changed) { @@ -4603,7 +4599,7 @@ static int flush_pagecache_blocks_int(PAGECACHE *pagecache, /* Retrieve the blocks and write them to a buffer to be flushed */ restart: end= (pos= cache)+count; - for (block= pagecache->changed_blocks[FILE_HASH(*file)] ; + for (block= pagecache->changed_blocks[FILE_HASH(*file, pagecache)] ; block; block= next) { @@ -4729,7 +4725,7 @@ restart: #if defined(PAGECACHE_DEBUG) cnt=0; #endif - for (block= pagecache->file_blocks[FILE_HASH(*file)] ; + for (block= pagecache->file_blocks[FILE_HASH(*file, pagecache)] ; block; block= next) { @@ -4918,7 +4914,7 @@ my_bool pagecache_collect_changed_blocks_with_lsn(PAGECACHE *pagecache, } /* Count how many dirty pages are interesting */ - for (file_hash= 0; file_hash < PAGECACHE_CHANGED_BLOCKS_HASH; file_hash++) + for (file_hash= 0; file_hash < pagecache->changed_blocks_hash_size; file_hash++) { PAGECACHE_BLOCK_LINK *block; for (block= pagecache->changed_blocks[file_hash] ; @@ -4957,7 +4953,7 @@ my_bool pagecache_collect_changed_blocks_with_lsn(PAGECACHE *pagecache, DBUG_PRINT("info", ("found %lu dirty pages", stored_list_size)); if (stored_list_size == 0) goto end; - for (file_hash= 0; file_hash < PAGECACHE_CHANGED_BLOCKS_HASH; file_hash++) + for (file_hash= 0; file_hash < pagecache->changed_blocks_hash_size; file_hash++) { PAGECACHE_BLOCK_LINK *block; for (block= pagecache->changed_blocks[file_hash] ; @@ -5008,7 +5004,7 @@ void pagecache_file_no_dirty_page(PAGECACHE *pagecache, PAGECACHE_FILE *file) { File fd= file->file; PAGECACHE_BLOCK_LINK *block; - for (block= pagecache->changed_blocks[FILE_HASH(*file)]; + for (block= pagecache->changed_blocks[FILE_HASH(*file, pagecache)]; block != NULL; block= block->next_changed) if (block->hash_link->file.file == fd) diff --git a/storage/maria/ma_pagecache.h b/storage/maria/ma_pagecache.h index 
8460eaddc57..f7ddb2fe716 100644 --- a/storage/maria/ma_pagecache.h +++ b/storage/maria/ma_pagecache.h @@ -104,7 +104,9 @@ typedef struct st_pagecache_hash_link PAGECACHE_HASH_LINK; #include <wqueue.h> -#define PAGECACHE_CHANGED_BLOCKS_HASH 128 /* must be power of 2 */ +/* Default size of hash for changed files */ +#define MIN_PAGECACHE_CHANGED_BLOCKS_HASH_SIZE 512 + #define PAGECACHE_PRIORITY_LOW 0 #define PAGECACHE_PRIORITY_DEFAULT 3 #define PAGECACHE_PRIORITY_HIGH 6 @@ -121,6 +123,7 @@ typedef struct st_pagecache ulong age_threshold; /* age threshold for hot blocks */ ulonglong time; /* total number of block link operations */ ulong hash_entries; /* max number of entries in the hash table */ + ulong changed_blocks_hash_size; /* Number of hash buckets for file blocks */ long hash_links; /* max number of hash links */ long hash_links_used; /* number of hash links taken from free links pool */ long disk_blocks; /* max number of blocks in the cache */ @@ -145,9 +148,9 @@ typedef struct st_pagecache WQUEUE waiting_for_hash_link;/* waiting for a free hash link */ WQUEUE waiting_for_block; /* requests waiting for a free block */ /* hash for dirty file bl.*/ - PAGECACHE_BLOCK_LINK *changed_blocks[PAGECACHE_CHANGED_BLOCKS_HASH]; + PAGECACHE_BLOCK_LINK **changed_blocks; /* hash for other file bl.*/ - PAGECACHE_BLOCK_LINK *file_blocks[PAGECACHE_CHANGED_BLOCKS_HASH]; + PAGECACHE_BLOCK_LINK **file_blocks; /* The following variables are and variables used to hold parameters for @@ -195,10 +198,11 @@ extern PAGECACHE dflt_pagecache_var, *dflt_pagecache; extern ulong init_pagecache(PAGECACHE *pagecache, size_t use_mem, uint division_limit, uint age_threshold, - uint block_size, myf my_read_flags); + uint block_size, uint changed_blocks_hash_size, + myf my_read_flags); extern ulong resize_pagecache(PAGECACHE *pagecache, size_t use_mem, uint division_limit, - uint age_threshold); + uint age_threshold, uint changed_blocks_hash_size); extern void change_pagecache_param(PAGECACHE *pagecache, uint division_limit, uint age_threshold); diff --git a/storage/maria/ma_rt_test.c b/storage/maria/ma_rt_test.c index 29244bab6ce..9d8574212ca 100644 --- a/storage/maria/ma_rt_test.c +++ b/storage/maria/ma_rt_test.c @@ -100,11 +100,11 @@ int main(int argc, char *argv[]) /* Maria requires that we always have a page cache */ if (maria_init() || (init_pagecache(maria_pagecache, maria_block_size * 16, 0, 0, - maria_block_size, MY_WME) == 0) || + maria_block_size, 0, MY_WME) == 0) || ma_control_file_open(TRUE, TRUE) || (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, MY_WME) == 0) || + TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0) || translog_init(maria_data_root, TRANSLOG_FILE_SIZE, 0, 0, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, 0) || diff --git a/storage/maria/ma_state.c b/storage/maria/ma_state.c index f130da21d07..0c673ded04e 100644 --- a/storage/maria/ma_state.c +++ b/storage/maria/ma_state.c @@ -240,6 +240,7 @@ void _ma_reset_state(MARIA_HA *info) MARIA_STATE_HISTORY *history= share->state_history; DBUG_ENTER("_ma_reset_state"); + /* Always true if share->now_transactional is set */ if (history) { MARIA_STATE_HISTORY *next; @@ -769,7 +770,7 @@ void _ma_copy_nontrans_state_information(MARIA_HA *info) /** Reset history - This is only called during repair when we the only one using the table. + This is only called during repair when we are the only one using the table. 
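With the fixed PAGECACHE_CHANGED_BLOCKS_HASH replaced by a per-cache changed_blocks_hash_size, FILE_HASH(f, cache) computes the bucket by masking the file descriptor with (size - 1), which is only correct when the size is a power of two; that is why init_pagecache() rounds the configured value up with my_round_up_to_next_power() and enforces the MIN_PAGECACHE_CHANGED_BLOCKS_HASH_SIZE floor. A small sketch of the round-up plus masking; round_up_pow2() and file_hash() are illustrative names.

    #include <cassert>
    #include <cstdio>

    // Round n up to the next power of two (n >= 1), as the patch does for the
    // changed-blocks hash size.
    static unsigned round_up_pow2(unsigned n)
    {
        unsigned p = 1;
        while (p < n)
            p <<= 1;
        return p;
    }

    // Bucket selection in the spirit of FILE_HASH(f, cache): mask the fd.
    static unsigned file_hash(int fd, unsigned hash_size)
    {
        assert((hash_size & (hash_size - 1)) == 0);   // must be a power of two
        return static_cast<unsigned>(fd) & (hash_size - 1);
    }

    int main()
    {
        unsigned size = round_up_pow2(600);           // e.g. a configured 600 becomes 1024
        std::printf("hash size %u, fd 37 -> bucket %u\n", size, file_hash(37, size));
        return 0;
    }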
*/ void _ma_reset_history(MARIA_SHARE *share) diff --git a/storage/maria/ma_test1.c b/storage/maria/ma_test1.c index 595b87ef4d0..901a7ef06e3 100644 --- a/storage/maria/ma_test1.c +++ b/storage/maria/ma_test1.c @@ -79,11 +79,11 @@ int main(int argc,char *argv[]) /* Maria requires that we always have a page cache */ if (maria_init() || (init_pagecache(maria_pagecache, maria_block_size * 16, 0, 0, - maria_block_size, MY_WME) == 0) || + maria_block_size, 0, MY_WME) == 0) || ma_control_file_open(TRUE, TRUE) || (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, MY_WME) == 0) || + TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0) || translog_init(maria_data_root, TRANSLOG_FILE_SIZE, 0, 0, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, 0) || diff --git a/storage/maria/ma_test2.c b/storage/maria/ma_test2.c index 52c0839cff6..709a190c1a7 100644 --- a/storage/maria/ma_test2.c +++ b/storage/maria/ma_test2.c @@ -91,11 +91,11 @@ int main(int argc, char *argv[]) /* Maria requires that we always have a page cache */ if (maria_init() || (init_pagecache(maria_pagecache, pagecache_size, 0, 0, - maria_block_size, MY_WME) == 0) || + maria_block_size, 0, MY_WME) == 0) || ma_control_file_open(TRUE, TRUE) || (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, MY_WME) == 0) || + TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0) || translog_init(maria_data_root, TRANSLOG_FILE_SIZE, 0, 0, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, 0) || diff --git a/storage/maria/ma_test3.c b/storage/maria/ma_test3.c index 64b22e45c1b..5d57bef8f9e 100644 --- a/storage/maria/ma_test3.c +++ b/storage/maria/ma_test3.c @@ -178,7 +178,7 @@ void start_test(int id) exit(1); } if (pagecacheing && rnd(2) == 0) - init_pagecache(maria_pagecache, 65536L, 0, 0, MARIA_KEY_BLOCK_LENGTH, + init_pagecache(maria_pagecache, 65536L, 0, 0, MARIA_KEY_BLOCK_LENGTH, 0, MY_WME); printf("Process %d, pid: %ld\n",id,(long) getpid()); fflush(stdout); diff --git a/storage/maria/maria_chk.c b/storage/maria/maria_chk.c index 9235d5ee96a..2ea647ea1f5 100644 --- a/storage/maria/maria_chk.c +++ b/storage/maria/maria_chk.c @@ -140,7 +140,7 @@ int main(int argc, char **argv) { if (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, MY_WME) == 0 || + TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0 || translog_init(opt_log_dir, TRANSLOG_FILE_SIZE, 0, 0, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, 0)) @@ -1178,7 +1178,7 @@ static int maria_chk(HA_CHECK *param, char *filename) maria_lock_database(info, F_EXTRA_LCK); datafile= info->dfile.file; if (init_pagecache(maria_pagecache, (size_t) param->use_buffers, 0, 0, - maria_block_size, MY_WME) == 0) + maria_block_size, 0, MY_WME) == 0) { _ma_check_print_error(param, "Can't initialize page cache with %lu memory", (ulong) param->use_buffers); diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h index ab4ade30c44..b878aaa0f7d 100644 --- a/storage/maria/maria_def.h +++ b/storage/maria/maria_def.h @@ -364,6 +364,7 @@ typedef struct st_maria_share LEX_STRING index_file_name; LEX_STRING open_file_name; /* parameter to open filename */ uchar *file_map; /* mem-map of file if possible */ + LIST *open_list; /* Tables open with this share */ PAGECACHE *pagecache; /* ref to the current key cache */ MARIA_DECODE_TREE *decode_trees; /* @@ -629,6 +630,7 @@ struct st_maria_handler PAGECACHE_FILE dfile; /* The datafile */ IO_CACHE rec_cache; /* When cacheing records */ LIST open_list; + LIST share_list; MY_BITMAP changed_fields; ulong row_base_length; 
/* Length of row header */ uint row_flag; /* Flag to store in row header */ diff --git a/storage/maria/maria_ftdump.c b/storage/maria/maria_ftdump.c index 68e13a8ddc4..4e34678c8f8 100644 --- a/storage/maria/maria_ftdump.c +++ b/storage/maria/maria_ftdump.c @@ -85,7 +85,7 @@ int main(int argc,char *argv[]) } init_pagecache(maria_pagecache, PAGE_BUFFER_INIT, 0, 0, - MARIA_KEY_BLOCK_LENGTH, MY_WME); + MARIA_KEY_BLOCK_LENGTH, 0, MY_WME); if (!(info=maria_open(argv[0], O_RDONLY, HA_OPEN_ABORT_IF_LOCKED|HA_OPEN_FROM_SQL_LAYER))) diff --git a/storage/maria/maria_pack.c b/storage/maria/maria_pack.c index 26d57ade59a..7eca9e14e93 100644 --- a/storage/maria/maria_pack.c +++ b/storage/maria/maria_pack.c @@ -511,7 +511,7 @@ static int compress(PACK_MRG_INFO *mrg,char *result_table) fn_format(org_name,isam_file->s->open_file_name.str, "",MARIA_NAME_DEXT, 2+4+16); if (init_pagecache(maria_pagecache, MARIA_MIN_PAGE_CACHE_SIZE, 0, 0, - maria_block_size, MY_WME) == 0) + maria_block_size, 0, MY_WME) == 0) { fprintf(stderr, "Can't initialize page cache\n"); goto err; diff --git a/storage/maria/maria_read_log.c b/storage/maria/maria_read_log.c index f5b91f9628f..8fa6533bc46 100644 --- a/storage/maria/maria_read_log.c +++ b/storage/maria/maria_read_log.c @@ -70,7 +70,7 @@ int main(int argc, char **argv) goto err; } if (init_pagecache(maria_pagecache, opt_page_buffer_size, 0, 0, - maria_block_size, MY_WME) == 0) + maria_block_size, 0, MY_WME) == 0) { fprintf(stderr, "Got error in init_pagecache() (errno: %d)\n", errno); goto err; @@ -82,7 +82,7 @@ int main(int argc, char **argv) which is useless. TODO: start log handler in read-only mode. */ if (init_pagecache(maria_log_pagecache, opt_translog_buffer_size, - 0, 0, TRANSLOG_PAGE_SIZE, MY_WME) == 0 || + 0, 0, TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0 || translog_init(maria_data_root, TRANSLOG_FILE_SIZE, 0, 0, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, opt_display_only)) diff --git a/storage/maria/unittest/ma_pagecache_consist.c b/storage/maria/unittest/ma_pagecache_consist.c index 6a25a47591c..5f0e25b5bf4 100644 --- a/storage/maria/unittest/ma_pagecache_consist.c +++ b/storage/maria/unittest/ma_pagecache_consist.c @@ -431,7 +431,7 @@ int main(int argc __attribute__((unused)), #endif if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TEST_PAGE_SIZE, 0)) == 0) + TEST_PAGE_SIZE, 0, 0)) == 0) { diag("Got error: init_pagecache() (errno: %d)\n", errno); diff --git a/storage/maria/unittest/ma_pagecache_rwconsist.c b/storage/maria/unittest/ma_pagecache_rwconsist.c index 7afdbfd0ac1..1a268db6ad5 100644 --- a/storage/maria/unittest/ma_pagecache_rwconsist.c +++ b/storage/maria/unittest/ma_pagecache_rwconsist.c @@ -301,7 +301,7 @@ int main(int argc __attribute__((unused)), #endif if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TEST_PAGE_SIZE, 0)) == 0) + TEST_PAGE_SIZE, 0, 0)) == 0) { diag("Got error: init_pagecache() (errno: %d)\n", errno); diff --git a/storage/maria/unittest/ma_pagecache_rwconsist2.c b/storage/maria/unittest/ma_pagecache_rwconsist2.c index 917fddd0bcf..751c045a879 100644 --- a/storage/maria/unittest/ma_pagecache_rwconsist2.c +++ b/storage/maria/unittest/ma_pagecache_rwconsist2.c @@ -297,7 +297,7 @@ int main(int argc __attribute__((unused)), #endif if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TEST_PAGE_SIZE, 0)) == 0) + TEST_PAGE_SIZE, 0, 0)) == 0) { diag("Got error: init_pagecache() (errno: %d)\n", errno); diff --git a/storage/maria/unittest/ma_pagecache_single.c b/storage/maria/unittest/ma_pagecache_single.c index 
0031582589e..64f6782f20f 100644 --- a/storage/maria/unittest/ma_pagecache_single.c +++ b/storage/maria/unittest/ma_pagecache_single.c @@ -828,7 +828,7 @@ int main(int argc __attribute__((unused)), #endif if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TEST_PAGE_SIZE, MYF(MY_WME))) == 0) + TEST_PAGE_SIZE, 0, MYF(MY_WME))) == 0) { fprintf(stderr,"Got error: init_pagecache() (errno: %d)\n", errno); diff --git a/storage/maria/unittest/ma_test_loghandler-t.c b/storage/maria/unittest/ma_test_loghandler-t.c index abf2078ce8f..18650fa400d 100644 --- a/storage/maria/unittest/ma_test_loghandler-t.c +++ b/storage/maria/unittest/ma_test_loghandler-t.c @@ -147,7 +147,6 @@ int main(int argc __attribute__((unused)), char *argv[]) { uint32 i; uint32 rec_len; - uint pagen; uchar long_tr_id[6]; uchar lsn_buff[23]= { @@ -203,8 +202,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + TRANSLOG_PAGE_SIZE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c b/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c index 9ebd56c754c..cf86b59da45 100644 --- a/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c +++ b/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c @@ -35,7 +35,6 @@ static const char *default_dbug_option; int main(int argc __attribute__((unused)), char *argv[]) { - uint pagen; uchar long_tr_id[6]; PAGECACHE pagecache; LSN lsn, first_lsn, theor_lsn; @@ -72,8 +71,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c b/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c index 4ae9def8598..855135451c3 100644 --- a/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c +++ b/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c @@ -36,7 +36,6 @@ static const char *default_dbug_option; int main(int argc __attribute__((unused)), char *argv[]) { ulong i; - uint pagen; uchar long_tr_id[6]; PAGECACHE pagecache; LSN lsn, max_lsn, last_lsn= LSN_IMPOSSIBLE; @@ -70,8 +69,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c index c8e63cb26ab..63d1f1c6977 100644 --- a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c +++ b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c @@ -226,7 +226,6 @@ int main(int argc __attribute__((unused)), char *argv[]) { uint32 i; uint32 rec_len; - uint pagen; uchar long_tr_id[6]; uchar lsn_buff[23]= { @@ -284,8 +283,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, 
"Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + TRANSLOG_PAGE_SIZE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); @@ -447,8 +446,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "pass2: Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + TRANSLOG_PAGE_SIZE, 0, 0) == 0) { fprintf(stderr, "pass2: Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_multithread-t.c b/storage/maria/unittest/ma_test_loghandler_multithread-t.c index 18fbaeace5a..535f363048b 100644 --- a/storage/maria/unittest/ma_test_loghandler_multithread-t.c +++ b/storage/maria/unittest/ma_test_loghandler_multithread-t.c @@ -261,7 +261,6 @@ int main(int argc __attribute__((unused)), char **argv __attribute__ ((unused))) { uint32 i; - uint pagen; PAGECACHE pagecache; LSN first_lsn; TRANSLOG_HEADER_BUFFER rec; @@ -341,8 +340,8 @@ int main(int argc __attribute__((unused)), fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - TRANSLOG_PAGE_SIZE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + TRANSLOG_PAGE_SIZE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_noflush-t.c b/storage/maria/unittest/ma_test_loghandler_noflush-t.c index c8c0f7d1873..8d0af947574 100644 --- a/storage/maria/unittest/ma_test_loghandler_noflush-t.c +++ b/storage/maria/unittest/ma_test_loghandler_noflush-t.c @@ -34,7 +34,6 @@ static const char *default_dbug_option; int main(int argc __attribute__((unused)), char *argv[]) { - uint pagen; int rc= 1; uchar long_tr_id[6]; PAGECACHE pagecache; @@ -71,8 +70,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_nologs-t.c b/storage/maria/unittest/ma_test_loghandler_nologs-t.c index 24c93e428e1..5d6db7355c2 100644 --- a/storage/maria/unittest/ma_test_loghandler_nologs-t.c +++ b/storage/maria/unittest/ma_test_loghandler_nologs-t.c @@ -36,7 +36,6 @@ static const char *default_dbug_option; int main(int argc __attribute__((unused)), char *argv[]) { ulong i; - uint pagen; uchar long_tr_id[6]; PAGECACHE pagecache; LSN lsn; @@ -72,8 +71,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); @@ -145,8 +144,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if 
(init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_pagecache-t.c b/storage/maria/unittest/ma_test_loghandler_pagecache-t.c index a939cef71a7..e634506628a 100644 --- a/storage/maria/unittest/ma_test_loghandler_pagecache-t.c +++ b/storage/maria/unittest/ma_test_loghandler_pagecache-t.c @@ -64,7 +64,6 @@ dummy_fail_callback(uchar* data_ptr __attribute__((unused))) int main(int argc __attribute__((unused)), char *argv[]) { - uint pagen; uchar long_tr_id[6]; PAGECACHE pagecache; LSN lsn; @@ -99,8 +98,8 @@ int main(int argc __attribute__((unused)), char *argv[]) fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); } - if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + if (init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, + PCACHE_PAGE, 0, 0) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_purge-t.c b/storage/maria/unittest/ma_test_loghandler_purge-t.c index 6ae0e7830ae..a13645f1bb8 100644 --- a/storage/maria/unittest/ma_test_loghandler_purge-t.c +++ b/storage/maria/unittest/ma_test_loghandler_purge-t.c @@ -73,7 +73,7 @@ int main(int argc __attribute__((unused)), char *argv[]) exit(1); } if ((pagen= init_pagecache(&pagecache, PCACHE_SIZE, 0, 0, - PCACHE_PAGE, 0)) == 0) + PCACHE_PAGE, 0, 0)) == 0) { fprintf(stderr, "Got error: init_pagecache() (errno: %d)\n", errno); exit(1); diff --git a/storage/myisam/ft_parser.c b/storage/myisam/ft_parser.c index 0e89d7d1b3a..5612b4bec14 100644 --- a/storage/myisam/ft_parser.c +++ b/storage/myisam/ft_parser.c @@ -329,7 +329,7 @@ MYSQL_FTPARSER_PARAM* ftparser_alloc_param(MI_INFO *info) { if (!info->ftparser_param) { - /* + /* . info->ftparser_param can not be zero after the initialization, because it always includes built-in fulltext parser. And built-in parser can be called even if the table has no fulltext indexes and diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index d1be84d3792..439aa85b09d 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -822,7 +822,15 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) table->key_info[i].block_size= file->s->keyinfo[i].block_length; } my_errno= 0; + + /* Count statistics of usage for newly open normal files */ + if (file->s->reopen == 1 && ! 
(test_if_locked & HA_OPEN_TMP_TABLE)) + { + if (file->s->delay_key_write) + feature_files_opened_with_delayed_keys++; + } goto end; + err: this->close(); end: @@ -1079,7 +1087,6 @@ int ha_myisam::repair(THD *thd, HA_CHECK ¶m, bool do_optimize) param.db_name= table->s->db.str; param.table_name= table->alias.c_ptr(); - param.tmpfile_createflag= O_RDWR | O_TRUNC | O_EXCL; param.using_global_keycache = 1; param.thd= thd; param.tmpdir= &mysql_tmpdir_list; diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index 3a2bdb2e899..b79d6c891f1 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -1536,7 +1536,7 @@ int mi_repair(HA_CHECK *param, register MI_INFO *info, if (!param->using_global_keycache) (void) init_key_cache(dflt_key_cache, param->key_cache_block_size, - (size_t) param->use_buffers, 0, 0, 0); + (size_t) param->use_buffers, 0, 0, 0, 0); if (init_io_cache(¶m->read_cache,info->dfile, (uint) param->read_buffer_length, diff --git a/storage/myisam/mi_test1.c b/storage/myisam/mi_test1.c index 87cea2e5566..d90a8549e1e 100644 --- a/storage/myisam/mi_test1.c +++ b/storage/myisam/mi_test1.c @@ -51,7 +51,7 @@ int main(int argc,char *argv[]) my_init(); if (key_cacheing) init_key_cache(dflt_key_cache,KEY_CACHE_BLOCK_SIZE,IO_SIZE*16,0,0, - DEFAULT_KEY_CACHE_PARTITIONS); + 0, DEFAULT_KEY_CACHE_PARTITIONS); get_options(argc,argv); exit(run_test("test1")); diff --git a/storage/myisam/mi_test2.c b/storage/myisam/mi_test2.c index e53c68874b2..be58b3c54d0 100644 --- a/storage/myisam/mi_test2.c +++ b/storage/myisam/mi_test2.c @@ -217,7 +217,7 @@ int main(int argc, char *argv[]) printf("- Writing key:s\n"); if (key_cacheing) init_key_cache(dflt_key_cache,key_cache_block_size,key_cache_size,0,0, - DEFAULT_KEY_CACHE_PARTITIONS); + 0, DEFAULT_KEY_CACHE_PARTITIONS); if (do_locking) mi_lock_database(file,F_WRLCK); if (write_cacheing) @@ -278,8 +278,9 @@ int main(int argc, char *argv[]) } } if (key_cacheing) - resize_key_cache(dflt_key_cache,key_cache_block_size,key_cache_size*2,0,0); - + resize_key_cache(dflt_key_cache,key_cache_block_size,key_cache_size*2, + 0, 0, 0); + if (!silent) printf("- Delete\n"); for (i=0 ; i<recant/10 ; i++) diff --git a/storage/myisam/mi_test3.c b/storage/myisam/mi_test3.c index 885118d4eec..e05398f7c4a 100644 --- a/storage/myisam/mi_test3.c +++ b/storage/myisam/mi_test3.c @@ -178,7 +178,7 @@ void start_test(int id) } if (key_cacheing && rnd(2) == 0) init_key_cache(dflt_key_cache, KEY_CACHE_BLOCK_SIZE, 65536L, 0, 0, - DEFAULT_KEY_CACHE_PARTITIONS); + 0, DEFAULT_KEY_CACHE_PARTITIONS); printf("Process %d, pid: %ld\n", id, (long) getpid()); fflush(stdout); diff --git a/storage/myisam/mi_test_all.sh b/storage/myisam/mi_test_all.sh index 12c28d7d132..e6327fd8247 100755 --- a/storage/myisam/mi_test_all.sh +++ b/storage/myisam/mi_test_all.sh @@ -156,9 +156,9 @@ echo "mi_test2$suffix $silent -L -K -R1 -m2000 ; Should give error 135" ./myisamchk$suffix -sm test2 ./mi_test2$suffix $silent -L -K -W -P -m50 -l -./myisamlog$suffix +./myisamlog$suffix -P ./mi_test2$suffix $silent -L -K -W -P -m50 -l -b100 -./myisamlog$suffix +./myisamlog$suffix -P time ./mi_test2$suffix $silent time ./mi_test2$suffix $silent -K -B time ./mi_test2$suffix $silent -L -B diff --git a/storage/myisam/myisam_ftdump.c b/storage/myisam/myisam_ftdump.c index e1ea9f2de37..55ee3795f9b 100644 --- a/storage/myisam/myisam_ftdump.c +++ b/storage/myisam/myisam_ftdump.c @@ -84,7 +84,7 @@ int main(int argc,char *argv[]) usage(); } - init_key_cache(dflt_key_cache, MI_KEY_BLOCK_LENGTH, 
KEY_BUFFER_INIT, 0, 0, 0); + init_key_cache(dflt_key_cache, MI_KEY_BLOCK_LENGTH, KEY_BUFFER_INIT, 0, 0, 0, 0); if (!(info=mi_open(argv[0], O_RDONLY, HA_OPEN_ABORT_IF_LOCKED|HA_OPEN_FROM_SQL_LAYER))) diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c index 0cd01398cbc..7835ab83531 100644 --- a/storage/myisam/myisamchk.c +++ b/storage/myisam/myisamchk.c @@ -1115,7 +1115,7 @@ static int myisamchk(HA_CHECK *param, char * filename) { if (param->testflag & (T_EXTEND | T_MEDIUM)) (void) init_key_cache(dflt_key_cache,opt_key_cache_block_size, - param->use_buffers, 0, 0, 0); + param->use_buffers, 0, 0, 0, 0); (void) init_io_cache(¶m->read_cache,datafile, (uint) param->read_buffer_length, READ_CACHE, @@ -1532,7 +1532,7 @@ static int mi_sort_records(HA_CHECK *param, DBUG_RETURN(0); /* Nothing to do */ init_key_cache(dflt_key_cache, opt_key_cache_block_size, - (size_t) param->use_buffers, 0, 0, 0); + (size_t) param->use_buffers, 0, 0, 0, 0); if (init_io_cache(&info->rec_cache,-1,(uint) param->write_buffer_length, WRITE_CACHE,share->pack.header_length,1, MYF(MY_WME | MY_WAIT_IF_FULL))) diff --git a/storage/myisam/myisamlog.c b/storage/myisam/myisamlog.c index 86e1978edaa..d549dd76037 100644 --- a/storage/myisam/myisamlog.c +++ b/storage/myisam/myisamlog.c @@ -333,7 +333,7 @@ static int examine_log(char * file_name, char **table_names) (tree_element_free) file_info_free, NULL, MYF(MY_TREE_WITH_DELETE)); (void) init_key_cache(dflt_key_cache,KEY_CACHE_BLOCK_SIZE,KEY_CACHE_SIZE, - 0, 0, 0); + 0, 0, 0, 0); files_open=0; access_time=0; while (access_time++ != number_of_commands && diff --git a/storage/perfschema/CMakeLists.txt b/storage/perfschema/CMakeLists.txt index b77cae6d018..ed530d82df0 100644 --- a/storage/perfschema/CMakeLists.txt +++ b/storage/perfschema/CMakeLists.txt @@ -22,9 +22,11 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR} ADD_DEFINITIONS(-DMYSQL_SERVER) # Gen_pfs_lex_token -ADD_EXECUTABLE(gen_pfs_lex_token gen_pfs_lex_token.cc) -# gen_pfs_lex_token itself depends on ${CMAKE_CURRENT_BINARY_DIR}/sql/sql_yacc.h -ADD_DEPENDENCIES(gen_pfs_lex_token GenServerSource) +IF(NOT CMAKE_CROSSCOMPILING) + ADD_EXECUTABLE(gen_pfs_lex_token gen_pfs_lex_token.cc) + # gen_pfs_lex_token itself depends on ${CMAKE_CURRENT_BINARY_DIR}/sql/sql_yacc.h + ADD_DEPENDENCIES(gen_pfs_lex_token GenServerSource) +ENDIF() ADD_CUSTOM_COMMAND( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/pfs_lex_token.h diff --git a/storage/perfschema/ha_perfschema.cc b/storage/perfschema/ha_perfschema.cc index 8d41ff1711d..2e1393ec591 100644 --- a/storage/perfschema/ha_perfschema.cc +++ b/storage/perfschema/ha_perfschema.cc @@ -225,7 +225,7 @@ maria_declare_plugin(perfschema) 0x0001, pfs_status_vars, NULL, - "0.1", + "5.6.20", MariaDB_PLUGIN_MATURITY_STABLE } maria_declare_plugin_end; diff --git a/storage/perfschema/table_events_statements.cc b/storage/perfschema/table_events_statements.cc index d520c712e5b..6e2a19fa6a1 100644 --- a/storage/perfschema/table_events_statements.cc +++ b/storage/perfschema/table_events_statements.cc @@ -41,7 +41,7 @@ table_events_statements_current::m_share= &table_events_statements_current::delete_all_rows, NULL, /* get_row_count */ 1000, /* records */ - sizeof(PFS_simple_index), /* ref length */ + sizeof(pos_events_statements_current), /* ref length */ &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_statements_current(" "THREAD_ID BIGINT unsigned not null," diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc index f31669e931c..8a46167c92a 100644 --- a/storage/sphinx/ha_sphinx.cc 
+++ b/storage/sphinx/ha_sphinx.cc @@ -1,5 +1,5 @@ // -// $Id: ha_sphinx.cc 4507 2014-01-22 15:24:34Z deogar $ +// $Id: ha_sphinx.cc 4761 2014-07-03 07:24:02Z deogar $ // // @@ -153,7 +153,7 @@ void sphUnalignedWrite ( void * pPtr, const T & tVal ) #define SPHINXSE_MAX_ALLOC (16*1024*1024) #define SPHINXSE_MAX_KEYWORDSTATS 4096 -#define SPHINXSE_VERSION "2.1.5-release" +#define SPHINXSE_VERSION "2.1.9-release" // FIXME? the following is cut-n-paste from sphinx.h and searchd.cpp // cut-n-paste is somewhat simpler that adding dependencies however.. @@ -2719,11 +2719,11 @@ const Item * ha_sphinx::cond_push ( const Item *cond ) break; // copy the query, and let know that we intercepted this condition - Item_string * pString = (Item_string *) args[1]; + String *pString= args[1]->val_str(NULL); pTls->m_bQuery = true; - strncpy ( pTls->m_sQuery, pString->str_value.c_ptr(), sizeof(pTls->m_sQuery) ); + strncpy ( pTls->m_sQuery, pString->c_ptr(), sizeof(pTls->m_sQuery) ); pTls->m_sQuery[sizeof(pTls->m_sQuery)-1] = '\0'; - pTls->m_pQueryCharset = pString->str_value.charset(); + pTls->m_pQueryCharset = pString->charset(); } else { @@ -3637,5 +3637,5 @@ maria_declare_plugin_end; #endif // >50100 // -// $Id: ha_sphinx.cc 4507 2014-01-22 15:24:34Z deogar $ +// $Id: ha_sphinx.cc 4761 2014-07-03 07:24:02Z deogar $ // diff --git a/storage/spider/CMakeLists.txt b/storage/spider/CMakeLists.txt index e8e272acf66..cdb2de45ee6 100644 --- a/storage/spider/CMakeLists.txt +++ b/storage/spider/CMakeLists.txt @@ -33,9 +33,7 @@ IF(EXISTS ${PROJECT_SOURCE_DIR}/storage/mysql_storage_engine.cmake) MYSQL_STORAGE_ENGINE(SPIDER) ELSE() - INCLUDE_DIRECTORIES( - ${CMAKE_SOURCE_DIR}/storage/spider/hs_client - ${ORACLE_INCLUDE_DIR}) + INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/spider/hs_client) INSTALL(FILES ${CMAKE_SOURCE_DIR}/storage/spider/scripts/install_spider.sql @@ -46,7 +44,11 @@ ELSE() ENDIF() IF(ORACLE_INCLUDE_DIR AND ORACLE_OCI_LIBRARY) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DHAVE_ORACLE_OCI -DLINUX -D_GNU_SOURCE -D_REENTRANT") - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DHAVE_ORACLE_OCI -DLINUX -D_GNU_SOURCE -D_REENTRANT") - TARGET_LINK_LIBRARIES (spider ${ORACLE_OCI_LIBRARY}) + SET(SPIDER_WITH_ORACLE_OCI OFF CACHE BOOL "Spider is compiled with Oracle OCI library.") + IF(SPIDER_WITH_ORACLE_OCI) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DHAVE_ORACLE_OCI -DLINUX -D_GNU_SOURCE -D_REENTRANT") + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DHAVE_ORACLE_OCI -DLINUX -D_GNU_SOURCE -D_REENTRANT") + INCLUDE_DIRECTORIES(${ORACLE_INCLUDE_DIR}) + TARGET_LINK_LIBRARIES (spider ${ORACLE_OCI_LIBRARY}) + ENDIF() ENDIF() diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc index c5b9292e8f2..03fa6440d43 100644 --- a/storage/spider/ha_spider.cc +++ b/storage/spider/ha_spider.cc @@ -158,6 +158,7 @@ ha_spider::ha_spider( result_list.direct_aggregate = FALSE; result_list.snap_direct_aggregate = FALSE; #endif + result_list.direct_distinct = FALSE; result_list.casual_read = NULL; result_list.use_both_key = FALSE; DBUG_VOID_RETURN; @@ -264,6 +265,7 @@ ha_spider::ha_spider( result_list.direct_aggregate = FALSE; result_list.snap_direct_aggregate = FALSE; #endif + result_list.direct_distinct = FALSE; result_list.casual_read = NULL; result_list.use_both_key = FALSE; ref_length = sizeof(SPIDER_POSITION); @@ -1585,6 +1587,7 @@ int ha_spider::reset() result_list.direct_aggregate = FALSE; result_list.snap_direct_aggregate = FALSE; #endif + result_list.direct_distinct = FALSE; store_error_num = 0; #ifdef WITH_PARTITION_STORAGE_ENGINE 
if ( @@ -7765,9 +7768,6 @@ void ha_spider::ft_end() { DBUG_ENTER("ha_spider::ft_end"); DBUG_PRINT("info",("spider this=%p", this)); - ft_handler = NULL; - ft_current = NULL; - ft_count = 0; if (ft_init_without_index_init) { if (ft_init_idx == MAX_KEY) @@ -8183,15 +8183,13 @@ int ha_spider::info( auto_inc_temporary = FALSE; #endif sql_command = thd_sql_command(thd); - if ( /* + if ( sql_command == SQLCOM_DROP_TABLE || sql_command == SQLCOM_ALTER_TABLE || sql_command == SQLCOM_SHOW_CREATE -*/ - sql_command == SQLCOM_DROP_TABLE || - sql_command == SQLCOM_ALTER_TABLE ) { +*/ if (flag & HA_STATUS_AUTO) { if (share->lgtm_tblhnd_share->auto_increment_value) @@ -8204,8 +8202,14 @@ int ha_spider::info( #endif } } - DBUG_RETURN(0); + if ( + sql_command == SQLCOM_DROP_TABLE || + sql_command == SQLCOM_ALTER_TABLE + ) + DBUG_RETURN(0); +/* } +*/ if (flag & (HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE | HA_STATUS_AUTO)) @@ -8240,6 +8244,15 @@ int ha_spider::info( spider_param_table_init_error_interval()) { pthread_mutex_unlock(&share->sts_mutex); + if (sql_command == SQLCOM_SHOW_CREATE) + { + if (thd->is_error()) + { + DBUG_PRINT("info", ("spider clear_error")); + thd->clear_error(); + } + DBUG_RETURN(0); + } if (spider_init_error_table->init_error_with_message) my_message(spider_init_error_table->init_error, spider_init_error_table->init_error_msg, MYF(0)); @@ -8304,6 +8317,15 @@ int ha_spider::info( share->init_error = TRUE; share->init = TRUE; } + if (sql_command == SQLCOM_SHOW_CREATE) + { + if (thd->is_error()) + { + DBUG_PRINT("info", ("spider clear_error")); + thd->clear_error(); + } + DBUG_RETURN(0); + } DBUG_RETURN(check_error_mode(error_num)); } if ((error_num = spider_get_sts(share, search_link_idx, tmp_time, @@ -8356,6 +8378,15 @@ int ha_spider::info( share->init_error = TRUE; share->init = TRUE; } + if (sql_command == SQLCOM_SHOW_CREATE) + { + if (thd->is_error()) + { + DBUG_PRINT("info", ("spider clear_error")); + thd->clear_error(); + } + DBUG_RETURN(0); + } DBUG_RETURN(check_error_mode(error_num)); } } @@ -8376,6 +8407,15 @@ int ha_spider::info( if ((error_num = spider_create_sts_thread(share))) { pthread_mutex_unlock(&share->sts_mutex); + if (sql_command == SQLCOM_SHOW_CREATE) + { + if (thd->is_error()) + { + DBUG_PRINT("info", ("spider clear_error")); + thd->clear_error(); + } + DBUG_RETURN(0); + } DBUG_RETURN(error_num); } } else @@ -8389,7 +8429,18 @@ int ha_spider::info( if (flag & HA_STATUS_CONST) { if ((error_num = check_crd())) + { + if (sql_command == SQLCOM_SHOW_CREATE) + { + if (thd->is_error()) + { + DBUG_PRINT("info", ("spider clear_error")); + thd->clear_error(); + } + DBUG_RETURN(0); + } DBUG_RETURN(error_num); + } spider_db_set_cardinarity(this, table); } @@ -8413,6 +8464,9 @@ int ha_spider::info( } if (flag & HA_STATUS_AUTO) { +#ifdef HANDLER_HAS_CAN_USE_FOR_AUTO_INC_INIT + auto_inc_temporary = FALSE; +#endif #ifdef WITH_PARTITION_STORAGE_ENGINE if (share->partition_share && table->next_number_field) { @@ -8481,6 +8535,7 @@ ha_rows ha_spider::records_in_range( spider_db_handler *dbton_hdl; DBUG_ENTER("ha_spider::records_in_range"); DBUG_PRINT("info",("spider this=%p", this)); + DBUG_PRINT("info",("spider inx=%u", inx)); time_t tmp_time = (time_t) time((time_t*) 0); if (!share->crd_init) { @@ -8517,12 +8572,16 @@ ha_rows ha_spider::records_in_range( crd_mode = dbton_hdl->crd_mode_exchange(crd_mode); if (crd_mode == 1 || crd_mode == 2) { + DBUG_PRINT("info", ("spider static_key_cardinality[%u]=%lld", inx, + share->static_key_cardinality[inx])); DBUG_PRINT("info", 
("spider difftime=%f", difftime(tmp_time, share->crd_get_time))); DBUG_PRINT("info", ("spider crd_interval=%f", crd_interval)); - if (difftime(tmp_time, share->crd_get_time) >= crd_interval) - { + if ( + share->static_key_cardinality[inx] == -1 && + difftime(tmp_time, share->crd_get_time) >= crd_interval + ) { if ( crd_interval == 0 || !pthread_mutex_trylock(&share->crd_mutex) @@ -8654,44 +8713,89 @@ ha_rows ha_spider::records_in_range( else weight = 1; - for ( - key_part = key_info->key_part; - tgt_key_part_map > 1; - tgt_key_part_map >>= 1, - key_part++ - ) { - field = key_part->field; - if ((rate = - ((double) share->cardinality[field->field_index]) / weight) >= 1 + if (share->static_key_cardinality[inx] == -1) + { + for ( + key_part = key_info->key_part; + tgt_key_part_map > 1; + tgt_key_part_map >>= 1, + key_part++ ) { - if ((rows = rows / rate) < 2) + field = key_part->field; + DBUG_PRINT("info", + ("spider field_index=%u", + field->field_index)); + DBUG_PRINT("info", + ("spider cardinality=%lld", share->cardinality[field->field_index])); + if (share->cardinality[field->field_index] == -1) { - DBUG_PRINT("info",("spider rows2=%f then ret 2", rows)); - DBUG_RETURN((ha_rows) 2); + DBUG_PRINT("info", + ("spider uninitialized column cardinality")); + DBUG_RETURN(HA_POS_ERROR); + } + if ((rate = + ((double) share->cardinality[field->field_index]) / weight) >= 1 + ) { + if ((rows = rows / rate) < 2) + { + DBUG_PRINT("info",("spider rows2=%f then ret 2", rows)); + DBUG_RETURN((ha_rows) 2); + } } + if (crd_type == 1) + weight += spider_param_crd_weight(thd, share->crd_weight); + else if (crd_type == 2) + weight *= spider_param_crd_weight(thd, share->crd_weight); + } + field = key_part->field; + DBUG_PRINT("info", + ("spider field_index=%u", + field->field_index)); + DBUG_PRINT("info", + ("spider cardinality=%lld", share->cardinality[field->field_index])); + if (share->cardinality[field->field_index] == -1) + { + DBUG_PRINT("info", + ("spider uninitialized column cardinality")); + DBUG_RETURN(HA_POS_ERROR); } - if (crd_type == 1) - weight += spider_param_crd_weight(thd, share->crd_weight); - else if (crd_type == 2) - weight *= spider_param_crd_weight(thd, share->crd_weight); } - field = key_part->field; if ( start_key_part_map >= end_key_part_map && start_key->flag == HA_READ_KEY_EXACT ) { - if ((rate = - ((double) share->cardinality[field->field_index]) / weight) >= 1) + if (share->static_key_cardinality[inx] == -1) + { + if ((rate = + ((double) share->cardinality[field->field_index]) / weight) >= 1) + rows = rows / rate; + } else { + rate = ((double) share->static_key_cardinality[inx]); rows = rows / rate; + } } else if (start_key_part_map == end_key_part_map) { - if ((rate = - ((double) share->cardinality[field->field_index]) / weight / 4) >= 1) - rows = rows / rate; + if (share->static_key_cardinality[inx] == -1) + { + if ((rate = + ((double) share->cardinality[field->field_index]) / weight / 4) >= 1) + rows = rows / rate; + } else { + if ((rate = + ((double) share->static_key_cardinality[inx]) / 4) >= 1) + rows = rows / rate; + } } else { - if ((rate = - ((double) share->cardinality[field->field_index]) / weight / 16) >= 1) - rows = rows / rate; + if (share->static_key_cardinality[inx] == -1) + { + if ((rate = + ((double) share->cardinality[field->field_index]) / weight / 16) >= 1) + rows = rows / rate; + } else { + if ((rate = + ((double) share->static_key_cardinality[inx]) / 16) >= 1) + rows = rows / rate; + } } if (rows < 2) { @@ -10270,6 +10374,17 @@ ha_rows 
ha_spider::estimate_rows_upper_bound() DBUG_RETURN(HA_POS_ERROR); } +void ha_spider::print_error( + int error, + myf errflag +) { + DBUG_ENTER("ha_spider::print_error"); + DBUG_PRINT("info",("spider this=%p", this)); + if (!current_thd->is_error()) + handler::print_error(error, errflag); + DBUG_VOID_RETURN; +} + bool ha_spider::get_error_message( int error, String *buf @@ -10350,11 +10465,22 @@ int ha_spider::create( { goto error; } - if (form->s->keys > 0 && - !(tmp_share.key_hint = new spider_string[form->s->keys]) - ) { - error_num = HA_ERR_OUT_OF_MEM; - goto error; + if (form->s->keys > 0) + { + if (!(tmp_share.static_key_cardinality = (longlong *) + spider_bulk_malloc(spider_current_trx, 246, MYF(MY_WME), + &tmp_share.static_key_cardinality, + sizeof(*tmp_share.static_key_cardinality) * form->s->keys, + NullS)) + ) { + error_num = HA_ERR_OUT_OF_MEM; + goto error; + } + if (!(tmp_share.key_hint = new spider_string[form->s->keys])) + { + error_num = HA_ERR_OUT_OF_MEM; + goto error; + } } for (roop_count = 0; roop_count < form->s->keys; roop_count++) tmp_share.key_hint[roop_count].init_calc_mem(85); @@ -10460,6 +10586,8 @@ int ha_spider::create( pthread_mutex_unlock(&tmp_share.lgtm_tblhnd_share->auto_increment_mutex); } + if (tmp_share.static_key_cardinality) + spider_free(spider_current_trx, tmp_share.static_key_cardinality, MYF(0)); spider_free_share_alloc(&tmp_share); DBUG_RETURN(0); @@ -10469,6 +10597,8 @@ error: &open_tables_backup, need_lock); if (tmp_share.lgtm_tblhnd_share) spider_free_lgtm_tblhnd_share_alloc(tmp_share.lgtm_tblhnd_share, FALSE); + if (tmp_share.static_key_cardinality) + spider_free(spider_current_trx, tmp_share.static_key_cardinality, MYF(0)); spider_free_share_alloc(&tmp_share); error_alter_before_unlock: error_get_trx: @@ -10478,7 +10608,6 @@ error_get_trx: void ha_spider::update_create_info( HA_CREATE_INFO* create_info ) { - THD *thd = ha_thd(); DBUG_ENTER("ha_spider::update_create_info"); DBUG_PRINT("info",("spider this=%p", this)); if (!create_info->connect_string.str) @@ -10495,13 +10624,6 @@ void ha_spider::update_create_info( info(HA_STATUS_AUTO); create_info->auto_increment_value = stats.auto_increment_value; } - if ( - thd->is_error() && - thd_sql_command(thd) == SQLCOM_SHOW_CREATE - ) { - DBUG_PRINT("info", ("spider clear_error")); - thd->clear_error(); - } DBUG_VOID_RETURN; } @@ -11363,10 +11485,20 @@ void ha_spider::set_ft_discard_bitmap() } } } - item_next = ha_thd()->free_list; + THD *thd = ha_thd(); + Statement *stmt = thd->stmt_map.find(thd->id); + if (stmt && stmt->free_list) + { + DBUG_PRINT("info",("spider item from stmt")); + item_next = stmt->free_list; + } else { + DBUG_PRINT("info",("spider item from thd")); + item_next = thd->free_list; + } while ((item = item_next)) { DBUG_PRINT("info",("spider item=%p", item)); + DBUG_PRINT("info",("spider itemtype=%u", item->type())); item_next = item->next; if (item->type() != Item::FIELD_ITEM) continue; diff --git a/storage/spider/ha_spider.h b/storage/spider/ha_spider.h index 08918021e21..fc880831912 100644 --- a/storage/spider/ha_spider.h +++ b/storage/spider/ha_spider.h @@ -658,6 +658,10 @@ public: #endif const key_map *keys_to_use_for_scanning(); ha_rows estimate_rows_upper_bound(); + void print_error( + int error, + myf errflag + ); bool get_error_message( int error, String *buf diff --git a/storage/spider/spd_conn.cc b/storage/spider/spd_conn.cc index 3cdb6ef5f29..e19222c99f6 100644 --- a/storage/spider/spd_conn.cc +++ b/storage/spider/spd_conn.cc @@ -2505,14 +2505,21 @@ void 
*spider_bg_conn_action( ) { if (thd->is_error()) { - SPIDER_BG_DIRECT_SQL *bg_direct_sql = - (SPIDER_BG_DIRECT_SQL *) direct_sql->parent; - pthread_mutex_lock(direct_sql->bg_mutex); - bg_direct_sql->bg_error = spider_stmt_da_sql_errno(thd); - strmov((char *) bg_direct_sql->bg_error_msg, - spider_stmt_da_message(thd)); - pthread_mutex_unlock(direct_sql->bg_mutex); - is_error = TRUE; + if ( + direct_sql->error_rw_mode && + spider_db_conn_is_network_error(error_num) + ) { + thd->clear_error(); + } else { + SPIDER_BG_DIRECT_SQL *bg_direct_sql = + (SPIDER_BG_DIRECT_SQL *) direct_sql->parent; + pthread_mutex_lock(direct_sql->bg_mutex); + bg_direct_sql->bg_error = spider_stmt_da_sql_errno(thd); + strmov((char *) bg_direct_sql->bg_error_msg, + spider_stmt_da_message(thd)); + pthread_mutex_unlock(direct_sql->bg_mutex); + is_error = TRUE; + } } } if (direct_sql->modified_non_trans_table) diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index 7dd0249bddb..64e2dc59b65 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -26,6 +26,7 @@ #include "sql_analyse.h" #include "sql_base.h" #include "tztime.h" +#include "errmsg.h" #ifdef HANDLER_HAS_DIRECT_AGGREGATE #include "sql_select.h" #endif @@ -637,8 +638,11 @@ int spider_db_errorno( if (conn->server_lost) { *conn->need_mon = ER_SPIDER_REMOTE_SERVER_GONE_AWAY_NUM; - my_message(ER_SPIDER_REMOTE_SERVER_GONE_AWAY_NUM, - ER_SPIDER_REMOTE_SERVER_GONE_AWAY_STR, MYF(0)); + if (!current_thd->is_error()) + { + my_message(ER_SPIDER_REMOTE_SERVER_GONE_AWAY_NUM, + ER_SPIDER_REMOTE_SERVER_GONE_AWAY_STR, MYF(0)); + } if (!conn->mta_conn_mutex_unlock_later) { SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); @@ -3199,7 +3203,8 @@ void spider_db_free_one_result_for_start_next( result = (SPIDER_RESULT *) result_list->current; if ( !result->result && - !result->first_position + !result->first_position && + !result->tmp_tbl_use_position ) result_list->current = result->prev; } @@ -3263,6 +3268,35 @@ void spider_db_free_one_result( position[roop_count].row = NULL; } } + if (result_list->quick_mode == 3) + { + if (!result->first_pos_use_position) + { + spider_free(spider_current_trx, position, MYF(0)); + result->first_position = NULL; + } + if (result->result) + { + result->result->free_result(); + delete result->result; + result->result = NULL; + } + if (!result->tmp_tbl_use_position) + { + if (result->result_tmp_tbl) + { + if (result->result_tmp_tbl_inited) + { + result->result_tmp_tbl->file->ha_rnd_end(); + result->result_tmp_tbl_inited = 0; + } + spider_rm_sys_tmp_table_for_result(result->result_tmp_tbl_thd, + result->result_tmp_tbl, &result->result_tmp_tbl_prm); + result->result_tmp_tbl = NULL; + result->result_tmp_tbl_thd = NULL; + } + } + } } } DBUG_VOID_RETURN; @@ -3472,6 +3506,8 @@ int spider_db_free_result( result->record_num = 0; DBUG_PRINT("info",("spider result->finish_flg = FALSE")); result->finish_flg = FALSE; + result->first_pos_use_position = FALSE; + result->tmp_tbl_use_position = FALSE; result->use_position = FALSE; result = (SPIDER_RESULT*) result->next; } @@ -3872,8 +3908,10 @@ int spider_db_store_result( DBUG_PRINT("info", ("spider conn[%p]->quick_target=NULL", conn)); conn->quick_target = NULL; spider->quick_targets[link_idx] = NULL; - } else if (result_list->limit_num == roop_count) - { + } else if ( + result_list->quick_mode == 3 || + result_list->limit_num == roop_count + ) { current->result->free_result(); if (!current->result_tmp_tbl) { @@ -4176,6 +4214,7 @@ int spider_db_seek_next( 
spider_next_split_read_param(spider); if ( result_list->quick_mode == 0 || + result_list->quick_mode == 3 || !result_list->current->result ) { result_list->limit_num = @@ -4839,6 +4878,7 @@ void spider_db_create_position( tmp_pos->use_position = TRUE; tmp_pos->pos_mode = 0; pos->pos_mode = 0; + current->first_pos_use_position = TRUE; } else { TABLE *tmp_tbl = current->result_tmp_tbl; pos->row = NULL; @@ -4848,6 +4888,7 @@ void spider_db_create_position( DBUG_PRINT("info",("spider tmp_tbl->file->ref=%p", tmp_tbl->file->ref)); tmp_tbl->file->ref = (uchar *) &pos->tmp_tbl_pos; tmp_tbl->file->position(tmp_tbl->record[0]); + current->tmp_tbl_use_position = TRUE; } } current->use_position = TRUE; @@ -10256,3 +10297,20 @@ void spider_db_hs_request_buf_reset( DBUG_VOID_RETURN; } #endif + +bool spider_db_conn_is_network_error( + int error_num +) { + DBUG_ENTER("spider_db_conn_is_network_error"); + if ( + error_num == ER_SPIDER_REMOTE_SERVER_GONE_AWAY_NUM || + error_num == ER_CONNECT_TO_FOREIGN_DATA_SOURCE || + ( + error_num >= CR_MIN_ERROR && + error_num <= CR_MAX_ERROR + ) + ) { + DBUG_RETURN(TRUE); + } + DBUG_RETURN(FALSE); +} diff --git a/storage/spider/spd_db_conn.h b/storage/spider/spd_db_conn.h index 9b37202adab..3f77c96b99f 100644 --- a/storage/spider/spd_db_conn.h +++ b/storage/spider/spd_db_conn.h @@ -69,6 +69,8 @@ #define SPIDER_SQL_UPDATE_LEN (sizeof(SPIDER_SQL_UPDATE_STR) - 1) #define SPIDER_SQL_DELETE_STR "delete " #define SPIDER_SQL_DELETE_LEN (sizeof(SPIDER_SQL_DELETE_STR) - 1) +#define SPIDER_SQL_DISTINCT_STR "distinct " +#define SPIDER_SQL_DISTINCT_LEN (sizeof(SPIDER_SQL_DISTINCT_STR) - 1) #define SPIDER_SQL_HIGH_PRIORITY_STR "high_priority " #define SPIDER_SQL_HIGH_PRIORITY_LEN (sizeof(SPIDER_SQL_HIGH_PRIORITY_STR) - 1) #define SPIDER_SQL_LOW_PRIORITY_STR "low_priority " @@ -1049,3 +1051,7 @@ void spider_db_hs_request_buf_reset( SPIDER_CONN *conn ); #endif + +bool spider_db_conn_is_network_error( + int error_num +); diff --git a/storage/spider/spd_db_include.h b/storage/spider/spd_db_include.h index 7741ffcdb16..371257a955b 100644 --- a/storage/spider/spd_db_include.h +++ b/storage/spider/spd_db_include.h @@ -493,7 +493,6 @@ public: uint32 arg_length, uint32 step_alloc ); -#ifdef SPIDER_HAS_APPEND_FOR_SINGLE_QUOTE bool append_for_single_quote( const char *st, uint len @@ -504,7 +503,6 @@ public: bool append_for_single_quote( const char *st ); -#endif void print( String *print ); @@ -1566,6 +1564,8 @@ typedef struct st_spider_result longlong record_num; bool finish_flg; bool use_position; + bool first_pos_use_position; + bool tmp_tbl_use_position; uint field_count; /* for quick mode */ TABLE *result_tmp_tbl; TMP_TABLE_PARAM result_tmp_tbl_prm; @@ -1642,6 +1642,7 @@ typedef struct st_spider_result_list spider_bulk_upd_start bulk_update_start; bool check_direct_order_limit; bool direct_order_limit; + bool direct_distinct; #ifdef HANDLER_HAS_DIRECT_AGGREGATE bool direct_aggregate; bool snap_mrr_with_cnt; diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc index 76c89d4138a..81e2fc41a57 100644 --- a/storage/spider/spd_db_mysql.cc +++ b/storage/spider/spd_db_mysql.cc @@ -919,13 +919,13 @@ int spider_db_mysql_result::fetch_table_cardinality( Field *field; DBUG_ENTER("spider_db_mysql_result::fetch_table_cardinality"); DBUG_PRINT("info",("spider this=%p", this)); + memset((uchar *) cardinality_upd, 0, sizeof(uchar) * bitmap_size); if (!(mysql_row = mysql_fetch_row(db_result))) { DBUG_PRINT("info",("spider fetch row is null")); /* no index */ DBUG_RETURN(0); } - 
memset((uchar *) cardinality_upd, 0, sizeof(uchar) * bitmap_size); if (mode == 1) { uint num_fields = this->num_fields(); @@ -2096,6 +2096,7 @@ int spider_db_mysql::rollback( int error_num; DBUG_ENTER("spider_db_mysql::rollback"); DBUG_PRINT("info",("spider this=%p", this)); + conn->mta_conn_mutex_unlock_later = TRUE; if (spider_db_query( conn, SPIDER_SQL_ROLLBACK_STR, @@ -2104,7 +2105,6 @@ int spider_db_mysql::rollback( need_mon) ) { is_error = conn->thd->is_error(); - conn->mta_conn_mutex_unlock_later = TRUE; error_num = spider_db_errorno(conn); if ( error_num == ER_SPIDER_REMOTE_SERVER_GONE_AWAY_NUM && @@ -4814,6 +4814,21 @@ int spider_mysql_share::discover_table_structure( ) { DBUG_RETURN(error_num); } + if (!conn->disable_reconnect) + { + ha_spider tmp_spider; + int need_mon = 0; + uint tmp_conn_link_idx = 0; + tmp_spider.trx = trx; + tmp_spider.share = spider_share; + tmp_spider.need_mons = &need_mon; + tmp_spider.conn_link_idx = &tmp_conn_link_idx; + if ((error_num = spider_db_ping(&tmp_spider, conn, 0))) + { + DBUG_PRINT("info",("spider spider_db_ping error")); + continue; + } + } pthread_mutex_lock(&conn->mta_conn_mutex); SPIDER_SET_FILE_POS(&conn->mta_conn_mutex_file_pos); conn->need_mon = &need_mon; @@ -4879,6 +4894,11 @@ int spider_mysql_share::discover_table_structure( conn->mta_conn_mutex_unlock_later = FALSE; SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); pthread_mutex_unlock(&conn->mta_conn_mutex); + my_printf_error(ER_SPIDER_REMOTE_TABLE_NOT_FOUND_NUM, + ER_SPIDER_REMOTE_TABLE_NOT_FOUND_STR, MYF(0), + db_names_str[roop_count].ptr(), + table_names_str[roop_count].ptr()); + error_num = ER_SPIDER_REMOTE_TABLE_NOT_FOUND_NUM; continue; } res->free_result(); @@ -6238,6 +6258,12 @@ int spider_mysql_handler::append_select( if (str->reserve(SPIDER_SQL_SELECT_LEN)) DBUG_RETURN(HA_ERR_OUT_OF_MEM); str->q_append(SPIDER_SQL_SELECT_STR, SPIDER_SQL_SELECT_LEN); + if (result_list->direct_distinct) + { + if (str->reserve(SPIDER_SQL_DISTINCT_LEN)) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + str->q_append(SPIDER_SQL_DISTINCT_STR, SPIDER_SQL_DISTINCT_LEN); + } if (result_list->lock_type != F_WRLCK && spider->lock_mode < 1) { /* no lock */ @@ -10328,6 +10354,14 @@ int spider_mysql_handler::show_table_status( DBUG_RETURN(error_num); } } + if (share->static_records_for_status != -1) + { + share->records = (ha_rows) share->static_records_for_status; + } + if (share->static_mean_rec_length != -1) + { + share->mean_rec_length = (ulong) share->static_mean_rec_length; + } if (auto_increment_value > share->lgtm_tblhnd_share->auto_increment_value) { share->lgtm_tblhnd_share->auto_increment_value = auto_increment_value; @@ -10462,8 +10496,8 @@ int spider_mysql_handler::show_index( if (!spider_bit_is_set(share->cardinality_upd, roop_count)) { DBUG_PRINT("info", - ("spider init column cardinality id=%d", roop_count)); - *tmp_cardinality = 1; + ("spider uninitialized column cardinality id=%d", roop_count)); + *tmp_cardinality = -1; } } if (res) @@ -10596,8 +10630,8 @@ int spider_mysql_handler::show_index( if (!spider_bit_is_set(share->cardinality_upd, roop_count)) { DBUG_PRINT("info", - ("spider init column cardinality id=%d", roop_count)); - *tmp_cardinality = 1; + ("spider uninitialized column cardinality id=%d", roop_count)); + *tmp_cardinality = -1; } } if (res) diff --git a/storage/spider/spd_db_oracle.cc b/storage/spider/spd_db_oracle.cc index 870bd849265..0d8a218c5f6 100644 --- a/storage/spider/spd_db_oracle.cc +++ b/storage/spider/spd_db_oracle.cc @@ -5935,6 +5935,12 @@ int 
spider_oracle_handler::append_select( if (str->reserve(SPIDER_SQL_SELECT_LEN)) DBUG_RETURN(HA_ERR_OUT_OF_MEM); str->q_append(SPIDER_SQL_SELECT_STR, SPIDER_SQL_SELECT_LEN); + if (spider->result_list.direct_distinct) + { + if (str->reserve(SPIDER_SQL_DISTINCT_LEN)) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + str->q_append(SPIDER_SQL_DISTINCT_STR, SPIDER_SQL_DISTINCT_LEN); + } } DBUG_RETURN(0); } diff --git a/storage/spider/spd_direct_sql.cc b/storage/spider/spd_direct_sql.cc index 4f756aab784..27041790d63 100644 --- a/storage/spider/spd_direct_sql.cc +++ b/storage/spider/spd_direct_sql.cc @@ -1045,6 +1045,7 @@ int spider_udf_parse_direct_sql_param( #else direct_sql->use_real_table = -1; #endif + direct_sql->error_rw_mode = -1; for (roop_count = 0; roop_count < direct_sql->table_count; roop_count++) direct_sql->iop[roop_count] = -1; @@ -1123,9 +1124,10 @@ int spider_udf_parse_direct_sql_param( MYF(0), tmp_ptr); goto error; case 4: + SPIDER_PARAM_INT_WITH_MAX("erwm", error_rw_mode, 0, 1); SPIDER_PARAM_STR("host", tgt_host); - SPIDER_PARAM_STR("user", tgt_username); SPIDER_PARAM_INT_WITH_MAX("port", tgt_port, 0, 65535); + SPIDER_PARAM_STR("user", tgt_username); error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, MYF(0), tmp_ptr); @@ -1178,6 +1180,7 @@ int spider_udf_parse_direct_sql_param( goto error; case 13: SPIDER_PARAM_STR("default_group", tgt_default_group); + SPIDER_PARAM_INT_WITH_MAX("error_rw_mode", error_rw_mode, 0, 1); error_num = ER_SPIDER_INVALID_UDF_PARAM_NUM; my_printf_error(error_num, ER_SPIDER_INVALID_UDF_PARAM_STR, MYF(0), tmp_ptr); @@ -1400,6 +1403,8 @@ int spider_udf_set_direct_sql_param_default( if (direct_sql->use_real_table == -1) direct_sql->use_real_table = 0; #endif + if (direct_sql->error_rw_mode == -1) + direct_sql->error_rw_mode = 0; for (roop_count = 0; roop_count < direct_sql->table_count; roop_count++) { if (direct_sql->iop[roop_count] == -1) @@ -1526,12 +1531,14 @@ long long spider_direct_sql_body( uint use_real_table = 0; #endif DBUG_ENTER("spider_direct_sql_body"); + SPIDER_BACKUP_DASTATUS; if (!(direct_sql = (SPIDER_DIRECT_SQL *) spider_bulk_malloc(spider_current_trx, 34, MYF(MY_WME | MY_ZEROFILL), &direct_sql, sizeof(SPIDER_DIRECT_SQL), &sql, sizeof(char) * args->lengths[0], NullS)) ) { + error_num = HA_ERR_OUT_OF_MEM; my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); goto error; } @@ -1556,48 +1563,51 @@ long long spider_direct_sql_body( #endif if (!(trx = spider_get_trx(thd, TRUE, &error_num))) { - my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); + if (error_num == HA_ERR_OUT_OF_MEM) + my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); goto error; } direct_sql->trx = trx; if (args->args[1]) { - if (spider_udf_direct_sql_create_table_list( + if ((error_num = spider_udf_direct_sql_create_table_list( direct_sql, args->args[1], args->lengths[1] - )) { - my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); + ))) { + if (error_num == HA_ERR_OUT_OF_MEM) + my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); goto error; } } else { - if (spider_udf_direct_sql_create_table_list( + if ((error_num = spider_udf_direct_sql_create_table_list( direct_sql, (char *) "", 0 - )) { - my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); + ))) { + if (error_num == HA_ERR_OUT_OF_MEM) + my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); goto error; } } if (args->args[2]) { - if (spider_udf_parse_direct_sql_param( + if ((error_num = spider_udf_parse_direct_sql_param( trx, direct_sql, 
args->args[2], args->lengths[2] - )) { + ))) { goto error; } } else { - if (spider_udf_parse_direct_sql_param( + if ((error_num = spider_udf_parse_direct_sql_param( trx, direct_sql, "", 0 - )) { + ))) { goto error; } } @@ -1626,6 +1636,7 @@ long long spider_direct_sql_body( if (!use_real_table) { #endif + error_num = ER_SPIDER_UDF_TMP_TABLE_NOT_FOUND_NUM; my_printf_error(ER_SPIDER_UDF_TMP_TABLE_NOT_FOUND_NUM, ER_SPIDER_UDF_TMP_TABLE_NOT_FOUND_STR, MYF(0), table_list.db, table_list.table_name); @@ -1651,20 +1662,24 @@ long long spider_direct_sql_body( #endif } } - if (spider_udf_direct_sql_create_conn_key(direct_sql)) + if ((error_num = spider_udf_direct_sql_create_conn_key(direct_sql))) { + if (error_num == HA_ERR_OUT_OF_MEM) + my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); goto error; } if (!(conn = spider_udf_direct_sql_get_conn(direct_sql, trx, &error_num))) { - my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); + if (error_num == HA_ERR_OUT_OF_MEM) + my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); goto error; } conn->error_mode = 0; direct_sql->conn = conn; - if (spider_db_udf_check_and_set_set_names(trx)) + if ((error_num = spider_db_udf_check_and_set_set_names(trx))) { - my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); + if (error_num == HA_ERR_OUT_OF_MEM) + my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); goto error; } if (args->args[0]) @@ -1678,21 +1693,24 @@ long long spider_direct_sql_body( #ifndef WITHOUT_SPIDER_BG_SEARCH if (bg) { - if (spider_udf_bg_direct_sql(direct_sql)) + if ((error_num = spider_udf_bg_direct_sql(direct_sql))) { - my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); + if (error_num == HA_ERR_OUT_OF_MEM) + my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); goto error; } } else { #endif if (conn->bg_init) pthread_mutex_lock(&conn->bg_conn_mutex); - if (spider_db_udf_direct_sql(direct_sql)) + if ((error_num = spider_db_udf_direct_sql(direct_sql))) { if (conn->bg_init) pthread_mutex_unlock(&conn->bg_conn_mutex); if (direct_sql->modified_non_trans_table) thd->transaction.stmt.modified_non_trans_table = TRUE; + if (error_num == HA_ERR_OUT_OF_MEM) + my_error(ER_OUT_OF_RESOURCES, MYF(0), HA_ERR_OUT_OF_MEM); goto error; } if (conn->bg_init) @@ -1712,7 +1730,17 @@ long long spider_direct_sql_body( error: if (direct_sql) + { + if ( + direct_sql->error_rw_mode && + spider_db_conn_is_network_error(error_num) + ) { + SPIDER_RESTORE_DASTATUS; + spider_udf_free_direct_sql_alloc(direct_sql, bg); + DBUG_RETURN(1); + } spider_udf_free_direct_sql_alloc(direct_sql, bg); + } *error = 1; DBUG_RETURN(0); } @@ -1790,6 +1818,15 @@ void spider_direct_sql_deinit_body( DBUG_ENTER("spider_direct_sql_deinit_body"); if (bg_direct_sql) { + pthread_mutex_lock(&bg_direct_sql->bg_mutex); + while (bg_direct_sql->direct_sql) + pthread_cond_wait(&bg_direct_sql->bg_cond, &bg_direct_sql->bg_mutex); + pthread_mutex_unlock(&bg_direct_sql->bg_mutex); + if (bg_direct_sql->modified_non_trans_table) + { + THD *thd = current_thd; + thd->transaction.stmt.modified_non_trans_table = TRUE; + } pthread_cond_destroy(&bg_direct_sql->bg_cond); pthread_mutex_destroy(&bg_direct_sql->bg_mutex); spider_free(spider_current_trx, bg_direct_sql, MYF(0)); diff --git a/storage/spider/spd_include.h b/storage/spider/spd_include.h index 85ceae0f54b..f6952e5398e 100644 --- a/storage/spider/spd_include.h +++ b/storage/spider/spd_include.h @@ -13,7 +13,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, 
MA 02111-1307 USA */ -#define SPIDER_DETAIL_VERSION "3.2.4" +#define SPIDER_DETAIL_VERSION "3.2.11" #define SPIDER_HEX_VERSION 0x0302 #if MYSQL_VERSION_ID < 50500 @@ -138,7 +138,7 @@ #define SPIDER_TMP_SHARE_LONG_COUNT 15 #define SPIDER_TMP_SHARE_LONGLONG_COUNT 3 -#define SPIDER_MEM_CALC_LIST_NUM 246 +#define SPIDER_MEM_CALC_LIST_NUM 247 #define SPIDER_BACKUP_DASTATUS \ bool da_status; if (thd) da_status = thd->is_error(); else da_status = FALSE; @@ -709,9 +709,13 @@ typedef struct st_spider_share time_t create_time; time_t update_time; + longlong static_records_for_status; + longlong static_mean_rec_length; + int bitmap_size; spider_string *key_hint; CHARSET_INFO *access_charset; + longlong *static_key_cardinality; longlong *cardinality; uchar *cardinality_upd; longlong additional_table_flags; @@ -1039,6 +1043,7 @@ typedef struct st_spider_direct_sql #else int use_real_table; #endif + int error_rw_mode; char *server_name; char *tgt_default_db_name; diff --git a/storage/spider/spd_malloc.cc b/storage/spider/spd_malloc.cc index 57b4fb915bc..4a033c1e7cf 100644 --- a/storage/spider/spd_malloc.cc +++ b/storage/spider/spd_malloc.cc @@ -22,6 +22,7 @@ #include "sql_priv.h" #include "probes_mysql.h" #include "sql_class.h" +#include "sql_analyse.h" #endif #include "spd_db_include.h" #include "spd_include.h" @@ -1251,7 +1252,7 @@ bool spider_string::append_for_single_quote( #ifdef SPIDER_HAS_APPEND_FOR_SINGLE_QUOTE bool res = str.append_for_single_quote(s); #else - bool res = append_escaped(&str, s); + bool res = append_escaped(&str, (String *) s); #endif SPIDER_STRING_CALC_MEM; DBUG_RETURN(res); diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc index 0926f45773b..f17de987366 100644 --- a/storage/spider/spd_table.cc +++ b/storage/spider/spd_table.cc @@ -25,10 +25,8 @@ #include "sql_class.h" #include "sql_partition.h" #include "sql_servers.h" -#ifdef HANDLER_HAS_DIRECT_AGGREGATE #include "sql_select.h" #endif -#endif #include "spd_err.h" #include "spd_param.h" #include "spd_db_include.h" @@ -907,6 +905,8 @@ void spider_free_tmp_share_alloc( spider_free(spider_current_trx, share->conn_keys, MYF(0)); share->conn_keys = NULL; } + if (share->static_key_cardinality) + spider_free(spider_current_trx, share->static_key_cardinality, MYF(0)); if (share->key_hint) { delete [] share->key_hint; @@ -1476,6 +1476,16 @@ int spider_increase_longlong_list( DBUG_RETURN(0); } +static int spider_set_ll_value( + longlong *value, + char *str +) { + int error_num = 0; + DBUG_ENTER("spider_set_ll_value"); + *value = my_strtoll10(str, (char**) NULL, &error_num); + DBUG_RETURN(error_num); +} + #define SPIDER_PARAM_STR_LEN(name) name ## _length #define SPIDER_PARAM_STR(title_name, param_name) \ if (!strncasecmp(tmp_ptr, title_name, title_length)) \ @@ -1556,6 +1566,38 @@ int spider_increase_longlong_list( } \ break; \ } +#define SPIDER_PARAM_NUMHINT(title_name, param_name, check_length, max_size, append_method) \ + if (!strncasecmp(tmp_ptr, title_name, check_length)) \ + { \ + DBUG_PRINT("info",("spider "title_name" start")); \ + DBUG_PRINT("info",("spider max_size=%d", max_size)); \ + int hint_num = atoi(tmp_ptr + check_length); \ + DBUG_PRINT("info",("spider hint_num=%d", hint_num)); \ + DBUG_PRINT("info",("spider share->param_name=%p", share->param_name)); \ + if (share->param_name) \ + { \ + if (hint_num < 0 || hint_num >= max_size) \ + { \ + error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ + my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ + MYF(0), tmp_ptr); \ + goto error; \ + } 
else if (share->param_name[hint_num] != -1) \ + break; \ + char *hint_str = spider_get_string_between_quote(start_ptr, FALSE); \ + if ((error_num = \ + append_method(&share->param_name[hint_num], hint_str))) \ + goto error; \ + DBUG_PRINT("info",("spider "title_name"[%d]=%lld", hint_num, \ + share->param_name[hint_num])); \ + } else { \ + error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; \ + my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, \ + MYF(0), tmp_ptr); \ + goto error; \ + } \ + break; \ + } #define SPIDER_PARAM_LONG_LEN(name) name ## _length #define SPIDER_PARAM_LONG_LIST_WITH_MAX(title_name, param_name, \ min_val, max_val) \ @@ -1817,6 +1859,12 @@ int spider_parse_connect_info( #endif share->casual_read = -1; share->delete_all_rows_type = -1; + share->static_records_for_status = -1; + share->static_mean_rec_length = -1; + for (roop_count = 0; roop_count < (int) table_share->keys; roop_count++) + { + share->static_key_cardinality[roop_count] = -1; + } #ifdef WITH_PARTITION_STORAGE_ENGINE for (roop_count = 4; roop_count > 0; roop_count--) @@ -2025,6 +2073,7 @@ int spider_parse_connect_info( SPIDER_PARAM_STR_LIST("sky", tgt_ssl_keys); SPIDER_PARAM_INT_WITH_MAX("slm", selupd_lock_mode, 0, 2); SPIDER_PARAM_INT_WITH_MAX("smd", sts_mode, 1, 2); + SPIDER_PARAM_LONGLONG("smr", static_mean_rec_length, 0); SPIDER_PARAM_LONGLONG("spr", split_read, 0); SPIDER_PARAM_STR_LIST("sqn", tgt_sequence_names); SPIDER_PARAM_LONGLONG("srd", second_read, 0); @@ -2037,6 +2086,7 @@ int spider_parse_connect_info( #endif SPIDER_PARAM_INT_WITH_MAX("stc", semi_table_lock_conn, 0, 1); SPIDER_PARAM_INT_WITH_MAX("stl", semi_table_lock, 0, 1); + SPIDER_PARAM_LONGLONG("srs", static_records_for_status, 0); SPIDER_PARAM_LONG_LIST_WITH_MAX("svc", tgt_ssl_vscs, 0, 1); SPIDER_PARAM_STR_LIST("tbl", tgt_table_names); SPIDER_PARAM_INT_WITH_MAX("tcm", table_count_mode, 0, 3); @@ -2073,6 +2123,8 @@ int spider_parse_connect_info( SPIDER_PARAM_HINT("idx", key_hint, 3, (int) table_share->keys, spider_db_append_key_hint); SPIDER_PARAM_STR_LIST("ssl_ca", tgt_ssl_cas); + SPIDER_PARAM_NUMHINT("skc", static_key_cardinality, 3, + (int) table_share->keys, spider_set_ll_value); error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, MYF(0), tmp_ptr); @@ -2312,6 +2364,8 @@ int spider_parse_connect_info( #endif SPIDER_PARAM_INT_WITH_MAX( "skip_default_condition", skip_default_condition, 0, 1); + SPIDER_PARAM_LONGLONG( + "static_mean_rec_length", static_mean_rec_length, 0); error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, MYF(0), tmp_ptr); @@ -2323,6 +2377,15 @@ int spider_parse_connect_info( my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, MYF(0), tmp_ptr); goto error; + case 25: + SPIDER_PARAM_LONGLONG("static_records_for_status", + static_records_for_status, 0); + SPIDER_PARAM_NUMHINT("static_key_cardinality", static_key_cardinality, + 3, (int) table_share->keys, spider_set_ll_value); + error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; + my_printf_error(error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, + MYF(0), tmp_ptr); + goto error; case 26: SPIDER_PARAM_INT_WITH_MAX( "semi_table_lock_connection", semi_table_lock_conn, 0, 1); @@ -3963,7 +4026,7 @@ SPIDER_SHARE *spider_create_share( int use_table_charset; SPIDER_SHARE *share; char *tmp_name; - longlong *tmp_cardinality; + longlong *tmp_cardinality, *tmp_static_key_cardinality; uchar *tmp_cardinality_upd; DBUG_ENTER("spider_create_share"); length = 
(uint) strlen(table_name); @@ -3972,6 +4035,7 @@ SPIDER_SHARE *spider_create_share( spider_bulk_malloc(spider_current_trx, 46, MYF(MY_WME | MY_ZEROFILL), &share, sizeof(*share), &tmp_name, length + 1, + &tmp_static_key_cardinality, sizeof(*tmp_static_key_cardinality) * table_share->keys, &tmp_cardinality, sizeof(*tmp_cardinality) * table_share->fields, &tmp_cardinality_upd, sizeof(*tmp_cardinality_upd) * bitmap_size, NullS)) @@ -3988,6 +4052,7 @@ SPIDER_SHARE *spider_create_share( share->table_name_length = length; share->table_name = tmp_name; strmov(share->table_name, table_name); + share->static_key_cardinality = tmp_static_key_cardinality; share->cardinality = tmp_cardinality; share->cardinality_upd = tmp_cardinality_upd; share->bitmap_size = bitmap_size; @@ -7737,12 +7802,42 @@ longlong spider_split_read_param( result_list->set_split_read = TRUE; DBUG_RETURN(9223372036854775807LL); } + Explain_query *explain = thd->lex->explain; + bool filesort = FALSE; + if (explain) + { + DBUG_PRINT("info",("spider explain=%p", explain)); + Explain_select *explain_select = NULL; + if (select_lex) + { + DBUG_PRINT("info",("spider select_lex=%p", select_lex)); + DBUG_PRINT("info",("spider select_number=%u", + select_lex->select_number)); + explain_select = + explain->get_select(select_lex->select_number); + } + if (explain_select) + { + DBUG_PRINT("info",("spider explain_select=%p", explain_select)); + if (explain_select->using_filesort) + { + DBUG_PRINT("info",("spider using filesort")); + filesort = TRUE; + } + } + } result_list->split_read_base = spider_param_split_read(thd, share->split_read); - result_list->semi_split_read = - spider_param_semi_split_read(thd, share->semi_split_read); - result_list->semi_split_read_limit = - spider_param_semi_split_read_limit(thd, share->semi_split_read_limit); + if (filesort) + { + result_list->semi_split_read = 0; + result_list->semi_split_read_limit = 9223372036854775807LL; + } else { + result_list->semi_split_read = + spider_param_semi_split_read(thd, share->semi_split_read); + result_list->semi_split_read_limit = + spider_param_semi_split_read_limit(thd, share->semi_split_read_limit); + } result_list->first_read = spider_param_first_read(thd, share->first_read); result_list->second_read = @@ -7864,6 +7959,12 @@ bool spider_check_direct_order_limit( DBUG_PRINT("info",("spider leaf_tables.elements=%u", select_lex->leaf_tables.elements)); #endif + + if (select_lex->options & SELECT_DISTINCT) + { + DBUG_PRINT("info",("spider with distinct")); + spider->result_list.direct_distinct = TRUE; + } #ifdef HANDLER_HAS_DIRECT_AGGREGATE spider->result_list.direct_aggregate = TRUE; #endif @@ -7878,6 +7979,7 @@ bool spider_check_direct_order_limit( ) { DBUG_PRINT("info",("spider first_check is FALSE")); first_check = FALSE; + spider->result_list.direct_distinct = FALSE; #ifdef HANDLER_HAS_DIRECT_AGGREGATE spider->result_list.direct_aggregate = FALSE; #endif @@ -7885,6 +7987,14 @@ bool spider_check_direct_order_limit( { DBUG_PRINT("info",("spider FALSE by condition")); first_check = FALSE; + spider->result_list.direct_distinct = FALSE; +#ifdef HANDLER_HAS_DIRECT_AGGREGATE + spider->result_list.direct_aggregate = FALSE; +#endif + } else if (spider->sql_kinds & SPIDER_SQL_KIND_HANDLER) + { + DBUG_PRINT("info",("spider sql_kinds with SPIDER_SQL_KIND_HANDLER")); + spider->result_list.direct_distinct = FALSE; #ifdef HANDLER_HAS_DIRECT_AGGREGATE spider->result_list.direct_aggregate = FALSE; } else if ( @@ -7893,10 +8003,6 @@ bool spider_check_direct_order_limit( ) { 
DBUG_PRINT("info",("spider this SQL is not aggregate SQL")); spider->result_list.direct_aggregate = FALSE; - } else if (spider->sql_kinds & SPIDER_SQL_KIND_HANDLER) - { - DBUG_PRINT("info",("spider sql_kinds with SPIDER_SQL_KIND_HANDLER")); - spider->result_list.direct_aggregate = FALSE; } else { ORDER *group; for (group = (ORDER *) select_lex->group_list.first; group; diff --git a/storage/tokudb/CMakeLists.in b/storage/tokudb/CMakeLists.in deleted file mode 100644 index 20c05126841..00000000000 --- a/storage/tokudb/CMakeLists.in +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2006 MySQL AB -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DTOKUDB_VERSION=\\\"TOKUDB_VERSION_REPLACE_ME\\\"") -SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") -SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") - -INCLUDE_DIRECTORIES(TOKUDB_DIR_REPLACE_ME/windows - TOKUDB_DIR_REPLACE_ME/src - TOKUDB_DIR_REPLACE_ME/include - TOKUDB_DIR_REPLACE_ME/toku_include) - -INCLUDE("${PROJECT_SOURCE_DIR}/storage/mysql_storage_engine.cmake") -SET(TOKUDB_SOURCES hatoku_hton.cc ha_tokudb.cc hatoku_cmp.cc) -MYSQL_STORAGE_ENGINE(TOKUDB) - -TARGET_LINK_LIBRARIES(ha_tokudb PowrProf optimized TOKUDB_OBJ_DIR_REPLACE_ME/opt/ipo_libtokudb optimized TOKUDB_OBJ_DIR_REPLACE_ME/opt/libtokuportability debug TOKUDB_OBJ_DIR_REPLACE_ME/debug/static_libtokudb debug TOKUDB_OBJ_DIR_REPLACE_ME/debug/libtokuportability) diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt index dcd893d771d..30b31e10c91 100644 --- a/storage/tokudb/CMakeLists.txt +++ b/storage/tokudb/CMakeLists.txt @@ -14,12 +14,14 @@ IF(NOT TOKUDB_OK OR PLUGIN_TOKUDB STREQUAL "NO") RETURN() ENDIF() +IF(NOT LIBJEMALLOC) + MESSAGE(WARNING "TokuDB is enabled, but jemalloc is not. 
This configuration is not supported") +ENDIF() + ############################################ -SET(TOKUDB_VERSION "7.1.6") +SET(TOKUDB_VERSION "7.5.0") SET(TOKUDB_DEB_FILES "usr/lib/mysql/plugin/ha_tokudb.so\netc/mysql/conf.d/tokudb.cnf\nusr/bin/tokuftdump\nusr/share/doc/mariadb-server-10.1/README-TOKUDB\nusr/share/doc/mariadb-server-10.1/README.md" PARENT_SCOPE) SET(USE_BDB OFF CACHE BOOL "") -SET(USE_VALGRIND OFF CACHE BOOL "") -SET(BUILD_TESTING OFF CACHE BOOL "") MARK_AS_ADVANCED(BUILDNAME) MARK_AS_ADVANCED(BUILD_TESTING) MARK_AS_ADVANCED(CMAKE_TOKUDB_REVISION) @@ -32,6 +34,10 @@ MARK_AS_ADVANCED(USE_VALGRIND) MARK_AS_ADVANCED(XZ_SOURCE_DIR) ############################################ +SET(BUILD_TESTING OFF CACHE BOOL "") +SET(USE_VALGRIND OFF CACHE BOOL "") +SET(TOKU_DEBUG_PARANOID OFF CACHE BOOL "") + IF(NOT DEFINED TOKUDB_VERSION) IF(DEFINED ENV{TOKUDB_VERSION}) SET(TOKUDB_VERSION $ENV{TOKUDB_VERSION}) @@ -49,6 +55,25 @@ IF(DEFINED TOKUDB_CHECK_JEMALLOC) ADD_DEFINITIONS("-DTOKUDB_CHECK_JEMALLOC=${TOKUDB_CHECK_JEMALLOC}") ENDIF() +## adds a compiler flag if the compiler supports it +include(CheckCCompilerFlag) +include(CheckCXXCompilerFlag) + +macro(set_cflags_if_supported) + foreach(flag ${ARGN}) + check_c_compiler_flag(${flag} HAVE_C_${flag}) + if (HAVE_C_${flag}) + set(CMAKE_C_FLAGS "${flag} ${CMAKE_C_FLAGS}") + endif () + check_cxx_compiler_flag(${flag} HAVE_CXX_${flag}) + if (HAVE_CXX_${flag}) + set(CMAKE_CXX_FLAGS "${flag} ${CMAKE_CXX_FLAGS}") + endif () + endforeach(flag) +endmacro(set_cflags_if_supported) + +set_cflags_if_supported(-Wno-missing-field-initializers) + ADD_SUBDIRECTORY(ft-index) INCLUDE_DIRECTORIES(ft-index) diff --git a/storage/tokudb/README.md b/storage/tokudb/README.md index 7d4ebcefce1..ff1773fc2b0 100644 --- a/storage/tokudb/README.md +++ b/storage/tokudb/README.md @@ -1,17 +1,17 @@ TokuDB ====== -TokuDB is a high-performance, transactional storage engine for MySQL and +TokuDB is a high-performance, write optimized, transactional storage engine for MySQL and MariaDB. For more details, see our [product page][products]. -This repository contains the MySQL plugin that uses the [TokuKV][tokukv] +This repository contains the MySQL plugin that uses the [TokuFT][tokuft] core. There are also patches to the MySQL and MariaDB kernels, available in our forks of [mysql][mysql] and [mariadb][mariadb]. [products]: http://www.tokutek.com/products/tokudb-for-mysql/ -[tokukv]: http://github.com/Tokutek/ft-index +[tokuft]: http://github.com/Tokutek/ft-index [mysql]: http://github.com/Tokutek/mysql [mariadb]: http://github.com/Tokutek/mariadb @@ -24,14 +24,14 @@ working MySQL or MariaDB with Tokutek patches, and with the TokuDB storage engine, called `make.mysql.bash`. This script will download copies of the needed source code from github and build everything. 
-To build MySQL 5.5.36 with TokuDB 7.1.5: +To build MySQL 5.5.38 with TokuDB 7.1.7: ```sh -scripts/make.mysql.bash --mysqlbuild=mysql-5.5.36-tokudb-7.1.5-linux-x86_64 +scripts/make.mysql.bash --mysqlbuild=mysql-5.5.38-tokudb-7.1.7-linux-x86_64 ``` -To build MariaDB 5.5.36 with TokuDB 7.1.5: +To build MariaDB 5.5.38 with TokuDB 7.1.7: ```sh -scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.36-tokudb-7.1.5-linux-x86_64 +scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.38-tokudb-7.1.7-linux-x86_64 ``` Before you start, make sure you have a C++11-compatible compiler (GCC >= diff --git a/storage/tokudb/ft-index/CMakeLists.txt b/storage/tokudb/ft-index/CMakeLists.txt index 1228da8c35d..c6846dae679 100644 --- a/storage/tokudb/ft-index/CMakeLists.txt +++ b/storage/tokudb/ft-index/CMakeLists.txt @@ -6,6 +6,31 @@ project(TokuDB) set(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "") set(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "") +## Versions of gcc >= 4.9.0 require special version of 'ar' and 'ranlib' for +## link-time optimizations to work properly. +## +## From https://gcc.gnu.org/gcc-4.9/changes.html: +## +## When using a linker plugin, compiling with the -flto option now +## generates slim objects files (.o) which only contain intermediate +## language representation for LTO. Use -ffat-lto-objects to create +## files which contain additionally the object code. To generate +## static libraries suitable for LTO processing, use gcc-ar and +## gcc-ranlib; to list symbols from a slim object file use +## gcc-nm. (Requires that ar, ranlib and nm have been compiled with +## plugin support.) +if ((CMAKE_CXX_COMPILER_ID STREQUAL GNU) AND + NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.9.0")) + find_program(gcc_ar "gcc-ar") + if (gcc_ar) + set(CMAKE_AR "${gcc_ar}") + endif () + find_program(gcc_ranlib "gcc-ranlib") + if (gcc_ranlib) + set(CMAKE_RANLIB "${gcc_ranlib}") + endif () +endif() + include(TokuFeatureDetection) include(TokuSetupCompiler) include(TokuSetupCTest) @@ -51,9 +76,6 @@ add_subdirectory(locktree) add_subdirectory(src) add_subdirectory(tools) -## subdirectories that just install things -#add_subdirectory(examples) - INSTALL_DOCUMENTATION(README.md README-TOKUDB COMPONENT Server) ## build tags diff --git a/storage/tokudb/ft-index/CTestCustom.cmake.in b/storage/tokudb/ft-index/CTestCustom.cmake index 9861d8e20a2..62b592a5149 100644 --- a/storage/tokudb/ft-index/CTestCustom.cmake.in +++ b/storage/tokudb/ft-index/CTestCustom.cmake @@ -3,7 +3,6 @@ cmake_policy(SET CMP0012 NEW) ## these tests shouldn't run with valgrind list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE ft/bnc-insert-benchmark - ft/brt-serialize-benchmark ft/ft_loader-test-extractor-1 ft/ft_loader-test-extractor-2 ft/ft_loader-test-extractor-3 @@ -34,7 +33,6 @@ list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE ydb/preload-db-nested.tdb ydb/stress-gc.tdb ydb/stress-gc2.tdb - ydb/stress-test.bdb ydb/stress-test.tdb ydb/test-5138.tdb ydb/test-prepare.tdb @@ -46,7 +44,6 @@ list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE ydb/test-xa-prepare.tdb ydb/test4573-logtrim.tdb ydb/test_3645.tdb - ydb/test_groupcommit_perf.bdb ydb/test_groupcommit_perf.tdb ydb/test_large_update_broadcast_small_cachetable.tdb ydb/test_update_broadcast_stress.tdb diff --git a/storage/tokudb/ft-index/README-TOKUDB b/storage/tokudb/ft-index/README-TOKUDB index 68fb40b3671..7d70059a912 100644 --- a/storage/tokudb/ft-index/README-TOKUDB +++ b/storage/tokudb/ft-index/README-TOKUDB @@ -25,7 +25,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/README.md b/storage/tokudb/ft-index/README.md index 72b8988165a..2914ff9be2c 100644 --- a/storage/tokudb/ft-index/README.md +++ b/storage/tokudb/ft-index/README.md @@ -1,16 +1,16 @@ -TokuKV +TokuFT ====== -TokuKV is a high-performance, transactional key-value store, used in the +TokuFT is a high-performance, transactional key-value store, used in the TokuDB storage engine for MySQL and MariaDB and in TokuMX, the high-performance MongoDB distribution. -TokuKV is provided as a shared library with an interface similar to +TokuFT is provided as a shared library with an interface similar to Berkeley DB. To build the full MySQL product, see the instructions for [Tokutek/ft-engine][ft-engine]. To build TokuMX, see the instructions -for [Tokutek/mongo][mongo]. This document covers TokuKV only. +for [Tokutek/mongo][mongo]. This document covers TokuFT only. [ft-engine]: https://github.com/Tokutek/ft-engine [mongo]: https://github.com/Tokutek/mongo @@ -19,7 +19,7 @@ for [Tokutek/mongo][mongo]. This document covers TokuKV only. Building -------- -TokuKV is built using CMake >= 2.8.9. Out-of-source builds are +TokuFT is built using CMake >= 2.8.9. Out-of-source builds are recommended. You need a C++11 compiler, though only GCC >= 4.7 and Apple's Clang are tested. You also need zlib development packages (`yum install zlib-devel` or `apt-get install zlib1g-dev`). @@ -35,7 +35,6 @@ mkdir build cd build CC=gcc47 CXX=g++47 cmake \ -D CMAKE_BUILD_TYPE=Debug \ - -D USE_BDB=OFF \ -D BUILD_TESTING=OFF \ -D USE_VALGRIND=OFF \ -D CMAKE_INSTALL_PREFIX=../prefix/ \ @@ -50,14 +49,14 @@ to that if you are planning to run benchmarks or in production. ### Platforms -TokuKV is supported on 64-bit Centos, should work on other 64-bit linux -distributions, and may work on OSX 10.8 and FreeBSD. TokuKV is not +TokuFT is supported on 64-bit Centos, should work on other 64-bit linux +distributions, and may work on OSX 10.8 and FreeBSD. TokuFT is not supported on 32-bit systems. [Transparent hugepages][transparent-hugepages] is a feature in newer linux kernel versions that causes problems for the memory usage tracking -calculations in TokuKV and can lead to memory overcommit. If you have -this feature enabled, TokuKV will not start, and you should turn it off. +calculations in TokuFT and can lead to memory overcommit. If you have +this feature enabled, TokuFT will not start, and you should turn it off. If you want to run with transparent hugepages on, you can set an environment variable `TOKU_HUGE_PAGES_OK=1`, but only do this for testing, and only with a small cache size. @@ -68,31 +67,26 @@ and only with a small cache size. Examples -------- -There are some sample programs that can use either TokuKV or Berkeley DB +There are some sample programs that can use either TokuFT or Berkeley DB in the `examples/` directory. Follow the above instructions to build and -install TokuKV, and then look in the installed `examples/` directory for +install TokuFT, and then look in the installed `examples/` directory for instructions on building and running them. Testing ------- -TokuKV uses CTest for testing. The CDash testing dashboard is not +TokuFT uses CTest for testing. The CDash testing dashboard is not currently public, but you can run the tests without submitting them. There are some large data files not stored in the git repository, that will be made available soon. 
For now, the tests that use these files will not run. -Many of the tests are linked with both TokuKV and Berkeley DB, as a sanity -check on the tests themselves. To build these tests, you will need -Berkeley DB and its header files installed. If you do not have Berkeley -DB installed, just don't pass `USE_BDB=ON`. - In the build directory from above: ```sh -cmake -D BUILD_TESTING=ON [-D USE_BDB=ON] .. +cmake -D BUILD_TESTING=ON .. ctest -D ExperimentalStart \ -D ExperimentalConfigure \ -D ExperimentalBuild \ @@ -103,7 +97,7 @@ ctest -D ExperimentalStart \ Contributing ------------ -Please report bugs in TokuKV here on github. +Please report bugs in TokuFT to the [issue tracker][jira]. We have two publicly accessible mailing lists for TokuDB: @@ -121,11 +115,13 @@ and two for TokuMX: We are also available on IRC on freenode.net, in the #tokutek channel. +[jira]: https://tokutek.atlassian.net/browse/FT/ + License ------- -TokuKV is available under the GPL version 2, with slight modifications. +TokuFT is available under the GPL version 2, with slight modifications. See [README-TOKUDB][license]. [license]: http://github.com/Tokutek/ft-index/blob/master/README-TOKUDB diff --git a/storage/tokudb/ft-index/buildheader/CMakeLists.txt b/storage/tokudb/ft-index/buildheader/CMakeLists.txt index 5da3c98ff48..9a3a6be1cae 100644 --- a/storage/tokudb/ft-index/buildheader/CMakeLists.txt +++ b/storage/tokudb/ft-index/buildheader/CMakeLists.txt @@ -26,4 +26,4 @@ if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING) DESTINATION include COMPONENT tokukv_headers ) -endif () +endif ()
\ No newline at end of file diff --git a/storage/tokudb/ft-index/buildheader/make_tdb.cc b/storage/tokudb/ft-index/buildheader/make_tdb.cc index d185aa352fb..9890b8ed34b 100644 --- a/storage/tokudb/ft-index/buildheader/make_tdb.cc +++ b/storage/tokudb/ft-index/buildheader/make_tdb.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -291,6 +291,7 @@ static void print_defines (void) { printf("#define DB_IS_HOT_INDEX 0x00100000\n"); // private tokudb printf("#define DBC_DISABLE_PREFETCHING 0x20000000\n"); // private tokudb printf("#define DB_UPDATE_CMP_DESCRIPTOR 0x40000000\n"); // private tokudb + printf("#define TOKUFT_DIRTY_SHUTDOWN %x\n", 1<<31); { //dbt flags @@ -340,8 +341,8 @@ static void print_defines (void) { dodefine_from_track(txn_flags, DB_TXN_READ_ONLY); } - /* TOKUDB specific error codes*/ - printf("/* TOKUDB specific error codes */\n"); + /* TokuFT specific error codes*/ + printf("/* TokuFT specific error codes */\n"); dodefine(TOKUDB_OUT_OF_LOCKS); dodefine(TOKUDB_SUCCEEDED_EARLY); dodefine(TOKUDB_FOUND_BUT_REJECTED); @@ -421,7 +422,7 @@ static void print_db_env_struct (void) { "int (*cleaner_set_iterations) (DB_ENV*, uint32_t) /* Change the number of attempts on each cleaner invokation. 0 means disabled. */", "int (*cleaner_get_iterations) (DB_ENV*, uint32_t*) /* Retrieve the number of attempts on each cleaner invokation. 0 means disabled. */", "int (*checkpointing_postpone) (DB_ENV*) /* Use for 'rename table' or any other operation that must be disjoint from a checkpoint */", - "int (*checkpointing_resume) (DB_ENV*) /* Alert tokudb 'postpone' is no longer necessary */", + "int (*checkpointing_resume) (DB_ENV*) /* Alert tokuft that 'postpone' is no longer necessary */", "int (*checkpointing_begin_atomic_operation) (DB_ENV*) /* Begin a set of operations (that must be atomic as far as checkpoints are concerned). i.e. inserting into every index in one table */", "int (*checkpointing_end_atomic_operation) (DB_ENV*) /* End a set of operations (that must be atomic as far as checkpoints are concerned). */", "int (*set_default_bt_compare) (DB_ENV*,int (*bt_compare) (DB *, const DBT *, const DBT *)) /* Set default (key) comparison function for all DBs in this environment. Required for RECOVERY since you cannot open the DBs manually. 
*/", @@ -545,6 +546,7 @@ static void print_db_struct (void) { "int (*change_fanout)(DB *db, uint32_t fanout)", "int (*get_fanout)(DB *db, uint32_t *fanout)", "int (*set_fanout)(DB *db, uint32_t fanout)", + "int (*set_memcmp_magic)(DB *db, uint8_t magic)", "int (*set_indexer)(DB*, DB_INDEXER*)", "void (*get_indexer)(DB*, DB_INDEXER**)", "int (*verify_with_progress)(DB *, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra, int verbose, int keep_going)", @@ -571,8 +573,9 @@ static void print_db_txn_struct (void) { STRUCT_SETUP(DB_TXN, api_internal,"void *%s"); STRUCT_SETUP(DB_TXN, commit, "int (*%s) (DB_TXN*, uint32_t)"); STRUCT_SETUP(DB_TXN, prepare, "int (*%s) (DB_TXN*, uint8_t gid[DB_GID_SIZE])"); + STRUCT_SETUP(DB_TXN, discard, "int (*%s) (DB_TXN*, uint32_t)"); STRUCT_SETUP(DB_TXN, id, "uint32_t (*%s) (DB_TXN *)"); - STRUCT_SETUP(DB_TXN, mgrp, "DB_ENV *%s /*In TokuDB, mgrp is a DB_ENV not a DB_TXNMGR*/"); + STRUCT_SETUP(DB_TXN, mgrp, "DB_ENV *%s /* In TokuFT, mgrp is a DB_ENV, not a DB_TXNMGR */"); STRUCT_SETUP(DB_TXN, parent, "DB_TXN *%s"); const char *extra[] = { "int (*txn_stat)(DB_TXN *, struct txn_stat **)", @@ -612,6 +615,7 @@ static void print_dbc_struct (void) { "int (*c_set_bounds)(DBC*, const DBT*, const DBT*, bool pre_acquire, int out_of_range_error)", "void (*c_set_check_interrupt_callback)(DBC*, bool (*)(void*), void *)", "void (*c_remove_restriction)(DBC*)", + "char _internal[512]", NULL}; sort_and_dump_fields("dbc", false, extra); } @@ -635,9 +639,9 @@ int main (int argc, char *const argv[] __attribute__((__unused__))) { printf("#define DB_VERSION_MAJOR %d\n", DB_VERSION_MAJOR); printf("#define DB_VERSION_MINOR %d\n", DB_VERSION_MINOR); - printf("/* As of r40364 (post TokuDB 5.2.7), the patch version number is 100+ the BDB header patch version number.*/\n"); + printf("/* As of r40364 (post TokuFT 5.2.7), the patch version number is 100+ the BDB header patch version number.*/\n"); printf("#define DB_VERSION_PATCH %d\n", 100+DB_VERSION_PATCH); - printf("#define DB_VERSION_STRING \"Tokutek: TokuDB %d.%d.%d\"\n", DB_VERSION_MAJOR, DB_VERSION_MINOR, 100+DB_VERSION_PATCH); + printf("#define DB_VERSION_STRING \"Tokutek: TokuFT %d.%d.%d\"\n", DB_VERSION_MAJOR, DB_VERSION_MINOR, 100+DB_VERSION_PATCH); #ifndef DB_GID_SIZE #define DB_GID_SIZE DB_XIDDATASIZE diff --git a/storage/tokudb/ft-index/cmake/merge_archives_unix.cmake.in b/storage/tokudb/ft-index/cmake/merge_archives_unix.cmake.in index e7140b8dbbc..66e23a824bd 100644 --- a/storage/tokudb/ft-index/cmake/merge_archives_unix.cmake.in +++ b/storage/tokudb/ft-index/cmake/merge_archives_unix.cmake.in @@ -43,7 +43,9 @@ FOREACH(LIB ${STATIC_LIBS}) LIST(LENGTH LIB_OBJ_LIST LENGTH_WITH_DUPS) SET(LIB_OBJ_LIST_NO_DUPS ${LIB_OBJ_LIST}) - LIST(REMOVE_DUPLICATES LIB_OBJ_LIST_NO_DUPS) + IF (LENGTH_WITH_DUPS GREATER 0) + LIST(REMOVE_DUPLICATES LIB_OBJ_LIST_NO_DUPS) + ENDIF () LIST(LENGTH LIB_OBJ_LIST_NO_DUPS LENGTH_WITHOUT_DUPS) IF(LENGTH_WITH_DUPS EQUAL LENGTH_WITHOUT_DUPS) diff --git a/storage/tokudb/ft-index/cmake_modules/FindBDB.cmake b/storage/tokudb/ft-index/cmake_modules/FindBDB.cmake deleted file mode 100644 index 495f2e87b3e..00000000000 --- a/storage/tokudb/ft-index/cmake_modules/FindBDB.cmake +++ /dev/null @@ -1,27 +0,0 @@ -# - Try to find BDB -# Once done this will define -# BDB_FOUND - System has BDB -# BDB_INCLUDE_DIRS - The BDB include directories -# BDB_LIBRARIES - The libraries needed to use BDB -# BDB_DEFINITIONS - Compiler switches required for using BDB - -find_path(BDB_INCLUDE_DIR db.h) - 
-find_library(BDB_LIBRARY NAMES db libdb) - -include(CheckSymbolExists) -## check if the found bdb has DB_TXN_SNAPSHOT -set(CMAKE_REQUIRED_INCLUDES ${BDB_INCLUDE_DIR}) -check_symbol_exists(DB_TXN_SNAPSHOT "db.h" HAVE_DB_TXN_SNAPSHOT) -if(HAVE_DB_TXN_SNAPSHOT) - set(BDB_INCLUDE_DIRS ${BDB_INCLUDE_DIR}) - set(BDB_LIBRARIES ${BDB_LIBRARY}) - - include(FindPackageHandleStandardArgs) - # handle the QUIETLY and REQUIRED arguments and set BDB_FOUND to TRUE - # if all listed variables are TRUE - find_package_handle_standard_args(BDB DEFAULT_MSG - BDB_LIBRARY BDB_INCLUDE_DIR) - - mark_as_advanced(BDB_INCLUDE_DIR BDB_LIBRARY) -endif() diff --git a/storage/tokudb/ft-index/cmake_modules/TokuBuildTagDatabases.cmake b/storage/tokudb/ft-index/cmake_modules/TokuBuildTagDatabases.cmake deleted file mode 100644 index 72c96389872..00000000000 --- a/storage/tokudb/ft-index/cmake_modules/TokuBuildTagDatabases.cmake +++ /dev/null @@ -1,126 +0,0 @@ -## set up lists of sources and headers for tags -file(GLOB_RECURSE all_srcs - buildheader/*.cc - db-benchmark-test/*.cc - ft/*.cc - include/*.cc - locktree/*.cc - portability/*.cc - src/*.cc - utils/*.cc - util/*.cc - db-benchmark-test/*.cc - ) -list(APPEND all_srcs - ${CMAKE_CURRENT_BINARY_DIR}/ft/log_code.cc - ${CMAKE_CURRENT_BINARY_DIR}/ft/log_print.cc - ) -file(GLOB_RECURSE all_hdrs - buildheader/*.h - db-benchmark-test/*.h - ft/*.h - include/*.h - locktree/*.h - portability/*.h - src/*.h - utils/*.h - util/*.h - db-benchmark-test/*.h - ) -list(APPEND all_hdrs - ${CMAKE_CURRENT_BINARY_DIR}/portability/toku_config.h - ${CMAKE_CURRENT_BINARY_DIR}/buildheader/db.h - ${CMAKE_CURRENT_BINARY_DIR}/ft/log_header.h - ) - -option(USE_CTAGS "Build the ctags database." ON) -if (USE_CTAGS AND - # Macs by default are not case-sensitive, so tags and TAGS clobber each other. Do etags and not ctags in that case, because Emacs is superior. :P - (NOT APPLE OR NOT USE_ETAGS)) - find_program(CTAGS "ctags") - if (NOT CTAGS MATCHES NOTFOUND) - add_custom_command( - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/tags" - OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/ctags-stamp" - COMMAND ${CTAGS} -o tags ${all_srcs} ${all_hdrs} - COMMAND touch "${CMAKE_CURRENT_BINARY_DIR}/ctags-stamp" - DEPENDS ${all_srcs} ${all_hdrs} install_tdb_h generate_config_h generate_log_code - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") - add_custom_target(build_ctags ALL DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/tags" ctags-stamp) - endif () -endif () - -option(USE_ETAGS "Build the etags database." ON) -if (USE_ETAGS) - find_program(ETAGS "etags") - if (NOT ETAGS MATCHES NOTFOUND) - add_custom_command( - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/TAGS" - OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/etags-stamp" - COMMAND ${ETAGS} -o TAGS ${all_srcs} ${all_hdrs} - COMMAND touch "${CMAKE_CURRENT_BINARY_DIR}/etags-stamp" - DEPENDS ${all_srcs} ${all_hdrs} install_tdb_h generate_config_h generate_log_code - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") - add_custom_target(build_etags ALL DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/TAGS" etags-stamp) - endif () -endif () - -option(USE_CSCOPE "Build the cscope database." 
ON) -if (USE_CSCOPE) - find_program(CSCOPE "cscope") - if (NOT CSCOPE MATCHES NOTFOUND) - file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/cscope.files" "") - foreach(file ${all_srcs} ${all_hdrs}) - file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/cscope.files" "${file}\n") - endforeach(file) - add_custom_command( - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/cscope.out" - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/cscope.in.out" - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/cscope.po.out" - COMMAND ${CSCOPE} -b -q -R -i"${CMAKE_CURRENT_BINARY_DIR}/cscope.files" -I"${CMAKE_CURRENT_SOURCE_DIR}" -I"${CMAKE_CURRENT_SOURCE_DIR}/include" -I"${CMAKE_CURRENT_SOURCE_DIR}/portability" -I"${CMAKE_CURRENT_SOURCE_DIR}/portability" -I"${CMAKE_CURRENT_SOURCE_DIR}/ft" -I"${CMAKE_CURRENT_SOURCE_DIR}/src" -I"${CMAKE_CURRENT_SOURCE_DIR}/locktree" -I"${CMAKE_CURRENT_SOURCE_DIR}/utils" -I"${CMAKE_CURRENT_SOURCE_DIR}/db-benchmark-test" -I"${CMAKE_CURRENT_BINARY_DIR}" -I"${CMAKE_CURRENT_BINARY_DIR}/portability" -I"${CMAKE_CURRENT_BINARY_DIR}/buildheader" - DEPENDS ${all_srcs} ${all_hdrs} install_tdb_h generate_config_h generate_log_code - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") - add_custom_target(build_cscope.out ALL DEPENDS - "${CMAKE_CURRENT_SOURCE_DIR}/cscope.out" - "${CMAKE_CURRENT_SOURCE_DIR}/cscope.in.out" - "${CMAKE_CURRENT_SOURCE_DIR}/cscope.po.out") - endif () -endif () - -option(USE_GTAGS "Build the gtags database." ON) -if (USE_GTAGS) - find_program(GTAGS "gtags") - if (NOT GTAGS MATCHES NOTFOUND) - file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/gtags.files" "") - foreach(file ${all_srcs} ${all_hdrs}) - file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/gtags.files" "${file}\n") - endforeach(file) - add_custom_command( - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/GTAGS" - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/GRTAGS" - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/GPATH" - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/GSYMS" - COMMAND ${GTAGS} -f "${CMAKE_CURRENT_BINARY_DIR}/gtags.files" - DEPENDS ${all_srcs} ${all_hdrs} install_tdb_h generate_config_h generate_log_code - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") - add_custom_target(build_GTAGS ALL DEPENDS - "${CMAKE_CURRENT_SOURCE_DIR}/GTAGS" - "${CMAKE_CURRENT_SOURCE_DIR}/GRTAGS" - "${CMAKE_CURRENT_SOURCE_DIR}/GPATH" - "${CMAKE_CURRENT_SOURCE_DIR}/GSYMS") - endif () -endif () - -option(USE_MKID "Build the idutils database." ON) -if (USE_MKID) - find_program(MKID "mkid") - if (NOT MKID MATCHES NOTFOUND) - add_custom_command( - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/ID" - COMMAND ${MKID} ${all_srcs} ${all_hdrs} - DEPENDS ${all_srcs} ${all_hdrs} install_tdb_h generate_config_h generate_log_code - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}") - add_custom_target(build_MKID ALL DEPENDS - "${CMAKE_CURRENT_SOURCE_DIR}/ID") - endif () -endif () diff --git a/storage/tokudb/ft-index/cmake_modules/TokuFeatureDetection.cmake b/storage/tokudb/ft-index/cmake_modules/TokuFeatureDetection.cmake index 59dff0aadd4..e7fd27525d5 100644 --- a/storage/tokudb/ft-index/cmake_modules/TokuFeatureDetection.cmake +++ b/storage/tokudb/ft-index/cmake_modules/TokuFeatureDetection.cmake @@ -2,11 +2,6 @@ find_package(Threads) find_package(ZLIB REQUIRED) -option(USE_BDB "Build some tools and tests with bdb (requires a proper BerkeleyDB include directory and library)." ON) -if(USE_BDB) - find_package(BDB REQUIRED) -endif() - option(USE_VALGRIND "Build to run safely under valgrind (often slower)." 
ON) if(USE_VALGRIND) find_package(Valgrind REQUIRED) diff --git a/storage/tokudb/ft-index/cmake_modules/TokuThirdParty.cmake b/storage/tokudb/ft-index/cmake_modules/TokuThirdParty.cmake index 461390ffb7c..cb474c385af 100644 --- a/storage/tokudb/ft-index/cmake_modules/TokuThirdParty.cmake +++ b/storage/tokudb/ft-index/cmake_modules/TokuThirdParty.cmake @@ -3,35 +3,34 @@ include(ExternalProject) if (CMAKE_PROJECT_NAME STREQUAL TokuDB) ## add jemalloc with an external project set(JEMALLOC_SOURCE_DIR "${TokuDB_SOURCE_DIR}/third_party/jemalloc" CACHE FILEPATH "Where to find jemalloc sources.") - if (NOT EXISTS "${JEMALLOC_SOURCE_DIR}/configure") - message(FATAL_ERROR "Can't find jemalloc sources. Please check them out to ${JEMALLOC_SOURCE_DIR} or modify JEMALLOC_SOURCE_DIR.") - endif () - set(jemalloc_configure_opts "CC=${CMAKE_C_COMPILER}" "--with-jemalloc-prefix=" "--with-private-namespace=tokudb_jemalloc_internal_" "--enable-cc-silence") - option(JEMALLOC_DEBUG "Build jemalloc with --enable-debug." OFF) - if (JEMALLOC_DEBUG) - list(APPEND jemalloc_configure_opts --enable-debug) - endif () - ExternalProject_Add(build_jemalloc - PREFIX jemalloc - SOURCE_DIR "${JEMALLOC_SOURCE_DIR}" - CONFIGURE_COMMAND - "${JEMALLOC_SOURCE_DIR}/configure" ${jemalloc_configure_opts} - "--prefix=${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc" - ) + if (EXISTS "${JEMALLOC_SOURCE_DIR}/configure") + set(jemalloc_configure_opts "CC=${CMAKE_C_COMPILER}" "--with-jemalloc-prefix=" "--with-private-namespace=tokudb_jemalloc_internal_" "--enable-cc-silence") + option(JEMALLOC_DEBUG "Build jemalloc with --enable-debug." OFF) + if (JEMALLOC_DEBUG) + list(APPEND jemalloc_configure_opts --enable-debug) + endif () + ExternalProject_Add(build_jemalloc + PREFIX jemalloc + SOURCE_DIR "${JEMALLOC_SOURCE_DIR}" + CONFIGURE_COMMAND + "${JEMALLOC_SOURCE_DIR}/configure" ${jemalloc_configure_opts} + "--prefix=${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc" + ) - add_library(jemalloc STATIC IMPORTED GLOBAL) - set_target_properties(jemalloc PROPERTIES IMPORTED_LOCATION - "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib/libjemalloc_pic.a") - add_dependencies(jemalloc build_jemalloc) - add_library(jemalloc_nopic STATIC IMPORTED GLOBAL) - set_target_properties(jemalloc_nopic PROPERTIES IMPORTED_LOCATION - "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib/libjemalloc.a") - add_dependencies(jemalloc_nopic build_jemalloc) + add_library(jemalloc STATIC IMPORTED GLOBAL) + set_target_properties(jemalloc PROPERTIES IMPORTED_LOCATION + "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib/libjemalloc_pic.a") + add_dependencies(jemalloc build_jemalloc) + add_library(jemalloc_nopic STATIC IMPORTED GLOBAL) + set_target_properties(jemalloc_nopic PROPERTIES IMPORTED_LOCATION + "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib/libjemalloc.a") + add_dependencies(jemalloc_nopic build_jemalloc) - # detect when we are being built as a subproject - if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING) - install(DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib" DESTINATION . - COMPONENT tokukv_libs_extra) + # detect when we are being built as a subproject + if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING) + install(DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/jemalloc/lib" DESTINATION . 
+ COMPONENT tokukv_libs_extra) + endif () endif () endif () diff --git a/storage/tokudb/ft-index/examples/CMakeLists.txt b/storage/tokudb/ft-index/examples/CMakeLists.txt deleted file mode 100644 index 01ad01aa8d2..00000000000 --- a/storage/tokudb/ft-index/examples/CMakeLists.txt +++ /dev/null @@ -1,16 +0,0 @@ -# detect when we are being built as a subproject -if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING) - install( - FILES - db-insert.c - db-insert-multiple.c - db-scan.c - db-update.c - Makefile - README.examples - DESTINATION - examples - COMPONENT - tokukv_examples - ) -endif ()
\ No newline at end of file diff --git a/storage/tokudb/ft-index/examples/Makefile b/storage/tokudb/ft-index/examples/Makefile deleted file mode 100644 index 7f11d23dfd8..00000000000 --- a/storage/tokudb/ft-index/examples/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -SRCS = $(wildcard *.c) -TARGETS = $(patsubst %.c,%,$(SRCS)) $(patsubst %.c,%-bdb,$(SRCS)) -CPPFLAGS = -I../include -D_GNU_SOURCE -CFLAGS = -g -std=c99 -Wall -Wextra -Werror -Wno-missing-field-initializers -ifeq ($(USE_STATIC_LIBS),1) -LIBTOKUDB = tokufractaltree_static -LIBTOKUPORTABILITY = tokuportability_static -else -LIBTOKUDB = tokufractaltree -LIBTOKUPORTABILITY = tokuportability -endif -LDFLAGS = -L../lib -l$(LIBTOKUDB) -l$(LIBTOKUPORTABILITY) -Wl,-rpath,../lib -lpthread -lz -ldl - -default local: $(TARGETS) - -%: %.c - $(CC) $(CPPFLAGS) $(CFLAGS) $^ -o $@ $(LDFLAGS) - -%-bdb: %.c - $(CC) -D_GNU_SOURCE -DBDB $(CFLAGS) $^ -o $@ -ldb - -check: $(TARGETS) - ./db-insert -x && ./db-scan --lwc --prelock --prelockflag - -checknox: $(TARGETS) - ./db-insert && ./db-scan --nox --lwc --prelock --prelockflag - -clean: - rm -rf $(TARGETS) bench.* update.env.* insertm.env.* diff --git a/storage/tokudb/ft-index/examples/README.examples b/storage/tokudb/ft-index/examples/README.examples deleted file mode 100644 index 2fc6071d686..00000000000 --- a/storage/tokudb/ft-index/examples/README.examples +++ /dev/null @@ -1,85 +0,0 @@ -The examples includes a pair of programs that can be compiled to use either the Berkeley DB library or the Tokutek Fractal Tree index library. - -Note: The file formats are different from TokuDB and Berkley DB. Thus -you cannot access a database created by Berkeley DB using the Tokutek -DB, or vice-versa. - -db-insert is a program that inserts random key-value pairs into a database. - -db-scan is a program that scans through the key-value pairs, reading every row, from a database. - -db-update is a program that upserts key-value pairs into a database. If the key already exists it increment a count in the value. - -db-insert-multiple is a program and inserts key-value pairs into multiple databases. This is is now TokuDB maintains consistent -secondary databases. - -To build it and run it (it's been tested on Fedora 10): -$ make (Makes the binaries) -Run the insertion workload under TokuDB: -$ ./db-insert -Run the insertion workload under BDB: -$ ./db-insert-bdb - -Here is what the output looks like (this on a Thinkpad X61s laptop -running Fedora 10). BDB is a little faster for sequential insertions -(the first three columns), but much much slower for random insertions -(the next 3 columns), so that TokuDB is faster on combined workload. 
- -$ ./db-insert -serial and random insertions of 1048576 per batch -serial 2.609965s 401759/s random 10.983798s 95466/s cumulative 13.593869s 154272/s -serial 3.053433s 343409/s random 12.008670s 87318/s cumulative 28.656115s 146367/s -serial 5.198312s 201715/s random 15.087426s 69500/s cumulative 48.954605s 128516/s -serial 6.096396s 171999/s random 13.550688s 77382/s cumulative 68.638321s 122215/s -Shutdown 4.025110s -Total time 72.677498s for 8388608 insertions = 115422/s -$ ./db-insert-bdb -serial and random insertions of 1048576 per batch -serial 2.623888s 399627/s random 8.770850s 119552/s cumulative 11.394805s 184045/s -serial 3.081946s 340232/s random 21.046589s 49822/s cumulative 35.523434s 118071/s -serial 14.160498s 74049/s random 497.117523s 2109/s cumulative 546.804504s 11506/s -serial 1.534212s 683462/s random 1128.525146s 929/s cumulative 1676.863892s 5003/s -Shutdown 195.879242s -Total time 1872.746582s for 8388608 insertions = 4479/s - -The files are smaller for TokuDB than BDB. - -$ ls -lh bench.tokudb/ -total 39M --rwxrwxr-x 1 bradley bradley 39M 2009-07-28 15:36 bench.db -$ ls -lh bench.bdb/ -total 322M --rw-r--r-- 1 bradley bradley 322M 2009-07-28 16:14 bench.db - -When scanning the table, one can run out of locks with BDB. There are ways around it (increase the lock table size). - -$ ./db-scan-bdb --nox -Lock table is out of available object entries -db-scan-bdb: db-scan.c:177: scanscan_hwc: Assertion `r==(-30988)' failed. -Aborted - -TokuDB is fine on a big table scan. - -$ ./db-scan --nox -Scan 33162304 bytes (2072644 rows) in 7.924463s at 4.184801MB/s -Scan 33162304 bytes (2072644 rows) in 3.062239s at 10.829431MB/s -0:3 1:53 2:56 -miss=3 hit=53 wait_reading=0 wait=0 -VmPeak: 244668 kB -VmHWM: 68096 kB -VmRSS: 1232 kB - -The update-bdb program upserts 1B rows into a BDB database. When the database gets larger than memory, the throughput -should tank since every update needs to read a block from the storage system. The storage system becomes the performance -bottleneck. The program uses 1 1GB cache in front of the kernel's file system buffer cache. The program should hit the wall -at about 300M rows on a machine with 16GB of memory since keys are 8 bytes and values are 8 bytes in size. - -$ ./db-update-bdb - -The update program upserts 1B rows into a TokuDB database. Throughput should be not degrade significantly since the cost -of the storage system reads is amortized over 1000's of update operations. One should expect TokuDB to be at least 50 times -faster than BDB. - -$ ./db-update - -There isn't much documentation for the Tokutek Fractal Tree index library, but most of the API is like Berkeley DB's. 
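Note: the removed examples above all follow the same Berkeley-DB-style pattern visible in the deleted sources: create a DB_ENV, open a DB_BTREE dictionary inside a transaction, insert DBT key/value pairs with DB->put, and commit. A minimal sketch of that pattern is shown below, assuming the generated db.h header; the environment directory name, row count, and file name are illustrative assumptions, and the code is not taken from the deleted files.

```c
/* Hedged sketch of the insert pattern the deleted examples demonstrate.
 * Assumptions (not from the removed files): environment directory
 * "insert.env", dictionary name "bench.db", 1000 rows. */
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "db.h"

int main(void) {
    DB_ENV *env = NULL;
    DB *db = NULL;
    DB_TXN *txn = NULL;
    int r;

    /* Create a fresh environment directory (fails if it already exists). */
    r = mkdir("insert.env", S_IRWXU); assert(r == 0);

    /* Create and open the environment with transactions and logging. */
    r = db_env_create(&env, 0); assert(r == 0);
    r = env->open(env, "insert.env",
                  DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL |
                  DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG,
                  S_IRUSR | S_IWUSR); assert(r == 0);

    /* Open one dictionary inside a transaction. */
    r = db_create(&db, env, 0); assert(r == 0);
    r = env->txn_begin(env, NULL, &txn, 0); assert(r == 0);
    r = db->open(db, txn, "bench.db", NULL, DB_BTREE, DB_CREATE,
                 S_IRUSR | S_IWUSR); assert(r == 0);

    /* Insert fixed-size key/value pairs, then commit. */
    for (long i = 0; i < 1000; i++) {
        long k = i, v = i * i;
        DBT key = { .data = &k, .size = sizeof k };
        DBT val = { .data = &v, .size = sizeof v };
        r = db->put(db, txn, &key, &val, 0); assert(r == 0);
    }
    r = txn->commit(txn, 0); assert(r == 0);

    r = db->close(db, 0); assert(r == 0);
    r = env->close(env, 0); assert(r == 0);
    return 0;
}
```

The deleted db-insert-multiple.c additionally converts keys to big-endian with htonl64 (presumably so the default memcmp-style comparison sorts them numerically), batches commits every rows_per_txn inserts, and fans each row out to several dictionaries via DB_ENV->put_multiple with a generate_row callback.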
diff --git a/storage/tokudb/ft-index/examples/db-insert-multiple.c b/storage/tokudb/ft-index/examples/db-insert-multiple.c deleted file mode 100644 index e77dd94547f..00000000000 --- a/storage/tokudb/ft-index/examples/db-insert-multiple.c +++ /dev/null @@ -1,510 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. 
If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -// measure the performance of insertions into multiple dictionaries using ENV->put_multiple -// the table schema is t(a bigint, b bigint, c bigint, d bigint, primary key(a), key(b), key(c,d), clustering key(d)) -// the primary key(a) is represented with key=a and value=b,c,d -// the key(b) index is represented with key=b,a and no value -// the key(c,d) index is represented with key=c,d,a and no value -// the clustering key(d) is represented with key=d,a and value=b,c -// a is auto increment -// b, c and d are random - -#include "../include/toku_config.h" -#include <stdio.h> -#include <stdbool.h> -#include <stdlib.h> -#include <assert.h> -#include <string.h> -#include <sys/stat.h> -#include <sys/time.h> -#if defined(HAVE_BYTESWAP_H) -# include <byteswap.h> -#elif defined(HAVE_LIBKERN_OSBYTEORDER_H) -# include <libkern/OSByteOrder.h> -# define bswap_64 OSSwapInt64 -#endif -#include <arpa/inet.h> -#include "db.h" - -static int force_multiple = 1; - -struct table { - int ndbs; - DB **dbs; -#if defined(TOKUDB) - DBT *mult_keys; - DBT *mult_vals; - uint32_t *mult_flags; -#endif -}; - -#if defined(TOKUDB) -static void table_init_dbt(DBT *dbt, size_t length) { - dbt->flags = DB_DBT_USERMEM; - dbt->data = malloc(length); - dbt->ulen = length; - dbt->size = 0; -} - -static void table_destroy_dbt(DBT *dbt) { - free(dbt->data); -} -#endif - -static void table_init(struct table *t, int ndbs, DB **dbs, size_t key_length __attribute__((unused)), size_t val_length __attribute__((unused))) { - t->ndbs = ndbs; - t->dbs = dbs; -#if defined(TOKUDB) - t->mult_keys = calloc(ndbs, sizeof (DBT)); - int i; - for (i = 0; i < ndbs; i++) - table_init_dbt(&t->mult_keys[i], key_length); - t->mult_vals = calloc(ndbs, sizeof (DBT)); - for (i = 0; i < ndbs; i++) - table_init_dbt(&t->mult_vals[i], val_length); - t->mult_flags = calloc(ndbs, sizeof (uint32_t)); - for (i = 0; i < ndbs; i++) - t->mult_flags[i] = 0; -#endif -} - -static void table_destroy(struct table *t) { -#if defined(TOKUDB) - int i; - for (i = 0; i < t->ndbs; i++) - table_destroy_dbt(&t->mult_keys[i]); - free(t->mult_keys); - for (i = 0; i < t->ndbs; i++) - table_destroy_dbt(&t->mult_vals[i]); - free(t->mult_vals); - free(t->mult_flags); -#else - assert(t); -#endif -} - -static int verbose = 0; - -static long random64(void) { - return ((long)random() << 32LL) + (long)random(); -} - -static long htonl64(long x) { -#if BYTE_ORDER == LITTLE_ENDIAN - return bswap_64(x); -#else -#error -#endif -} - -#if defined(TOKUDB) -static int my_generate_row_for_put(DB *dest_db, DB 
*src_db, DBT *dest_key, DBT *dest_val, const DBT *src_key, const DBT *src_val) { - assert(src_db); - assert(dest_key->flags == DB_DBT_USERMEM && dest_key->ulen >= 4 * 8); - assert(dest_val->flags == DB_DBT_USERMEM && dest_val->ulen >= 4 * 8); - int index_num; - assert(dest_db->descriptor->dbt.size == sizeof index_num); - memcpy(&index_num, dest_db->descriptor->dbt.data, sizeof index_num); - switch (htonl(index_num) % 4) { - case 0: - // dest_key = src_key - dest_key->size = src_key->size; - memcpy(dest_key->data, src_key->data, src_key->size); - // dest_val = src_val - dest_val->size = src_val->size; - memcpy(dest_val->data, src_val->data, src_val->size); - break; - case 1: - // dest_key = b,a - dest_key->size = 2 * 8; - memcpy((char *)dest_key->data + 0, (char *)src_val->data + 0, 8); - memcpy((char *)dest_key->data + 8, (char *)src_key->data + 0, 8); - // dest_val = null - dest_val->size = 0; - break; - case 2: - // dest_key = c,d,a - dest_key->size = 3 * 8; - memcpy((char *)dest_key->data + 0, (char *)src_val->data + 8, 8); - memcpy((char *)dest_key->data + 8, (char *)src_val->data + 16, 8); - memcpy((char *)dest_key->data + 16, (char *)src_key->data + 0, 8); - // dest_val = null - dest_val->size = 0; - break; - case 3: - // dest_key = d,a - dest_key->size = 2 * 8; - memcpy((char *)dest_key->data + 0, (char *)src_val->data + 16, 8); - memcpy((char *)dest_key->data + 8, (char *)src_key->data + 0, 8); - // dest_val = b,c - dest_val->size = 2 * 8; - memcpy((char *)dest_val->data + 0, (char *)src_val->data + 0, 8); - memcpy((char *)dest_val->data + 8, (char *)src_val->data + 8, 8); - break; - default: - assert(0); - } - return 0; -} - -#else - -static int my_secondary_key(DB *db, const DBT *src_key, const DBT *src_val, DBT *dest_key) { - assert(dest_key->flags == 0 && dest_key->data == NULL); - dest_key->flags = DB_DBT_APPMALLOC; - dest_key->data = malloc(4 * 8); assert(dest_key->data); - switch ((intptr_t)db->app_private % 4) { - case 0: - // dest_key = src_key - dest_key->size = src_key->size; - memcpy(dest_key->data, src_key->data, src_key->size); - break; - case 1: - // dest_key = b,a - dest_key->size = 2 * 8; - memcpy((char *)dest_key->data + 0, (char *)src_val->data + 0, 8); - memcpy((char *)dest_key->data + 8, (char *)src_key->data + 0, 8); - break; - case 2: - // dest_key = c,d,a - dest_key->size = 3 * 8; - memcpy((char *)dest_key->data + 0, (char *)src_val->data + 8, 8); - memcpy((char *)dest_key->data + 8, (char *)src_val->data + 16, 8); - memcpy((char *)dest_key->data + 16, (char *)src_key->data + 0, 8); - break; - case 3: - // dest_key = d,a,b,c - dest_key->size = 4 * 8; - memcpy((char *)dest_key->data + 0, (char *)src_val->data + 16, 8); - memcpy((char *)dest_key->data + 8, (char *)src_key->data + 0, 8); - memcpy((char *)dest_key->data + 16, (char *)src_val->data + 0, 8); - memcpy((char *)dest_key->data + 24, (char *)src_val->data + 8, 8); - break; - default: - assert(0); - } - return 0; -} -#endif - -static void insert_row(DB_ENV *db_env, struct table *t, DB_TXN *txn, long a, long b, long c, long d) { - int r; - - // generate the primary key - char key_buffer[8]; - a = htonl64(a); - memcpy(key_buffer, &a, sizeof a); - - // generate the primary value - char val_buffer[3*8]; - b = htonl64(b); - memcpy(val_buffer+0, &b, sizeof b); - c = htonl64(c); - memcpy(val_buffer+8, &c, sizeof c); - d = htonl64(d); - memcpy(val_buffer+16, &d, sizeof d); - - DBT key = { .data = key_buffer, .size = sizeof key_buffer }; - DBT value = { .data = val_buffer, .size = sizeof val_buffer }; -#if 
defined(TOKUDB) - if (!force_multiple && t->ndbs == 1) { - r = t->dbs[0]->put(t->dbs[0], txn, &key, &value, t->mult_flags[0]); assert(r == 0); - } else { - r = db_env->put_multiple(db_env, t->dbs[0], txn, &key, &value, t->ndbs, &t->dbs[0], t->mult_keys, t->mult_vals, t->mult_flags); assert(r == 0); - } -#else - assert(db_env); - r = t->dbs[0]->put(t->dbs[0], txn, &key, &value, 0); assert(r == 0); -#endif -} - -static inline float tdiff (struct timeval *a, struct timeval *b) { - return (a->tv_sec - b->tv_sec) +1e-6*(a->tv_usec - b->tv_usec); -} - -static void insert_all(DB_ENV *db_env, struct table *t, long nrows, long max_rows_per_txn, long key_range, long rows_per_report, bool do_txn) { - int r; - - struct timeval tstart; - r = gettimeofday(&tstart, NULL); assert(r == 0); - struct timeval tlast = tstart; - DB_TXN *txn = NULL; - if (do_txn) { - r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0); - } - long n_rows_per_txn = 0; - long rowi; - for (rowi = 0; rowi < nrows; rowi++) { - long a = rowi; - long b = random64() % key_range; - long c = random64() % key_range; - long d = random64() % key_range; - insert_row(db_env, t, txn, a, b, c, d); - n_rows_per_txn++; - - // maybe commit - if (do_txn && n_rows_per_txn == max_rows_per_txn) { - r = txn->commit(txn, 0); assert(r == 0); - r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0); - n_rows_per_txn = 0; - } - - // maybe report performance - if (((rowi + 1) % rows_per_report) == 0) { - struct timeval tnow; - r = gettimeofday(&tnow, NULL); assert(r == 0); - float last_time = tdiff(&tnow, &tlast); - float total_time = tdiff(&tnow, &tstart); - printf("%ld %.3f %.0f/s %.0f/s\n", rowi + 1, last_time, rows_per_report/last_time, rowi/total_time); fflush(stdout); - tlast = tnow; - } - } - - if (do_txn) { - r = txn->commit(txn, 0); assert(r == 0); - } - struct timeval tnow; - r = gettimeofday(&tnow, NULL); assert(r == 0); - printf("total %ld %.3f %.0f/s\n", nrows, tdiff(&tnow, &tstart), nrows/tdiff(&tnow, &tstart)); fflush(stdout); -} - -int main(int argc, char *argv[]) { -#if defined(TOKDUB) - char *db_env_dir = "insertm.env.tokudb"; -#else - char *db_env_dir = "insertm.env.bdb"; -#endif - int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG; - long rows = 100000000; - long rows_per_txn = 1000; - long rows_per_report = 100000; - long key_range = 100000; - bool do_txn = true; - u_int32_t pagesize = 0; - u_int64_t cachesize = 1000000000; - int ndbs = 4; -#if defined(TOKUDB) - u_int32_t checkpoint_period = 60; -#endif - - int i; - for (i = 1; i < argc; i++) { - char *arg = argv[i]; - if (strcmp(arg, "--verbose") == 0) { - verbose++; - continue; - } - if (strcmp(arg, "--ndbs") == 0 && i+1 < argc) { - ndbs = atoi(argv[++i]); - continue; - } - if (strcmp(arg, "--rows") == 0 && i+1 < argc) { - rows = atol(argv[++i]); - continue; - } - if (strcmp(arg, "--rows_per_txn") == 0 && i+1 < argc) { - rows_per_txn = atol(argv[++i]); - continue; - } - if (strcmp(arg, "--rows_per_report") == 0 && i+1 < argc) { - rows_per_report = atol(argv[++i]); - continue; - } - if (strcmp(arg, "--key_range") == 0 && i+1 < argc) { - key_range = atol(argv[++i]); - continue; - } - if (strcmp(arg, "--txn") == 0 && i+1 < argc) { - do_txn = atoi(argv[++i]); - continue; - } - if (strcmp(arg, "--pagesize") == 0 && i+1 < argc) { - pagesize = atoi(argv[++i]); - continue; - } - if (strcmp(arg, "--cachesize") == 0 && i+1 < argc) { - cachesize = atol(argv[++i]); - continue; - } - if (strcmp(arg, "--force_multiple") == 0 && i+1 
< argc) { - force_multiple = atoi(argv[++i]); - continue; - } -#if defined(TOKUDB) - if (strcmp(arg, "--checkpoint_period") == 0 && i+1 < argc) { - checkpoint_period = atoi(argv[++i]); - continue; - } -#endif - - assert(0); - } - - int r; - char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1]; - snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir); - r = system(rm_cmd); assert(r == 0); - - r = mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0); - - // create and open the env - DB_ENV *db_env = NULL; - r = db_env_create(&db_env, 0); assert(r == 0); - if (!do_txn) - db_env_open_flags &= ~(DB_INIT_TXN | DB_INIT_LOG); - if (cachesize) { - const u_int64_t gig = 1 << 30; - r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0); - } -#if defined(TOKUDB) - r = db_env->set_generate_row_callback_for_put(db_env, my_generate_row_for_put); assert(r == 0); -#endif - r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0); -#if defined(TOKUDB) - if (checkpoint_period) { - r = db_env->checkpointing_set_period(db_env, checkpoint_period); assert(r == 0); - u_int32_t period; - r = db_env->checkpointing_get_period(db_env, &period); assert(r == 0 && period == checkpoint_period); - } -#endif - - - // create the db - DB *dbs[ndbs]; - for (i = 0; i < ndbs; i++) { - DB *db = NULL; - r = db_create(&db, db_env, 0); assert(r == 0); - DB_TXN *create_txn = NULL; - if (do_txn) { - r = db_env->txn_begin(db_env, NULL, &create_txn, 0); assert(r == 0); - } - if (pagesize) { - r = db->set_pagesize(db, pagesize); assert(r == 0); - } - char db_filename[32]; sprintf(db_filename, "test%d", i); - r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0); - -#if defined(TOKUDB) - DESCRIPTOR_S new_descriptor; - int index_num = htonl(i); - new_descriptor.dbt.data = &index_num; - new_descriptor.dbt.size = sizeof i; - r = db->change_descriptor(db, create_txn, &new_descriptor.dbt, 0); assert(r == 0); -#else - db->app_private = (void *) (intptr_t) i; - if (i > 0) { - r = dbs[0]->associate(dbs[0], create_txn, db, my_secondary_key, 0); assert(r == 0); - } -#endif - if (do_txn) { - r = create_txn->commit(create_txn, 0); assert(r == 0); - } - dbs[i] = db; - } - - // insert all rows - struct table table; - table_init(&table, ndbs, dbs, 4 * 8, 4 * 8); - - insert_all(db_env, &table, rows, rows_per_txn, key_range, rows_per_report, do_txn); - - table_destroy(&table); - - // shutdown - for (i = 0; i < ndbs; i++) { - DB *db = dbs[i]; - r = db->close(db, 0); assert(r == 0); db = NULL; - } - r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL; - - return 0; -} diff --git a/storage/tokudb/ft-index/examples/db-insert.c b/storage/tokudb/ft-index/examples/db-insert.c deleted file mode 100644 index 87cd9d35e21..00000000000 --- a/storage/tokudb/ft-index/examples/db-insert.c +++ /dev/null @@ -1,610 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY 
PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." 
-#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -#include <stdio.h> -#include <stdlib.h> -#include <unistd.h> -// Define BDB if you want to compile this to use Berkeley DB -#include <stdint.h> -#include <inttypes.h> -#ifdef BDB -#include <sys/types.h> -#include <db.h> -#define DIRSUF bdb -#else -#include <tokudb.h> -#define DIRSUF tokudb -#endif - -#include <assert.h> -#include <errno.h> -#include <string.h> -#include <sys/stat.h> -#include <sys/time.h> - -static inline float toku_tdiff (struct timeval *a, struct timeval *b) { - return (a->tv_sec - b->tv_sec) +1e-6*(a->tv_usec - b->tv_usec); -} - -#if !defined(DB_PRELOCKED_WRITE) -#define NO_DB_PRELOCKED -#define DB_PRELOCKED_WRITE 0 -#endif - -int verbose=1; - -enum { SERIAL_SPACING = 1<<6 }; -enum { DEFAULT_ITEMS_TO_INSERT_PER_ITERATION = 1<<20 }; -enum { DEFAULT_ITEMS_PER_TRANSACTION = 1<<14 }; - -static void insert (long long v); -#define CKERR(r) ({ int __r = r; if (__r!=0) fprintf(stderr, "%s:%d error %d %s\n", __FILE__, __LINE__, __r, db_strerror(r)); assert(__r==0); }) -#define CKERR2(r,rexpect) if (r!=rexpect) fprintf(stderr, "%s:%d error %d %s\n", __FILE__, __LINE__, r, db_strerror(r)); assert(r==rexpect); - -/* default test parameters */ -int keysize = sizeof (long long); -int valsize = sizeof (long long); -int pagesize = 0; -long long cachesize = 1000000000; // 1GB -int dupflags = 0; -int noserial = 0; // Don't do the serial stuff -int norandom = 0; // Don't do the random stuff -int prelock = 0; -int prelockflag = 0; -int items_per_transaction = DEFAULT_ITEMS_PER_TRANSACTION; -int items_per_iteration = DEFAULT_ITEMS_TO_INSERT_PER_ITERATION; -int finish_child_first = 0; // Commit or abort child first (before doing so to the parent). No effect if child does not exist. -int singlex_child = 0; // Do a single transaction, but do all work with a child -int singlex = 0; // Do a single transaction -int singlex_create = 0; // Create the db using the single transaction (only valid if singlex) -int insert1first = 0; // insert 1 before doing the rest -int do_transactions = 0; -int if_transactions_do_logging = DB_INIT_LOG; // set this to zero if we want no logging when transactions are used -int do_abort = 0; -int n_insertions_since_txn_began=0; -int env_open_flags = DB_CREATE|DB_PRIVATE|DB_INIT_MPOOL; -u_int32_t put_flags = 0; -double compressibility = -1; // -1 means make it very compressible. 1 means use random bits everywhere. 2 means half the bits are random. -int do_append = 0; -u_int32_t checkpoint_period = 60; - -static void do_prelock(DB* db, DB_TXN* txn) { - if (prelock) { -#if !defined(NO_DB_PRELOCKED) - int r = db->pre_acquire_table_lock(db, txn); - assert(r==0); -#else - (void) db; (void) txn; -#endif - } -} - -#define STRINGIFY2(s) #s -#define STRINGIFY(s) STRINGIFY2(s) -const char *dbdir = "./bench." 
STRINGIFY(DIRSUF); -char *dbfilename = "bench.db"; -char *dbname; - -DB_ENV *dbenv; -DB *db; -DB_TXN *parenttid=0; -DB_TXN *tid=0; - - -static void benchmark_setup (void) { - int r; - - if (!do_append) { - char unlink_cmd[strlen(dbdir) + strlen("rm -rf ") + 1]; - snprintf(unlink_cmd, sizeof(unlink_cmd), "rm -rf %s", dbdir); - //printf("unlink_cmd=%s\n", unlink_cmd); - system(unlink_cmd); - - if (strcmp(dbdir, ".") != 0) { - r = mkdir(dbdir,S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH); - assert(r == 0); - } - } - - r = db_env_create(&dbenv, 0); - assert(r == 0); - -#if !defined(TOKUDB) -#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR <= 4 - if (dbenv->set_lk_max) { - r = dbenv->set_lk_max(dbenv, items_per_transaction*2); - assert(r==0); - } -#elif (DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR <= 7) || DB_VERSION_MAJOR >= 5 - if (dbenv->set_lk_max_locks) { - r = dbenv->set_lk_max_locks(dbenv, items_per_transaction*2); - assert(r==0); - } - if (dbenv->set_lk_max_lockers) { - r = dbenv->set_lk_max_lockers(dbenv, items_per_transaction*2); - assert(r==0); - } - if (dbenv->set_lk_max_objects) { - r = dbenv->set_lk_max_objects(dbenv, items_per_transaction*2); - assert(r==0); - } -#else -#error -#endif -#endif - - if (dbenv->set_cachesize) { - r = dbenv->set_cachesize(dbenv, cachesize / (1024*1024*1024), cachesize % (1024*1024*1024), 1); - if (r != 0) - printf("WARNING: set_cachesize %d\n", r); - } - { - r = dbenv->open(dbenv, dbdir, env_open_flags, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH); - assert(r == 0); - } - -#if defined(TOKUDB) - if (checkpoint_period) { - printf("set checkpoint_period %u\n", checkpoint_period); - r = dbenv->checkpointing_set_period(dbenv, checkpoint_period); assert(r == 0); - u_int32_t period; - r = dbenv->checkpointing_get_period(dbenv, &period); assert(r == 0 && period == checkpoint_period); - } -#endif - - r = db_create(&db, dbenv, 0); - assert(r == 0); - - if (do_transactions) { - r=dbenv->txn_begin(dbenv, 0, &tid, 0); CKERR(r); - } - if (pagesize && db->set_pagesize) { - r = db->set_pagesize(db, pagesize); - assert(r == 0); - } - if (dupflags) { - r = db->set_flags(db, dupflags); - assert(r == 0); - } - r = db->open(db, tid, dbfilename, NULL, DB_BTREE, DB_CREATE, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH); - if (r!=0) fprintf(stderr, "errno=%d, %s\n", errno, strerror(errno)); - assert(r == 0); - if (insert1first) { - if (do_transactions) { - r=tid->commit(tid, 0); - assert(r==0); - tid = NULL; - r=dbenv->txn_begin(dbenv, 0, &tid, 0); CKERR(r); - } - insert(-1); - if (singlex) { - r=tid->commit(tid, 0); - assert(r==0); - tid = NULL; - r=dbenv->txn_begin(dbenv, 0, &tid, 0); CKERR(r); - } - } - else if (singlex && !singlex_create) { - r=tid->commit(tid, 0); - assert(r==0); - tid = NULL; - r=dbenv->txn_begin(dbenv, 0, &tid, 0); CKERR(r); - } - if (do_transactions) { - if (singlex) - do_prelock(db, tid); - else { - r=tid->commit(tid, 0); - assert(r==0); - tid = NULL; - } - } - if (singlex_child) { - parenttid = tid; - tid = NULL; - r=dbenv->txn_begin(dbenv, parenttid, &tid, 0); CKERR(r); - } - -} - -static void benchmark_shutdown (void) { - int r; - - if (do_transactions && singlex && !insert1first && (singlex_create || prelock)) { -#if defined(TOKUDB) - //There should be a single 'truncate' in the rollback instead of many 'insert' entries. - struct txn_stat *s; - r = tid->txn_stat(tid, &s); - assert(r==0); - //TODO: #1125 Always do the test after performance testing is done. 
- if (singlex_child) fprintf(stderr, "SKIPPED 'small rollback' test for child txn\n"); - else - assert(s->rollback_raw_count < 100); // gross test, not worth investigating details - free(s); - //system("ls -l bench.tokudb"); -#endif - } - if (do_transactions && singlex) { - if (!singlex_child || finish_child_first) { - assert(tid); - r = (do_abort ? tid->abort(tid) : tid->commit(tid, 0)); assert(r==0); - tid = NULL; - } - if (singlex_child) { - assert(parenttid); - r = (do_abort ? parenttid->abort(parenttid) : parenttid->commit(parenttid, 0)); assert(r==0); - parenttid = NULL; - } - else - assert(!parenttid); - } - assert(!tid); - assert(!parenttid); - - r = db->close(db, 0); - assert(r == 0); - r = dbenv->close(dbenv, 0); - assert(r == 0); -} - -static void long_long_to_array (unsigned char *a, int array_size, unsigned long long l) { - int i; - for (i=0; i<8 && i<array_size; i++) - a[i] = (l>>(56-8*i))&0xff; -} - -static DBT *fill_dbt(DBT *dbt, const void *data, int size) { - memset(dbt, 0, sizeof *dbt); - dbt->size = size; - dbt->data = (void *) data; - return dbt; -} - -// Fill array with 0's if compressibilty==-1, otherwise fill array with data that is likely to compress by a factor of compressibility. -static void fill_array (unsigned char *data, int size) { - memset(data, 0, size); - if (compressibility>0) { - int i; - for (i=0; i<size/compressibility; i++) { - data[i] = (unsigned char) random(); - } - } -} - -static void insert (long long v) { - unsigned char kc[keysize], vc[valsize]; - DBT kt, vt; - fill_array(kc, sizeof kc); - long_long_to_array(kc, keysize, v); // Fill in the array first, then write the long long in. - fill_array(vc, sizeof vc); - long_long_to_array(vc, valsize, v); - int r = db->put(db, tid, fill_dbt(&kt, kc, keysize), fill_dbt(&vt, vc, valsize), put_flags); - CKERR(r); - if (do_transactions) { - if (n_insertions_since_txn_began>=items_per_transaction && !singlex) { - n_insertions_since_txn_began=0; - r = tid->commit(tid, 0); assert(r==0); - tid = NULL; - r=dbenv->txn_begin(dbenv, 0, &tid, 0); assert(r==0); - do_prelock(db, tid); - n_insertions_since_txn_began=0; - } - n_insertions_since_txn_began++; - } -} - -static void serial_insert_from (long long from) { - long long i; - if (do_transactions && !singlex) { - int r = dbenv->txn_begin(dbenv, 0, &tid, 0); assert(r==0); - do_prelock(db, tid); - { - DBT k,v; - r=db->put(db, tid, fill_dbt(&k, "a", 1), fill_dbt(&v, "b", 1), put_flags); - CKERR(r); - } - } - for (i=0; i<items_per_iteration; i++) { - insert((from+i)*SERIAL_SPACING); - } - if (do_transactions && !singlex) { - int r= tid->commit(tid, 0); assert(r==0); - tid=NULL; - } -} - -static long long llrandom (void) { - return (((long long)(random()))<<32) + random(); -} - -static void random_insert_below (long long below) { - long long i; - if (do_transactions && !singlex) { - int r = dbenv->txn_begin(dbenv, 0, &tid, 0); assert(r==0); - do_prelock(db, tid); - } - for (i=0; i<items_per_iteration; i++) { - insert(llrandom()%below); - } - if (do_transactions && !singlex) { - int r= tid->commit(tid, 0); assert(r==0); - tid=NULL; - } -} - -static void biginsert (long long n_elements, struct timeval *starttime) { - long long i; - struct timeval t1,t2; - int iteration; - for (i=0, iteration=0; i<n_elements; i+=items_per_iteration, iteration++) { - if (verbose) { - printf("%d ", iteration); - fflush(stdout); - } - if (!noserial) { - gettimeofday(&t1,0); - serial_insert_from(i); - gettimeofday(&t2,0); - if (verbose) { - printf("serial %9.6fs %8.0f/s ", toku_tdiff(&t2, 
&t1), items_per_iteration/toku_tdiff(&t2, &t1)); - fflush(stdout); - } - } - if (!norandom) { - gettimeofday(&t1,0); - random_insert_below((i+items_per_iteration)*SERIAL_SPACING); - gettimeofday(&t2,0); - if (verbose) { - printf("random %9.6fs %8.0f/s ", toku_tdiff(&t2, &t1), items_per_iteration/toku_tdiff(&t2, &t1)); - fflush(stdout); - } - } - if (verbose) { - printf("cumulative %9.6fs %8.0f/s\n", toku_tdiff(&t2, starttime), (((float)items_per_iteration*(!noserial+!norandom))/toku_tdiff(&t2, starttime))*(iteration+1)); - fflush(stdout); - } - } -} - -const long long default_n_items = 1LL<<22; - -static int print_usage (const char *argv0) { - fprintf(stderr, "Usage:\n"); - fprintf(stderr, " %s [-x] [--keysize KEYSIZE] [--valsize VALSIZE] [--noserial] [--norandom] [ n_iterations ]\n", argv0); - fprintf(stderr, " where\n"); - fprintf(stderr, " -x do transactions (XCOUNT transactions per iteration) (default: no transactions at all)\n"); - fprintf(stderr, " --keysize KEYSIZE sets the key size (default 8)\n"); - fprintf(stderr, " --valsize VALSIZE sets the value size (default 8)\n"); - fprintf(stderr, " --noserial causes the serial insertions to be skipped\n"); - fprintf(stderr, " --norandom causes the random insertions to be skipped\n"); - fprintf(stderr, " --cachesize CACHESIZE set the database cache size\n"); - fprintf(stderr, " --pagesize PAGESIZE sets the database page size\n"); - fprintf(stderr, " --compressibility C creates data that should compress by about a factor C. Default C is large. C is an float.\n"); - fprintf(stderr, " --xcount N how many insertions per transaction (default=%d)\n", DEFAULT_ITEMS_PER_TRANSACTION); - fprintf(stderr, " --singlex (implies -x) Run the whole job as a single transaction. (Default don't run as a single transaction.)\n"); - fprintf(stderr, " --singlex-child (implies -x) Run the whole job as a single transaction, do all work a child of that transaction.\n"); - fprintf(stderr, " --finish-child-first Commit/abort child before doing so to parent (no effect if no child).\n"); - fprintf(stderr, " --singlex-create (implies --singlex) Create the file using the single transaction (Default is to use a different transaction to create.)\n"); - fprintf(stderr, " --prelock Prelock the database.\n"); - fprintf(stderr, " --prelockflag Prelock the database and send the DB_PRELOCKED_WRITE flag.\n"); - fprintf(stderr, " --abort Abort the singlex after the transaction is over. 
(Requires --singlex.)\n"); - fprintf(stderr, " --nolog If transactions are used, then don't write the recovery log\n"); - fprintf(stderr, " --periter N how many insertions per iteration (default=%d)\n", DEFAULT_ITEMS_TO_INSERT_PER_ITERATION); - fprintf(stderr, " --env DIR\n"); - fprintf(stderr, " --append append to an existing file\n"); - fprintf(stderr, " --checkpoint-period %" PRIu32 " checkpoint period\n", checkpoint_period); - fprintf(stderr, " n_iterations how many iterations (default %lld)\n", default_n_items/DEFAULT_ITEMS_TO_INSERT_PER_ITERATION); - - return 1; -} - -#define UU(x) x __attribute__((__unused__)) - -int main (int argc, const char *argv[]) { - struct timeval t1,t2,t3; - long long total_n_items = default_n_items; - char *endptr; - int i; - for (i=1; i<argc; i++) { - const char *arg = argv[i]; - if (arg[0] != '-') - break; - if (strcmp(arg, "-q") == 0) { - verbose--; if (verbose<0) verbose=0; - } else if (strcmp(arg, "-x") == 0) { - do_transactions = 1; - } else if (strcmp(arg, "--noserial") == 0) { - noserial=1; - } else if (strcmp(arg, "--norandom") == 0) { - norandom=1; - } else if (strcmp(arg, "--compressibility") == 0) { - compressibility = atof(argv[++i]); - } else if (strcmp(arg, "--nolog") == 0) { - if_transactions_do_logging = 0; - } else if (strcmp(arg, "--singlex-create") == 0) { - do_transactions = 1; - singlex = 1; - singlex_create = 1; - } else if (strcmp(arg, "--finish-child-first") == 0) { - finish_child_first = 1; - } else if (strcmp(arg, "--singlex-child") == 0) { - do_transactions = 1; - singlex = 1; - singlex_child = 1; - } else if (strcmp(arg, "--singlex") == 0) { - do_transactions = 1; - singlex = 1; - } else if (strcmp(arg, "--insert1first") == 0) { - insert1first = 1; - } else if (strcmp(arg, "--xcount") == 0) { - if (i+1 >= argc) return print_usage(argv[0]); - items_per_transaction = strtoll(argv[++i], &endptr, 10); assert(*endptr == 0); - } else if (strcmp(arg, "--abort") == 0) { - do_abort = 1; - } else if (strcmp(arg, "--periter") == 0) { - if (i+1 >= argc) return print_usage(argv[0]); - items_per_iteration = strtoll(argv[++i], &endptr, 10); assert(*endptr == 0); - } else if (strcmp(arg, "--cachesize") == 0) { - if (i+1 >= argc) return print_usage(argv[0]); - cachesize = strtoll(argv[++i], &endptr, 10); assert(*endptr == 0); - } else if (strcmp(arg, "--keysize") == 0) { - if (i+1 >= argc) return print_usage(argv[0]); - keysize = atoi(argv[++i]); - } else if (strcmp(arg, "--valsize") == 0) { - if (i+1 >= argc) return print_usage(argv[0]); - valsize = atoi(argv[++i]); - } else if (strcmp(arg, "--pagesize") == 0) { - if (i+1 >= argc) return print_usage(argv[0]); - pagesize = atoi(argv[++i]); - } else if (strcmp(arg, "--env") == 0) { - if (i+1 >= argc) return print_usage(argv[0]); - dbdir = argv[++i]; - } else if (strcmp(arg, "--prelock") == 0) { - prelock=1; - } else if (strcmp(arg, "--prelockflag") == 0) { - prelock=1; - prelockflag=1; - } else if (strcmp(arg, "--srandom") == 0) { - if (i+1 >= argc) return print_usage(argv[0]); - srandom(atoi(argv[++i])); - } else if (strcmp(arg, "--append") == 0) { - do_append = 1; - } else if (strcmp(arg, "--checkpoint-period") == 0) { - if (i+1 >= argc) return print_usage(argv[9]); - checkpoint_period = (u_int32_t) atoi(argv[++i]); - } else if (strcmp(arg, "--unique_checks") == 0) { - if (i+1 >= argc) return print_usage(argv[0]); - int unique_checks = atoi(argv[++i]); - if (unique_checks) - put_flags = DB_NOOVERWRITE; - else - put_flags = 0; - } else { - return print_usage(argv[0]); - } - } - if 
(do_transactions) { - env_open_flags |= DB_INIT_TXN | if_transactions_do_logging | DB_INIT_LOCK; - } - if (do_transactions && prelockflag) { - put_flags |= DB_PRELOCKED_WRITE; - } - if (i<argc) { - /* if it looks like a number */ - char *end; - errno=0; - long n_iterations = strtol(argv[i], &end, 10); - if (errno!=0 || *end!=0 || end==argv[i]) { - print_usage(argv[0]); - return 1; - } - total_n_items = items_per_iteration * (long long)n_iterations; - } - if (verbose) { - if (!noserial) printf("serial "); - if (!noserial && !norandom) printf("and "); - if (!norandom) printf("random "); - printf("insertions of %d per batch%s\n", items_per_iteration, do_transactions ? " (with transactions)" : ""); - } - benchmark_setup(); - gettimeofday(&t1,0); - biginsert(total_n_items, &t1); - gettimeofday(&t2,0); - benchmark_shutdown(); - gettimeofday(&t3,0); - if (verbose) { - printf("Shutdown %9.6fs\n", toku_tdiff(&t3, &t2)); - printf("Total time %9.6fs for %lld insertions = %8.0f/s\n", toku_tdiff(&t3, &t1), - (!noserial+!norandom)*total_n_items, (!noserial+!norandom)*total_n_items/toku_tdiff(&t3, &t1)); - } - - return 0; -} diff --git a/storage/tokudb/ft-index/examples/db-scan.c b/storage/tokudb/ft-index/examples/db-scan.c deleted file mode 100644 index f01e0dc55d0..00000000000 --- a/storage/tokudb/ft-index/examples/db-scan.c +++ /dev/null @@ -1,461 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. 
- -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -/* Scan the bench.tokudb/bench.db over and over. 
*/ -#define DONT_DEPRECATE_MALLOC - -#include <stdio.h> -#include <stdlib.h> -#include <unistd.h> -#include <stdint.h> -#include <inttypes.h> -#ifdef BDB -#include <db.h> -#define DIRSUF bdb -#else -#include <tokudb.h> -#define DIRSUF tokudb -#endif -#include <assert.h> -#include <errno.h> -#include <string.h> -#include <stdio.h> -#include <sys/stat.h> -#include <fcntl.h> -#include <sys/time.h> - -static const char *pname; -static enum run_mode { RUN_HWC, RUN_LWC, RUN_VERIFY, RUN_RANGE} run_mode = RUN_HWC; -static int do_txns=1, prelock=0, prelockflag=0; -static u_int32_t lock_flag = 0; -static long limitcount=-1; -static u_int32_t cachesize = 127*1024*1024; -static u_int64_t start_range = 0, end_range = 0; -static int n_experiments = 2; -static int bulk_fetch = 1; - -static int print_usage (const char *argv0) { - fprintf(stderr, "Usage:\n%s [--verify-lwc | --lwc | --nohwc] [--prelock] [--prelockflag] [--prelockwriteflag] [--env DIR]\n", argv0); - fprintf(stderr, " --verify-lwc means to run the light weight cursor and the heavyweight cursor to verify that they get the same answer.\n"); - fprintf(stderr, " --lwc run light weight cursors instead of heavy weight cursors\n"); - fprintf(stderr, " --prelock acquire a read lock on the entire table before running\n"); - fprintf(stderr, " --prelockflag pass DB_PRELOCKED to the the cursor get operation whenever the locks have been acquired\n"); - fprintf(stderr, " --prelockwriteflag pass DB_PRELOCKED_WRITE to the cursor get operation\n"); - fprintf(stderr, " --nox no transactions (no locking)\n"); - fprintf(stderr, " --count COUNT read the first COUNT rows and then stop.\n"); - fprintf(stderr, " --cachesize N set the env cachesize to N bytes\n"); - fprintf(stderr, " --srandom N srandom(N)\n"); - fprintf(stderr, " --env DIR put db files in DIR instead of default\n"); - fprintf(stderr, " --bulk_fetch 0|1 do bulk fetch on lwc operations (default: 1)\n"); - return 1; -} - -static DB_ENV *env; -static DB *db; -static DB_TXN *tid=0; - -#define STRINGIFY2(s) #s -#define STRINGIFY(s) STRINGIFY2(s) -static const char *dbdir = "./bench." STRINGIFY(DIRSUF); /* DIRSUF is passed in as a -D argument to the compiler. 
*/ -static int env_open_flags_yesx = DB_CREATE|DB_PRIVATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOG|DB_INIT_LOCK; -static int env_open_flags_nox = DB_CREATE|DB_PRIVATE|DB_INIT_MPOOL; -static char *dbfilename = "bench.db"; - - -static void parse_args (int argc, const char *argv[]) { - pname=argv[0]; - argc--; argv++; - int specified_run_mode=0; - while (argc>0) { - if (strcmp(*argv,"--verify-lwc")==0) { - if (specified_run_mode && run_mode!=RUN_VERIFY) { two_modes: fprintf(stderr, "You specified two run modes\n"); exit(1); } - run_mode = RUN_VERIFY; - } else if (strcmp(*argv, "--lwc")==0) { - if (specified_run_mode && run_mode!=RUN_LWC) goto two_modes; - run_mode = RUN_LWC; - } else if (strcmp(*argv, "--hwc")==0) { - if (specified_run_mode && run_mode!=RUN_VERIFY) goto two_modes; - run_mode = RUN_HWC; - } else if (strcmp(*argv, "--prelock")==0) prelock=1; -#ifdef TOKUDB - else if (strcmp(*argv, "--prelockflag")==0) { prelockflag=1; lock_flag = DB_PRELOCKED; } - else if (strcmp(*argv, "--prelockwriteflag")==0) { prelockflag=1; lock_flag = DB_PRELOCKED_WRITE; } -#endif - else if (strcmp(*argv, "--nox")==0) { do_txns=0; } - else if (strcmp(*argv, "--count")==0) { - char *end; - argc--; argv++; - errno=0; limitcount=strtol(*argv, &end, 10); assert(errno==0); - printf("Limiting count to %ld\n", limitcount); - } else if (strcmp(*argv, "--cachesize")==0 && argc>0) { - char *end; - argc--; argv++; - cachesize=(u_int32_t)strtol(*argv, &end, 10); - } else if (strcmp(*argv, "--env") == 0) { - argc--; argv++; - if (argc==0) exit(print_usage(pname)); - dbdir = *argv; - } else if (strcmp(*argv, "--range") == 0 && argc > 2) { - run_mode = RUN_RANGE; - argc--; argv++; - start_range = strtoll(*argv, NULL, 10); - argc--; argv++; - end_range = strtoll(*argv, NULL, 10); - } else if (strcmp(*argv, "--experiments") == 0 && argc > 1) { - argc--; argv++; - n_experiments = strtol(*argv, NULL, 10); - } else if (strcmp(*argv, "--srandom") == 0 && argc > 1) { - argc--; argv++; - srandom(atoi(*argv)); - } else if (strcmp(*argv, "--bulk_fetch") == 0 && argc > 1) { - argc--; argv++; - bulk_fetch = atoi(*argv); - } else { - exit(print_usage(pname)); - } - argc--; argv++; - } - //Prelocking is meaningless without transactions - if (do_txns==0) { - prelockflag=0; - lock_flag=0; - prelock=0; - } -} - -static void scanscan_setup (void) { - int r; - r = db_env_create(&env, 0); assert(r==0); - r = env->set_cachesize(env, 0, cachesize, 1); assert(r==0); - r = env->open(env, dbdir, do_txns? 
env_open_flags_yesx : env_open_flags_nox, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH); assert(r==0); - r = db_create(&db, env, 0); assert(r==0); - if (do_txns) { - r = env->txn_begin(env, 0, &tid, 0); assert(r==0); - } - r = db->open(db, tid, dbfilename, NULL, DB_BTREE, 0, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH); assert(r==0); -#ifdef TOKUDB - if (prelock) { - r = db->pre_acquire_table_lock(db, tid); - assert(r==0); - } -#endif -} - -static void scanscan_shutdown (void) { - int r; - r = db->close(db, 0); assert(r==0); - if (do_txns) { - r = tid->commit(tid, 0); assert(r==0); - } - r = env->close(env, 0); assert(r==0); -} - -static double gettime (void) { - struct timeval tv; - int r = gettimeofday(&tv, 0); - assert(r==0); - return tv.tv_sec + 1e-6*tv.tv_usec; -} - -static void scanscan_hwc (void) { - int r; - int counter=0; - for (counter=0; counter<n_experiments; counter++) { - long long totalbytes=0; - int rowcounter=0; - double prevtime = gettime(); - DBT k,v; - DBC *dbc; - r = db->cursor(db, tid, &dbc, 0); assert(r==0); - memset(&k, 0, sizeof(k)); - memset(&v, 0, sizeof(v)); - u_int32_t c_get_flags = DB_NEXT; - if (prelockflag && (counter || prelock)) { - c_get_flags |= lock_flag; - } - while (0 == (r = dbc->c_get(dbc, &k, &v, c_get_flags))) { - - //printf("r=%d\n", r); - - totalbytes += k.size + v.size; - rowcounter++; - if (limitcount>0 && rowcounter>=limitcount) break; - } - assert(r==DB_NOTFOUND); - r = dbc->c_close(dbc); assert(r==0); - double thistime = gettime(); - double tdiff = thistime-prevtime; - printf("Scan %lld bytes (%d rows) in %9.6fs at %9fMB/s\n", totalbytes, rowcounter, tdiff, 1e-6*totalbytes/tdiff); - } -} - -#ifdef TOKUDB - -struct extra_count { - long long totalbytes; - int rowcounter; -}; - -static int counttotalbytes (DBT const *key, DBT const *data, void *extrav) { - struct extra_count *e=extrav; - e->totalbytes += key->size + data->size; - e->rowcounter++; - return bulk_fetch ? 
TOKUDB_CURSOR_CONTINUE : 0; -} - -static void scanscan_lwc (void) { - int r; - int counter=0; - for (counter=0; counter<n_experiments; counter++) { - struct extra_count e = {0,0}; - double prevtime = gettime(); - DBC *dbc; - r = db->cursor(db, tid, &dbc, 0); assert(r==0); - u_int32_t f_flags = 0; - if (prelockflag && (counter || prelock)) { - f_flags |= lock_flag; - } - long rowcounter=0; - while (0 == (r = dbc->c_getf_next(dbc, f_flags, counttotalbytes, &e))) { - rowcounter++; - if (limitcount>0 && rowcounter>=limitcount) break; - } - r = dbc->c_close(dbc); assert(r==0); - double thistime = gettime(); - double tdiff = thistime-prevtime; - printf("LWC Scan %lld bytes (%d rows) in %9.6fs at %9fMB/s\n", e.totalbytes, e.rowcounter, tdiff, 1e-6*e.totalbytes/tdiff); - } -} -#endif - -static void scanscan_range (void) { - int r; - - double texperiments[n_experiments]; - u_int64_t k = 0; - char kv[8]; - DBT key, val; - - int counter; - for (counter = 0; counter < n_experiments; counter++) { - - if (1) { //if ((counter&1) == 0) { - makekey: - // generate a random key in the key range - k = (start_range + (random() % (end_range - start_range))) * (1<<6); - int i; - for (i = 0; i < 8; i++) - kv[i] = k >> (56-8*i); - } - memset(&key, 0, sizeof key); key.data = &kv, key.size = sizeof kv; - memset(&val, 0, sizeof val); - - double tstart = gettime(); - - DBC *dbc; - r = db->cursor(db, tid, &dbc, 0); assert(r==0); - - // set the cursor to the random key - r = dbc->c_get(dbc, &key, &val, DB_SET_RANGE+lock_flag); - if (r != 0) { - assert(r == DB_NOTFOUND); - printf("%s:%d %" PRIu64 "\n", __FUNCTION__, __LINE__, k); - goto makekey; - } - -#ifdef TOKUDB - // do the range scan - long rowcounter = 0; - struct extra_count e = {0,0}; - while (limitcount > 0 && rowcounter < limitcount) { - r = dbc->c_getf_next(dbc, prelockflag ? 
lock_flag : 0, counttotalbytes, &e); - if (r != 0) - break; - rowcounter++; - } -#endif - - r = dbc->c_close(dbc); - assert(r==0); - - texperiments[counter] = gettime() - tstart; - printf("%" PRIu64 " %f\n", k, texperiments[counter]); fflush(stdout); - } - - // print the times - double tsum = 0.0, tmin = 0.0, tmax = 0.0; - for (counter = 0; counter < n_experiments; counter++) { - if (counter==0 || texperiments[counter] < tmin) - tmin = texperiments[counter]; - if (counter==0 || texperiments[counter] > tmax) - tmax = texperiments[counter]; - tsum += texperiments[counter]; - } - printf("%f %f %f/%d = %f\n", tmin, tmax, tsum, n_experiments, tsum / n_experiments); -} - -#ifdef TOKUDB - -struct extra_verify { - long long totalbytes; - int rowcounter; - DBT k,v; // the k and v are gotten using the old cursor -}; - -static int -checkbytes (DBT const *key, DBT const *data, void *extrav) { - struct extra_verify *e=extrav; - e->totalbytes += key->size + data->size; - e->rowcounter++; - assert(e->k.size == key->size); - assert(e->v.size == data->size); - assert(memcmp(e->k.data, key->data, key->size)==0); - assert(memcmp(e->v.data, data->data, data->size)==0); - assert(e->k.data != key->data); - assert(e->v.data != data->data); - return 0; -} - - -static void scanscan_verify (void) { - int r; - int counter=0; - for (counter=0; counter<n_experiments; counter++) { - struct extra_verify v; - v.totalbytes=0; - v.rowcounter=0; - double prevtime = gettime(); - DBC *dbc1, *dbc2; - r = db->cursor(db, tid, &dbc1, 0); assert(r==0); - r = db->cursor(db, tid, &dbc2, 0); assert(r==0); - memset(&v.k, 0, sizeof(v.k)); - memset(&v.v, 0, sizeof(v.v)); - u_int32_t f_flags = 0; - u_int32_t c_get_flags = DB_NEXT; - if (prelockflag && (counter || prelock)) { - f_flags |= lock_flag; - c_get_flags |= lock_flag; - } - while (1) { - int r1,r2; - r2 = dbc1->c_get(dbc1, &v.k, &v.v, c_get_flags); - r1 = dbc2->c_getf_next(dbc2, f_flags, checkbytes, &v); - assert(r1==r2); - if (r1) break; - } - r = dbc1->c_close(dbc1); assert(r==0); - r = dbc2->c_close(dbc2); assert(r==0); - double thistime = gettime(); - double tdiff = thistime-prevtime; - printf("verify %lld bytes (%d rows) in %9.6fs at %9fMB/s\n", v.totalbytes, v.rowcounter, tdiff, 1e-6*v.totalbytes/tdiff); - } -} - -#endif - -int main (int argc, const char *argv[]) { - - parse_args(argc,argv); - - scanscan_setup(); - switch (run_mode) { - case RUN_HWC: scanscan_hwc(); break; -#ifdef TOKUDB - case RUN_LWC: scanscan_lwc(); break; - case RUN_VERIFY: scanscan_verify(); break; -#endif - case RUN_RANGE: scanscan_range(); break; - default: assert(0); break; - } - scanscan_shutdown(); - - return 0; -} diff --git a/storage/tokudb/ft-index/examples/db-update.c b/storage/tokudb/ft-index/examples/db-update.c deleted file mode 100644 index e2ab1ecdce4..00000000000 --- a/storage/tokudb/ft-index/examples/db-update.c +++ /dev/null @@ -1,379 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT 
(below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
- -// measure the performance of a simulated "insert on duplicate key update" operation -// the table schema is t(a int, b int, c int, d int, primary key(a, b)) -// a and b are random -// c is the sum of the observations -// d is the first observation - -#include <stdio.h> -#include <stdbool.h> -#include <stdlib.h> -#include <assert.h> -#include <string.h> -#include <sys/stat.h> -#include <sys/time.h> -#include <arpa/inet.h> -#include "db.h" - -static size_t key_size = 8; -static size_t val_size = 8; -static int verbose = 0; - -static void db_error(const DB_ENV *env, const char *prefix, const char *msg) { - printf("%s: %p %s %s\n", __FUNCTION__, env, prefix, msg); -} - -static int get_int(void *p) { - int v; - memcpy(&v, p, sizeof v); - return htonl(v); -} - -#if defined(TOKUDB) -static int my_update_callback(DB *db, const DBT *key, const DBT *old_val, const DBT *extra, void (*set_val)(const DBT *new_val, void *set_extra), void *set_extra) { - assert(db); - assert(key); - if (old_val == NULL) { - // insert new_val = extra - set_val(extra, set_extra); - } else { - if (verbose) printf("u"); - // update new_val = old_val + extra - assert(old_val->size == val_size && extra->size == val_size); - char new_val_buffer[val_size]; - memcpy(new_val_buffer, old_val->data, sizeof new_val_buffer); - int newc = htonl(get_int(old_val->data) + get_int(extra->data)); // newc = oldc + newc - memcpy(new_val_buffer, &newc, sizeof newc); - DBT new_val = { .data = new_val_buffer, .size = sizeof new_val_buffer }; - set_val(&new_val, set_extra); - } - return 0; -} -#endif - -static void insert_and_update(DB *db, DB_TXN *txn, int a, int b, int c, int d, bool do_update_callback) { -#if !defined(TOKUDB) - assert(!do_update_callback); -#endif - int r; - - // generate the key - assert(key_size >= 8); - char key_buffer[key_size]; - int newa = htonl(a); - memcpy(key_buffer, &newa, sizeof newa); - int newb = htonl(b); - memcpy(key_buffer+4, &newb, sizeof newb); - - // generate the value - assert(val_size >= 8); - char val_buffer[val_size]; - int newc = htonl(c); - memcpy(val_buffer, &newc, sizeof newc); - int newd = htonl(d); - memcpy(val_buffer+4, &newd, sizeof newd); - -#if defined(TOKUDB) - if (do_update_callback) { - // extra = value_buffer, implicit combine column c update function - DBT key = { .data = key_buffer, .size = sizeof key_buffer }; - DBT extra = { .data = val_buffer, .size = sizeof val_buffer }; - r = db->update(db, txn, &key, &extra, 0); assert(r == 0); - } else -#endif - { - DBT key = { .data = key_buffer, .size = sizeof key_buffer }; - DBT value = { .data = val_buffer, .size = sizeof val_buffer }; - DBT oldvalue = { }; - r = db->get(db, txn, &key, &oldvalue, 0); - assert(r == 0 || r == DB_NOTFOUND); - if (r == 0) { - // update it - if (verbose) printf("U"); - int oldc = get_int(oldvalue.data); - newc = htonl(oldc + c); // newc = oldc + newc - memcpy(val_buffer, &newc, sizeof newc); - r = db->put(db, txn, &key, &value, 0); - assert(r == 0); - } else if (r == DB_NOTFOUND) { - r = db->put(db, txn, &key, &value, 0); - assert(r == 0); - } - } -} - -static inline float tdiff (struct timeval *a, struct timeval *b) { - return (a->tv_sec - b->tv_sec) +1e-6*(a->tv_usec - b->tv_usec); -} - -static void insert_and_update_all(DB_ENV *db_env, DB *db, long nrows, long max_rows_per_txn, int key_range, long rows_per_report, bool do_update_callback, bool do_txn) { - int r; - struct timeval tstart; - r = gettimeofday(&tstart, NULL); assert(r == 0); - struct timeval tlast = tstart; - DB_TXN *txn = NULL; - if (do_txn) { - 
r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0); - } - long n_rows_per_txn = 0; - long rowi; - for (rowi = 0; rowi < nrows; rowi++) { - int a = random() % key_range; - int b = random() % key_range; - int c = 1; - int d = 0; // timestamp - insert_and_update(db, txn, a, b, c, d, do_update_callback); - n_rows_per_txn++; - - // maybe commit - if (do_txn && n_rows_per_txn == max_rows_per_txn) { - r = txn->commit(txn, 0); assert(r == 0); - r = db_env->txn_begin(db_env, NULL, &txn, 0); assert(r == 0); - n_rows_per_txn = 0; - } - - // maybe report performance - if (((rowi + 1) % rows_per_report) == 0) { - struct timeval tnow; - r = gettimeofday(&tnow, NULL); assert(r == 0); - float last_time = tdiff(&tnow, &tlast); - float total_time = tdiff(&tnow, &tstart); - printf("%ld %.3f %.0f/s %.0f/s\n", rowi + 1, last_time, rows_per_report/last_time, rowi/total_time); fflush(stdout); - tlast = tnow; - } - } - - if (do_txn) { - r = txn->commit(txn, 0); assert(r == 0); - } - struct timeval tnow; - r = gettimeofday(&tnow, NULL); assert(r == 0); - printf("total %ld %.3f %.0f/s\n", nrows, tdiff(&tnow, &tstart), nrows/tdiff(&tnow, &tstart)); fflush(stdout); -} - -int main(int argc, char *argv[]) { -#if defined(TOKUDB) - char *db_env_dir = "update.env.tokudb"; -#else - char *db_env_dir = "update.env.bdb"; -#endif - int db_env_open_flags = DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG; - char *db_filename = "update.db"; - long rows = 1000000000; - long rows_per_txn = 100; - long rows_per_report = 100000; - int key_range = 1000000; -#if defined(TOKUDB) - bool do_update_callback = true; -#else - bool do_update_callback = false; -#endif - bool do_txn = false; - u_int64_t cachesize = 1000000000; - u_int32_t pagesize = 0; -#if defined(TOKUDB) - u_int32_t checkpoint_period = 60; -#endif - - int i; - for (i = 1; i < argc; i++) { - char *arg = argv[i]; - if (strcmp(arg, "--verbose") == 0) { - verbose++; - continue; - } - if (strcmp(arg, "--rows") == 0 && i+1 < argc) { - rows = atol(argv[++i]); - continue; - } - if (strcmp(arg, "--rows_per_txn") == 0 && i+1 < argc) { - rows_per_txn = atol(argv[++i]); - continue; - } - if (strcmp(arg, "--rows_per_report") == 0 && i+1 < argc) { - rows_per_report = atol(argv[++i]); - continue; - } - if (strcmp(arg, "--key_range") == 0 && i+1 < argc) { - key_range = atol(argv[++i]); - continue; - } - if (strcmp(arg, "--txn") == 0 && i+1 < argc) { - do_txn = atoi(argv[++i]) != 0; - continue; - } - if (strcmp(arg, "--pagesize") == 0 && i+1 < argc) { - pagesize = atoi(argv[++i]); - continue; - } - if (strcmp(arg, "--cachesize") == 0 && i+1 < argc) { - cachesize = atol(argv[++i]); - continue; - } - if (strcmp(arg, "--update_callback") == 0 && i+1 < argc) { - do_update_callback = atoi(argv[++i]) != 0; - continue; - } - if (strcmp(arg, "--key_size") == 0 && i+1 < argc) { - key_size = atoi(argv[++i]); - continue; - } - if (strcmp(arg, "--val_size") == 0 && i+1 < argc) { - val_size = atoi(argv[++i]); - continue; - } -#if defined(TOKUDB) - if (strcmp(arg, "--checkpoint_period") == 0 && i+1 < argc) { - checkpoint_period = atoi(argv[++i]); - continue; - } -#endif - - assert(0); - } - - int r; - char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1]; - snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir); - r = system(rm_cmd); assert(r == 0); - - r = mkdir(db_env_dir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); assert(r == 0); - - // create and open the env - DB_ENV *db_env = NULL; - r = db_env_create(&db_env, 0); assert(r == 0); -#if 
defined(TOKUDB) - db_env->set_update(db_env, my_update_callback); -#endif - if (cachesize) { - if (verbose) printf("cachesize %llu\n", (unsigned long long)cachesize); - const u_int64_t gig = 1 << 30; - r = db_env->set_cachesize(db_env, cachesize / gig, cachesize % gig, 1); assert(r == 0); - } - if (!do_txn) - db_env_open_flags &= ~(DB_INIT_TXN | DB_INIT_LOG); - db_env->set_errcall(db_env, db_error); - if (verbose) printf("env %s\n", db_env_dir); - r = db_env->open(db_env, db_env_dir, db_env_open_flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0); -#if defined(TOKUDB) - if (checkpoint_period) { - r = db_env->checkpointing_set_period(db_env, checkpoint_period); assert(r == 0); - u_int32_t period; - r = db_env->checkpointing_get_period(db_env, &period); assert(r == 0 && period == checkpoint_period); - } -#endif - - // create the db - DB *db = NULL; - r = db_create(&db, db_env, 0); assert(r == 0); - DB_TXN *create_txn = NULL; - if (do_txn) { - r = db_env->txn_begin(db_env, NULL, &create_txn, 0); assert(r == 0); - } - if (pagesize) { - r = db->set_pagesize(db, pagesize); assert(r == 0); - } - r = db->open(db, create_txn, db_filename, NULL, DB_BTREE, DB_CREATE, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); assert(r == 0); - if (do_txn) { - r = create_txn->commit(create_txn, 0); assert(r == 0); - } - - // insert on duplicate key update - insert_and_update_all(db_env, db, rows, rows_per_txn, key_range, rows_per_report, do_update_callback, do_txn); - - // shutdown - r = db->close(db, 0); assert(r == 0); db = NULL; - r = db_env->close(db_env, 0); assert(r == 0); db_env = NULL; - - return 0; -} diff --git a/storage/tokudb/ft-index/ft/CMakeLists.txt b/storage/tokudb/ft-index/ft/CMakeLists.txt index da8d0f41d8f..a433c7fc3a7 100644 --- a/storage/tokudb/ft-index/ft/CMakeLists.txt +++ b/storage/tokudb/ft-index/ft/CMakeLists.txt @@ -7,15 +7,17 @@ set_source_files_properties( "${CMAKE_CURRENT_BINARY_DIR}/log_header.h" PROPERTIES GENERATED TRUE) -add_executable(logformat logformat.cc) +add_executable(logformat logger/logformat.cc) target_link_libraries(logformat ${LIBTOKUPORTABILITY}_static) +add_space_separated_property(TARGET logformat LINK_FLAGS --coverage) + add_custom_command( OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/log_code.cc" OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/log_print.cc" OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/log_header.h" COMMAND $<TARGET_FILE:logformat> . 
- DEPENDS logformat + DEPENDS logger/logformat ) add_custom_target( generate_log_code @@ -23,52 +25,52 @@ add_custom_target( ) set(FT_SOURCES - background_job_manager - block_allocator - block_table bndata - cachetable - checkpoint - compress - dbufio - fifo + cachetable/background_job_manager + cachetable/cachetable + cachetable/checkpoint + cursor ft ft-cachetable-wrappers ft-flusher ft-hot-flusher - ftloader - ftloader-callback - ft_msg - ft_node-serialize - ft-node-deserialize ft-ops - ft-serialize ft-test-helpers ft-verify - key + loader/callbacks + loader/dbufio + loader/loader + loader/pqueue leafentry le-cursor - logcursor - logfilemgr - logger - log_upgrade - minicron - pqueue - queue - quicklz - recover - rollback - rollback-apply - rollback-ct-callbacks - rollback_log_node_cache - roll - sub_block - txn - txn_child_manager - txn_manager + logger/logcursor + logger/logfilemgr + logger/logger + logger/log_upgrade + logger/recover + msg + msg_buffer + node + pivotkeys + serialize/block_allocator + serialize/block_allocator_strategy + serialize/block_table + serialize/compress + serialize/ft_node-serialize + serialize/ft-node-deserialize + serialize/ft-serialize + serialize/quicklz + serialize/sub_block + txn/rollback + txn/rollback-apply + txn/rollback-ct-callbacks + txn/rollback_log_node_cache + txn/roll + txn/txn + txn/txn_child_manager + txn/txn_manager + txn/xids ule - xids - ybt "${CMAKE_CURRENT_BINARY_DIR}/log_code" "${CMAKE_CURRENT_BINARY_DIR}/log_print" ) @@ -85,24 +87,7 @@ add_dependencies(ft_static install_tdb_h generate_log_code build_lzma) ## link with lzma (which should be static) and link dependers with zlib target_link_libraries(ft LINK_PRIVATE util_static lzma ${LIBTOKUPORTABILITY}) -target_link_libraries(ft LINK_PUBLIC ${ZLIB_LIBRARY} ) +target_link_libraries(ft LINK_PUBLIC z) target_link_libraries(ft_static LINK_PRIVATE lzma) -## build the bins in this directory -foreach(tool tokuftdump tdb_logprint tdb-recover ftverify) - add_executable(${tool} ${tool}.cc) - add_dependencies(${tool} install_tdb_h) - target_link_libraries(${tool} ft_static util_static ${ZLIB_LIBRARY} lzma ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS}) - add_space_separated_property(TARGET ${tool} COMPILE_FLAGS -fvisibility=hidden) -endforeach(tool) - -# link in math.h library just for this tool. -target_link_libraries(ftverify m) - -install( - TARGETS tokuftdump - COMPONENT Server - DESTINATION ${INSTALL_BINDIR} - ) - add_subdirectory(tests) diff --git a/storage/tokudb/ft-index/ft/block_allocator.cc b/storage/tokudb/ft-index/ft/block_allocator.cc deleted file mode 100644 index a16df353760..00000000000 --- a/storage/tokudb/ft-index/ft/block_allocator.cc +++ /dev/null @@ -1,473 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). 
- - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2009-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#ident "$Id$" - -#include "block_allocator.h" -#include <memory.h> -#include <toku_assert.h> -#include <stdint.h> -#include <stdlib.h> -#include <string.h> - -// Here's a very simple implementation. -// It's not very fast at allocating or freeing. -// Previous implementation used next_fit, but now use first_fit since we are moving blocks around to reduce file size. - -struct block_allocator { - uint64_t reserve_at_beginning; // How much to reserve at the beginning - uint64_t alignment; // Block alignment - uint64_t n_blocks; // How many blocks - uint64_t blocks_array_size; // How big is the blocks_array. Must be >= n_blocks. - struct block_allocator_blockpair *blocks_array; // These blocks are sorted by address. - uint64_t n_bytes_in_use; // including the reserve_at_beginning -}; - -void -block_allocator_validate (BLOCK_ALLOCATOR ba) { - uint64_t i; - uint64_t n_bytes_in_use = ba->reserve_at_beginning; - for (i=0; i<ba->n_blocks; i++) { - n_bytes_in_use += ba->blocks_array[i].size; - if (i>0) { - assert(ba->blocks_array[i].offset > ba->blocks_array[i-1].offset); - assert(ba->blocks_array[i].offset >= ba->blocks_array[i-1].offset + ba->blocks_array[i-1].size ); - } - } - assert(n_bytes_in_use == ba->n_bytes_in_use); -} - -#if 0 -#define VALIDATE(b) block_allocator_validate(b) -#else -#define VALIDATE(b) ((void)0) -#endif - -#if 0 -void -block_allocator_print (BLOCK_ALLOCATOR ba) { - uint64_t i; - for (i=0; i<ba->n_blocks; i++) { - printf("%" PRId64 ":%" PRId64 " ", ba->blocks_array[i].offset, ba->blocks_array[i].size); - } - printf("\n"); - VALIDATE(ba); -} -#endif - -void -create_block_allocator (BLOCK_ALLOCATOR *ba, uint64_t reserve_at_beginning, uint64_t alignment) { - assert(alignment>=512 && 0==(alignment%512)); // the alignment must be at least 512 and aligned with 512 to make DIRECT_IO happy. 
- BLOCK_ALLOCATOR XMALLOC(result); - result->reserve_at_beginning = reserve_at_beginning; - result->alignment = alignment; - result->n_blocks = 0; - result->blocks_array_size = 1; - XMALLOC_N(result->blocks_array_size, result->blocks_array); - result->n_bytes_in_use = reserve_at_beginning; - *ba = result; - VALIDATE(result); -} - -void -destroy_block_allocator (BLOCK_ALLOCATOR *bap) { - BLOCK_ALLOCATOR ba = *bap; - *bap = 0; - toku_free(ba->blocks_array); - toku_free(ba); -} - -static void -grow_blocks_array_by (BLOCK_ALLOCATOR ba, uint64_t n_to_add) { - if (ba->n_blocks + n_to_add > ba->blocks_array_size) { - uint64_t new_size = ba->n_blocks + n_to_add; - uint64_t at_least = ba->blocks_array_size * 2; - if (at_least > new_size) { - new_size = at_least; - } - ba->blocks_array_size = new_size; - XREALLOC_N(ba->blocks_array_size, ba->blocks_array); - } -} - - -static void -grow_blocks_array (BLOCK_ALLOCATOR ba) { - grow_blocks_array_by(ba, 1); -} - -void -block_allocator_merge_blockpairs_into (uint64_t d, struct block_allocator_blockpair dst[/*d*/], - uint64_t s, const struct block_allocator_blockpair src[/*s*/]) -{ - uint64_t tail = d+s; - while (d>0 && s>0) { - struct block_allocator_blockpair *dp = &dst[d-1]; - struct block_allocator_blockpair const *sp = &src[s-1]; - struct block_allocator_blockpair *tp = &dst[tail-1]; - assert(tail>0); - if (dp->offset > sp->offset) { - *tp = *dp; - d--; - tail--; - } else { - *tp = *sp; - s--; - tail--; - } - } - while (d>0) { - struct block_allocator_blockpair *dp = &dst[d-1]; - struct block_allocator_blockpair *tp = &dst[tail-1]; - *tp = *dp; - d--; - tail--; - } - while (s>0) { - struct block_allocator_blockpair const *sp = &src[s-1]; - struct block_allocator_blockpair *tp = &dst[tail-1]; - *tp = *sp; - s--; - tail--; - } -} - -static int -compare_blockpairs (const void *av, const void *bv) { - const struct block_allocator_blockpair *a = (const struct block_allocator_blockpair *) av; - const struct block_allocator_blockpair *b = (const struct block_allocator_blockpair *) bv; - if (a->offset < b->offset) return -1; - if (a->offset > b->offset) return +1; - return 0; -} - -void -block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, uint64_t n_blocks, struct block_allocator_blockpair pairs[/*n_blocks*/]) -// See the documentation in block_allocator.h -{ - VALIDATE(ba); - qsort(pairs, n_blocks, sizeof(*pairs), compare_blockpairs); - for (uint64_t i=0; i<n_blocks; i++) { - assert(pairs[i].offset >= ba->reserve_at_beginning); - assert(pairs[i].offset%ba->alignment == 0); - ba->n_bytes_in_use += pairs[i].size; - invariant(pairs[i].size > 0); //Allocator does not support size 0 blocks. See block_allocator_free_block. - } - grow_blocks_array_by(ba, n_blocks); - block_allocator_merge_blockpairs_into(ba->n_blocks, ba->blocks_array, - n_blocks, pairs); - ba->n_blocks += n_blocks; - VALIDATE(ba); -} - -void -block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t offset) { - struct block_allocator_blockpair p = {.offset = offset, .size=size}; - // Just do a linear search for the block. - // This data structure is a sorted array (no gaps or anything), so the search isn't really making this any slower than the insertion. - // To speed up the insertion when opening a file, we provide the block_allocator_alloc_blocks_at function. - block_allocator_alloc_blocks_at(ba, 1, &p); -} - -static inline uint64_t -align (uint64_t value, BLOCK_ALLOCATOR ba) -// Effect: align a value by rounding up. 
-{ - return ((value+ba->alignment-1)/ba->alignment)*ba->alignment; -} - -void block_allocator_alloc_block(BLOCK_ALLOCATOR ba, uint64_t size, uint64_t *offset) -// Effect: Allocate a block. The resulting block must be aligned on the ba->alignment (which to make direct_io happy must be a positive multiple of 512). -{ - invariant(size > 0); //Allocator does not support size 0 blocks. See block_allocator_free_block. - grow_blocks_array(ba); - ba->n_bytes_in_use += size; - if (ba->n_blocks==0) { - assert(ba->n_bytes_in_use == ba->reserve_at_beginning + size); // we know exactly how many are in use - ba->blocks_array[0].offset = align(ba->reserve_at_beginning, ba); - ba->blocks_array[0].size = size; - *offset = ba->blocks_array[0].offset; - ba->n_blocks++; - return; - } - // Implement first fit. - { - uint64_t end_of_reserve = align(ba->reserve_at_beginning, ba); - if (end_of_reserve + size <= ba->blocks_array[0].offset ) { - // Check to see if the space immediately after the reserve is big enough to hold the new block. - struct block_allocator_blockpair *bp = &ba->blocks_array[0]; - memmove(bp+1, bp, (ba->n_blocks)*sizeof(*bp)); - bp[0].offset = end_of_reserve; - bp[0].size = size; - ba->n_blocks++; - *offset = end_of_reserve; - VALIDATE(ba); - return; - } - } - for (uint64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) { - // Consider the space after blocknum - struct block_allocator_blockpair *bp = &ba->blocks_array[blocknum]; - uint64_t this_offset = bp[0].offset; - uint64_t this_size = bp[0].size; - uint64_t answer_offset = align(this_offset + this_size, ba); - if (answer_offset + size > bp[1].offset) continue; // The block we want doesn't fit after this block. - // It fits, so allocate it here. - memmove(bp+2, bp+1, (ba->n_blocks - blocknum -1)*sizeof(*bp)); - bp[1].offset = answer_offset; - bp[1].size = size; - ba->n_blocks++; - *offset = answer_offset; - VALIDATE(ba); - return; - } - // It didn't fit anywhere, so fit it on the end. - assert(ba->n_blocks < ba->blocks_array_size); - struct block_allocator_blockpair *bp = &ba->blocks_array[ba->n_blocks]; - uint64_t answer_offset = align(bp[-1].offset+bp[-1].size, ba); - bp->offset = answer_offset; - bp->size = size; - ba->n_blocks++; - *offset = answer_offset; - VALIDATE(ba); -} - -static int64_t -find_block (BLOCK_ALLOCATOR ba, uint64_t offset) -// Find the index in the blocks array that has a particular offset. Requires that the block exist. -// Use binary search so it runs fast. -{ - VALIDATE(ba); - if (ba->n_blocks==1) { - assert(ba->blocks_array[0].offset == offset); - return 0; - } - uint64_t lo = 0; - uint64_t hi = ba->n_blocks; - while (1) { - assert(lo<hi); // otherwise no such block exists. - uint64_t mid = (lo+hi)/2; - uint64_t thisoff = ba->blocks_array[mid].offset; - //printf("lo=%" PRId64 " hi=%" PRId64 " mid=%" PRId64 " thisoff=%" PRId64 " offset=%" PRId64 "\n", lo, hi, mid, thisoff, offset); - if (thisoff < offset) { - lo = mid+1; - } else if (thisoff > offset) { - hi = mid; - } else { - return mid; - } - } -} - -// To support 0-sized blocks, we need to include size as an input to this function. -// All 0-sized blocks at the same offset can be considered identical, but -// a 0-sized block can share offset with a non-zero sized block. -// The non-zero sized block is not exchangable with a zero sized block (or vice versa), -// so inserting 0-sized blocks can cause corruption here. 
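The removed align() helper above uses the standard round-up trick of adding alignment-1 and truncating by integer division. A minimal standalone sketch of that arithmetic follows; the function name and the example alignment value are illustrative only, not part of the original source.

    #include <cstdint>
    #include <cassert>

    // Round `value` up to the next multiple of `alignment`. The removed
    // allocator required the alignment to be a positive multiple of 512
    // so that direct I/O stays happy.
    static uint64_t round_up(uint64_t value, uint64_t alignment) {
        assert(alignment >= 512 && alignment % 512 == 0);
        return ((value + alignment - 1) / alignment) * alignment;
    }
    // e.g. round_up(5000, 4096) == 8192 and round_up(8192, 4096) == 8192.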
-void -block_allocator_free_block (BLOCK_ALLOCATOR ba, uint64_t offset) { - VALIDATE(ba); - int64_t bn = find_block(ba, offset); - assert(bn>=0); // we require that there is a block with that offset. Might as well abort if no such block exists. - ba->n_bytes_in_use -= ba->blocks_array[bn].size; - memmove(&ba->blocks_array[bn], &ba->blocks_array[bn+1], (ba->n_blocks-bn-1) * sizeof(struct block_allocator_blockpair)); - ba->n_blocks--; - VALIDATE(ba); -} - -uint64_t -block_allocator_block_size (BLOCK_ALLOCATOR ba, uint64_t offset) { - int64_t bn = find_block(ba, offset); - assert(bn>=0); // we require that there is a block with that offset. Might as well abort if no such block exists. - return ba->blocks_array[bn].size; -} - -uint64_t -block_allocator_allocated_limit (BLOCK_ALLOCATOR ba) { - if (ba->n_blocks==0) return ba->reserve_at_beginning; - else { - struct block_allocator_blockpair *last = &ba->blocks_array[ba->n_blocks-1]; - return last->offset + last->size; - } -} - -int -block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, uint64_t b, uint64_t *offset, uint64_t *size) -// Effect: Consider the blocks in sorted order. The reserved block at the beginning is number 0. The next one is number 1 and so forth. -// Return the offset and size of the block with that number. -// Return 0 if there is a block that big, return nonzero if b is too big. -{ - if (b==0) { - *offset=0; - *size =ba->reserve_at_beginning; - return 0; - } else if (b > ba->n_blocks) { - return -1; - } else { - *offset=ba->blocks_array[b-1].offset; - *size =ba->blocks_array[b-1].size; - return 0; - } -} - -void -block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION report) { - //Requires: report->file_size_bytes is filled in - //Requires: report->data_bytes is filled in - //Requires: report->checkpoint_bytes_additional is filled in - - assert(ba->n_bytes_in_use == report->data_bytes + report->checkpoint_bytes_additional); - - report->unused_bytes = 0; - report->unused_blocks = 0; - report->largest_unused_block = 0; - if (ba->n_blocks > 0) { - //Deal with space before block 0 and after reserve: - { - struct block_allocator_blockpair *bp = &ba->blocks_array[0]; - assert(bp->offset >= align(ba->reserve_at_beginning, ba)); - uint64_t free_space = bp->offset - align(ba->reserve_at_beginning, ba); - if (free_space > 0) { - report->unused_bytes += free_space; - report->unused_blocks++; - if (free_space > report->largest_unused_block) { - report->largest_unused_block = free_space; - } - } - } - - //Deal with space between blocks: - for (uint64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) { - // Consider the space after blocknum - struct block_allocator_blockpair *bp = &ba->blocks_array[blocknum]; - uint64_t this_offset = bp[0].offset; - uint64_t this_size = bp[0].size; - uint64_t end_of_this_block = align(this_offset+this_size, ba); - uint64_t next_offset = bp[1].offset; - uint64_t free_space = next_offset - end_of_this_block; - if (free_space > 0) { - report->unused_bytes += free_space; - report->unused_blocks++; - if (free_space > report->largest_unused_block) { - report->largest_unused_block = free_space; - } - } - } - - //Deal with space after last block - { - struct block_allocator_blockpair *bp = &ba->blocks_array[ba->n_blocks-1]; - uint64_t this_offset = bp[0].offset; - uint64_t this_size = bp[0].size; - uint64_t end_of_this_block = align(this_offset+this_size, ba); - if (end_of_this_block < report->file_size_bytes) { - uint64_t free_space = report->file_size_bytes - 
end_of_this_block; - assert(free_space > 0); - report->unused_bytes += free_space; - report->unused_blocks++; - if (free_space > report->largest_unused_block) { - report->largest_unused_block = free_space; - } - } - } - } - else { - //No blocks. Just the reserve. - uint64_t end_of_this_block = align(ba->reserve_at_beginning, ba); - if (end_of_this_block < report->file_size_bytes) { - uint64_t free_space = report->file_size_bytes - end_of_this_block; - assert(free_space > 0); - report->unused_bytes += free_space; - report->unused_blocks++; - if (free_space > report->largest_unused_block) { - report->largest_unused_block = free_space; - } - } - } -} diff --git a/storage/tokudb/ft-index/ft/block_allocator.h b/storage/tokudb/ft-index/ft/block_allocator.h deleted file mode 100644 index 289e7251c84..00000000000 --- a/storage/tokudb/ft-index/ft/block_allocator.h +++ /dev/null @@ -1,230 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef BLOCK_ALLOCATOR_H -#define BLOCK_ALLOCATOR_H - -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. 
- - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -#include "fttypes.h" - - -#define BLOCK_ALLOCATOR_ALIGNMENT 4096 -// How much must be reserved at the beginning for the block? -// The actual header is 8+4+4+8+8_4+8+ the length of the db names + 1 pointer for each root. -// So 4096 should be enough. -#define BLOCK_ALLOCATOR_HEADER_RESERVE 4096 -#if (BLOCK_ALLOCATOR_HEADER_RESERVE % BLOCK_ALLOCATOR_ALIGNMENT) != 0 -#error -#endif - -// Block allocator. -// Overview: A block allocator manages the allocation of variable-sized blocks. -// The translation of block numbers to addresses is handled elsewhere. -// The allocation of block numbers is handled elsewhere. - -// We can create a block allocator. -// When creating a block allocator we also specify a certain-sized -// block at the beginning that is preallocated (and cannot be allocated -// or freed) - -// We can allocate blocks of a particular size at a particular location. -// We can allocate blocks of a particular size at a location chosen by the allocator. -// We can free blocks. -// We can determine the size of a block. - - -#define BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE (2*BLOCK_ALLOCATOR_HEADER_RESERVE) - -typedef struct block_allocator *BLOCK_ALLOCATOR; - -void create_block_allocator (BLOCK_ALLOCATOR * ba, uint64_t reserve_at_beginning, uint64_t alignment); -// Effect: Create a block allocator, in which the first RESERVE_AT_BEGINNING bytes are not put into a block. -// All blocks be start on a multiple of ALIGNMENT. -// Aborts if we run out of memory. -// Parameters -// ba (OUT): Result stored here. -// reserve_at_beginning (IN) Size of reserved block at beginning. This size does not have to be aligned. -// alignment (IN) Block alignment. - -void destroy_block_allocator (BLOCK_ALLOCATOR *ba); -// Effect: Destroy a block allocator at *ba. -// Also, set *ba=NULL. 
-// Rationale: If there was only one copy of the pointer, this kills that copy too. -// Paramaters: -// ba (IN/OUT): - - -void block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t offset); -// Effect: Allocate a block of the specified size at a particular offset. -// Aborts if anything goes wrong. -// The performance of this function may be as bad as Theta(N), where N is the number of blocks currently in use. -// Usage note: To allocate several blocks (e.g., when opening a BRT), use block_allocator_alloc_blocks_at(). -// Requires: The resulting block may not overlap any other allocated block. -// And the offset must be a multiple of the block alignment. -// Parameters: -// ba (IN/OUT): The block allocator. (Modifies ba.) -// size (IN): The size of the block. -// offset (IN): The location of the block. - - -struct block_allocator_blockpair { - uint64_t offset; - uint64_t size; -}; -void block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, uint64_t n_blocks, struct block_allocator_blockpair *pairs); -// Effect: Take pairs in any order, and add them all, as if we did block_allocator_alloc_block() on each pair. -// This should run in time O(N + M log M) where N is the number of blocks in ba, and M is the number of new blocks. -// Modifies: pairs (sorts them). - -void block_allocator_alloc_block (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t *offset); -// Effect: Allocate a block of the specified size at an address chosen by the allocator. -// Aborts if anything goes wrong. -// The block address will be a multiple of the alignment. -// Parameters: -// ba (IN/OUT): The block allocator. (Modifies ba.) -// size (IN): The size of the block. (The size does not have to be aligned.) -// offset (OUT): The location of the block. - -void block_allocator_free_block (BLOCK_ALLOCATOR ba, uint64_t offset); -// Effect: Free the block at offset. -// Requires: There must be a block currently allocated at that offset. -// Parameters: -// ba (IN/OUT): The block allocator. (Modifies ba.) -// offset (IN): The offset of the block. - - -uint64_t block_allocator_block_size (BLOCK_ALLOCATOR ba, uint64_t offset); -// Effect: Return the size of the block that starts at offset. -// Requires: There must be a block currently allocated at that offset. -// Parameters: -// ba (IN/OUT): The block allocator. (Modifies ba.) -// offset (IN): The offset of the block. - -void block_allocator_validate (BLOCK_ALLOCATOR ba); -// Effect: Check to see if the block allocator is OK. This may take a long time. -// Usage Hints: Probably only use this for unit tests. - -void block_allocator_print (BLOCK_ALLOCATOR ba); -// Effect: Print information about the block allocator. -// Rationale: This is probably useful only for debugging. - -uint64_t block_allocator_allocated_limit (BLOCK_ALLOCATOR ba); -// Effect: Return the unallocated block address of "infinite" size. -// That is, return the smallest address that is above all the allocated blocks. -// Rationale: When writing the root FIFO we don't know how big the block is. -// So we start at the "infinite" block, write the fifo, and then -// allocate_block_at of the correct size and offset to account for the root FIFO. - -int block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, uint64_t b, uint64_t *offset, uint64_t *size); -// Effect: Consider the blocks in sorted order. The reserved block at the beginning is number 0. The next one is number 1 and so forth. -// Return the offset and size of the block with that number. 
-// Return 0 if there is a block that big, return nonzero if b is too big. -// Rationale: This is probably useful only for tests. - -void block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION report); -// Effect: Fill in report to indicate how the file is used. -// Requires: -// report->file_size_bytes is filled in -// report->data_bytes is filled in -// report->checkpoint_bytes_additional is filled in - -void block_allocator_merge_blockpairs_into (uint64_t d, struct block_allocator_blockpair dst[/*d*/], - uint64_t s, const struct block_allocator_blockpair src[/*s*/]); -// Effect: Merge dst[d] and src[s] into dst[d+s], merging in place. -// Initially dst and src hold sorted arrays (sorted by increasing offset). -// Finally dst contains all d+s elements sorted in order. -// Requires: -// dst and src are sorted. -// dst must be large enough. -// No blocks may overlap. -// Rationale: This is exposed so it can be tested by a glass box tester. Otherwise it would be static (file-scope) function inside block_allocator.c - - -#endif diff --git a/storage/tokudb/ft-index/ft/block_table.cc b/storage/tokudb/ft-index/ft/block_table.cc deleted file mode 100644 index ba7d60b8d42..00000000000 --- a/storage/tokudb/ft-index/ft/block_table.cc +++ /dev/null @@ -1,1199 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. 
- - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -#include <toku_portability.h> -#include "ft-internal.h" // ugly but pragmatic, need access to dirty bits while holding translation lock -#include "fttypes.h" -#include "block_table.h" -#include "memory.h" -#include "toku_assert.h" -#include <toku_pthread.h> -#include "block_allocator.h" -#include "rbuf.h" -#include "wbuf.h" -#include <util/nb_mutex.h> - -//When the translation (btt) is stored on disk: -// In Header: -// size_on_disk -// location_on_disk -// In block translation table (in order): -// smallest_never_used_blocknum -// blocknum_freelist_head -// array -// a checksum -struct translation { //This is the BTT (block translation table) - enum translation_type type; - int64_t length_of_array; //Number of elements in array (block_translation). always >= smallest_never_used_blocknum - BLOCKNUM smallest_never_used_blocknum; - BLOCKNUM blocknum_freelist_head; // next (previously used) unused blocknum (free list) - struct block_translation_pair *block_translation; - - // Where and how big is the block translation vector stored on disk. 
- // size_on_disk is stored in block_translation[RESERVED_BLOCKNUM_TRANSLATION].size - // location_on is stored in block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff -}; - -static const BLOCKNUM freelist_null = {-1}; // in a freelist, this indicates end of list -static const DISKOFF size_is_free = (DISKOFF)-1; // value of block_translation_pair.size if blocknum is unused -static const DISKOFF diskoff_unused = (DISKOFF)-2; // value of block_translation_pair.u.diskoff if blocknum is used but does not yet have a diskblock - -/******** - * There are three copies of the translation table (btt) in the block table: - * - * checkpointed Is initialized by deserializing from disk, - * and is the only version ever read from disk. - * When read from disk it is copied to current. - * It is immutable. It can be replaced by an inprogress btt. - * - * inprogress Is only filled by copying from current, - * and is the only version ever serialized to disk. - * (It is serialized to disk on checkpoint and clean shutdown.) - * At end of checkpoint it replaces 'checkpointed'. - * During a checkpoint, any 'pending' dirty writes will update - * inprogress. - * - * current Is initialized by copying from checkpointed, - * is the only version ever modified while the database is in use, - * and is the only version ever copied to inprogress. - * It is never stored on disk. - ********/ - - -struct block_table { - struct translation current; // The current translation is the one used by client threads. It is not represented on disk. - struct translation inprogress; // the translation used by the checkpoint currently in progress. If the checkpoint thread allocates a block, it must also update the current translation. - struct translation checkpointed; // the translation for the data that shall remain inviolate on disk until the next checkpoint finishes, after which any blocks used only in this translation can be freed. - - // The in-memory data structure for block allocation. There is no on-disk data structure for block allocation. - // Note: This is *allocation* not *translation*. The block_allocator is unaware of which blocks are used for which translation, but simply allocates and deallocates blocks. - BLOCK_ALLOCATOR block_allocator; - toku_mutex_t mutex; - struct nb_mutex safe_file_size_lock; - bool checkpoint_skipped; - uint64_t safe_file_size; -}; - -//forward decls -static int64_t calculate_size_on_disk (struct translation *t); -static inline bool translation_prevents_freeing (struct translation *t, BLOCKNUM b, struct block_translation_pair *old_pair); -static inline void lock_for_blocktable (BLOCK_TABLE bt); -static inline void unlock_for_blocktable (BLOCK_TABLE bt); - - - -static void -ft_set_dirty(FT ft, bool for_checkpoint){ - toku_mutex_assert_locked(&ft->blocktable->mutex); - paranoid_invariant(ft->h->type == FT_CURRENT); - if (for_checkpoint) { - paranoid_invariant(ft->checkpoint_header->type == FT_CHECKPOINT_INPROGRESS); - ft->checkpoint_header->dirty = 1; - } - else { - ft->h->dirty = 1; - } -} - -static void -maybe_truncate_file(BLOCK_TABLE bt, int fd, uint64_t size_needed_before) { - toku_mutex_assert_locked(&bt->mutex); - uint64_t new_size_needed = block_allocator_allocated_limit(bt->block_allocator); - //Save a call to toku_os_get_file_size (kernel call) if unlikely to be useful. - if (new_size_needed < size_needed_before && new_size_needed < bt->safe_file_size) { - nb_mutex_lock(&bt->safe_file_size_lock, &bt->mutex); - - // Must hold safe_file_size_lock to change safe_file_size. 
- if (new_size_needed < bt->safe_file_size) { - int64_t safe_file_size_before = bt->safe_file_size; - // Not safe to use the 'to-be-truncated' portion until truncate is done. - bt->safe_file_size = new_size_needed; - unlock_for_blocktable(bt); - - uint64_t size_after; - toku_maybe_truncate_file(fd, new_size_needed, safe_file_size_before, &size_after); - lock_for_blocktable(bt); - - bt->safe_file_size = size_after; - } - nb_mutex_unlock(&bt->safe_file_size_lock); - } -} - -void -toku_maybe_truncate_file_on_open(BLOCK_TABLE bt, int fd) { - lock_for_blocktable(bt); - maybe_truncate_file(bt, fd, bt->safe_file_size); - unlock_for_blocktable(bt); -} - - -static void -copy_translation(struct translation * dst, struct translation * src, enum translation_type newtype) { - paranoid_invariant(src->length_of_array >= src->smallest_never_used_blocknum.b); //verify invariant - paranoid_invariant(newtype==TRANSLATION_DEBUG || - (src->type == TRANSLATION_CURRENT && newtype == TRANSLATION_INPROGRESS) || - (src->type == TRANSLATION_CHECKPOINTED && newtype == TRANSLATION_CURRENT)); - dst->type = newtype; - dst->smallest_never_used_blocknum = src->smallest_never_used_blocknum; - dst->blocknum_freelist_head = src->blocknum_freelist_head; - // destination btt is of fixed size. Allocate+memcpy the exact length necessary. - dst->length_of_array = dst->smallest_never_used_blocknum.b; - XMALLOC_N(dst->length_of_array, dst->block_translation); - memcpy(dst->block_translation, - src->block_translation, - dst->length_of_array * sizeof(*dst->block_translation)); - //New version of btt is not yet stored on disk. - dst->block_translation[RESERVED_BLOCKNUM_TRANSLATION].size = 0; - dst->block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff = diskoff_unused; -} - -int64_t -toku_block_get_blocks_in_use_unlocked(BLOCK_TABLE bt) { - BLOCKNUM b; - struct translation *t = &bt->current; - int64_t num_blocks = 0; - { - //Reserved blocknums do not get upgraded; They are part of the header. - for (b.b = RESERVED_BLOCKNUMS; b.b < t->smallest_never_used_blocknum.b; b.b++) { - if (t->block_translation[b.b].size != size_is_free) { - num_blocks++; - } - } - } - return num_blocks; -} - -static void -maybe_optimize_translation(struct translation *t) { - //Reduce 'smallest_never_used_blocknum.b' (completely free blocknums instead of just - //on a free list. Doing so requires us to regenerate the free list. - //This is O(n) work, so do it only if you're already doing that. - - BLOCKNUM b; - paranoid_invariant(t->smallest_never_used_blocknum.b >= RESERVED_BLOCKNUMS); - //Calculate how large the free suffix is. - int64_t freed; - { - for (b.b = t->smallest_never_used_blocknum.b; b.b > RESERVED_BLOCKNUMS; b.b--) { - if (t->block_translation[b.b-1].size != size_is_free) { - break; - } - } - freed = t->smallest_never_used_blocknum.b - b.b; - } - if (freed>0) { - t->smallest_never_used_blocknum.b = b.b; - if (t->length_of_array/4 > t->smallest_never_used_blocknum.b) { - //We're using more memory than necessary to represent this now. Reduce. - uint64_t new_length = t->smallest_never_used_blocknum.b * 2; - XREALLOC_N(new_length, t->block_translation); - t->length_of_array = new_length; - //No need to zero anything out. - } - - //Regenerate free list. 
- t->blocknum_freelist_head.b = freelist_null.b; - for (b.b = RESERVED_BLOCKNUMS; b.b < t->smallest_never_used_blocknum.b; b.b++) { - if (t->block_translation[b.b].size == size_is_free) { - t->block_translation[b.b].u.next_free_blocknum = t->blocknum_freelist_head; - t->blocknum_freelist_head = b; - } - } - } -} - -// block table must be locked by caller of this function -void -toku_block_translation_note_start_checkpoint_unlocked (BLOCK_TABLE bt) { - toku_mutex_assert_locked(&bt->mutex); - // Copy current translation to inprogress translation. - paranoid_invariant(bt->inprogress.block_translation == NULL); - //We're going to do O(n) work to copy the translation, so we - //can afford to do O(n) work by optimizing the translation - maybe_optimize_translation(&bt->current); - copy_translation(&bt->inprogress, &bt->current, TRANSLATION_INPROGRESS); - - bt->checkpoint_skipped = false; -} - -//#define PRNTF(str, b, siz, ad, bt) printf("%s[%d] %s %" PRId64 " %" PRId64 " %" PRId64 "\n", __FUNCTION__, __LINE__, str, b, siz, ad); fflush(stdout); if (bt) block_allocator_validate(((BLOCK_TABLE)(bt))->block_allocator); -//Debugging function -#define PRNTF(str, b, siz, ad, bt) - -void toku_block_translation_note_skipped_checkpoint (BLOCK_TABLE bt) { - //Purpose, alert block translation that the checkpoint was skipped, e.x. for a non-dirty header - lock_for_blocktable(bt); - paranoid_invariant_notnull(bt->inprogress.block_translation); - bt->checkpoint_skipped = true; - unlock_for_blocktable(bt); -} - -// Purpose: free any disk space used by previous checkpoint that isn't in use by either -// - current state -// - in-progress checkpoint -// capture inprogress as new checkpointed. -// For each entry in checkpointBTT -// if offset does not match offset in inprogress -// assert offset does not match offset in current -// free (offset,len) from checkpoint -// move inprogress to checkpoint (resetting type) -// inprogress = NULL -void -toku_block_translation_note_end_checkpoint (BLOCK_TABLE bt, int fd) { - // Free unused blocks - lock_for_blocktable(bt); - uint64_t allocated_limit_at_start = block_allocator_allocated_limit(bt->block_allocator); - paranoid_invariant_notnull(bt->inprogress.block_translation); - if (bt->checkpoint_skipped) { - toku_free(bt->inprogress.block_translation); - memset(&bt->inprogress, 0, sizeof(bt->inprogress)); - goto end; - } - - //Make certain inprogress was allocated space on disk - assert(bt->inprogress.block_translation[RESERVED_BLOCKNUM_TRANSLATION].size > 0); - assert(bt->inprogress.block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff > 0); - - { - int64_t i; - struct translation *t = &bt->checkpointed; - - for (i = 0; i < t->length_of_array; i++) { - struct block_translation_pair *pair = &t->block_translation[i]; - if (pair->size > 0 && !translation_prevents_freeing(&bt->inprogress, make_blocknum(i), pair)) { - assert(!translation_prevents_freeing(&bt->current, make_blocknum(i), pair)); - PRNTF("free", i, pair->size, pair->u.diskoff, bt); - block_allocator_free_block(bt->block_allocator, pair->u.diskoff); - } - } - toku_free(bt->checkpointed.block_translation); - bt->checkpointed = bt->inprogress; - bt->checkpointed.type = TRANSLATION_CHECKPOINTED; - memset(&bt->inprogress, 0, sizeof(bt->inprogress)); - maybe_truncate_file(bt, fd, allocated_limit_at_start); - } -end: - unlock_for_blocktable(bt); -} - -__attribute__((nonnull,const)) -static inline bool -is_valid_blocknum(struct translation *t, BLOCKNUM b) { - //Sanity check: Verify invariant - 
paranoid_invariant(t->length_of_array >= t->smallest_never_used_blocknum.b); - return b.b >= 0 && b.b < t->smallest_never_used_blocknum.b; -} - -static inline void -verify_valid_blocknum (struct translation *UU(t), BLOCKNUM UU(b)) { - paranoid_invariant(is_valid_blocknum(t, b)); -} - -__attribute__((nonnull,const)) -static inline bool -is_valid_freeable_blocknum(struct translation *t, BLOCKNUM b) { - //Sanity check: Verify invariant - paranoid_invariant(t->length_of_array >= t->smallest_never_used_blocknum.b); - return b.b >= RESERVED_BLOCKNUMS && b.b < t->smallest_never_used_blocknum.b; -} - -//Can be freed -static inline void -verify_valid_freeable_blocknum (struct translation *UU(t), BLOCKNUM UU(b)) { - paranoid_invariant(is_valid_freeable_blocknum(t, b)); -} - -static void -blocktable_lock_init (BLOCK_TABLE bt) { - memset(&bt->mutex, 0, sizeof(bt->mutex)); - toku_mutex_init(&bt->mutex, NULL); -} - -static void -blocktable_lock_destroy (BLOCK_TABLE bt) { - toku_mutex_destroy(&bt->mutex); -} - -static inline void -lock_for_blocktable (BLOCK_TABLE bt) { - // Locks the blocktable_mutex. - toku_mutex_lock(&bt->mutex); -} - -static inline void -unlock_for_blocktable (BLOCK_TABLE bt) { - toku_mutex_unlock(&bt->mutex); -} - -void -toku_ft_lock (FT ft) { - BLOCK_TABLE bt = ft->blocktable; - lock_for_blocktable(bt); -} - -void -toku_ft_unlock (FT ft) { - BLOCK_TABLE bt = ft->blocktable; - toku_mutex_assert_locked(&bt->mutex); - unlock_for_blocktable(bt); -} - -// Also used only in ft-serialize-test. -void -toku_block_free(BLOCK_TABLE bt, uint64_t offset) { - lock_for_blocktable(bt); -PRNTF("freeSOMETHINGunknown", 0L, 0L, offset, bt); - block_allocator_free_block(bt->block_allocator, offset); - unlock_for_blocktable(bt); -} - -static int64_t -calculate_size_on_disk (struct translation *t) { - int64_t r = (8 + // smallest_never_used_blocknum - 8 + // blocknum_freelist_head - t->smallest_never_used_blocknum.b * 16 + // Array - 4); // 4 for checksum - return r; -} - -// We cannot free the disk space allocated to this blocknum if it is still in use by the given translation table. 
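The value computed by the removed calculate_size_on_disk() above follows directly from the serialized layout listed near the top of this file: two 8-byte blocknums, 16 bytes per translation pair, and a 4-byte checksum. A minimal sketch of the same arithmetic, with an illustrative helper name:

    #include <cstdint>

    // 8 bytes smallest_never_used_blocknum + 8 bytes blocknum_freelist_head
    // + 16 bytes (diskoff + size) per array entry + 4 bytes checksum.
    static int64_t btt_size_on_disk(int64_t n_array_entries) {
        return 8 + 8 + n_array_entries * 16 + 4;
    }
    // e.g. 100 entries serialize to 8 + 8 + 1600 + 4 = 1620 bytes; the
    // serializer later pads the write buffer up to a 512-byte multiple.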
-static inline bool -translation_prevents_freeing(struct translation *t, BLOCKNUM b, struct block_translation_pair *old_pair) { - return (t->block_translation && - b.b < t->smallest_never_used_blocknum.b && - old_pair->u.diskoff == t->block_translation[b.b].u.diskoff); -} - -static void -blocknum_realloc_on_disk_internal (BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, bool for_checkpoint) { - toku_mutex_assert_locked(&bt->mutex); - ft_set_dirty(ft, for_checkpoint); - - struct translation *t = &bt->current; - struct block_translation_pair old_pair = t->block_translation[b.b]; -PRNTF("old", b.b, old_pair.size, old_pair.u.diskoff, bt); - //Free the old block if it is not still in use by the checkpoint in progress or the previous checkpoint - bool cannot_free = (bool) - ((!for_checkpoint && translation_prevents_freeing(&bt->inprogress, b, &old_pair)) || - translation_prevents_freeing(&bt->checkpointed, b, &old_pair)); - if (!cannot_free && old_pair.u.diskoff!=diskoff_unused) { -PRNTF("Freed", b.b, old_pair.size, old_pair.u.diskoff, bt); - block_allocator_free_block(bt->block_allocator, old_pair.u.diskoff); - } - - uint64_t allocator_offset = diskoff_unused; - t->block_translation[b.b].size = size; - if (size > 0) { - // Allocate a new block if the size is greater than 0, - // if the size is just 0, offset will be set to diskoff_unused - block_allocator_alloc_block(bt->block_allocator, size, &allocator_offset); - } - t->block_translation[b.b].u.diskoff = allocator_offset; - *offset = allocator_offset; - -PRNTF("New", b.b, t->block_translation[b.b].size, t->block_translation[b.b].u.diskoff, bt); - //Update inprogress btt if appropriate (if called because Pending bit is set). - if (for_checkpoint) { - paranoid_invariant(b.b < bt->inprogress.length_of_array); - bt->inprogress.block_translation[b.b] = t->block_translation[b.b]; - } -} - -static void -ensure_safe_write_unlocked(BLOCK_TABLE bt, int fd, DISKOFF block_size, DISKOFF block_offset) { - // Requires: holding bt->mutex - uint64_t size_needed = block_size + block_offset; - if (size_needed > bt->safe_file_size) { - // Must hold safe_file_size_lock to change safe_file_size. - nb_mutex_lock(&bt->safe_file_size_lock, &bt->mutex); - if (size_needed > bt->safe_file_size) { - unlock_for_blocktable(bt); - - int64_t size_after; - toku_maybe_preallocate_in_file(fd, size_needed, bt->safe_file_size, &size_after); - - lock_for_blocktable(bt); - bt->safe_file_size = size_after; - } - nb_mutex_unlock(&bt->safe_file_size_lock); - } -} - -void -toku_blocknum_realloc_on_disk (BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, int fd, bool for_checkpoint) { - lock_for_blocktable(bt); - struct translation *t = &bt->current; - verify_valid_freeable_blocknum(t, b); - blocknum_realloc_on_disk_internal(bt, b, size, offset, ft, for_checkpoint); - - ensure_safe_write_unlocked(bt, fd, size, *offset); - unlock_for_blocktable(bt); -} - -__attribute__((nonnull,const)) -static inline bool -pair_is_unallocated(struct block_translation_pair *pair) { - return pair->size == 0 && pair->u.diskoff == diskoff_unused; -} - -static void blocknum_alloc_translation_on_disk_unlocked(BLOCK_TABLE bt) -// Effect: figure out where to put the inprogress btt on disk, allocate space for it there. -// The space must be 512-byte aligned (both the starting address and the size). -// As a result, the allcoated space may be a little bit bigger (up to the next 512-byte boundary) than the actual btt. 
-{ - toku_mutex_assert_locked(&bt->mutex); - - struct translation *t = &bt->inprogress; - paranoid_invariant_notnull(t->block_translation); - BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_TRANSLATION); - //Each inprogress is allocated only once - paranoid_invariant(pair_is_unallocated(&t->block_translation[b.b])); - - //Allocate a new block - int64_t size = calculate_size_on_disk(t); - uint64_t offset; - block_allocator_alloc_block(bt->block_allocator, size, &offset); -PRNTF("blokAllokator", 1L, size, offset, bt); - t->block_translation[b.b].u.diskoff = offset; - t->block_translation[b.b].size = size; -} - -void toku_serialize_translation_to_wbuf(BLOCK_TABLE bt, int fd, struct wbuf *w, - int64_t *address, int64_t *size) -// Effect: Fills wbuf (which starts uninitialized) with bt -// A clean shutdown runs checkpoint start so that current and inprogress are copies. -// The resulting wbuf buffer is guaranteed to be be 512-byte aligned and the total length is a multiple of 512 (so we pad with zeros at the end if needd) -// The address is guaranteed to be 512-byte aligned, but the size is not guaranteed. -// It *is* guaranteed that we can read up to the next 512-byte boundary, however -{ - lock_for_blocktable(bt); - struct translation *t = &bt->inprogress; - - BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_TRANSLATION); - blocknum_alloc_translation_on_disk_unlocked(bt); // The allocated block must be 512-byte aligned to make O_DIRECT happy. - uint64_t size_translation = calculate_size_on_disk(t); - uint64_t size_aligned = roundup_to_multiple(512, size_translation); - assert((int64_t)size_translation==t->block_translation[b.b].size); - { - //Init wbuf - if (0) - printf("%s:%d writing translation table of size_translation %" PRIu64 " at %" PRId64 "\n", __FILE__, __LINE__, size_translation, t->block_translation[b.b].u.diskoff); - char *XMALLOC_N_ALIGNED(512, size_aligned, buf); - for (uint64_t i=size_translation; i<size_aligned; i++) buf[i]=0; // fill in the end of the buffer with zeros. - wbuf_init(w, buf, size_aligned); - } - wbuf_BLOCKNUM(w, t->smallest_never_used_blocknum); - wbuf_BLOCKNUM(w, t->blocknum_freelist_head); - int64_t i; - for (i=0; i<t->smallest_never_used_blocknum.b; i++) { - if (0) - printf("%s:%d %" PRId64 ",%" PRId64 "\n", __FILE__, __LINE__, t->block_translation[i].u.diskoff, t->block_translation[i].size); - wbuf_DISKOFF(w, t->block_translation[i].u.diskoff); - wbuf_DISKOFF(w, t->block_translation[i].size); - } - uint32_t checksum = toku_x1764_finish(&w->checksum); - wbuf_int(w, checksum); - *address = t->block_translation[b.b].u.diskoff; - *size = size_translation; - assert((*address)%512 == 0); - - ensure_safe_write_unlocked(bt, fd, size_aligned, *address); - unlock_for_blocktable(bt); -} - - -// Perhaps rename: purpose is get disk address of a block, given its blocknum (blockid?) -static void -translate_blocknum_to_offset_size_unlocked(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF *offset, DISKOFF *size) { - struct translation *t = &bt->current; - verify_valid_blocknum(t, b); - if (offset) *offset = t->block_translation[b.b].u.diskoff; - if (size) *size = t->block_translation[b.b].size; -} - -// Perhaps rename: purpose is get disk address of a block, given its blocknum (blockid?) 
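toku_serialize_translation_to_wbuf() above pads the serialized translation up to a 512-byte multiple and zero-fills the tail so the buffer can be written with O_DIRECT. A minimal standalone sketch of that padding step, using std::aligned_alloc in place of the Toku XMALLOC_N_ALIGNED helper; the function name here is illustrative.

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    // Round the translation size up to a multiple of 512, allocate a
    // 512-byte-aligned buffer of that length, and zero the padding tail.
    static char *alloc_padded_translation_buffer(uint64_t size_translation,
                                                 uint64_t *size_aligned_out) {
        uint64_t size_aligned = ((size_translation + 511) / 512) * 512;
        char *buf = static_cast<char *>(std::aligned_alloc(512, size_aligned));
        if (buf != nullptr) {
            std::memset(buf + size_translation, 0, size_aligned - size_translation);
        }
        *size_aligned_out = size_aligned;
        return buf;
    }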
-void -toku_translate_blocknum_to_offset_size(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF *offset, DISKOFF *size) { - lock_for_blocktable(bt); - translate_blocknum_to_offset_size_unlocked(bt, b, offset, size); - unlock_for_blocktable(bt); -} - -//Only called by toku_allocate_blocknum -static void -maybe_expand_translation (struct translation *t) { -// Effect: expand the array to maintain size invariant -// given that one more never-used blocknum will soon be used. - if (t->length_of_array <= t->smallest_never_used_blocknum.b) { - //expansion is necessary - uint64_t new_length = t->smallest_never_used_blocknum.b * 2; - XREALLOC_N(new_length, t->block_translation); - uint64_t i; - for (i = t->length_of_array; i < new_length; i++) { - t->block_translation[i].u.next_free_blocknum = freelist_null; - t->block_translation[i].size = size_is_free; - } - t->length_of_array = new_length; - } -} - -void -toku_allocate_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *res, FT ft) { - toku_mutex_assert_locked(&bt->mutex); - BLOCKNUM result; - struct translation * t = &bt->current; - if (t->blocknum_freelist_head.b == freelist_null.b) { - // no previously used blocknums are available - // use a never used blocknum - maybe_expand_translation(t); //Ensure a never used blocknums is available - result = t->smallest_never_used_blocknum; - t->smallest_never_used_blocknum.b++; - } else { // reuse a previously used blocknum - result = t->blocknum_freelist_head; - BLOCKNUM next = t->block_translation[result.b].u.next_free_blocknum; - t->blocknum_freelist_head = next; - } - //Verify the blocknum is free - paranoid_invariant(t->block_translation[result.b].size == size_is_free); - //blocknum is not free anymore - t->block_translation[result.b].u.diskoff = diskoff_unused; - t->block_translation[result.b].size = 0; - verify_valid_freeable_blocknum(t, result); - *res = result; - ft_set_dirty(ft, false); -} - -void -toku_allocate_blocknum(BLOCK_TABLE bt, BLOCKNUM *res, FT ft) { - lock_for_blocktable(bt); - toku_allocate_blocknum_unlocked(bt, res, ft); - unlock_for_blocktable(bt); -} - -static void -free_blocknum_in_translation(struct translation *t, BLOCKNUM b) -{ - verify_valid_freeable_blocknum(t, b); - paranoid_invariant(t->block_translation[b.b].size != size_is_free); - - PRNTF("free_blocknum", b.b, t->block_translation[b.b].size, t->block_translation[b.b].u.diskoff, bt); - t->block_translation[b.b].size = size_is_free; - t->block_translation[b.b].u.next_free_blocknum = t->blocknum_freelist_head; - t->blocknum_freelist_head = b; -} - -static void -free_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *bp, FT ft, bool for_checkpoint) { -// Effect: Free a blocknum. -// If the blocknum holds the only reference to a block on disk, free that block - toku_mutex_assert_locked(&bt->mutex); - BLOCKNUM b = *bp; - bp->b = 0; //Remove caller's reference. - - struct block_translation_pair old_pair = bt->current.block_translation[b.b]; - - free_blocknum_in_translation(&bt->current, b); - if (for_checkpoint) { - paranoid_invariant(ft->checkpoint_header->type == FT_CHECKPOINT_INPROGRESS); - free_blocknum_in_translation(&bt->inprogress, b); - } - - //If the size is 0, no disk block has ever been assigned to this blocknum. 
- if (old_pair.size > 0) { - //Free the old block if it is not still in use by the checkpoint in progress or the previous checkpoint - bool cannot_free = (bool) - (translation_prevents_freeing(&bt->inprogress, b, &old_pair) || - translation_prevents_freeing(&bt->checkpointed, b, &old_pair)); - if (!cannot_free) { -PRNTF("free_blocknum_free", b.b, old_pair.size, old_pair.u.diskoff, bt); - block_allocator_free_block(bt->block_allocator, old_pair.u.diskoff); - } - } - else { - paranoid_invariant(old_pair.size==0); - paranoid_invariant(old_pair.u.diskoff == diskoff_unused); - } - ft_set_dirty(ft, for_checkpoint); -} - -void -toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *bp, FT ft, bool for_checkpoint) { - lock_for_blocktable(bt); - free_blocknum_unlocked(bt, bp, ft, for_checkpoint); - unlock_for_blocktable(bt); -} - -//Verify there are no free blocks. -void -toku_block_verify_no_free_blocknums(BLOCK_TABLE UU(bt)) { - paranoid_invariant(bt->current.blocknum_freelist_head.b == freelist_null.b); -} - -// Frees blocknums that have a size of 0 and unused diskoff -// Currently used for eliminating unused cached rollback log nodes -void -toku_free_unused_blocknums(BLOCK_TABLE bt, BLOCKNUM root) { - lock_for_blocktable(bt); - int64_t smallest = bt->current.smallest_never_used_blocknum.b; - for (int64_t i=RESERVED_BLOCKNUMS; i < smallest; i++) { - if (i == root.b) { - continue; - } - BLOCKNUM b = make_blocknum(i); - if (bt->current.block_translation[b.b].size == 0) { - invariant(bt->current.block_translation[b.b].u.diskoff == diskoff_unused); - free_blocknum_in_translation(&bt->current, b); - } - } - unlock_for_blocktable(bt); -} - -__attribute__((nonnull,const,unused)) -static inline bool -no_data_blocks_except_root(BLOCK_TABLE bt, BLOCKNUM root) { - bool ok = true; - lock_for_blocktable(bt); - int64_t smallest = bt->current.smallest_never_used_blocknum.b; - if (root.b < RESERVED_BLOCKNUMS) { - ok = false; - goto cleanup; - } - int64_t i; - for (i=RESERVED_BLOCKNUMS; i < smallest; i++) { - if (i == root.b) { - continue; - } - BLOCKNUM b = make_blocknum(i); - if (bt->current.block_translation[b.b].size != size_is_free) { - ok = false; - goto cleanup; - } - } - cleanup: - unlock_for_blocktable(bt); - return ok; -} - -//Verify there are no data blocks except root. -// TODO(leif): This actually takes a lock, but I don't want to fix all the callers right now. -void -toku_block_verify_no_data_blocks_except_root(BLOCK_TABLE UU(bt), BLOCKNUM UU(root)) { - paranoid_invariant(no_data_blocks_except_root(bt, root)); -} - -__attribute__((nonnull,const,unused)) -static inline bool -blocknum_allocated(BLOCK_TABLE bt, BLOCKNUM b) { - lock_for_blocktable(bt); - struct translation *t = &bt->current; - verify_valid_blocknum(t, b); - bool ok = t->block_translation[b.b].size != size_is_free; - unlock_for_blocktable(bt); - return ok; -} - -//Verify a blocknum is currently allocated. 
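The blocknum free list handled above is threaded through the translation array itself: a freed slot's size is set to size_is_free and its u.next_free_blocknum points at the previous list head. A minimal sketch of the push and pop steps performed by the removed free_blocknum_in_translation() and toku_allocate_blocknum_unlocked(), using simplified stand-in types (all names below are illustrative):

    #include <cstdint>
    #include <vector>

    static const int64_t FREELIST_NULL = -1;   // mirrors freelist_null
    static const int64_t SIZE_IS_FREE  = -1;   // mirrors size_is_free

    struct pair_t { int64_t size; int64_t next_free; };

    // Push blocknum b onto the free list, as free_blocknum_in_translation() did.
    static void push_free(std::vector<pair_t> &table, int64_t &freelist_head, int64_t b) {
        table[b].size = SIZE_IS_FREE;
        table[b].next_free = freelist_head;
        freelist_head = b;
    }

    // Pop a blocknum for reuse, as toku_allocate_blocknum_unlocked() did when
    // the free list is non-empty.
    static int64_t pop_free(std::vector<pair_t> &table, int64_t &freelist_head) {
        int64_t b = freelist_head;
        freelist_head = table[b].next_free;
        table[b].size = 0;            // slot is no longer free
        return b;
    }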
-void -toku_verify_blocknum_allocated(BLOCK_TABLE UU(bt), BLOCKNUM UU(b)) { - paranoid_invariant(blocknum_allocated(bt, b)); -} - -//Only used by toku_dump_translation table (debug info) -static void -dump_translation(FILE *f, struct translation *t) { - if (t->block_translation) { - BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_TRANSLATION); - fprintf(f, " length_of_array[%" PRId64 "]", t->length_of_array); - fprintf(f, " smallest_never_used_blocknum[%" PRId64 "]", t->smallest_never_used_blocknum.b); - fprintf(f, " blocknum_free_list_head[%" PRId64 "]", t->blocknum_freelist_head.b); - fprintf(f, " size_on_disk[%" PRId64 "]", t->block_translation[b.b].size); - fprintf(f, " location_on_disk[%" PRId64 "]\n", t->block_translation[b.b].u.diskoff); - int64_t i; - for (i=0; i<t->length_of_array; i++) { - fprintf(f, " %" PRId64 ": %" PRId64 " %" PRId64 "\n", i, t->block_translation[i].u.diskoff, t->block_translation[i].size); - } - fprintf(f, "\n"); - } - else fprintf(f, " does not exist\n"); -} - -//Only used by toku_ft_dump which is only for debugging purposes -// "pretty" just means we use tabs so we can parse output easier later -void -toku_dump_translation_table_pretty(FILE *f, BLOCK_TABLE bt) { - lock_for_blocktable(bt); - struct translation *t = &bt->checkpointed; - assert(t->block_translation != nullptr); - for (int64_t i = 0; i < t->length_of_array; ++i) { - fprintf(f, "%" PRId64 "\t%" PRId64 "\t%" PRId64 "\n", i, t->block_translation[i].u.diskoff, t->block_translation[i].size); - } - unlock_for_blocktable(bt); -} - -//Only used by toku_ft_dump which is only for debugging purposes -void -toku_dump_translation_table(FILE *f, BLOCK_TABLE bt) { - lock_for_blocktable(bt); - fprintf(f, "Current block translation:"); - dump_translation(f, &bt->current); - fprintf(f, "Checkpoint in progress block translation:"); - dump_translation(f, &bt->inprogress); - fprintf(f, "Checkpointed block translation:"); - dump_translation(f, &bt->checkpointed); - unlock_for_blocktable(bt); -} - -//Only used by ftdump -void -toku_blocknum_dump_translation(BLOCK_TABLE bt, BLOCKNUM b) { - lock_for_blocktable(bt); - - struct translation *t = &bt->current; - if (b.b < t->length_of_array) { - struct block_translation_pair *bx = &t->block_translation[b.b]; - printf("%" PRId64 ": %" PRId64 " %" PRId64 "\n", b.b, bx->u.diskoff, bx->size); - } - unlock_for_blocktable(bt); -} - - -//Must not call this function when anything else is using the blocktable. -//No one may use the blocktable afterwards. -void -toku_blocktable_destroy(BLOCK_TABLE *btp) { - BLOCK_TABLE bt = *btp; - *btp = NULL; - if (bt->current.block_translation) toku_free(bt->current.block_translation); - if (bt->inprogress.block_translation) toku_free(bt->inprogress.block_translation); - if (bt->checkpointed.block_translation) toku_free(bt->checkpointed.block_translation); - - destroy_block_allocator(&bt->block_allocator); - blocktable_lock_destroy(bt); - nb_mutex_destroy(&bt->safe_file_size_lock); - toku_free(bt); -} - - -static BLOCK_TABLE -blocktable_create_internal (void) { -// Effect: Fill it in, including the translation table, which is uninitialized - BLOCK_TABLE XCALLOC(bt); - blocktable_lock_init(bt); - nb_mutex_init(&bt->safe_file_size_lock); - - //There are two headers, so we reserve space for two. - uint64_t reserve_per_header = BLOCK_ALLOCATOR_HEADER_RESERVE; - - //Must reserve in multiples of BLOCK_ALLOCATOR_ALIGNMENT - //Round up the per-header usage if necessary. - //We want each header aligned. 
- uint64_t remainder = BLOCK_ALLOCATOR_HEADER_RESERVE % BLOCK_ALLOCATOR_ALIGNMENT; - if (remainder!=0) { - reserve_per_header += BLOCK_ALLOCATOR_ALIGNMENT; - reserve_per_header -= remainder; - } - assert(2*reserve_per_header == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - create_block_allocator(&bt->block_allocator, - BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, - BLOCK_ALLOCATOR_ALIGNMENT); - return bt; -} - - - -static void -translation_default(struct translation *t) { // destination into which to create a default translation - t->type = TRANSLATION_CHECKPOINTED; - t->smallest_never_used_blocknum = make_blocknum(RESERVED_BLOCKNUMS); - t->length_of_array = t->smallest_never_used_blocknum.b; - t->blocknum_freelist_head = freelist_null; - XMALLOC_N(t->length_of_array, t->block_translation); - int64_t i; - for (i = 0; i < t->length_of_array; i++) { - t->block_translation[i].size = 0; - t->block_translation[i].u.diskoff = diskoff_unused; - } -} - - -static int -translation_deserialize_from_buffer(struct translation *t, // destination into which to deserialize - DISKOFF location_on_disk, //Location of translation_buffer - uint64_t size_on_disk, - unsigned char * translation_buffer) { // buffer with serialized translation - int r = 0; - assert(location_on_disk!=0); - t->type = TRANSLATION_CHECKPOINTED; - { - // check the checksum - uint32_t x1764 = toku_x1764_memory(translation_buffer, size_on_disk - 4); - uint64_t offset = size_on_disk - 4; - //printf("%s:%d read from %ld (x1764 offset=%ld) size=%ld\n", __FILE__, __LINE__, block_translation_address_on_disk, offset, block_translation_size_on_disk); - uint32_t stored_x1764 = toku_dtoh32(*(int*)(translation_buffer + offset)); - if (x1764 != stored_x1764) { - fprintf(stderr, "Translation table checksum failure: calc=0x%08x read=0x%08x\n", x1764, stored_x1764); - r = TOKUDB_BAD_CHECKSUM; - goto exit; - } - } - struct rbuf rt; - rt.buf = translation_buffer; - rt.ndone = 0; - rt.size = size_on_disk-4;//4==checksum - - t->smallest_never_used_blocknum = rbuf_blocknum(&rt); - t->length_of_array = t->smallest_never_used_blocknum.b; - assert(t->smallest_never_used_blocknum.b >= RESERVED_BLOCKNUMS); - t->blocknum_freelist_head = rbuf_blocknum(&rt); - XMALLOC_N(t->length_of_array, t->block_translation); - int64_t i; - for (i=0; i < t->length_of_array; i++) { - t->block_translation[i].u.diskoff = rbuf_diskoff(&rt); - t->block_translation[i].size = rbuf_diskoff(&rt); -PRNTF("ReadIn", i, t->block_translation[i].size, t->block_translation[i].u.diskoff, NULL); - } - assert(calculate_size_on_disk(t) == (int64_t)size_on_disk); - assert(t->block_translation[RESERVED_BLOCKNUM_TRANSLATION].size == (int64_t)size_on_disk); - assert(t->block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff == location_on_disk); -exit: - return r; -} - -// We just initialized a translation, inform block allocator to reserve space for each blocknum in use. -static void -blocktable_note_translation (BLOCK_ALLOCATOR allocator, struct translation *t) { - //This is where the space for them will be reserved (in addition to normal blocks). - //See RESERVED_BLOCKNUMS - - // Previously this added blocks one at a time. Now we make an array and pass it in so it can be sorted and merged. See #3218. 
- struct block_allocator_blockpair *XMALLOC_N(t->smallest_never_used_blocknum.b, pairs); - uint64_t n_pairs = 0; - for (int64_t i=0; i<t->smallest_never_used_blocknum.b; i++) { - struct block_translation_pair pair = t->block_translation[i]; - if (pair.size > 0) { - paranoid_invariant(pair.u.diskoff != diskoff_unused); - int cur_pair = n_pairs++; - pairs[cur_pair] = (struct block_allocator_blockpair) { .offset = (uint64_t) pair.u.diskoff, - .size = (uint64_t) pair.size }; - } - } - block_allocator_alloc_blocks_at(allocator, n_pairs, pairs); - toku_free(pairs); -} - - -// Fill in the checkpointed translation from buffer, and copy checkpointed to current. -// The one read from disk is the last known checkpointed one, so we are keeping it in -// place and then setting current (which is never stored on disk) for current use. -// The translation_buffer has translation only, we create the rest of the block_table. -int -toku_blocktable_create_from_buffer(int fd, - BLOCK_TABLE *btp, - DISKOFF location_on_disk, //Location of translation_buffer - DISKOFF size_on_disk, - unsigned char *translation_buffer) { - BLOCK_TABLE bt = blocktable_create_internal(); - int r = translation_deserialize_from_buffer(&bt->checkpointed, location_on_disk, size_on_disk, translation_buffer); - if (r != 0) { - goto exit; - } - blocktable_note_translation(bt->block_allocator, &bt->checkpointed); - // we just filled in checkpointed, now copy it to current. - copy_translation(&bt->current, &bt->checkpointed, TRANSLATION_CURRENT); - - int64_t file_size; - r = toku_os_get_file_size(fd, &file_size); - lazy_assert_zero(r); - invariant(file_size >= 0); - bt->safe_file_size = file_size; - - *btp = bt; -exit: - return r; -} - - -void -toku_blocktable_create_new(BLOCK_TABLE *btp) { - BLOCK_TABLE bt = blocktable_create_internal(); - translation_default(&bt->checkpointed); // create default btt (empty except for reserved blocknums) - blocktable_note_translation(bt->block_allocator, &bt->checkpointed); - // we just created a default checkpointed, now copy it to current. 
- copy_translation(&bt->current, &bt->checkpointed, TRANSLATION_CURRENT); - - *btp = bt; -} - -int -toku_blocktable_iterate (BLOCK_TABLE bt, enum translation_type type, BLOCKTABLE_CALLBACK f, void *extra, bool data_only, bool used_only) { - struct translation *src; - - int r = 0; - switch (type) { - case TRANSLATION_CURRENT: src = &bt->current; break; - case TRANSLATION_INPROGRESS: src = &bt->inprogress; break; - case TRANSLATION_CHECKPOINTED: src = &bt->checkpointed; break; - default: r = EINVAL; break; - } - struct translation fakecurrent; - struct translation *t = &fakecurrent; - if (r==0) { - lock_for_blocktable(bt); - copy_translation(t, src, TRANSLATION_DEBUG); - t->block_translation[RESERVED_BLOCKNUM_TRANSLATION] = - src->block_translation[RESERVED_BLOCKNUM_TRANSLATION]; - unlock_for_blocktable(bt); - int64_t i; - for (i=0; i<t->smallest_never_used_blocknum.b; i++) { - struct block_translation_pair pair = t->block_translation[i]; - if (data_only && i< RESERVED_BLOCKNUMS) continue; - if (used_only && pair.size <= 0) continue; - r = f(make_blocknum(i), pair.size, pair.u.diskoff, extra); - if (r!=0) break; - } - toku_free(t->block_translation); - } - return r; -} - -typedef struct { - int64_t used_space; - int64_t total_space; -} frag_extra; - -static int -frag_helper(BLOCKNUM UU(b), int64_t size, int64_t address, void *extra) { - frag_extra *info = (frag_extra *) extra; - - if (size + address > info->total_space) - info->total_space = size + address; - info->used_space += size; - return 0; -} - -void -toku_blocktable_internal_fragmentation (BLOCK_TABLE bt, int64_t *total_sizep, int64_t *used_sizep) { - frag_extra info = {0,0}; - int r = toku_blocktable_iterate(bt, TRANSLATION_CHECKPOINTED, frag_helper, &info, false, true); - assert_zero(r); - - if (total_sizep) *total_sizep = info.total_space; - if (used_sizep) *used_sizep = info.used_space; -} - -void -toku_realloc_descriptor_on_disk_unlocked(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT ft) { - toku_mutex_assert_locked(&bt->mutex); - BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_DESCRIPTOR); - blocknum_realloc_on_disk_internal(bt, b, size, offset, ft, false); -} - -void -toku_realloc_descriptor_on_disk(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT ft, int fd) { - lock_for_blocktable(bt); - toku_realloc_descriptor_on_disk_unlocked(bt, size, offset, ft); - - ensure_safe_write_unlocked(bt, fd, size, *offset); - unlock_for_blocktable(bt); -} - -void -toku_get_descriptor_offset_size(BLOCK_TABLE bt, DISKOFF *offset, DISKOFF *size) { - lock_for_blocktable(bt); - BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_DESCRIPTOR); - translate_blocknum_to_offset_size_unlocked(bt, b, offset, size); - unlock_for_blocktable(bt); -} - -void -toku_block_table_get_fragmentation_unlocked(BLOCK_TABLE bt, TOKU_DB_FRAGMENTATION report) { - //Requires: blocktable lock is held. - //Requires: report->file_size_bytes is already filled in. - - //Count the headers. 
- report->data_bytes = BLOCK_ALLOCATOR_HEADER_RESERVE; - report->data_blocks = 1; - report->checkpoint_bytes_additional = BLOCK_ALLOCATOR_HEADER_RESERVE; - report->checkpoint_blocks_additional = 1; - - struct translation *current = &bt->current; - int64_t i; - for (i = 0; i < current->length_of_array; i++) { - struct block_translation_pair *pair = &current->block_translation[i]; - if (pair->size > 0) { - report->data_bytes += pair->size; - report->data_blocks++; - } - } - struct translation *checkpointed = &bt->checkpointed; - for (i = 0; i < checkpointed->length_of_array; i++) { - struct block_translation_pair *pair = &checkpointed->block_translation[i]; - if (pair->size > 0 && - !(i < current->length_of_array && - current->block_translation[i].size > 0 && - current->block_translation[i].u.diskoff == pair->u.diskoff) - ) { - report->checkpoint_bytes_additional += pair->size; - report->checkpoint_blocks_additional++; - } - } - struct translation *inprogress = &bt->inprogress; - for (i = 0; i < inprogress->length_of_array; i++) { - struct block_translation_pair *pair = &inprogress->block_translation[i]; - if (pair->size > 0 && - !(i < current->length_of_array && - current->block_translation[i].size > 0 && - current->block_translation[i].u.diskoff == pair->u.diskoff) && - !(i < checkpointed->length_of_array && - checkpointed->block_translation[i].size > 0 && - checkpointed->block_translation[i].u.diskoff == pair->u.diskoff) - ) { - report->checkpoint_bytes_additional += pair->size; - report->checkpoint_blocks_additional++; - } - } - - block_allocator_get_unused_statistics(bt->block_allocator, report); -} - -void -toku_blocktable_get_info64(BLOCK_TABLE bt, struct ftinfo64 *s) { - lock_for_blocktable(bt); - - struct translation *current = &bt->current; - s->num_blocks_allocated = current->length_of_array; - s->num_blocks_in_use = 0; - s->size_allocated = 0; - s->size_in_use = 0; - - for (int64_t i = 0; i < current->length_of_array; ++i) { - struct block_translation_pair *block = &current->block_translation[i]; - if (block->size != size_is_free) { - ++s->num_blocks_in_use; - s->size_in_use += block->size; - if (block->u.diskoff != diskoff_unused) { - uint64_t limit = block->u.diskoff + block->size; - if (limit > s->size_allocated) { - s->size_allocated = limit; - } - } - } - } - - unlock_for_blocktable(bt); -} - -int -toku_blocktable_iterate_translation_tables(BLOCK_TABLE bt, uint64_t checkpoint_count, - int (*iter)(uint64_t checkpoint_count, - int64_t total_num_rows, - int64_t blocknum, - int64_t diskoff, - int64_t size, - void *extra), - void *iter_extra) { - int error = 0; - lock_for_blocktable(bt); - - int64_t total_num_rows = bt->current.length_of_array + bt->checkpointed.length_of_array; - for (int64_t i = 0; error == 0 && i < bt->current.length_of_array; ++i) { - struct block_translation_pair *block = &bt->current.block_translation[i]; - error = iter(checkpoint_count, total_num_rows, i, block->u.diskoff, block->size, iter_extra); - } - for (int64_t i = 0; error == 0 && i < bt->checkpointed.length_of_array; ++i) { - struct block_translation_pair *block = &bt->checkpointed.block_translation[i]; - error = iter(checkpoint_count - 1, total_num_rows, i, block->u.diskoff, block->size, iter_extra); - } - - unlock_for_blocktable(bt); - return error; -} diff --git a/storage/tokudb/ft-index/ft/block_table.h b/storage/tokudb/ft-index/ft/block_table.h deleted file mode 100644 index a9f17ad0e7e..00000000000 --- a/storage/tokudb/ft-index/ft/block_table.h +++ /dev/null @@ -1,176 +0,0 @@ -/* -*- mode: C++; 
c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef BLOCKTABLE_H -#define BLOCKTABLE_H -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. 
If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -#include "fttypes.h" - - -typedef struct block_table *BLOCK_TABLE; - -//Needed by tests, ftdump -struct block_translation_pair { - union { // If in the freelist, use next_free_blocknum, otherwise diskoff. - DISKOFF diskoff; - BLOCKNUM next_free_blocknum; - } u; - DISKOFF size; // set to 0xFFFFFFFFFFFFFFFF for free -}; - -void toku_blocktable_create_new(BLOCK_TABLE *btp); -int toku_blocktable_create_from_buffer(int fd, BLOCK_TABLE *btp, DISKOFF location_on_disk, DISKOFF size_on_disk, unsigned char *translation_buffer); -void toku_blocktable_destroy(BLOCK_TABLE *btp); - -void toku_ft_lock(FT h); -void toku_ft_unlock(FT h); - -void toku_block_translation_note_start_checkpoint_unlocked(BLOCK_TABLE bt); -void toku_block_translation_note_end_checkpoint(BLOCK_TABLE bt, int fd); -void toku_block_translation_note_skipped_checkpoint(BLOCK_TABLE bt); -void toku_maybe_truncate_file_on_open(BLOCK_TABLE bt, int fd); - -//Blocknums -void toku_allocate_blocknum(BLOCK_TABLE bt, BLOCKNUM *res, FT h); -void toku_allocate_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *res, FT h); -void toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *b, FT h, bool for_checkpoint); -void toku_verify_blocknum_allocated(BLOCK_TABLE bt, BLOCKNUM b); -void toku_block_verify_no_data_blocks_except_root(BLOCK_TABLE bt, BLOCKNUM root); -void toku_free_unused_blocknums(BLOCK_TABLE bt, BLOCKNUM root); -void toku_block_verify_no_free_blocknums(BLOCK_TABLE bt); -void toku_realloc_descriptor_on_disk(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT h, int fd); -void toku_realloc_descriptor_on_disk_unlocked(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT h); -void toku_get_descriptor_offset_size(BLOCK_TABLE bt, DISKOFF *offset, DISKOFF *size); - -//Blocks and Blocknums -void toku_blocknum_realloc_on_disk(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, int fd, bool for_checkpoint); -void toku_translate_blocknum_to_offset_size(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF *offset, DISKOFF *size); - -//Serialization -void toku_serialize_translation_to_wbuf(BLOCK_TABLE bt, int fd, struct wbuf *w, int64_t *address, int64_t *size); - -void toku_block_table_swap_for_redirect(BLOCK_TABLE old_bt, BLOCK_TABLE new_bt); - - -//DEBUG ONLY (ftdump included), tests included -void toku_blocknum_dump_translation(BLOCK_TABLE bt, BLOCKNUM b); -void toku_dump_translation_table_pretty(FILE *f, BLOCK_TABLE bt); -void toku_dump_translation_table(FILE *f, BLOCK_TABLE bt); -void toku_block_free(BLOCK_TABLE bt, uint64_t offset); -typedef int(*BLOCKTABLE_CALLBACK)(BLOCKNUM b, 
int64_t size, int64_t address, void *extra); -enum translation_type {TRANSLATION_NONE=0, - TRANSLATION_CURRENT, - TRANSLATION_INPROGRESS, - TRANSLATION_CHECKPOINTED, - TRANSLATION_DEBUG}; - -int toku_blocktable_iterate(BLOCK_TABLE bt, enum translation_type type, BLOCKTABLE_CALLBACK f, void *extra, bool data_only, bool used_only); -void toku_blocktable_internal_fragmentation(BLOCK_TABLE bt, int64_t *total_sizep, int64_t *used_sizep); - -void toku_block_table_get_fragmentation_unlocked(BLOCK_TABLE bt, TOKU_DB_FRAGMENTATION report); -//Requires: blocktable lock is held. -//Requires: report->file_size_bytes is already filled in. - -int64_t toku_block_get_blocks_in_use_unlocked(BLOCK_TABLE bt); - -void toku_blocktable_get_info64(BLOCK_TABLE, struct ftinfo64 *); - -int toku_blocktable_iterate_translation_tables(BLOCK_TABLE, uint64_t, int (*)(uint64_t, int64_t, int64_t, int64_t, int64_t, void *), void *); - -//Unmovable reserved first, then reallocable. -// We reserve one blocknum for the translation table itself. -enum {RESERVED_BLOCKNUM_NULL =0, - RESERVED_BLOCKNUM_TRANSLATION=1, - RESERVED_BLOCKNUM_DESCRIPTOR =2, - RESERVED_BLOCKNUMS}; - - -#endif - diff --git a/storage/tokudb/ft-index/ft/bndata.cc b/storage/tokudb/ft-index/ft/bndata.cc index eb543a03ab4..a277e52aa0b 100644 --- a/storage/tokudb/ft-index/ft/bndata.cc +++ b/storage/tokudb/ft-index/ft/bndata.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,8 +89,8 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <bndata.h> -#include <ft-ops.h> +#include <ft/bndata.h> +#include <ft/ft-internal.h> using namespace toku; uint32_t bn_data::klpair_disksize(const uint32_t klpair_len, const klpair_struct *klpair) const { @@ -129,18 +129,18 @@ void bn_data::initialize_from_separate_keys_and_vals(uint32_t num_entries, struc uint32_t ndone_before = rb->ndone; init_zero(); invariant(all_keys_same_length); // Until otherwise supported. - bytevec keys_src; + const void *keys_src; rbuf_literal_bytes(rb, &keys_src, key_data_size); //Generate dmt this->m_buffer.create_from_sorted_memory_of_fixed_size_elements( keys_src, num_entries, key_data_size, fixed_klpair_length); toku_mempool_construct(&this->m_buffer_mempool, val_data_size); - bytevec vals_src; + const void *vals_src; rbuf_literal_bytes(rb, &vals_src, val_data_size); if (num_entries > 0) { - void *vals_dest = toku_mempool_malloc(&this->m_buffer_mempool, val_data_size, 1); + void *vals_dest = toku_mempool_malloc(&this->m_buffer_mempool, val_data_size); paranoid_invariant_notnull(vals_dest); memcpy(vals_dest, vals_src, val_data_size); } @@ -256,7 +256,7 @@ void bn_data::deserialize_from_rbuf(uint32_t num_entries, struct rbuf *rb, uint3 } } // Version >= 26 and version 25 deserialization are now identical except that <= 25 might allocate too much memory. 
- bytevec bytes; + const void *bytes; rbuf_literal_bytes(rb, &bytes, data_size); const unsigned char *CAST_FROM_VOIDP(buf, bytes); if (data_size == 0) { @@ -384,7 +384,7 @@ struct dmt_compressor_state { static int move_it (const uint32_t, klpair_struct *klpair, const uint32_t idx UU(), struct dmt_compressor_state * const oc) { LEAFENTRY old_le = oc->bd->get_le_from_klpair(klpair); uint32_t size = leafentry_memsize(old_le); - void* newdata = toku_mempool_malloc(oc->new_kvspace, size, 1); + void* newdata = toku_mempool_malloc(oc->new_kvspace, size); paranoid_invariant_notnull(newdata); // we do this on a fresh mempool, so nothing bad should happen memcpy(newdata, old_le, size); klpair->le_offset = toku_mempool_get_offset_from_pointer_and_base(oc->new_kvspace, newdata); @@ -411,7 +411,7 @@ void bn_data::dmt_compress_kvspace(size_t added_size, void **maybe_free, bool fo } else { toku_mempool_construct(&new_kvspace, total_size_needed); size_t old_offset_limit = toku_mempool_get_offset_limit(&m_buffer_mempool); - void *new_mempool_base = toku_mempool_malloc(&new_kvspace, old_offset_limit, 1); + void *new_mempool_base = toku_mempool_malloc(&new_kvspace, old_offset_limit); memcpy(new_mempool_base, old_mempool_base, old_offset_limit); } @@ -428,10 +428,10 @@ void bn_data::dmt_compress_kvspace(size_t added_size, void **maybe_free, bool fo // If MAYBE_FREE is nullptr then free the old mempool's space. // Otherwise, store the old mempool's space in maybe_free. LEAFENTRY bn_data::mempool_malloc_and_update_dmt(size_t size, void **maybe_free) { - void *v = toku_mempool_malloc(&m_buffer_mempool, size, 1); + void *v = toku_mempool_malloc(&m_buffer_mempool, size); if (v == nullptr) { dmt_compress_kvspace(size, maybe_free, false); - v = toku_mempool_malloc(&m_buffer_mempool, size, 1); + v = toku_mempool_malloc(&m_buffer_mempool, size); paranoid_invariant_notnull(v); } return (LEAFENTRY)v; @@ -441,6 +441,7 @@ void bn_data::get_space_for_overwrite( uint32_t idx, const void* keyp UU(), uint32_t keylen UU(), + uint32_t old_keylen, uint32_t old_le_size, uint32_t new_size, LEAFENTRY* new_le_space, @@ -455,8 +456,8 @@ void bn_data::get_space_for_overwrite( int r = m_buffer.fetch(idx, &klpair_len, &klp); invariant_zero(r); paranoid_invariant(klp!=nullptr); - // Key never changes. 
- paranoid_invariant(keylen_from_klpair_len(klpair_len) == keylen); + // Old key length should be consistent with what is stored in the DMT + invariant(keylen_from_klpair_len(klpair_len) == old_keylen); size_t new_le_offset = toku_mempool_get_offset_from_pointer_and_base(&this->m_buffer_mempool, new_le); paranoid_invariant(new_le_offset <= UINT32_MAX - new_size); // Not using > 4GB @@ -505,7 +506,7 @@ class split_klpairs_extra { LEAFENTRY old_le = m_left_bn->get_le_from_klpair(&klpair); size_t le_size = leafentry_memsize(old_le); - void *new_le = toku_mempool_malloc(dest_mp, le_size, 1); + void *new_le = toku_mempool_malloc(dest_mp, le_size); paranoid_invariant_notnull(new_le); memcpy(new_le, old_le, le_size); size_t le_offset = toku_mempool_get_offset_from_pointer_and_base(dest_mp, new_le); @@ -658,7 +659,7 @@ void bn_data::set_contents_as_clone_of_sorted_array( dmt_builder.create(num_les, total_key_size); for (uint32_t idx = 0; idx < num_les; idx++) { - void* new_le = toku_mempool_malloc(&m_buffer_mempool, le_sizes[idx], 1); + void* new_le = toku_mempool_malloc(&m_buffer_mempool, le_sizes[idx]); paranoid_invariant_notnull(new_le); memcpy(new_le, old_les[idx], le_sizes[idx]); size_t le_offset = toku_mempool_get_offset_from_pointer_and_base(&m_buffer_mempool, new_le); diff --git a/storage/tokudb/ft-index/ft/bndata.h b/storage/tokudb/ft-index/ft/bndata.h index 79daf1e5bf0..0cded5de5fc 100644 --- a/storage/tokudb/ft-index/ft/bndata.h +++ b/storage/tokudb/ft-index/ft/bndata.h @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,13 +88,13 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - #pragma once -#include <util/mempool.h> -#include "wbuf.h" -#include <util/dmt.h> -#include "leafentry.h" +#include "util/dmt.h" +#include "util/mempool.h" + +#include "ft/leafentry.h" +#include "ft/serialize/wbuf.h" // Key/leafentry pair stored in a dmt. The key is inlined, the offset (in leafentry mempool) is stored for the leafentry. struct klpair_struct { @@ -304,7 +304,8 @@ public: // Allocates space in the mempool to store a new leafentry. // This may require reorganizing the mempool and updating the dmt. 
__attribute__((__nonnull__)) - void get_space_for_overwrite(uint32_t idx, const void* keyp, uint32_t keylen, uint32_t old_size, uint32_t new_size, LEAFENTRY* new_le_space, void **const maybe_free); + void get_space_for_overwrite(uint32_t idx, const void* keyp, uint32_t keylen, uint32_t old_keylen, uint32_t old_size, + uint32_t new_size, LEAFENTRY* new_le_space, void **const maybe_free); // Allocates space in the mempool to store a new leafentry // and inserts a new key into the dmt @@ -383,4 +384,3 @@ private: uint32_t key_data_size, uint32_t val_data_size, bool all_keys_same_length, uint32_t fixed_klpair_length); }; - diff --git a/storage/tokudb/ft-index/ft/background_job_manager.cc b/storage/tokudb/ft-index/ft/cachetable/background_job_manager.cc index 6849909a2ed..8db05018d3c 100644 --- a/storage/tokudb/ft-index/ft/background_job_manager.cc +++ b/storage/tokudb/ft-index/ft/cachetable/background_job_manager.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -93,7 +93,7 @@ PATENT RIGHTS GRANT: #include <memory.h> #include <toku_pthread.h> -#include "background_job_manager.h" +#include "cachetable/background_job_manager.h" struct background_job_manager_struct { bool accepting_jobs; diff --git a/storage/tokudb/ft-index/ft/background_job_manager.h b/storage/tokudb/ft-index/ft/cachetable/background_job_manager.h index 5474a569454..d977abae418 100644 --- a/storage/tokudb/ft-index/ft/background_job_manager.h +++ b/storage/tokudb/ft-index/ft/cachetable/background_job_manager.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef BACKGROUND_JOB_MANAGER_H -#define BACKGROUND_JOB_MANAGER_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,6 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -130,5 +130,3 @@ void bjm_remove_background_job(BACKGROUND_JOB_MANAGER bjm); // has completed, bjm_add_background_job returns an error. // void bjm_wait_for_jobs_to_finish(BACKGROUND_JOB_MANAGER bjm); - -#endif diff --git a/storage/tokudb/ft-index/ft/cachetable-internal.h b/storage/tokudb/ft-index/ft/cachetable/cachetable-internal.h index a02449f3c07..d5dc3ffa5fb 100644 --- a/storage/tokudb/ft-index/ft/cachetable-internal.h +++ b/storage/tokudb/ft-index/ft/cachetable/cachetable-internal.h @@ -1,9 +1,6 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TokuDB_cachetable_internal_h -#define TokuDB_cachetable_internal_h - #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -33,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -90,10 +87,12 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "background_job_manager.h" +#include "cachetable/background_job_manager.h" #include <portability/toku_random.h> #include <util/frwlock.h> #include <util/kibbutz.h> @@ -179,8 +178,6 @@ class pair_list; // Maps to a file on disk. // struct cachefile { - CACHEFILE next; - CACHEFILE prev; // these next two fields are protected by cachetable's list lock // they are managed whenever we add or remove a pair from // the cachetable. As of Riddler, this linked list is only used to @@ -440,14 +437,12 @@ public: bool evict_some_stale_pair(evictor* ev); void free_stale_data(evictor* ev); // access to these fields are protected by the lock - CACHEFILE m_active_head; // head of CACHEFILEs that are active - CACHEFILE m_stale_head; // head of CACHEFILEs that are stale - CACHEFILE m_stale_tail; // tail of CACHEFILEs that are stale FILENUM m_next_filenum_to_use; uint32_t m_next_hash_id_to_use; toku_pthread_rwlock_t m_lock; // this field is publoc so we are still POD toku::omt<CACHEFILE> m_active_filenum; toku::omt<CACHEFILE> m_active_fileid; + toku::omt<CACHEFILE> m_stale_fileid; private: CACHEFILE find_cachefile_in_list_unlocked(CACHEFILE start, struct fileid* fileid); }; @@ -521,8 +516,8 @@ public: void add_pair_attr(PAIR_ATTR attr); void remove_pair_attr(PAIR_ATTR attr); void change_pair_attr(PAIR_ATTR old_attr, PAIR_ATTR new_attr); - void add_to_size_current(long size); - void remove_from_size_current(long size); + void add_cloned_data_size(long size); + void remove_cloned_data_size(long size); uint64_t reserve_memory(double fraction, uint64_t upper_bound); void release_reserved_memory(uint64_t reserved_memory); void run_eviction_thread(); @@ -536,6 +531,8 @@ public: void get_state(long *size_current_ptr, long *size_limit_ptr); void fill_engine_status(); private: + void add_to_size_current(long size); + void remove_from_size_current(long size); void run_eviction(); bool run_eviction_on_pair(PAIR p); void try_evict_pair(PAIR p); @@ -551,6 +548,7 @@ private: pair_list* m_pl; cachefile_list* m_cf_list; int64_t m_size_current; // the sum of the sizes of the pairs in the cachetable + int64_t m_size_cloned_data; // stores amount of cloned data we have, only used for engine status // changes to these two values are protected // by ev_thread_lock int64_t m_size_reserved; // How much memory is reserved (e.g., by the loader) @@ -654,5 +652,3 @@ struct cachetable { char *env_dir; }; - -#endif // End of header guardian. diff --git a/storage/tokudb/ft-index/ft/cachetable.cc b/storage/tokudb/ft-index/ft/cachetable/cachetable.cc index d7c734cc5fd..feda4abc76a 100644 --- a/storage/tokudb/ft-index/ft/cachetable.cc +++ b/storage/tokudb/ft-index/ft/cachetable/cachetable.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,24 +89,26 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." 
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <toku_portability.h> -#include <stdlib.h> #include <string.h> #include <time.h> #include <stdarg.h> -#include "cachetable.h" -#include <ft/log_header.h> -#include "checkpoint.h" -#include "log-internal.h" -#include "cachetable-internal.h" -#include <memory.h> -#include <toku_race_tools.h> + +#include <portability/memory.h> +#include <portability/toku_race_tools.h> #include <portability/toku_atomic.h> #include <portability/toku_pthread.h> +#include <portability/toku_portability.h> +#include <portability/toku_stdlib.h> #include <portability/toku_time.h> -#include <util/rwlock.h> -#include <util/status.h> -#include <util/context.h> + +#include "ft/cachetable/cachetable.h" +#include "ft/cachetable/cachetable-internal.h" +#include "ft/cachetable/checkpoint.h" +#include "ft/logger/log-internal.h" +#include "util/rwlock.h" +#include "util/scoped_malloc.h" +#include "util/status.h" +#include "util/context.h" /////////////////////////////////////////////////////////////////////////////////// // Engine status @@ -127,7 +129,7 @@ static CACHETABLE_STATUS_S ct_status; // Note, toku_cachetable_get_status() is below, after declaration of cachetable. -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ct_status, k, c, t, "cachetable: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ct_status, k, c, t, "cachetable: " l, inc) static void status_init(void) { @@ -144,6 +146,7 @@ status_init(void) { STATUS_INIT(CT_SIZE_LEAF, CACHETABLE_SIZE_LEAF, UINT64, "size leaf", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CT_SIZE_ROLLBACK, CACHETABLE_SIZE_ROLLBACK, UINT64, "size rollback", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CT_SIZE_CACHEPRESSURE, CACHETABLE_SIZE_CACHEPRESSURE, UINT64, "size cachepressure", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(CT_SIZE_CLONED, CACHETABLE_SIZE_CLONED, UINT64, "size currently cloned data for checkpoint", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CT_EVICTIONS, CACHETABLE_EVICTIONS, UINT64, "evictions", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CT_CLEANER_EXECUTIONS, CACHETABLE_CLEANER_EXECUTIONS, UINT64, "cleaner executions", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CT_CLEANER_PERIOD, CACHETABLE_CLEANER_PERIOD, UINT64, "cleaner period", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); @@ -704,7 +707,7 @@ static void cachetable_only_write_locked_data( p->disk_data = disk_data; if (is_clone) { p->cloned_value_data = NULL; - ev->remove_from_size_current(p->cloned_value_size); + ev->remove_cloned_data_size(p->cloned_value_size); p->cloned_value_size = 0; } } @@ -949,7 +952,7 @@ clone_pair(evictor* ev, PAIR p) { ev->change_pair_attr(old_attr, new_attr); } p->cloned_value_size = clone_size; - ev->add_to_size_current(p->cloned_value_size); + ev->add_cloned_data_size(p->cloned_value_size); } static void checkpoint_cloned_pair(void* extra) { @@ -1587,7 +1590,7 @@ int toku_cachetable_get_and_pin_with_dep_pairs ( PAIR* dependent_pairs, enum cachetable_dirty* dependent_dirty // array stating dirty/cleanness of dependent pairs ) -// See cachetable.h +// See cachetable/cachetable.h { CACHETABLE ct = cachefile->cachetable; bool wait = false; @@ -2023,7 +2026,7 @@ int toku_cachetable_get_and_pin_nonblocking( void 
*read_extraargs, UNLOCKERS unlockers ) -// See cachetable.h. +// See cachetable/cachetable.h. { CACHETABLE ct = cf->cachetable; assert(lock_type == PL_READ || @@ -2206,7 +2209,7 @@ int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, uint32_t fullhash, CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback, void *read_extraargs, bool *doing_prefetch) -// Effect: See the documentation for this function in cachetable.h +// Effect: See the documentation for this function in cachetable/cachetable.h { int r = 0; PAIR p = NULL; @@ -2509,6 +2512,11 @@ toku_cachetable_minicron_shutdown(CACHETABLE ct) { ct->cl.destroy(); } +void toku_cachetable_prepare_close(CACHETABLE ct UU()) { + extern bool toku_serialize_in_parallel; + toku_serialize_in_parallel = true; +} + /* Requires that it all be flushed. */ void toku_cachetable_close (CACHETABLE *ctp) { CACHETABLE ct = *ctp; @@ -3635,6 +3643,7 @@ int evictor::init(long _size_limit, pair_list* _pl, cachefile_list* _cf_list, KI m_size_reserved = unreservable_memory(_size_limit); m_size_current = 0; + m_size_cloned_data = 0; m_size_evicting = 0; m_size_nonleaf = create_partitioned_counter(); @@ -3770,6 +3779,22 @@ void evictor::remove_from_size_current(long size) { } // +// Adds the size of cloned data to necessary variables in the evictor +// +void evictor::add_cloned_data_size(long size) { + (void) toku_sync_fetch_and_add(&m_size_cloned_data, size); + add_to_size_current(size); +} + +// +// Removes the size of cloned data to necessary variables in the evictor +// +void evictor::remove_cloned_data_size(long size) { + (void) toku_sync_fetch_and_sub(&m_size_cloned_data, size); + remove_from_size_current(size); +} + +// // TODO: (Zardosht) comment this function // uint64_t evictor::reserve_memory(double fraction, uint64_t upper_bound) { @@ -4333,6 +4358,7 @@ void evictor::fill_engine_status() { STATUS_VALUE(CT_SIZE_LEAF) = read_partitioned_counter(m_size_leaf); STATUS_VALUE(CT_SIZE_ROLLBACK) = read_partitioned_counter(m_size_rollback); STATUS_VALUE(CT_SIZE_CACHEPRESSURE) = read_partitioned_counter(m_size_cachepressure); + STATUS_VALUE(CT_SIZE_CLONED) = m_size_cloned_data; STATUS_VALUE(CT_WAIT_PRESSURE_COUNT) = read_partitioned_counter(m_wait_pressure_count); STATUS_VALUE(CT_WAIT_PRESSURE_TIME) = read_partitioned_counter(m_wait_pressure_time); STATUS_VALUE(CT_LONG_WAIT_PRESSURE_COUNT) = read_partitioned_counter(m_long_wait_pressure_count); @@ -4414,43 +4440,48 @@ void checkpointer::increment_num_txns() { m_checkpoint_num_txns++; } -// -// Update the user data in any cachefiles in our checkpoint list. -// -void checkpointer::update_cachefiles() { - CACHEFILE cf; - for(cf = m_cf_list->m_active_head; cf; cf=cf->next) { +struct iterate_begin_checkpoint { + LSN lsn_of_checkpoint_in_progress; + iterate_begin_checkpoint(LSN lsn) : lsn_of_checkpoint_in_progress(lsn) { } + static int fn(const CACHEFILE &cf, const uint32_t UU(idx), struct iterate_begin_checkpoint *info) { assert(cf->begin_checkpoint_userdata); if (cf->for_checkpoint) { - cf->begin_checkpoint_userdata(m_lsn_of_checkpoint_in_progress, - cf->userdata); + cf->begin_checkpoint_userdata(info->lsn_of_checkpoint_in_progress, cf->userdata); } + return 0; } +}; + +// +// Update the user data in any cachefiles in our checkpoint list. 
+// +void checkpointer::update_cachefiles() { + struct iterate_begin_checkpoint iterate(m_lsn_of_checkpoint_in_progress); + int r = m_cf_list->m_active_fileid.iterate<struct iterate_begin_checkpoint, + iterate_begin_checkpoint::fn>(&iterate); + assert_zero(r); } +struct iterate_note_pin { + static int fn(const CACHEFILE &cf, uint32_t UU(idx), void **UU(extra)) { + assert(cf->note_pin_by_checkpoint); + cf->note_pin_by_checkpoint(cf, cf->userdata); + cf->for_checkpoint = true; + return 0; + } +}; + // // Sets up and kicks off a checkpoint. // void checkpointer::begin_checkpoint() { // 1. Initialize the accountability counters. - m_checkpoint_num_files = 0; m_checkpoint_num_txns = 0; // 2. Make list of cachefiles to be included in the checkpoint. - // TODO: <CER> How do we remove the non-lock cachetable reference here? m_cf_list->read_lock(); - for (CACHEFILE cf = m_cf_list->m_active_head; cf; cf = cf->next) { - // The caller must serialize open, close, and begin checkpoint. - // So we should never see a closing cachefile here. - // <CER> Is there an assert we can add here? - - // Putting this check here so that this method may be called - // by cachetable tests. - assert(cf->note_pin_by_checkpoint); - cf->note_pin_by_checkpoint(cf, cf->userdata); - cf->for_checkpoint = true; - m_checkpoint_num_files++; - } + m_cf_list->m_active_fileid.iterate<void *, iterate_note_pin::fn>(nullptr); + m_checkpoint_num_files = m_cf_list->m_active_fileid.size(); m_cf_list->read_unlock(); // 3. Create log entries for this checkpoint. @@ -4475,6 +4506,14 @@ void checkpointer::begin_checkpoint() { m_list->write_pending_exp_unlock(); } +struct iterate_log_fassociate { + static int fn(const CACHEFILE &cf, uint32_t UU(idx), void **UU(extra)) { + assert(cf->log_fassociate_during_checkpoint); + cf->log_fassociate_during_checkpoint(cf, cf->userdata); + return 0; + } +}; + // // Assuming the logger exists, this will write out the folloing // information to the log. @@ -4498,10 +4537,7 @@ void checkpointer::log_begin_checkpoint() { m_lsn_of_checkpoint_in_progress = begin_lsn; // Log the list of open dictionaries. - for (CACHEFILE cf = m_cf_list->m_active_head; cf; cf = cf->next) { - assert(cf->log_fassociate_during_checkpoint); - cf->log_fassociate_during_checkpoint(cf, cf->userdata); - } + m_cf_list->m_active_fileid.iterate<void *, iterate_log_fassociate::fn>(nullptr); // Write open transactions to the log. 
r = toku_txn_manager_iter_over_live_txns( @@ -4559,7 +4595,8 @@ void checkpointer::remove_background_job() { } void checkpointer::end_checkpoint(void (*testcallback_f)(void*), void* testextra) { - CACHEFILE *XMALLOC_N(m_checkpoint_num_files, checkpoint_cfs); + toku::scoped_malloc checkpoint_cfs_buf(m_checkpoint_num_files * sizeof(CACHEFILE)); + CACHEFILE *checkpoint_cfs = reinterpret_cast<CACHEFILE *>(checkpoint_cfs_buf.get()); this->fill_checkpoint_cfs(checkpoint_cfs); this->checkpoint_pending_pairs(); @@ -4571,22 +4608,33 @@ void checkpointer::end_checkpoint(void (*testcallback_f)(void*), void* testextr this->log_end_checkpoint(); this->end_checkpoint_userdata(checkpoint_cfs); - //Delete list of cachefiles in the checkpoint, + // Delete list of cachefiles in the checkpoint, this->remove_cachefiles(checkpoint_cfs); - toku_free(checkpoint_cfs); } -void checkpointer::fill_checkpoint_cfs(CACHEFILE* checkpoint_cfs) { - m_cf_list->read_lock(); - uint32_t curr_index = 0; - for (CACHEFILE cf = m_cf_list->m_active_head; cf; cf = cf->next) { +struct iterate_checkpoint_cfs { + CACHEFILE *checkpoint_cfs; + uint32_t checkpoint_num_files; + uint32_t curr_index; + iterate_checkpoint_cfs(CACHEFILE *cfs, uint32_t num_files) : + checkpoint_cfs(cfs), checkpoint_num_files(num_files), curr_index(0) { + } + static int fn(const CACHEFILE &cf, uint32_t UU(idx), struct iterate_checkpoint_cfs *info) { if (cf->for_checkpoint) { - assert(curr_index < m_checkpoint_num_files); - checkpoint_cfs[curr_index] = cf; - curr_index++; + assert(info->curr_index < info->checkpoint_num_files); + info->checkpoint_cfs[info->curr_index] = cf; + info->curr_index++; } + return 0; } - assert(curr_index == m_checkpoint_num_files); +}; + +void checkpointer::fill_checkpoint_cfs(CACHEFILE* checkpoint_cfs) { + struct iterate_checkpoint_cfs iterate(checkpoint_cfs, m_checkpoint_num_files); + + m_cf_list->read_lock(); + m_cf_list->m_active_fileid.iterate<struct iterate_checkpoint_cfs, iterate_checkpoint_cfs::fn>(&iterate); + assert(iterate.curr_index == m_checkpoint_num_files); m_cf_list->read_unlock(); } @@ -4671,19 +4719,18 @@ void checkpointer::remove_cachefiles(CACHEFILE* checkpoint_cfs) { static_assert(std::is_pod<cachefile_list>::value, "cachefile_list isn't POD"); void cachefile_list::init() { - m_active_head = NULL; - m_stale_head = NULL; - m_stale_tail = NULL; m_next_filenum_to_use.fileid = 0; m_next_hash_id_to_use = 0; toku_pthread_rwlock_init(&m_lock, NULL); m_active_filenum.create(); m_active_fileid.create(); + m_stale_fileid.create(); } void cachefile_list::destroy() { m_active_filenum.destroy(); m_active_fileid.destroy(); + m_stale_fileid.destroy(); toku_pthread_rwlock_destroy(&m_lock); } @@ -4702,34 +4749,31 @@ void cachefile_list::write_lock() { void cachefile_list::write_unlock() { toku_pthread_rwlock_wrunlock(&m_lock); } -int cachefile_list::cachefile_of_iname_in_env(const char *iname_in_env, CACHEFILE *cf) { - read_lock(); - CACHEFILE extant; - int r; - r = ENOENT; - for (extant = m_active_head; extant; extant = extant->next) { - if (extant->fname_in_env && - !strcmp(extant->fname_in_env, iname_in_env)) { - *cf = extant; - r = 0; - break; + +struct iterate_find_iname { + const char *iname_in_env; + CACHEFILE found_cf; + iterate_find_iname(const char *iname) : iname_in_env(iname), found_cf(nullptr) { } + static int fn(const CACHEFILE &cf, uint32_t UU(idx), struct iterate_find_iname *info) { + if (cf->fname_in_env && strcmp(cf->fname_in_env, info->iname_in_env) == 0) { + info->found_cf = cf; + return -1; } + return 0; } - 
read_unlock(); - return r; -} +}; + +int cachefile_list::cachefile_of_iname_in_env(const char *iname_in_env, CACHEFILE *cf) { + struct iterate_find_iname iterate(iname_in_env); -int cachefile_list::cachefile_of_filenum(FILENUM filenum, CACHEFILE *cf) { read_lock(); - CACHEFILE extant; - int r = ENOENT; - *cf = NULL; - for (extant = m_active_head; extant; extant = extant->next) { - if (extant->filenum.fileid==filenum.fileid) { - *cf = extant; - r = 0; - break; - } + int r = m_active_fileid.iterate<iterate_find_iname, iterate_find_iname::fn>(&iterate); + if (iterate.found_cf != nullptr) { + assert(strcmp(iterate.found_cf->fname_in_env, iname_in_env) == 0); + *cf = iterate.found_cf; + r = 0; + } else { + r = ENOENT; } read_unlock(); return r; @@ -4746,20 +4790,23 @@ static int cachefile_find_by_filenum(const CACHEFILE &a_cf, const FILENUM &b) { } } +int cachefile_list::cachefile_of_filenum(FILENUM filenum, CACHEFILE *cf) { + read_lock(); + int r = m_active_filenum.find_zero<FILENUM, cachefile_find_by_filenum>(filenum, cf, nullptr); + if (r == DB_NOTFOUND) { + r = ENOENT; + } else { + invariant_zero(r); + } + read_unlock(); + return r; +} + static int cachefile_find_by_fileid(const CACHEFILE &a_cf, const struct fileid &b) { return toku_fileid_cmp(a_cf->fileid, b); } void cachefile_list::add_cf_unlocked(CACHEFILE cf) { - invariant(cf->next == NULL); - invariant(cf->prev == NULL); - cf->next = m_active_head; - cf->prev = NULL; - if (m_active_head) { - m_active_head->prev = cf; - } - m_active_head = cf; - int r; r = m_active_filenum.insert<FILENUM, cachefile_find_by_filenum>(cf, cf->filenum, nullptr); assert_zero(r); @@ -4769,36 +4816,13 @@ void cachefile_list::add_cf_unlocked(CACHEFILE cf) { void cachefile_list::add_stale_cf(CACHEFILE cf) { write_lock(); - invariant(cf->next == NULL); - invariant(cf->prev == NULL); - - cf->next = m_stale_head; - cf->prev = NULL; - if (m_stale_head) { - m_stale_head->prev = cf; - } - m_stale_head = cf; - if (m_stale_tail == NULL) { - m_stale_tail = cf; - } + int r = m_stale_fileid.insert<struct fileid, cachefile_find_by_fileid>(cf, cf->fileid, nullptr); + assert_zero(r); write_unlock(); } void cachefile_list::remove_cf(CACHEFILE cf) { write_lock(); - invariant(m_active_head != NULL); - if (cf->next) { - cf->next->prev = cf->prev; - } - if (cf->prev) { - cf->prev->next = cf->next; - } - if (cf == m_active_head) { - invariant(cf->prev == NULL); - m_active_head = cf->next; - } - cf->prev = NULL; - cf->next = NULL; uint32_t idx; int r; @@ -4816,24 +4840,12 @@ void cachefile_list::remove_cf(CACHEFILE cf) { } void cachefile_list::remove_stale_cf_unlocked(CACHEFILE cf) { - invariant(m_stale_head != NULL); - invariant(m_stale_tail != NULL); - if (cf->next) { - cf->next->prev = cf->prev; - } - if (cf->prev) { - cf->prev->next = cf->next; - } - if (cf == m_stale_head) { - invariant(cf->prev == NULL); - m_stale_head = cf->next; - } - if (cf == m_stale_tail) { - invariant(cf->next == NULL); - m_stale_tail = cf->prev; - } - cf->prev = NULL; - cf->next = NULL; + uint32_t idx; + int r; + r = m_stale_fileid.find_zero<struct fileid, cachefile_find_by_fileid>(cf->fileid, nullptr, &idx); + assert_zero(r); + r = m_stale_fileid.delete_at(idx); + assert_zero(r); } FILENUM cachefile_list::reserve_filenum() { @@ -4849,11 +4861,6 @@ FILENUM cachefile_list::reserve_filenum() { break; } FILENUM filenum = m_next_filenum_to_use; -#if TOKU_DEBUG_PARANOID - for (CACHEFILE extant = m_active_head; extant; extant = extant->next) { - assert(filenum.fileid != extant->filenum.fileid); - } -#endif 
m_next_filenum_to_use.fileid++; write_unlock(); return filenum; @@ -4865,91 +4872,77 @@ uint32_t cachefile_list::get_new_hash_id_unlocked() { return retval; } -CACHEFILE cachefile_list::find_cachefile_in_list_unlocked( - CACHEFILE start, - struct fileid* fileid - ) -{ - CACHEFILE retval = NULL; - for (CACHEFILE extant = start; extant; extant = extant->next) { - if (toku_fileids_are_equal(&extant->fileid, fileid)) { - // Clients must serialize cachefile open, close, and unlink - // So, during open, we should never see a closing cachefile - // or one that has been marked as unlink on close. - assert(!extant->unlink_on_close); - retval = extant; - goto exit; - } - } -exit: - return retval; -} - CACHEFILE cachefile_list::find_cachefile_unlocked(struct fileid* fileid) { CACHEFILE cf = nullptr; int r = m_active_fileid.find_zero<struct fileid, cachefile_find_by_fileid>(*fileid, &cf, nullptr); if (r == 0) { assert(!cf->unlink_on_close); } -#if TOKU_DEBUG_PARANOID - assert(cf == find_cachefile_in_list_unlocked(m_active_head, fileid)); -#endif return cf; } CACHEFILE cachefile_list::find_stale_cachefile_unlocked(struct fileid* fileid) { - return find_cachefile_in_list_unlocked(m_stale_head, fileid); + CACHEFILE cf = nullptr; + int r = m_stale_fileid.find_zero<struct fileid, cachefile_find_by_fileid>(*fileid, &cf, nullptr); + if (r == 0) { + assert(!cf->unlink_on_close); + } + return cf; } void cachefile_list::verify_unused_filenum(FILENUM filenum) { int r = m_active_filenum.find_zero<FILENUM, cachefile_find_by_filenum>(filenum, nullptr, nullptr); assert(r == DB_NOTFOUND); -#if TOKU_DEBUG_PARANOID - for (CACHEFILE extant = m_active_head; extant; extant = extant->next) { - invariant(extant->filenum.fileid != filenum.fileid); - } -#endif } // returns true if some eviction ran, false otherwise bool cachefile_list::evict_some_stale_pair(evictor* ev) { - PAIR p = NULL; - CACHEFILE cf_to_destroy = NULL; write_lock(); - if (m_stale_tail == NULL) { + if (m_stale_fileid.size() == 0) { write_unlock(); return false; } - p = m_stale_tail->cf_head; + + CACHEFILE stale_cf = nullptr; + int r = m_stale_fileid.fetch(0, &stale_cf); + assert_zero(r); + // we should not have a cf in the stale list // that does not have any pairs + PAIR p = stale_cf->cf_head; paranoid_invariant(p != NULL); - evict_pair_from_cachefile(p); // now that we have evicted something, // let's check if the cachefile is needed anymore - if (m_stale_tail->cf_head == NULL) { - cf_to_destroy = m_stale_tail; - remove_stale_cf_unlocked(m_stale_tail); + // + // it is not needed if the latest eviction caused + // the cf_head for that cf to become null + bool destroy_cf = stale_cf->cf_head == nullptr; + if (destroy_cf) { + remove_stale_cf_unlocked(stale_cf); } write_unlock(); ev->remove_pair_attr(p->attr); cachetable_free_pair(p); - if (cf_to_destroy) { - cachefile_destroy(cf_to_destroy); + if (destroy_cf) { + cachefile_destroy(stale_cf); } return true; } void cachefile_list::free_stale_data(evictor* ev) { write_lock(); - while (m_stale_tail != NULL) { - PAIR p = m_stale_tail->cf_head; + while (m_stale_fileid.size() != 0) { + CACHEFILE stale_cf = nullptr; + int r = m_stale_fileid.fetch(0, &stale_cf); + assert_zero(r); + // we should not have a cf in the stale list // that does not have any pairs + PAIR p = stale_cf->cf_head; paranoid_invariant(p != NULL); evict_pair_from_cachefile(p); @@ -4958,10 +4951,9 @@ void cachefile_list::free_stale_data(evictor* ev) { // now that we have evicted something, // let's check if the cachefile is needed anymore - if 
(m_stale_tail->cf_head == NULL) { - CACHEFILE cf_to_destroy = m_stale_tail; - remove_stale_cf_unlocked(m_stale_tail); - cachefile_destroy(cf_to_destroy); + if (stale_cf->cf_head == NULL) { + remove_stale_cf_unlocked(stale_cf); + cachefile_destroy(stale_cf); } } write_unlock(); diff --git a/storage/tokudb/ft-index/ft/cachetable.h b/storage/tokudb/ft-index/ft/cachetable/cachetable.h index 9c11db02e00..a1ea83406a1 100644 --- a/storage/tokudb/ft-index/ft/cachetable.h +++ b/storage/tokudb/ft-index/ft/cachetable/cachetable.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef CACHETABLE_H -#define CACHETABLE_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,12 +87,17 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #include <fcntl.h> -#include "fttypes.h" -#include "minicron.h" + +#include "ft/logger/logger.h" +#include "ft/serialize/block_table.h" +#include "ft/txn/txn.h" +#include "util/minicron.h" // Maintain a cache mapping from cachekeys to values (void*) // Some of the keys can be pinned. Don't pin too many or for too long. @@ -111,6 +114,42 @@ PATENT RIGHTS GRANT: typedef BLOCKNUM CACHEKEY; +class checkpointer; +typedef class checkpointer *CHECKPOINTER; +typedef struct cachetable *CACHETABLE; +typedef struct cachefile *CACHEFILE; +typedef struct ctpair *PAIR; + +// This struct hold information about values stored in the cachetable. +// As one can tell from the names, we are probably violating an +// abstraction layer by placing names. +// +// The purpose of having this struct is to have a way for the +// cachetable to accumulate the some totals we are interested in. +// Breaking this abstraction layer by having these names was the +// easiest way. 
+// +typedef struct pair_attr_s { + long size; // size PAIR's value takes in memory + long nonleaf_size; // size if PAIR is a nonleaf node, 0 otherwise, used only for engine status + long leaf_size; // size if PAIR is a leaf node, 0 otherwise, used only for engine status + long rollback_size; // size of PAIR is a rollback node, 0 otherwise, used only for engine status + long cache_pressure_size; // amount PAIR contributes to cache pressure, is sum of buffer sizes and workdone counts + bool is_valid; +} PAIR_ATTR; + +static inline PAIR_ATTR make_pair_attr(long size) { + PAIR_ATTR result={ + .size = size, + .nonleaf_size = 0, + .leaf_size = 0, + .rollback_size = 0, + .cache_pressure_size = 0, + .is_valid = true + }; + return result; +} + void toku_set_cleaner_period (CACHETABLE ct, uint32_t new_period); uint32_t toku_get_cleaner_period_unlocked (CACHETABLE ct); void toku_set_cleaner_iterations (CACHETABLE ct, uint32_t new_iterations); @@ -122,7 +161,7 @@ uint32_t toku_get_cleaner_iterations_unlocked (CACHETABLE ct); // create and initialize a cache table // size_limit is the upper limit on the size of the size of the values in the table // pass 0 if you want the default -int toku_cachetable_create(CACHETABLE *result, long size_limit, LSN initial_lsn, TOKULOGGER); +int toku_cachetable_create(CACHETABLE *result, long size_limit, LSN initial_lsn, struct tokulogger *logger); // Create a new cachetable. // Effects: a new cachetable is created and initialized. @@ -147,15 +186,20 @@ int toku_cachefile_of_iname_in_env (CACHETABLE ct, const char *iname_in_env, CAC // Return the filename char *toku_cachefile_fname_in_cwd (CACHEFILE cf); -void toku_cachetable_begin_checkpoint (CHECKPOINTER cp, TOKULOGGER); +void toku_cachetable_begin_checkpoint (CHECKPOINTER cp, struct tokulogger *logger); -void toku_cachetable_end_checkpoint(CHECKPOINTER cp, TOKULOGGER logger, +void toku_cachetable_end_checkpoint(CHECKPOINTER cp, struct tokulogger *logger, void (*testcallback_f)(void*), void * testextra); + // Shuts down checkpoint thread // Requires no locks be held that are taken by the checkpoint function void toku_cachetable_minicron_shutdown(CACHETABLE ct); +// Prepare to close the cachetable. This informs the cachetable that it is about to be closed +// so that it can tune its checkpoint resource use. +void toku_cachetable_prepare_close(CACHETABLE ct); + // Close the cachetable. // Effects: All of the memory objects are flushed to disk, and the cachetable is destroyed. void toku_cachetable_close(CACHETABLE *ct); @@ -394,8 +438,9 @@ struct unlockers { bool locked; void (*f)(void* extra); void *extra; - UNLOCKERS next; + struct unlockers *next; }; +typedef struct unlockers *UNLOCKERS; // Effect: If the block is in the cachetable, then return it. // Otherwise call the functions in unlockers, fetch the data (but don't pin it, since we'll just end up pinning it again later), and return TOKUDB_TRY_AGAIN. @@ -506,15 +551,15 @@ void toku_cachefile_unlink_on_close(CACHEFILE cf); bool toku_cachefile_is_unlink_on_close(CACHEFILE cf); // Return the logger associated with the cachefile -TOKULOGGER toku_cachefile_logger (CACHEFILE); +struct tokulogger *toku_cachefile_logger(CACHEFILE cf); // Return the filenum associated with the cachefile -FILENUM toku_cachefile_filenum (CACHEFILE); +FILENUM toku_cachefile_filenum(CACHEFILE cf); // Effect: Return a 32-bit hash key. The hash key shall be suitable for using with bitmasking for a table of size power-of-two. 
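The PAIR_ATTR struct and make_pair_attr() added above describe how much memory a cached value occupies. A minimal usage sketch (not from this patch; the function name is hypothetical): make_pair_attr() fills in the total size and zeroes the per-type fields, and the caller then sets whichever per-type counter applies.

PAIR_ATTR describe_nonleaf_node_sketch(long node_size_in_memory) {
    PAIR_ATTR attr = make_pair_attr(node_size_in_memory);   // size = total bytes the value takes in memory
    attr.nonleaf_size = node_size_in_memory;                 // counted for engine status only
    return attr;                                             // later handed to the cachetable with the value
}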
-uint32_t toku_cachetable_hash (CACHEFILE cachefile, CACHEKEY key); +uint32_t toku_cachetable_hash(CACHEFILE cf, CACHEKEY key); -uint32_t toku_cachefile_fullhash_of_header (CACHEFILE cachefile); +uint32_t toku_cachefile_fullhash_of_header(CACHEFILE cf); // debug functions @@ -556,6 +601,7 @@ typedef enum { CT_SIZE_LEAF, // number of bytes in cachetable belonging to leaf nodes CT_SIZE_ROLLBACK, // number of bytes in cachetable belonging to rollback nodes CT_SIZE_CACHEPRESSURE, // number of bytes causing cache pressure (sum of buffers and workdone counters) + CT_SIZE_CLONED, // number of bytes of cloned data in the system CT_EVICTIONS, CT_CLEANER_EXECUTIONS, // number of times the cleaner thread's loop has executed CT_CLEANER_PERIOD, @@ -601,5 +647,3 @@ void toku_pair_list_set_lock_size(uint32_t num_locks); // layer. __attribute__((const,nonnull)) bool toku_ctpair_is_write_locked(PAIR pair); - -#endif /* CACHETABLE_H */ diff --git a/storage/tokudb/ft-index/ft/checkpoint.cc b/storage/tokudb/ft-index/ft/cachetable/checkpoint.cc index 3d26c3a460e..492893ddc7b 100644 --- a/storage/tokudb/ft-index/ft/checkpoint.cc +++ b/storage/tokudb/ft-index/ft/cachetable/checkpoint.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -126,17 +126,18 @@ PATENT RIGHTS GRANT: * *****/ -#include <toku_portability.h> #include <time.h> -#include "fttypes.h" -#include "cachetable.h" -#include "log-internal.h" -#include "logger.h" -#include "checkpoint.h" -#include <portability/toku_atomic.h> -#include <util/status.h> -#include <util/frwlock.h> +#include "portability/toku_portability.h" +#include "portability/toku_atomic.h" + +#include "ft/cachetable/cachetable.h" +#include "ft/cachetable/checkpoint.h" +#include "ft/ft.h" +#include "ft/logger/log-internal.h" +#include "ft/logger/recover.h" +#include "util/frwlock.h" +#include "util/status.h" /////////////////////////////////////////////////////////////////////////////////// // Engine status @@ -146,7 +147,7 @@ PATENT RIGHTS GRANT: static CHECKPOINT_STATUS_S cp_status; -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(cp_status, k, c, t, "checkpoint: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(cp_status, k, c, t, "checkpoint: " l, inc) static void status_init(void) { @@ -158,8 +159,8 @@ status_init(void) { STATUS_INIT(CP_TIME_LAST_CHECKPOINT_BEGIN, CHECKPOINT_LAST_BEGAN, UNIXTIME, "last checkpoint began ", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CP_TIME_LAST_CHECKPOINT_BEGIN_COMPLETE, CHECKPOINT_LAST_COMPLETE_BEGAN, UNIXTIME, "last complete checkpoint began ", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CP_TIME_LAST_CHECKPOINT_END, CHECKPOINT_LAST_COMPLETE_ENDED, UNIXTIME, "last complete checkpoint ended", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); - STATUS_INIT(CP_TIME_CHECKPOINT_DURATION, CHECKPOINT_DURATION, UNIXTIME, "time spent during checkpoint (begin and end phases)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); - STATUS_INIT(CP_TIME_CHECKPOINT_DURATION_LAST, CHECKPOINT_DURATION_LAST, UNIXTIME, "time spent during last checkpoint (begin and end phases)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(CP_TIME_CHECKPOINT_DURATION, CHECKPOINT_DURATION, UINT64, "time spent during checkpoint (begin and end phases)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(CP_TIME_CHECKPOINT_DURATION_LAST, CHECKPOINT_DURATION_LAST, UINT64, "time spent during last 
checkpoint (begin and end phases)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CP_LAST_LSN, nullptr, UINT64, "last complete checkpoint LSN", TOKU_ENGINE_STATUS); STATUS_INIT(CP_CHECKPOINT_COUNT, CHECKPOINT_TAKEN, UINT64, "checkpoints taken ", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(CP_CHECKPOINT_COUNT_FAIL, CHECKPOINT_FAILED, UINT64, "checkpoints failed", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); @@ -381,8 +382,8 @@ toku_checkpoint(CHECKPOINTER cp, TOKULOGGER logger, STATUS_VALUE(CP_LONG_BEGIN_TIME) += duration; STATUS_VALUE(CP_LONG_BEGIN_COUNT) += 1; } - STATUS_VALUE(CP_TIME_CHECKPOINT_DURATION) += ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_END)) - ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_BEGIN)); - STATUS_VALUE(CP_TIME_CHECKPOINT_DURATION_LAST) = ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_END)) - ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_BEGIN)); + STATUS_VALUE(CP_TIME_CHECKPOINT_DURATION) += (uint64_t) ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_END)) - ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_BEGIN)); + STATUS_VALUE(CP_TIME_CHECKPOINT_DURATION_LAST) = (uint64_t) ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_END)) - ((time_t) STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_BEGIN)); STATUS_VALUE(CP_FOOTPRINT) = 0; checkpoint_safe_checkpoint_unlock(); diff --git a/storage/tokudb/ft-index/ft/checkpoint.h b/storage/tokudb/ft-index/ft/cachetable/checkpoint.h index 9e1725af91b..57a41210e83 100644 --- a/storage/tokudb/ft-index/ft/checkpoint.h +++ b/storage/tokudb/ft-index/ft/cachetable/checkpoint.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_CHECKPOINT_H -#define TOKU_CHECKPOINT_H /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,17 +86,19 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2009-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "$Id$" -#include "cachetable.h" - #include <stdint.h> -void toku_set_checkpoint_period(CACHETABLE ct, uint32_t new_period); +#include "ft/cachetable/cachetable.h" + //Effect: Change [end checkpoint (n) - begin checkpoint (n+1)] delay to // new_period seconds. 0 means disable. +void toku_set_checkpoint_period(CACHETABLE ct, uint32_t new_period); uint32_t toku_get_checkpoint_period_unlocked(CACHETABLE ct); @@ -160,13 +160,11 @@ typedef enum {SCHEDULED_CHECKPOINT = 0, // "normal" checkpoint taken on check // Callbacks are called during checkpoint procedure while checkpoint_safe lock is still held. // Callbacks are primarily intended for use in testing. // caller_id identifies why the checkpoint is being taken. 
-int toku_checkpoint(CHECKPOINTER cp, TOKULOGGER logger, - void (*callback_f)(void*), void * extra, - void (*callback2_f)(void*), void * extra2, +int toku_checkpoint(CHECKPOINTER cp, struct tokulogger *logger, + void (*callback_f)(void *extra), void *extra, + void (*callback2_f)(void *extra2), void *extra2, checkpoint_caller_t caller_id); - - /****** * These functions are called from the ydb level. * They return status information and have no side effects. @@ -200,6 +198,3 @@ typedef struct { } CHECKPOINT_STATUS_S, *CHECKPOINT_STATUS; void toku_checkpoint_get_status(CACHETABLE ct, CHECKPOINT_STATUS stat); - - -#endif diff --git a/storage/tokudb/ft-index/ft/comparator.h b/storage/tokudb/ft-index/ft/comparator.h index 98c20b82aa5..caf2b8b9d18 100644 --- a/storage/tokudb/ft-index/ft/comparator.h +++ b/storage/tokudb/ft-index/ft/comparator.h @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -85,47 +85,105 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -#pragma once - #include <db.h> #include <string.h> -#include <ft/ybt.h> -#include <ft/fttypes.h> +#include "portability/memory.h" + +#include "util/dbt.h" + +typedef int (*ft_compare_func)(DB *db, const DBT *a, const DBT *b); + +int toku_keycompare(const void *key1, uint32_t key1len, const void *key2, uint32_t key2len); + +int toku_builtin_compare_fun (DB *, const DBT *, const DBT*) __attribute__((__visibility__("default"))); namespace toku { -// a comparator object encapsulates the data necessary for -// comparing two keys in a fractal tree. it further understands -// that points may be positive or negative infinity. - -class comparator { -public: - void set_descriptor(DESCRIPTOR desc) { - m_fake_db.cmp_descriptor = desc; - } - - void create(ft_compare_func cmp, DESCRIPTOR desc) { - m_cmp = cmp; - memset(&m_fake_db, 0, sizeof(m_fake_db)); - m_fake_db.cmp_descriptor = desc; - } - - int compare(const DBT *a, const DBT *b) { - if (toku_dbt_is_infinite(a) || toku_dbt_is_infinite(b)) { - return toku_dbt_infinite_compare(a, b); - } else { - return m_cmp(&m_fake_db, a, b); + // a comparator object encapsulates the data necessary for + // comparing two keys in a fractal tree. it further understands + // that points may be positive or negative infinity. + + class comparator { + void init(ft_compare_func cmp, DESCRIPTOR desc, uint8_t memcmp_magic) { + _cmp = cmp; + _fake_db->cmp_descriptor = desc; + _memcmp_magic = memcmp_magic; + } + + public: + // This magic value is reserved to mean that the magic has not been set. + static const uint8_t MEMCMP_MAGIC_NONE = 0; + + void create(ft_compare_func cmp, DESCRIPTOR desc, uint8_t memcmp_magic = MEMCMP_MAGIC_NONE) { + XCALLOC(_fake_db); + init(cmp, desc, memcmp_magic); + } + + // inherit the attributes of another comparator, but keep our own + // copy of fake_db that is owned separately from the one given. 
+ void inherit(const comparator &cmp) { + invariant_notnull(_fake_db); + invariant_notnull(cmp._cmp); + invariant_notnull(cmp._fake_db); + init(cmp._cmp, cmp._fake_db->cmp_descriptor, cmp._memcmp_magic); + } + + // like inherit, but doesn't require that the this comparator + // was already created + void create_from(const comparator &cmp) { + XCALLOC(_fake_db); + inherit(cmp); + } + + void destroy() { + toku_free(_fake_db); + } + + const DESCRIPTOR_S *get_descriptor() const { + return _fake_db->cmp_descriptor; + } + + ft_compare_func get_compare_func() const { + return _cmp; + } + + uint8_t get_memcmp_magic() const { + return _memcmp_magic; + } + + bool valid() const { + return _cmp != nullptr; + } + + inline bool dbt_has_memcmp_magic(const DBT *dbt) const { + return *reinterpret_cast<const char *>(dbt->data) == _memcmp_magic; + } + + int operator()(const DBT *a, const DBT *b) const { + if (__builtin_expect(toku_dbt_is_infinite(a) || toku_dbt_is_infinite(b), 0)) { + return toku_dbt_infinite_compare(a, b); + } else if (_memcmp_magic != MEMCMP_MAGIC_NONE + // If `a' has the memcmp magic.. + && dbt_has_memcmp_magic(a) + // ..then we expect `b' to also have the memcmp magic + && __builtin_expect(dbt_has_memcmp_magic(b), 1)) { + return toku_builtin_compare_fun(nullptr, a, b); + } else { + // yikes, const sadness here + return _cmp(const_cast<DB *>(_fake_db), a, b); + } } - } -private: - struct __toku_db m_fake_db; - ft_compare_func m_cmp; -}; + private: + DB *_fake_db; + ft_compare_func _cmp; + uint8_t _memcmp_magic; + }; } /* namespace toku */ diff --git a/storage/tokudb/ft-index/ft/cursor.cc b/storage/tokudb/ft-index/ft/cursor.cc new file mode 100644 index 00000000000..9814a49416b --- /dev/null +++ b/storage/tokudb/ft-index/ft/cursor.cc @@ -0,0 +1,505 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2014 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
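The memcmp-magic fast path in toku::comparator::operator() above only fires when both keys begin with the agreed magic byte, in which case plain byte comparison must already yield the intended order. A hypothetical key encoder that satisfies this (not from this patch; the magic value and function are assumptions) prefixes the magic byte and stores the payload big-endian:

#include <stdint.h>

static const uint8_t MY_MEMCMP_MAGIC = 0x01;     // assumed value agreed on at dictionary creation

static size_t encode_key_sketch(uint32_t k, uint8_t out[5]) {
    out[0] = MY_MEMCMP_MAGIC;        // first byte is what dbt_has_memcmp_magic() inspects
    out[1] = (uint8_t)(k >> 24);     // big-endian payload: memcmp order == numeric order
    out[2] = (uint8_t)(k >> 16);
    out[3] = (uint8_t)(k >> 8);
    out[4] = (uint8_t)(k);
    return 5;
}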
+ +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. 
+*/ + +#include "ft/ft-internal.h" + +#include "ft/cursor.h" +#include "ft/leafentry.h" +#include "ft/txn/txn.h" +#include "util/dbt.h" + +int toku_ft_cursor_create(FT_HANDLE ft_handle, FT_CURSOR cursor, TOKUTXN ttxn, + bool is_snapshot_read, + bool disable_prefetching, + bool is_temporary) { + if (is_snapshot_read) { + invariant(ttxn != NULL); + int accepted = toku_txn_reads_txnid(ft_handle->ft->h->root_xid_that_created, ttxn); + if (accepted != TOKUDB_ACCEPT) { + invariant(accepted == 0); + return TOKUDB_MVCC_DICTIONARY_TOO_NEW; + } + } + + memset(cursor, 0, sizeof(*cursor)); + cursor->ft_handle = ft_handle; + cursor->ttxn = ttxn; + cursor->is_snapshot_read = is_snapshot_read; + cursor->disable_prefetching = disable_prefetching; + cursor->is_temporary = is_temporary; + return 0; +} + +void toku_ft_cursor_destroy(FT_CURSOR cursor) { + toku_destroy_dbt(&cursor->key); + toku_destroy_dbt(&cursor->val); + toku_destroy_dbt(&cursor->range_lock_left_key); + toku_destroy_dbt(&cursor->range_lock_right_key); +} + +// deprecated, should only be used by tests +int toku_ft_cursor(FT_HANDLE ft_handle, FT_CURSOR *cursorptr, TOKUTXN ttxn, + bool is_snapshot_read, bool disable_prefetching) { + FT_CURSOR XCALLOC(cursor); + int r = toku_ft_cursor_create(ft_handle, cursor, ttxn, is_snapshot_read, disable_prefetching, false); + if (r == 0) { + *cursorptr = cursor; + } else { + toku_free(cursor); + } + return r; +} + +// deprecated, should only be used by tests +void toku_ft_cursor_close(FT_CURSOR cursor) { + toku_ft_cursor_destroy(cursor); + toku_free(cursor); +} + +void toku_ft_cursor_remove_restriction(FT_CURSOR cursor) { + cursor->out_of_range_error = 0; + cursor->direction = 0; +} + +void toku_ft_cursor_set_check_interrupt_cb(FT_CURSOR cursor, FT_CHECK_INTERRUPT_CALLBACK cb, void *extra) { + cursor->interrupt_cb = cb; + cursor->interrupt_cb_extra = extra; +} + +void toku_ft_cursor_set_leaf_mode(FT_CURSOR cursor) { + cursor->is_leaf_mode = true; +} + +int toku_ft_cursor_is_leaf_mode(FT_CURSOR cursor) { + return cursor->is_leaf_mode; +} + +// TODO: Rename / cleanup - this has nothing to do with locking +void toku_ft_cursor_set_range_lock(FT_CURSOR cursor, + const DBT *left, const DBT *right, + bool left_is_neg_infty, bool right_is_pos_infty, + int out_of_range_error) { + // Destroy any existing keys and then clone the given left, right keys + toku_destroy_dbt(&cursor->range_lock_left_key); + if (left_is_neg_infty) { + cursor->left_is_neg_infty = true; + } else { + toku_clone_dbt(&cursor->range_lock_left_key, *left); + } + + toku_destroy_dbt(&cursor->range_lock_right_key); + if (right_is_pos_infty) { + cursor->right_is_pos_infty = true; + } else { + toku_clone_dbt(&cursor->range_lock_right_key, *right); + } + + // TOKUDB_FOUND_BUT_REJECTED is a DB_NOTFOUND with instructions to stop looking. (Faster) + cursor->out_of_range_error = out_of_range_error == DB_NOTFOUND ? TOKUDB_FOUND_BUT_REJECTED : out_of_range_error; + cursor->direction = 0; +} + +void toku_ft_cursor_set_prefetching(FT_CURSOR cursor) { + cursor->prefetching = true; +} + +bool toku_ft_cursor_prefetching(FT_CURSOR cursor) { + return cursor->prefetching; +} + +//Return true if cursor is uninitialized. false otherwise. 
+bool toku_ft_cursor_not_set(FT_CURSOR cursor) { + assert((cursor->key.data==NULL) == (cursor->val.data==NULL)); + return (bool)(cursor->key.data == NULL); +} + +struct ft_cursor_search_struct { + FT_GET_CALLBACK_FUNCTION getf; + void *getf_v; + FT_CURSOR cursor; + ft_search *search; +}; + +/* search for the first kv pair that matches the search object */ +static int ft_cursor_search(FT_CURSOR cursor, ft_search *search, + FT_GET_CALLBACK_FUNCTION getf, void *getf_v, bool can_bulk_fetch) { + int r = toku_ft_search(cursor->ft_handle, search, getf, getf_v, cursor, can_bulk_fetch); + return r; +} + +static inline int compare_k_x(FT_HANDLE ft_handle, const DBT *k, const DBT *x) { + return ft_handle->ft->cmp(k, x); +} + +int toku_ft_cursor_compare_one(const ft_search &UU(search), const DBT *UU(x)) { + return 1; +} + +static int ft_cursor_compare_set(const ft_search &search, const DBT *x) { + FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context); + return compare_k_x(ft_handle, search.k, x) <= 0; /* return min xy: kv <= xy */ +} + +static int +ft_cursor_current_getf(uint32_t keylen, const void *key, + uint32_t vallen, const void *val, + void *v, bool lock_only) { + struct ft_cursor_search_struct *CAST_FROM_VOIDP(bcss, v); + int r; + if (key==NULL) { + r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, lock_only); + } else { + FT_CURSOR cursor = bcss->cursor; + DBT newkey; + toku_fill_dbt(&newkey, key, keylen); + if (compare_k_x(cursor->ft_handle, &cursor->key, &newkey) != 0) { + r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, lock_only); // This was once DB_KEYEMPTY + if (r==0) r = TOKUDB_FOUND_BUT_REJECTED; + } + else + r = bcss->getf(keylen, key, vallen, val, bcss->getf_v, lock_only); + } + return r; +} + +static int ft_cursor_compare_next(const ft_search &search, const DBT *x) { + FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context); + return compare_k_x(ft_handle, search.k, x) < 0; /* return min xy: kv < xy */ +} + +int toku_ft_cursor_current(FT_CURSOR cursor, int op, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) { + if (toku_ft_cursor_not_set(cursor)) { + return EINVAL; + } + cursor->direction = 0; + if (op == DB_CURRENT) { + struct ft_cursor_search_struct bcss = {getf, getf_v, cursor, 0}; + ft_search search; + ft_search_init(&search, ft_cursor_compare_set, FT_SEARCH_LEFT, &cursor->key, nullptr, cursor->ft_handle); + int r = toku_ft_search(cursor->ft_handle, &search, ft_cursor_current_getf, &bcss, cursor, false); + ft_search_finish(&search); + return r; + } + return getf(cursor->key.size, cursor->key.data, cursor->val.size, cursor->val.data, getf_v, false); // ft_cursor_copyout(cursor, outkey, outval); +} + +int toku_ft_cursor_first(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) { + cursor->direction = 0; + ft_search search; + ft_search_init(&search, toku_ft_cursor_compare_one, FT_SEARCH_LEFT, nullptr, nullptr, cursor->ft_handle); + int r = ft_cursor_search(cursor, &search, getf, getf_v, false); + ft_search_finish(&search); + return r; +} + +int toku_ft_cursor_last(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) { + cursor->direction = 0; + ft_search search; + ft_search_init(&search, toku_ft_cursor_compare_one, FT_SEARCH_RIGHT, nullptr, nullptr, cursor->ft_handle); + int r = ft_cursor_search(cursor, &search, getf, getf_v, false); + ft_search_finish(&search); + return r; +} + +int toku_ft_cursor_check_restricted_range(FT_CURSOR c, const void *key, uint32_t keylen) { + if (c->out_of_range_error) { + FT ft = c->ft_handle->ft; + DBT found_key; + toku_fill_dbt(&found_key, 
key, keylen); + if ((!c->left_is_neg_infty && c->direction <= 0 && ft->cmp(&found_key, &c->range_lock_left_key) < 0) || + (!c->right_is_pos_infty && c->direction >= 0 && ft->cmp(&found_key, &c->range_lock_right_key) > 0)) { + invariant(c->out_of_range_error); + return c->out_of_range_error; + } + } + // Reset cursor direction to mitigate risk if some query type doesn't set the direction. + // It is always correct to check both bounds (which happens when direction==0) but it can be slower. + c->direction = 0; + return 0; +} + +int toku_ft_cursor_shortcut(FT_CURSOR cursor, int direction, uint32_t index, bn_data *bd, + FT_GET_CALLBACK_FUNCTION getf, void *getf_v, + uint32_t *keylen, void **key, uint32_t *vallen, void **val) { + int r = 0; + // if we are searching towards the end, limit is last element + // if we are searching towards the beginning, limit is the first element + uint32_t limit = (direction > 0) ? (bd->num_klpairs() - 1) : 0; + + //Starting with the prev, find the first real (non-provdel) leafentry. + while (index != limit) { + index += direction; + LEAFENTRY le; + void* foundkey = NULL; + uint32_t foundkeylen = 0; + + r = bd->fetch_klpair(index, &le, &foundkeylen, &foundkey); + invariant_zero(r); + + if (toku_ft_cursor_is_leaf_mode(cursor) || !le_val_is_del(le, cursor->is_snapshot_read, cursor->ttxn)) { + le_extract_val( + le, + toku_ft_cursor_is_leaf_mode(cursor), + cursor->is_snapshot_read, + cursor->ttxn, + vallen, + val + ); + *key = foundkey; + *keylen = foundkeylen; + + cursor->direction = direction; + r = toku_ft_cursor_check_restricted_range(cursor, *key, *keylen); + if (r!=0) { + paranoid_invariant(r == cursor->out_of_range_error); + // We already got at least one entry from the bulk fetch. + // Return 0 (instead of out of range error). 
+ r = 0; + break; + } + r = getf(*keylen, *key, *vallen, *val, getf_v, false); + if (r == TOKUDB_CURSOR_CONTINUE) { + continue; + } + else { + break; + } + } + } + + return r; +} + +int toku_ft_cursor_next(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) { + cursor->direction = +1; + ft_search search; + ft_search_init(&search, ft_cursor_compare_next, FT_SEARCH_LEFT, &cursor->key, nullptr, cursor->ft_handle); + int r = ft_cursor_search(cursor, &search, getf, getf_v, true); + ft_search_finish(&search); + if (r == 0) { + toku_ft_cursor_set_prefetching(cursor); + } + return r; +} + +static int ft_cursor_search_eq_k_x_getf(uint32_t keylen, const void *key, + uint32_t vallen, const void *val, + void *v, bool lock_only) { + struct ft_cursor_search_struct *CAST_FROM_VOIDP(bcss, v); + int r; + if (key==NULL) { + r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, false); + } else { + FT_CURSOR cursor = bcss->cursor; + DBT newkey; + toku_fill_dbt(&newkey, key, keylen); + if (compare_k_x(cursor->ft_handle, bcss->search->k, &newkey) == 0) { + r = bcss->getf(keylen, key, vallen, val, bcss->getf_v, lock_only); + } else { + r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, lock_only); + if (r==0) r = TOKUDB_FOUND_BUT_REJECTED; + } + } + return r; +} + +/* search for the kv pair that matches the search object and is equal to k */ +static int ft_cursor_search_eq_k_x(FT_CURSOR cursor, ft_search *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) { + struct ft_cursor_search_struct bcss = {getf, getf_v, cursor, search}; + int r = toku_ft_search(cursor->ft_handle, search, ft_cursor_search_eq_k_x_getf, &bcss, cursor, false); + return r; +} + +static int ft_cursor_compare_prev(const ft_search &search, const DBT *x) { + FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context); + return compare_k_x(ft_handle, search.k, x) > 0; /* return max xy: kv > xy */ +} + +int toku_ft_cursor_prev(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) { + cursor->direction = -1; + ft_search search; + ft_search_init(&search, ft_cursor_compare_prev, FT_SEARCH_RIGHT, &cursor->key, nullptr, cursor->ft_handle); + int r = ft_cursor_search(cursor, &search, getf, getf_v, true); + ft_search_finish(&search); + return r; +} + +int toku_ft_cursor_compare_set_range(const ft_search &search, const DBT *x) { + FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context); + return compare_k_x(ft_handle, search.k, x) <= 0; /* return kv <= xy */ +} + +int toku_ft_cursor_set(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) { + cursor->direction = 0; + ft_search search; + ft_search_init(&search, toku_ft_cursor_compare_set_range, FT_SEARCH_LEFT, key, nullptr, cursor->ft_handle); + int r = ft_cursor_search_eq_k_x(cursor, &search, getf, getf_v); + ft_search_finish(&search); + return r; +} + +int toku_ft_cursor_set_range(FT_CURSOR cursor, DBT *key, DBT *key_bound, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) { + cursor->direction = 0; + ft_search search; + ft_search_init(&search, toku_ft_cursor_compare_set_range, FT_SEARCH_LEFT, key, key_bound, cursor->ft_handle); + int r = ft_cursor_search(cursor, &search, getf, getf_v, false); + ft_search_finish(&search); + return r; +} + +static int ft_cursor_compare_set_range_reverse(const ft_search &search, const DBT *x) { + FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context); + return compare_k_x(ft_handle, search.k, x) >= 0; /* return kv >= xy */ +} + +int toku_ft_cursor_set_range_reverse(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) { + cursor->direction 
= 0; + ft_search search; + ft_search_init(&search, ft_cursor_compare_set_range_reverse, FT_SEARCH_RIGHT, key, nullptr, cursor->ft_handle); + int r = ft_cursor_search(cursor, &search, getf, getf_v, false); + ft_search_finish(&search); + return r; +} + +//TODO: When tests have been rewritten, get rid of this function. +//Only used by tests. +int toku_ft_cursor_get (FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, int get_flags) { + int op = get_flags & DB_OPFLAGS_MASK; + if (get_flags & ~DB_OPFLAGS_MASK) + return EINVAL; + + switch (op) { + case DB_CURRENT: + case DB_CURRENT_BINDING: + return toku_ft_cursor_current(cursor, op, getf, getf_v); + case DB_FIRST: + return toku_ft_cursor_first(cursor, getf, getf_v); + case DB_LAST: + return toku_ft_cursor_last(cursor, getf, getf_v); + case DB_NEXT: + if (toku_ft_cursor_not_set(cursor)) { + return toku_ft_cursor_first(cursor, getf, getf_v); + } else { + return toku_ft_cursor_next(cursor, getf, getf_v); + } + case DB_PREV: + if (toku_ft_cursor_not_set(cursor)) { + return toku_ft_cursor_last(cursor, getf, getf_v); + } else { + return toku_ft_cursor_prev(cursor, getf, getf_v); + } + case DB_SET: + return toku_ft_cursor_set(cursor, key, getf, getf_v); + case DB_SET_RANGE: + return toku_ft_cursor_set_range(cursor, key, nullptr, getf, getf_v); + default: ;// Fall through + } + return EINVAL; +} + +void toku_ft_cursor_peek(FT_CURSOR cursor, const DBT **pkey, const DBT **pval) { + *pkey = &cursor->key; + *pval = &cursor->val; +} + +bool toku_ft_cursor_uninitialized(FT_CURSOR c) { + return toku_ft_cursor_not_set(c); +} + +int toku_ft_lookup(FT_HANDLE ft_handle, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) { + FT_CURSOR cursor; + int r = toku_ft_cursor(ft_handle, &cursor, NULL, false, false); + if (r != 0) { + return r; + } + + r = toku_ft_cursor_set(cursor, k, getf, getf_v); + + toku_ft_cursor_close(cursor); + return r; +} diff --git a/storage/tokudb/ft-index/ft/ft-search.h b/storage/tokudb/ft-index/ft/cursor.h index 9c26be456de..da2b3d5c8a1 100644 --- a/storage/tokudb/ft-index/ft/ft-search.h +++ b/storage/tokudb/ft-index/ft/cursor.h @@ -1,6 +1,6 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" + /* COPYING CONDITIONS NOTICE: @@ -29,8 +29,8 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2014 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,30 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
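toku_ft_lookup() above shows the intended cursor lifecycle: create a cursor, drive one positioning call with a getf callback, then close it. A sketch of a matching callback (not from this patch; lookup_result and the copy logic are illustrative only) that copies the found value out for the caller:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct lookup_result {
    void *val;
    uint32_t vallen;
    bool found;
};

static int copy_val_getf_sketch(uint32_t keylen, const void *key,
                                uint32_t vallen, const void *val,
                                void *extra, bool lock_only) {
    (void) keylen;
    struct lookup_result *res = (struct lookup_result *) extra;
    if (lock_only) {
        return 0;                    // lock-only pass: nothing to copy in this sketch
    }
    if (key == NULL) {               // NULL key signals "not found", as in ft_cursor_current_getf above
        res->found = false;
        return 0;
    }
    res->val = malloc(vallen);       // error handling omitted for brevity
    memcpy(res->val, val, vallen);
    res->vallen = vallen;
    res->found = true;
    return 0;
}

// usage sketch:  struct lookup_result res = {NULL, 0, false};
//                int r = toku_ft_lookup(ft_handle, &key_dbt, copy_val_getf_sketch, &res);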
- -#ifndef FT_SEARCH_H -#define FT_SEARCH_H - +#pragma once + +#include <db.h> + +#include "ft/ft-internal.h" + +/* an ft cursor is represented as a kv pair in a tree */ +struct ft_cursor { + FT_HANDLE ft_handle; + DBT key, val; // The key-value pair that the cursor currently points to + DBT range_lock_left_key, range_lock_right_key; + bool prefetching; + bool left_is_neg_infty, right_is_pos_infty; + bool is_snapshot_read; // true if query is read_committed, false otherwise + bool is_leaf_mode; + bool disable_prefetching; + bool is_temporary; + int out_of_range_error; + int direction; + TOKUTXN ttxn; + FT_CHECK_INTERRUPT_CALLBACK interrupt_cb; + void *interrupt_cb_extra; +}; +typedef struct ft_cursor *FT_CURSOR; enum ft_search_direction_e { FT_SEARCH_LEFT = 1, /* search left -> right, finds min xy as defined by the compare function */ @@ -109,7 +127,7 @@ typedef int (*ft_search_compare_func_t)(const struct ft_search &, const DBT *); /* the search object contains the compare function, search direction, and the kv pair that is used in the compare function. the context is the user's private data */ -typedef struct ft_search { +struct ft_search { ft_search_compare_func_t compare; enum ft_search_direction_e direction; const DBT *k; @@ -137,22 +155,83 @@ typedef struct ft_search { // way out with a DB_NOTFOUND we ought to unpin those nodes. See #3528. DBT pivot_bound; const DBT *k_bound; -} ft_search_t; +}; /* initialize the search compare object */ -static inline ft_search_t *ft_search_init(ft_search_t *so, ft_search_compare_func_t compare, enum ft_search_direction_e direction, - const DBT *k, const DBT *k_bound, void *context) { - so->compare = compare; - so->direction = direction; - so->k = k; - so->context = context; - toku_init_dbt(&so->pivot_bound); - so->k_bound = k_bound; - return so; +static inline ft_search *ft_search_init(ft_search *search, ft_search_compare_func_t compare, + enum ft_search_direction_e direction, + const DBT *k, const DBT *k_bound, void *context) { + search->compare = compare; + search->direction = direction; + search->k = k; + search->context = context; + toku_init_dbt(&search->pivot_bound); + search->k_bound = k_bound; + return search; } -static inline void ft_search_finish(ft_search_t *so) { - toku_destroy_dbt(&so->pivot_bound); +static inline void ft_search_finish(ft_search *search) { + toku_destroy_dbt(&search->pivot_bound); } -#endif + +int toku_ft_cursor_create(FT_HANDLE ft_handle, FT_CURSOR cursor, TOKUTXN txn, + bool is_snapshot_read, + bool disable_prefetching, + bool is_temporary); + +void toku_ft_cursor_destroy(FT_CURSOR cursor); + +int toku_ft_lookup(FT_HANDLE ft_h, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); + +void toku_ft_cursor_set_prefetching(FT_CURSOR cursor); + +bool toku_ft_cursor_prefetching(FT_CURSOR cursor); + +bool toku_ft_cursor_not_set(FT_CURSOR cursor); + +void toku_ft_cursor_set_leaf_mode(FT_CURSOR cursor); + +void toku_ft_cursor_remove_restriction(FT_CURSOR cursor); + +void toku_ft_cursor_set_check_interrupt_cb(FT_CURSOR cursor, FT_CHECK_INTERRUPT_CALLBACK cb, void *extra); + +int toku_ft_cursor_is_leaf_mode(FT_CURSOR cursor); + +void toku_ft_cursor_set_range_lock(FT_CURSOR, const DBT *, const DBT *, bool, bool, int); + +int toku_ft_cursor_first(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); + +int toku_ft_cursor_last(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); + +int 
toku_ft_cursor_next(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); + +int toku_ft_cursor_prev(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); + +int toku_ft_cursor_current(FT_CURSOR cursor, int op, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); + +int toku_ft_cursor_set(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); + +int toku_ft_cursor_set_range(FT_CURSOR cursor, DBT *key, DBT *key_bound, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); + +int toku_ft_cursor_set_range_reverse(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); + +bool toku_ft_cursor_uninitialized(FT_CURSOR cursor) __attribute__ ((warn_unused_result)); + +void toku_ft_cursor_peek(FT_CURSOR cursor, const DBT **pkey, const DBT **pval); + +int toku_ft_cursor_check_restricted_range(FT_CURSOR cursor, const void *key, uint32_t keylen); + +int toku_ft_cursor_shortcut(FT_CURSOR cursor, int direction, uint32_t index, bn_data *bd, + FT_GET_CALLBACK_FUNCTION getf, void *getf_v, + uint32_t *keylen, void **key, uint32_t *vallen, void **val); + +// used by get_key_after_bytes +int toku_ft_cursor_compare_one(const ft_search &search, const DBT *x); +int toku_ft_cursor_compare_set_range(const ft_search &search, const DBT *x); + +// deprecated, should only be used by tests, and eventually removed +int toku_ft_cursor(FT_HANDLE ft_handle, FT_CURSOR *ftcursor_p, TOKUTXN txn, bool, bool) __attribute__ ((warn_unused_result)); +void toku_ft_cursor_close(FT_CURSOR cursor); +int toku_ft_cursor_get(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, int get_flags); +int toku_ft_cursor_delete(FT_CURSOR cursor, int flags, TOKUTXN txn); diff --git a/storage/tokudb/ft-index/ft/fifo.cc b/storage/tokudb/ft-index/ft/fifo.cc deleted file mode 100644 index 07d7baec2a1..00000000000 --- a/storage/tokudb/ft-index/ft/fifo.cc +++ /dev/null @@ -1,253 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. 
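The ft_search object declared above packages a bound-satisfaction predicate with a direction: the compare callback answers "does candidate x satisfy the bound?", and FT_SEARCH_LEFT asks for the smallest satisfying key. A sketch of a "first key >= k" query in the same style as the cursor code above (not from this patch; my_compare_keys_sketch is a hypothetical ordering function):

static int ge_bound_sketch(const ft_search &search, const DBT *x) {
    return my_compare_keys_sketch(search.k, x) <= 0;   // true for every x >= k
}

static int first_key_at_or_after_sketch(FT_HANDLE h, FT_CURSOR cursor, const DBT *k,
                                        FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
    ft_search search;
    ft_search_init(&search, ge_bound_sketch, FT_SEARCH_LEFT, k, nullptr, h);
    int r = toku_ft_search(h, &search, getf, getf_v, cursor, false);   // same call shape as ft_cursor_search()
    ft_search_finish(&search);
    return r;
}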
- -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -#include "fifo.h" -#include "xids.h" -#include "ybt.h" -#include <memory.h> -#include <toku_assert.h> - -struct fifo { - int n_items_in_fifo; - char *memory; // An array of bytes into which fifo_entries are embedded. - int memory_size; // How big is fifo_memory - int memory_used; // How many bytes are in use? 
-}; - -static void fifo_init(struct fifo *fifo) { - fifo->n_items_in_fifo = 0; - fifo->memory = 0; - fifo->memory_size = 0; - fifo->memory_used = 0; -} - -__attribute__((const,nonnull)) -static int fifo_entry_size(struct fifo_entry *entry) { - return sizeof (struct fifo_entry) + entry->keylen + entry->vallen - + xids_get_size(&entry->xids_s) - - sizeof(XIDS_S); //Prevent double counting from fifo_entry+xids_get_size -} - -__attribute__((const,nonnull)) -size_t toku_ft_msg_memsize_in_fifo(FT_MSG msg) { - // This must stay in sync with fifo_entry_size because that's what we - // really trust. But sometimes we only have an in-memory FT_MSG, not - // a serialized fifo_entry so we have to fake it. - return sizeof (struct fifo_entry) + msg->u.id.key->size + msg->u.id.val->size - + xids_get_size(msg->xids) - - sizeof(XIDS_S); -} - -int toku_fifo_create(FIFO *ptr) { - struct fifo *XMALLOC(fifo); - if (fifo == 0) return ENOMEM; - fifo_init(fifo); - *ptr = fifo; - return 0; -} - -void toku_fifo_resize(FIFO fifo, size_t new_size) { - XREALLOC_N(new_size, fifo->memory); - fifo->memory_size = new_size; -} - -void toku_fifo_free(FIFO *ptr) { - FIFO fifo = *ptr; - if (fifo->memory) toku_free(fifo->memory); - fifo->memory=0; - toku_free(fifo); - *ptr = 0; -} - -int toku_fifo_n_entries(FIFO fifo) { - return fifo->n_items_in_fifo; -} - -static int next_power_of_two (int n) { - int r = 4096; - while (r < n) { - r*=2; - assert(r>0); - } - return r; -} - -int toku_fifo_enq(FIFO fifo, const void *key, unsigned int keylen, const void *data, unsigned int datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, int32_t *dest) { - int need_space_here = sizeof(struct fifo_entry) - + keylen + datalen - + xids_get_size(xids) - - sizeof(XIDS_S); //Prevent double counting - int need_space_total = fifo->memory_used+need_space_here; - if (fifo->memory == NULL || need_space_total > fifo->memory_size) { - // resize the fifo to the next power of 2 greater than the needed space - int next_2 = next_power_of_two(need_space_total); - toku_fifo_resize(fifo, next_2); - } - struct fifo_entry *entry = (struct fifo_entry *)(fifo->memory + fifo->memory_used); - fifo_entry_set_msg_type(entry, type); - entry->msn = msn; - xids_cpy(&entry->xids_s, xids); - entry->is_fresh = is_fresh; - entry->keylen = keylen; - unsigned char *e_key = xids_get_end_of_array(&entry->xids_s); - memcpy(e_key, key, keylen); - entry->vallen = datalen; - memcpy(e_key + keylen, data, datalen); - if (dest) { - *dest = fifo->memory_used; - } - fifo->n_items_in_fifo++; - fifo->memory_used += need_space_here; - return 0; -} - -int toku_fifo_iterate_internal_start(FIFO UU(fifo)) { return 0; } -int toku_fifo_iterate_internal_has_more(FIFO fifo, int off) { return off < fifo->memory_used; } -int toku_fifo_iterate_internal_next(FIFO fifo, int off) { - struct fifo_entry *e = (struct fifo_entry *)(fifo->memory + off); - return off + fifo_entry_size(e); -} -struct fifo_entry * toku_fifo_iterate_internal_get_entry(FIFO fifo, int off) { - return (struct fifo_entry *)(fifo->memory + off); -} -size_t toku_fifo_internal_entry_memsize(struct fifo_entry *e) { - return fifo_entry_size(e); -} - -void toku_fifo_iterate (FIFO fifo, void(*f)(bytevec key,ITEMLEN keylen,bytevec data,ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, void*), void *arg) { - FIFO_ITERATE(fifo, - key, keylen, data, datalen, type, msn, xids, is_fresh, - f(key,keylen,data,datalen,type,msn,xids,is_fresh, arg)); -} - -unsigned int toku_fifo_buffer_size_in_use (FIFO fifo) { - 
return fifo->memory_used; -} - -unsigned long toku_fifo_memory_size_in_use(FIFO fifo) { - return sizeof(*fifo)+fifo->memory_used; -} - -unsigned long toku_fifo_memory_footprint(FIFO fifo) { - size_t size_used = toku_memory_footprint(fifo->memory, fifo->memory_used); - long rval = sizeof(*fifo) + size_used; - return rval; -} - -DBT *fill_dbt_for_fifo_entry(DBT *dbt, const struct fifo_entry *entry) { - return toku_fill_dbt(dbt, xids_get_end_of_array((XIDS) &entry->xids_s), entry->keylen); -} - -struct fifo_entry *toku_fifo_get_entry(FIFO fifo, int off) { - return toku_fifo_iterate_internal_get_entry(fifo, off); -} - -void toku_fifo_clone(FIFO orig_fifo, FIFO* cloned_fifo) { - struct fifo *XMALLOC(new_fifo); - assert(new_fifo); - new_fifo->n_items_in_fifo = orig_fifo->n_items_in_fifo; - new_fifo->memory_used = orig_fifo->memory_used; - new_fifo->memory_size = new_fifo->memory_used; - XMALLOC_N(new_fifo->memory_size, new_fifo->memory); - memcpy( - new_fifo->memory, - orig_fifo->memory, - new_fifo->memory_size - ); - *cloned_fifo = new_fifo; -} - -bool toku_are_fifos_same(FIFO fifo1, FIFO fifo2) { - return ( - fifo1->memory_used == fifo2->memory_used && - memcmp(fifo1->memory, fifo2->memory, fifo1->memory_used) == 0 - ); -} diff --git a/storage/tokudb/ft-index/ft/fifo.h b/storage/tokudb/ft-index/ft/fifo.h deleted file mode 100644 index e9f53248b98..00000000000 --- a/storage/tokudb/ft-index/ft/fifo.h +++ /dev/null @@ -1,182 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef FIFO_H -#define FIFO_H -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. 
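toku_fifo_enq() in the removed fifo.cc above sizes each packed entry as the entry header plus key, value, and serialized xids, minus sizeof(XIDS_S), because the header already embeds one XIDS_S that xids_get_size() would otherwise count twice. A small arithmetic sketch (the sizes are hypothetical):

// hypothetical sizes: 40-byte fifo_entry header, 8-byte embedded XIDS_S,
// 16-byte key, 100-byte value, 24 bytes of serialized xids
static size_t fifo_entry_bytes_sketch(size_t entry_header, size_t xids_header,
                                      size_t keylen, size_t vallen, size_t xids_size) {
    return entry_header + keylen + vallen + xids_size - xids_header;
}
// fifo_entry_bytes_sketch(40, 8, 16, 100, 24) == 172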
- -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -#include "fttypes.h" -#include "xids-internal.h" -#include "xids.h" - - -// If the fifo_entry is unpacked, the compiler aligns the xids array and we waste a lot of space -struct __attribute__((__packed__)) fifo_entry { - unsigned int keylen; - unsigned int vallen; - unsigned char type; - bool is_fresh; - MSN msn; - XIDS_S xids_s; -}; - -// get and set the ft message type for a fifo entry. -// it is internally stored as a single unsigned char. 
-static inline enum ft_msg_type -fifo_entry_get_msg_type(const struct fifo_entry * entry) -{ - enum ft_msg_type msg_type; - msg_type = (enum ft_msg_type) entry->type; - return msg_type; -} - -static inline void -fifo_entry_set_msg_type(struct fifo_entry * entry, - enum ft_msg_type msg_type) -{ - unsigned char type = (unsigned char) msg_type; - entry->type = type; -} - -typedef struct fifo *FIFO; - -int toku_fifo_create(FIFO *); - -void toku_fifo_resize(FIFO fifo, size_t new_size); - -void toku_fifo_free(FIFO *); - -int toku_fifo_n_entries(FIFO); - -int toku_fifo_enq (FIFO, const void *key, ITEMLEN keylen, const void *data, ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, int32_t *dest); - -unsigned int toku_fifo_buffer_size_in_use (FIFO fifo); -unsigned long toku_fifo_memory_size_in_use(FIFO fifo); // return how much memory in the fifo holds useful data - -unsigned long toku_fifo_memory_footprint(FIFO fifo); // return how much memory the fifo occupies - -void toku_fifo_iterate(FIFO, void(*f)(bytevec key,ITEMLEN keylen,bytevec data,ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, void*), void*); - -#define FIFO_ITERATE(fifo,keyvar,keylenvar,datavar,datalenvar,typevar,msnvar,xidsvar,is_freshvar,body) ({ \ - for (int fifo_iterate_off = toku_fifo_iterate_internal_start(fifo); \ - toku_fifo_iterate_internal_has_more(fifo, fifo_iterate_off); \ - fifo_iterate_off = toku_fifo_iterate_internal_next(fifo, fifo_iterate_off)) { \ - struct fifo_entry *e = toku_fifo_iterate_internal_get_entry(fifo, fifo_iterate_off); \ - ITEMLEN keylenvar = e->keylen; \ - ITEMLEN datalenvar = e->vallen; \ - enum ft_msg_type typevar = fifo_entry_get_msg_type(e); \ - MSN msnvar = e->msn; \ - XIDS xidsvar = &e->xids_s; \ - bytevec keyvar = xids_get_end_of_array(xidsvar); \ - bytevec datavar = (const uint8_t*)keyvar + e->keylen; \ - bool is_freshvar = e->is_fresh; \ - body; \ - } }) - -#define FIFO_CURRENT_ENTRY_MEMSIZE toku_fifo_internal_entry_memsize(e) - -// Internal functions for the iterator. -int toku_fifo_iterate_internal_start(FIFO fifo); -int toku_fifo_iterate_internal_has_more(FIFO fifo, int off); -int toku_fifo_iterate_internal_next(FIFO fifo, int off); -struct fifo_entry * toku_fifo_iterate_internal_get_entry(FIFO fifo, int off); -size_t toku_fifo_internal_entry_memsize(struct fifo_entry *e) __attribute__((const,nonnull)); -size_t toku_ft_msg_memsize_in_fifo(FT_MSG msg) __attribute__((const,nonnull)); - -DBT *fill_dbt_for_fifo_entry(DBT *dbt, const struct fifo_entry *entry); -struct fifo_entry *toku_fifo_get_entry(FIFO fifo, int off); - -void toku_fifo_clone(FIFO orig_fifo, FIFO* cloned_fifo); - -bool toku_are_fifos_same(FIFO fifo1, FIFO fifo2); - - - - -#endif diff --git a/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.cc b/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.cc index 1f3aa3e0baa..b8bee800f36 100644 --- a/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.cc +++ b/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,12 +89,13 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." 
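The removed fifo.h above exposed both a callback-based iterator (toku_fifo_iterate) and the FIFO_ITERATE macro. A usage sketch against that old API (not from this patch; it assumes the removed declarations are still in scope) that counts the messages still marked fresh:

static void count_fresh_cb_sketch(bytevec key, ITEMLEN keylen,
                                  bytevec data, ITEMLEN datalen,
                                  enum ft_msg_type type, MSN msn,
                                  XIDS xids, bool is_fresh, void *arg) {
    (void) key; (void) keylen; (void) data; (void) datalen;
    (void) type; (void) msn; (void) xids;
    if (is_fresh) {
        (*(int *) arg)++;
    }
}

static int count_fresh_messages_sketch(FIFO fifo) {
    int n = 0;
    toku_fifo_iterate(fifo, count_fresh_cb_sketch, &n);
    return n;
}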
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <ft-cachetable-wrappers.h> +#include "ft/serialize/block_table.h" +#include "ft/ft-cachetable-wrappers.h" +#include "ft/ft-flusher.h" +#include "ft/ft-internal.h" +#include "ft/ft.h" +#include "ft/node.h" -#include <fttypes.h> -#include <ft-flusher.h> -#include <ft-internal.h> -#include <ft.h> #include <util/context.h> static void @@ -103,23 +104,23 @@ ftnode_get_key_and_fullhash( uint32_t* fullhash, void* extra) { - FT h = (FT) extra; - BLOCKNUM name; - toku_allocate_blocknum(h->blocktable, &name, h); - *cachekey = name; - *fullhash = toku_cachetable_hash(h->cf, name); + FT ft = (FT) extra; + BLOCKNUM blocknum; + ft->blocktable.allocate_blocknum(&blocknum, ft); + *cachekey = blocknum; + *fullhash = toku_cachetable_hash(ft->cf, blocknum); } void cachetable_put_empty_node_with_dep_nodes( - FT h, + FT ft, uint32_t num_dependent_nodes, FTNODE* dependent_nodes, - BLOCKNUM* name, //output + BLOCKNUM* blocknum, //output uint32_t* fullhash, //output FTNODE* result) { - FTNODE XMALLOC(new_node); + FTNODE XCALLOC(new_node); PAIR dependent_pairs[num_dependent_nodes]; enum cachetable_dirty dependent_dirty_bits[num_dependent_nodes]; for (uint32_t i = 0; i < num_dependent_nodes; i++) { @@ -128,18 +129,18 @@ cachetable_put_empty_node_with_dep_nodes( } toku_cachetable_put_with_dep_pairs( - h->cf, + ft->cf, ftnode_get_key_and_fullhash, new_node, make_pair_attr(sizeof(FTNODE)), - get_write_callbacks_for_node(h), - h, + get_write_callbacks_for_node(ft), + ft, num_dependent_nodes, dependent_pairs, dependent_dirty_bits, - name, + blocknum, fullhash, - toku_node_save_ct_pair); + toku_ftnode_save_ct_pair); *result = new_node; } @@ -153,13 +154,13 @@ create_new_ftnode_with_dep_nodes( FTNODE* dependent_nodes) { uint32_t fullhash = 0; - BLOCKNUM name; + BLOCKNUM blocknum; cachetable_put_empty_node_with_dep_nodes( ft, num_dependent_nodes, dependent_nodes, - &name, + &blocknum, &fullhash, result); @@ -170,7 +171,7 @@ create_new_ftnode_with_dep_nodes( toku_initialize_empty_ftnode( *result, - name, + blocknum, height, n_children, ft->h->layout_version, @@ -207,8 +208,8 @@ toku_pin_ftnode_for_query( uint32_t fullhash, UNLOCKERS unlockers, ANCESTORS ancestors, - const PIVOT_BOUNDS bounds, - FTNODE_FETCH_EXTRA bfe, + const pivot_bounds &bounds, + ftnode_fetch_extra *bfe, bool apply_ancestor_messages, // this bool is probably temporary, for #3972, once we know how range query estimates work, will revisit this FTNODE *node_p, bool* msgs_applied) @@ -318,10 +319,10 @@ exit: void toku_pin_ftnode_with_dep_nodes( - FT h, + FT ft, BLOCKNUM blocknum, uint32_t fullhash, - FTNODE_FETCH_EXTRA bfe, + ftnode_fetch_extra *bfe, pair_lock_type lock_type, uint32_t num_dependent_nodes, FTNODE *dependent_nodes, @@ -337,12 +338,12 @@ toku_pin_ftnode_with_dep_nodes( } int r = toku_cachetable_get_and_pin_with_dep_pairs( - h->cf, + ft->cf, blocknum, fullhash, &node_v, NULL, - get_write_callbacks_for_node(h), + get_write_callbacks_for_node(ft), toku_ftnode_fetch_callback, toku_ftnode_pf_req_callback, toku_ftnode_pf_callback, @@ -355,7 +356,7 @@ toku_pin_ftnode_with_dep_nodes( invariant_zero(r); FTNODE node = (FTNODE) node_v; if (lock_type != PL_READ && node->height > 0 && move_messages) { - 
toku_move_ftnode_messages_to_stale(h, node); + toku_move_ftnode_messages_to_stale(ft, node); } *node_p = node; } @@ -363,7 +364,7 @@ toku_pin_ftnode_with_dep_nodes( void toku_pin_ftnode(FT ft, BLOCKNUM blocknum, uint32_t fullhash, - FTNODE_FETCH_EXTRA bfe, + ftnode_fetch_extra *bfe, pair_lock_type lock_type, FTNODE *node_p, bool move_messages) { @@ -403,3 +404,25 @@ toku_unpin_ftnode_read_only(FT ft, FTNODE node) ); assert(r==0); } + +void toku_ftnode_swap_pair_values(FTNODE a, FTNODE b) +// Effect: Swap the blocknum, fullhash, and PAIR for for a and b +// Requires: Both nodes are pinned +{ + BLOCKNUM tmp_blocknum = a->blocknum; + uint32_t tmp_fullhash = a->fullhash; + PAIR tmp_pair = a->ct_pair; + + a->blocknum = b->blocknum; + a->fullhash = b->fullhash; + a->ct_pair = b->ct_pair; + + b->blocknum = tmp_blocknum; + b->fullhash = tmp_fullhash; + b->ct_pair = tmp_pair; + + // A and B swapped pair pointers, but we still have to swap + // the actual pair values (ie: the FTNODEs they represent) + // in the cachetable. + toku_cachetable_swap_pair_values(a->ct_pair, b->ct_pair); +} diff --git a/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.h b/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.h index 9a56f4ff220..72056e4d58c 100644 --- a/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.h +++ b/storage/tokudb/ft-index/ft/ft-cachetable-wrappers.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef FT_CACHETABLE_WRAPPERS_H -#define FT_CACHETABLE_WRAPPERS_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,11 +87,14 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <fttypes.h> -#include "cachetable.h" +#include "ft/cachetable/cachetable.h" +#include "ft/ft-internal.h" +#include "ft/node.h" /** * Put an empty node (that is, no fields filled) into the cachetable. 
@@ -102,7 +103,7 @@ PATENT RIGHTS GRANT: */ void cachetable_put_empty_node_with_dep_nodes( - FT h, + FT ft, uint32_t num_dependent_nodes, FTNODE* dependent_nodes, BLOCKNUM* name, //output @@ -117,7 +118,7 @@ cachetable_put_empty_node_with_dep_nodes( */ void create_new_ftnode_with_dep_nodes( - FT h, + FT ft, FTNODE *result, int height, int n_children, @@ -146,8 +147,8 @@ toku_pin_ftnode_for_query( uint32_t fullhash, UNLOCKERS unlockers, ANCESTORS ancestors, - const PIVOT_BOUNDS pbounds, - FTNODE_FETCH_EXTRA bfe, + const pivot_bounds &bounds, + ftnode_fetch_extra *bfe, bool apply_ancestor_messages, // this bool is probably temporary, for #3972, once we know how range query estimates work, will revisit this FTNODE *node_p, bool* msgs_applied @@ -155,10 +156,10 @@ toku_pin_ftnode_for_query( // Pins an ftnode without dependent pairs void toku_pin_ftnode( - FT h, + FT ft, BLOCKNUM blocknum, uint32_t fullhash, - FTNODE_FETCH_EXTRA bfe, + ftnode_fetch_extra *bfe, pair_lock_type lock_type, FTNODE *node_p, bool move_messages @@ -167,10 +168,10 @@ void toku_pin_ftnode( // Pins an ftnode with dependent pairs // Unlike toku_pin_ftnode_for_query, this function blocks until the node is pinned. void toku_pin_ftnode_with_dep_nodes( - FT h, + FT ft, BLOCKNUM blocknum, uint32_t fullhash, - FTNODE_FETCH_EXTRA bfe, + ftnode_fetch_extra *bfe, pair_lock_type lock_type, uint32_t num_dependent_nodes, FTNODE *dependent_nodes, @@ -187,7 +188,8 @@ int toku_maybe_pin_ftnode_clean(FT ft, BLOCKNUM blocknum, uint32_t fullhash, pai /** * Effect: Unpin an ftnode. */ -void toku_unpin_ftnode(FT h, FTNODE node); +void toku_unpin_ftnode(FT ft, FTNODE node); void toku_unpin_ftnode_read_only(FT ft, FTNODE node); -#endif +// Effect: Swaps pair values of two pinned nodes +void toku_ftnode_swap_pair_values(FTNODE nodea, FTNODE nodeb); diff --git a/storage/tokudb/ft-index/ft/ft-flusher-internal.h b/storage/tokudb/ft-index/ft/ft-flusher-internal.h index 512f5ffd27d..f26b2d56ef5 100644 --- a/storage/tokudb/ft-index/ft/ft-flusher-internal.h +++ b/storage/tokudb/ft-index/ft/ft-flusher-internal.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef FT_FLUSHER_INTERNAL_H -#define FT_FLUSHER_INTERNAL_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,11 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
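Throughout this merge the hand-written include guards (the #ifndef/#define/#endif pairs such as FT_FLUSHER_INTERNAL_H) are replaced by #pragma once, as in the header above. Both idioms prevent double inclusion; the pragma simply removes the need to keep a unique guard macro in sync with the file name. Side by side, for an illustrative header:

// Classic guard (what these headers used before this merge):
#ifndef EXAMPLE_HEADER_H
#define EXAMPLE_HEADER_H
// ... declarations ...
#endif  // EXAMPLE_HEADER_H

// Equivalent guard with the pragma (what the merge switches to):
#pragma once
// ... declarations ...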
-#include <fttypes.h> - #define flt_flush_before_applying_inbox 1 #define flt_flush_before_child_pin 2 #define ft_flush_aflter_child_pin 3 @@ -115,7 +113,7 @@ typedef struct flusher_advice FLUSHER_ADVICE; * Cleaner thread merging leaf nodes: follow down to a key * Hot optimize table: follow down to the right of a key */ -typedef int (*FA_PICK_CHILD)(FT h, FTNODE parent, void* extra); +typedef int (*FA_PICK_CHILD)(FT ft, FTNODE parent, void* extra); /** * Decide whether to call `toku_ft_flush_some_child` on the child if it is @@ -139,7 +137,7 @@ typedef bool (*FA_SHOULD_RECURSIVELY_FLUSH)(FTNODE child, void* extra); * Hot optimize table: just do the merge */ typedef void (*FA_MAYBE_MERGE_CHILD)(struct flusher_advice *fa, - FT h, + FT ft, FTNODE parent, int childnum, FTNODE child, @@ -172,7 +170,7 @@ typedef void (*FA_UPDATE_STATUS)(FTNODE child, int dirtied, void* extra); * by `ft_split_child`. If -1 is returned, `ft_split_child` defaults to * the old behavior. */ -typedef int (*FA_PICK_CHILD_AFTER_SPLIT)(FT h, +typedef int (*FA_PICK_CHILD_AFTER_SPLIT)(FT ft, FTNODE node, int childnuma, int childnumb, @@ -223,18 +221,16 @@ dont_destroy_basement_nodes(void* extra); void default_merge_child(struct flusher_advice *fa, - FT h, + FT ft, FTNODE parent, int childnum, FTNODE child, void* extra); int -default_pick_child_after_split(FT h, +default_pick_child_after_split(FT ft, FTNODE parent, int childnuma, int childnumb, void *extra); - -#endif // End of header guardian. diff --git a/storage/tokudb/ft-index/ft/ft-flusher.cc b/storage/tokudb/ft-index/ft/ft-flusher.cc index 0fe556aec0f..4db92fa9d2b 100644 --- a/storage/tokudb/ft-index/ft/ft-flusher.cc +++ b/storage/tokudb/ft-index/ft/ft-flusher.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,22 +89,25 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <ft-internal.h> -#include <ft-flusher.h> -#include <ft-flusher-internal.h> -#include <ft-cachetable-wrappers.h> -#include <ft.h> -#include <toku_assert.h> -#include <portability/toku_atomic.h> -#include <util/status.h> -#include <util/context.h> +#include "ft/ft.h" +#include "ft/ft-cachetable-wrappers.h" +#include "ft/ft-internal.h" +#include "ft/ft-flusher.h" +#include "ft/ft-flusher-internal.h" +#include "ft/node.h" +#include "ft/serialize/block_table.h" +#include "ft/serialize/ft_node-serialize.h" +#include "portability/toku_assert.h" +#include "portability/toku_atomic.h" +#include "util/status.h" +#include "util/context.h" /* Status is intended for display to humans to help understand system behavior. * It does not need to be perfectly thread-safe. 
*/ static FT_FLUSHER_STATUS_S ft_flusher_status; -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ft_flusher_status, k, c, t, "ft flusher: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ft_flusher_status, k, c, t, "ft flusher: " l, inc) #define STATUS_VALUE(x) ft_flusher_status.status[x].value.num void toku_ft_flusher_status_init(void) { @@ -179,25 +182,21 @@ static int find_heaviest_child(FTNODE node) { int max_child = 0; - int max_weight = toku_bnc_nbytesinbuf(BNC(node, 0)) + BP_WORKDONE(node, 0); - int i; - - if (0) printf("%s:%d weights: %d", __FILE__, __LINE__, max_weight); - paranoid_invariant(node->n_children>0); - for (i=1; i<node->n_children; i++) { -#ifdef TOKU_DEBUG_PARANOID - if (BP_WORKDONE(node,i)) { - assert(toku_bnc_nbytesinbuf(BNC(node,i)) > 0); + uint64_t max_weight = toku_bnc_nbytesinbuf(BNC(node, 0)) + BP_WORKDONE(node, 0); + + invariant(node->n_children > 0); + for (int i = 1; i < node->n_children; i++) { + uint64_t bytes_in_buf = toku_bnc_nbytesinbuf(BNC(node, i)); + uint64_t workdone = BP_WORKDONE(node, i); + if (workdone > 0) { + invariant(bytes_in_buf > 0); } -#endif - int this_weight = toku_bnc_nbytesinbuf(BNC(node,i)) + BP_WORKDONE(node,i);; - if (0) printf(" %d", this_weight); + uint64_t this_weight = bytes_in_buf + workdone; if (max_weight < this_weight) { max_child = i; max_weight = this_weight; } } - if (0) printf("\n"); return max_child; } @@ -235,7 +234,7 @@ update_flush_status(FTNODE child, int cascades) { } static void -maybe_destroy_child_blbs(FTNODE node, FTNODE child, FT h) +maybe_destroy_child_blbs(FTNODE node, FTNODE child, FT ft) { // If the node is already fully in memory, as in upgrade, we don't // need to destroy the basement nodes because they are all equally @@ -247,7 +246,7 @@ maybe_destroy_child_blbs(FTNODE node, FTNODE child, FT h) if (BP_STATE(child, i) == PT_AVAIL && node->max_msn_applied_to_node_on_disk.msn < BLB_MAX_MSN_APPLIED(child, i).msn) { - toku_evict_bn_from_memory(child, i, h); + toku_evict_bn_from_memory(child, i, ft); } } } @@ -255,14 +254,14 @@ maybe_destroy_child_blbs(FTNODE node, FTNODE child, FT h) static void ft_merge_child( - FT h, + FT ft, FTNODE node, int childnum_to_merge, bool *did_react, struct flusher_advice *fa); static int -pick_heaviest_child(FT UU(h), +pick_heaviest_child(FT UU(ft), FTNODE parent, void* UU(extra)) { @@ -307,11 +306,11 @@ static bool recurse_if_child_is_gorged(FTNODE child, void* extra) { struct flush_status_update_extra *fste = (flush_status_update_extra *)extra; - return toku_ft_nonleaf_is_gorged(child, fste->nodesize); + return toku_ftnode_nonleaf_is_gorged(child, fste->nodesize); } int -default_pick_child_after_split(FT UU(h), +default_pick_child_after_split(FT UU(ft), FTNODE UU(parent), int UU(childnuma), int UU(childnumb), @@ -322,7 +321,7 @@ default_pick_child_after_split(FT UU(h), void default_merge_child(struct flusher_advice *fa, - FT h, + FT ft, FTNODE parent, int childnum, FTNODE child, @@ -334,13 +333,13 @@ default_merge_child(struct flusher_advice *fa, // we are just going to unpin child and // let ft_merge_child pin it again // - toku_unpin_ftnode(h, child); + toku_unpin_ftnode(ft, child); // // // it is responsibility of ft_merge_child to unlock parent // bool did_react; - ft_merge_child(h, parent, childnum, &did_react, fa); + ft_merge_child(ft, parent, childnum, &did_react, fa); } void @@ -397,7 +396,7 @@ struct ctm_extra { }; static int -ctm_pick_child(FT h, +ctm_pick_child(FT ft, FTNODE parent, void* extra) { @@ -405,13 +404,8 @@ ctm_pick_child(FT h, int 
childnum; if (parent->height == 1 && ctme->is_last_child) { childnum = parent->n_children - 1; - } - else { - childnum = toku_ftnode_which_child( - parent, - &ctme->target_key, - &h->cmp_descriptor, - h->compare_fun); + } else { + childnum = toku_ftnode_which_child(parent, &ctme->target_key, ft->cmp); } return childnum; } @@ -428,7 +422,7 @@ ctm_update_status( static void ctm_maybe_merge_child(struct flusher_advice *fa, - FT h, + FT ft, FTNODE parent, int childnum, FTNODE child, @@ -437,19 +431,19 @@ ctm_maybe_merge_child(struct flusher_advice *fa, if (child->height == 0) { (void) toku_sync_fetch_and_add(&STATUS_VALUE(FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_COMPLETED), 1); } - default_merge_child(fa, h, parent, childnum, child, extra); + default_merge_child(fa, ft, parent, childnum, child, extra); } static void ct_maybe_merge_child(struct flusher_advice *fa, - FT h, + FT ft, FTNODE parent, int childnum, FTNODE child, void* extra) { if (child->height > 0) { - default_merge_child(fa, h, parent, childnum, child, extra); + default_merge_child(fa, ft, parent, childnum, child, extra); } else { struct ctm_extra ctme; @@ -471,8 +465,7 @@ ct_maybe_merge_child(struct flusher_advice *fa, ctme.is_last_child = false; pivot_to_save = childnum; } - const DBT *pivot = &parent->childkeys[pivot_to_save]; - toku_clone_dbt(&ctme.target_key, *pivot); + toku_clone_dbt(&ctme.target_key, parent->pivotkeys.get_pivot(pivot_to_save)); // at this point, ctme is properly setup, now we can do the merge struct flusher_advice new_fa; @@ -486,24 +479,24 @@ ct_maybe_merge_child(struct flusher_advice *fa, default_pick_child_after_split, &ctme); - toku_unpin_ftnode(h, parent); - toku_unpin_ftnode(h, child); + toku_unpin_ftnode(ft, parent); + toku_unpin_ftnode(ft, child); FTNODE root_node = NULL; { uint32_t fullhash; CACHEKEY root; - toku_calculate_root_offset_pointer(h, &root, &fullhash); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, h); - toku_pin_ftnode(h, root, fullhash, &bfe, PL_WRITE_EXPENSIVE, &root_node, true); - toku_assert_entire_node_in_memory(root_node); + toku_calculate_root_offset_pointer(ft, &root, &fullhash); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); + toku_pin_ftnode(ft, root, fullhash, &bfe, PL_WRITE_EXPENSIVE, &root_node, true); + toku_ftnode_assert_fully_in_memory(root_node); } (void) toku_sync_fetch_and_add(&STATUS_VALUE(FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_STARTED), 1); (void) toku_sync_fetch_and_add(&STATUS_VALUE(FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_RUNNING), 1); - toku_ft_flush_some_child(h, root_node, &new_fa); + toku_ft_flush_some_child(ft, root_node, &new_fa); (void) toku_sync_fetch_and_sub(&STATUS_VALUE(FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_RUNNING), 1); @@ -545,13 +538,12 @@ ct_flusher_advice_init(struct flusher_advice *fa, struct flush_status_update_ext // a leaf node that is not entirely in memory. If so, then // we cannot be sure if the node is reactive. 
// -static bool may_node_be_reactive(FT ft, FTNODE node) +static bool ft_ftnode_may_be_reactive(FT ft, FTNODE node) { if (node->height == 0) { return true; - } - else { - return (get_nonleaf_reactivity(node, ft->h->fanout) != RE_STABLE); + } else { + return toku_ftnode_get_nonleaf_reactivity(node, ft->h->fanout) != RE_STABLE; } } @@ -565,6 +557,7 @@ static bool may_node_be_reactive(FT ft, FTNODE node) */ static void handle_split_of_child( + FT ft, FTNODE node, int childnum, FTNODE childa, @@ -575,40 +568,49 @@ handle_split_of_child( paranoid_invariant(node->height>0); paranoid_invariant(0 <= childnum); paranoid_invariant(childnum < node->n_children); - toku_assert_entire_node_in_memory(node); - toku_assert_entire_node_in_memory(childa); - toku_assert_entire_node_in_memory(childb); + toku_ftnode_assert_fully_in_memory(node); + toku_ftnode_assert_fully_in_memory(childa); + toku_ftnode_assert_fully_in_memory(childb); NONLEAF_CHILDINFO old_bnc = BNC(node, childnum); paranoid_invariant(toku_bnc_nbytesinbuf(old_bnc)==0); - int cnum; WHEN_NOT_GCOV( - if (toku_ft_debug_mode) { - int i; - printf("%s:%d Child %d splitting on %s\n", __FILE__, __LINE__, childnum, (char*)splitk->data); - printf("%s:%d oldsplitkeys:", __FILE__, __LINE__); - for(i=0; i<node->n_children-1; i++) printf(" %s", (char *) node->childkeys[i].data); - printf("\n"); - } - ) + if (toku_ft_debug_mode) { + printf("%s:%d Child %d splitting on %s\n", __FILE__, __LINE__, childnum, (char*)splitk->data); + printf("%s:%d oldsplitkeys:", __FILE__, __LINE__); + for(int i = 0; i < node->n_children - 1; i++) printf(" %s", (char *) node->pivotkeys.get_pivot(i).data); + printf("\n"); + } + ) node->dirty = 1; XREALLOC_N(node->n_children+1, node->bp); - XREALLOC_N(node->n_children, node->childkeys); // Slide the children over. // suppose n_children is 10 and childnum is 5, meaning node->childnum[5] just got split // this moves node->bp[6] through node->bp[9] over to // node->bp[7] through node->bp[10] - for (cnum=node->n_children; cnum>childnum+1; cnum--) { + for (int cnum=node->n_children; cnum>childnum+1; cnum--) { node->bp[cnum] = node->bp[cnum-1]; } memset(&node->bp[childnum+1],0,sizeof(node->bp[0])); node->n_children++; - paranoid_invariant(BP_BLOCKNUM(node, childnum).b==childa->thisnodename.b); // use the same child + paranoid_invariant(BP_BLOCKNUM(node, childnum).b==childa->blocknum.b); // use the same child + + // We never set the rightmost blocknum to be the root. + // Instead, we wait for the root to split and let promotion initialize the rightmost + // blocknum to be the first non-root leaf node on the right extreme to recieve an insert. + invariant(ft->h->root_blocknum.b != ft->rightmost_blocknum.b); + if (childa->blocknum.b == ft->rightmost_blocknum.b) { + // The rightmost leaf (a) split into (a) and (b). We want (b) to swap pair values + // with (a), now that it is the new rightmost leaf. This keeps the rightmost blocknum + // constant, the same the way we keep the root blocknum constant. 
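The comment above states the invariant this hunk preserves: the blocknum of the rightmost leaf, like the root's blocknum, stays constant, so when the rightmost leaf splits, the physical identities (blocknum, fullhash, cachetable PAIR) of the two halves are swapped and only the logical contents move. A rough sketch of such an identity swap, with a simplified stand-in struct; in TokuFT the real work is toku_ftnode_swap_pair_values plus the cachetable-level pair-value swap shown earlier in this diff:

#include <cstdint>

struct node_id {             // illustrative subset of an FTNODE's identity
    int64_t  blocknum;
    uint32_t fullhash;
    void    *ct_pair;        // cachetable pair handle
};

// Exchange the on-disk/cachetable identity of two pinned nodes so that
// whichever node is rightmost after the split keeps the stable blocknum.
static void swap_node_identity(node_id *a, node_id *b) {
    node_id tmp = *a;
    *a = *b;
    *b = tmp;
}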
+ toku_ftnode_swap_pair_values(childa, childb); + BP_BLOCKNUM(node, childnum) = childa->blocknum; + } - BP_BLOCKNUM(node, childnum+1) = childb->thisnodename; - BP_WORKDONE(node, childnum+1) = 0; + BP_BLOCKNUM(node, childnum+1) = childb->blocknum; + BP_WORKDONE(node, childnum+1) = 0; BP_STATE(node,childnum+1) = PT_AVAIL; NONLEAF_CHILDINFO new_bnc = toku_create_empty_nl(); @@ -620,29 +622,21 @@ handle_split_of_child( } set_BNC(node, childnum+1, new_bnc); - // Slide the keys over - { - for (cnum=node->n_children-2; cnum>childnum; cnum--) { - toku_copy_dbt(&node->childkeys[cnum], node->childkeys[cnum-1]); - } - //if (logger) assert((t->flags&TOKU_DB_DUPSORT)==0); // the setpivot is wrong for TOKU_DB_DUPSORT, so recovery will be broken. - toku_copy_dbt(&node->childkeys[childnum], *splitk); - node->totalchildkeylens += splitk->size; - } + // Insert the new split key , sliding the other keys over + node->pivotkeys.insert_at(splitk, childnum); WHEN_NOT_GCOV( - if (toku_ft_debug_mode) { - int i; - printf("%s:%d splitkeys:", __FILE__, __LINE__); - for(i=0; i<node->n_children-2; i++) printf(" %s", (char*)node->childkeys[i].data); - printf("\n"); - } - ) + if (toku_ft_debug_mode) { + printf("%s:%d splitkeys:", __FILE__, __LINE__); + for (int i = 0; i < node->n_children - 2; i++) printf(" %s", (char *) node->pivotkeys.get_pivot(i).data); + printf("\n"); + } + ) /* Keep pushing to the children, but not if the children would require a pushdown */ - toku_assert_entire_node_in_memory(node); - toku_assert_entire_node_in_memory(childa); - toku_assert_entire_node_in_memory(childb); + toku_ftnode_assert_fully_in_memory(node); + toku_ftnode_assert_fully_in_memory(childa); + toku_ftnode_assert_fully_in_memory(childb); VERIFY_NODE(t, node); VERIFY_NODE(t, childa); @@ -667,7 +661,7 @@ ftleaf_disk_size(FTNODE node) // Effect: get the disk size of a leafentry { paranoid_invariant(node->height == 0); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); uint64_t retval = 0; for (int i = 0; i < node->n_children; i++) { retval += BLB_DATA(node, i)->get_disk_size(); @@ -758,8 +752,8 @@ move_leafentries( static void ftnode_finalize_split(FTNODE node, FTNODE B, MSN max_msn_applied_to_node) { // Effect: Finalizes a split by updating some bits and dirtying both nodes - toku_assert_entire_node_in_memory(node); - toku_assert_entire_node_in_memory(B); + toku_ftnode_assert_fully_in_memory(node); + toku_ftnode_assert_fully_in_memory(B); verify_all_in_mempool(node); verify_all_in_mempool(B); @@ -775,7 +769,7 @@ static void ftnode_finalize_split(FTNODE node, FTNODE B, MSN max_msn_applied_to_ void ftleaf_split( - FT h, + FT ft, FTNODE node, FTNODE *nodea, FTNODE *nodeb, @@ -824,7 +818,7 @@ ftleaf_split( // So, we must call this before evaluating // those two values cachetable_put_empty_node_with_dep_nodes( - h, + ft, num_dependent_nodes, dependent_nodes, &name, @@ -838,7 +832,7 @@ ftleaf_split( paranoid_invariant(node->height==0); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); verify_all_in_mempool(node); MSN max_msn_applied_to_node = node->max_msn_applied_to_node_on_disk; @@ -880,13 +874,12 @@ ftleaf_split( name, 0, num_children_in_b, - h->h->layout_version, - h->h->flags); + ft->h->layout_version, + ft->h->flags); B->fullhash = fullhash; } else { B = *nodeb; - REALLOC_N(num_children_in_b-1, B->childkeys); REALLOC_N(num_children_in_b, B->bp); B->n_children = num_children_in_b; for (int i = 0; i < num_children_in_b; i++) { @@ -938,20 +931,10 @@ ftleaf_split( 
// the child index in the original node that corresponds to the // first node in the right node of the split - int base_index = num_left_bns - (split_on_boundary ? 0 : 1); - // make pivots in B - for (int i=0; i < num_children_in_b-1; i++) { - toku_copy_dbt(&B->childkeys[i], node->childkeys[i+base_index]); - B->totalchildkeylens += node->childkeys[i+base_index].size; - node->totalchildkeylens -= node->childkeys[i+base_index].size; - toku_init_dbt(&node->childkeys[i+base_index]); - } - if (split_on_boundary && num_left_bns < node->n_children) { - if (splitk) { - toku_copy_dbt(splitk, node->childkeys[num_left_bns - 1]); - } else { - toku_destroy_dbt(&node->childkeys[num_left_bns - 1]); - } + int split_idx = num_left_bns - (split_on_boundary ? 0 : 1); + node->pivotkeys.split_at(split_idx, &B->pivotkeys); + if (split_on_boundary && num_left_bns < node->n_children && splitk) { + toku_copyref_dbt(splitk, node->pivotkeys.get_pivot(num_left_bns - 1)); } else if (splitk) { bn_data* bd = BLB_DATA(node, num_left_bns - 1); uint32_t keylen; @@ -963,7 +946,6 @@ ftleaf_split( node->n_children = num_children_in_node; REALLOC_N(num_children_in_node, node->bp); - REALLOC_N(num_children_in_node-1, node->childkeys); } ftnode_finalize_split(node, B, max_msn_applied_to_node); @@ -973,7 +955,7 @@ ftleaf_split( void ft_nonleaf_split( - FT h, + FT ft, FTNODE node, FTNODE *nodea, FTNODE *nodeb, @@ -983,7 +965,7 @@ ft_nonleaf_split( { //VERIFY_NODE(t,node); STATUS_VALUE(FT_FLUSHER_SPLIT_NONLEAF)++; - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); int old_n_children = node->n_children; int n_children_in_a = old_n_children/2; int n_children_in_b = old_n_children-n_children_in_a; @@ -991,14 +973,12 @@ ft_nonleaf_split( FTNODE B; paranoid_invariant(node->height>0); paranoid_invariant(node->n_children>=2); // Otherwise, how do we split? We need at least two children to split. */ - create_new_ftnode_with_dep_nodes(h, &B, node->height, n_children_in_b, num_dependent_nodes, dependent_nodes); + create_new_ftnode_with_dep_nodes(ft, &B, node->height, n_children_in_b, num_dependent_nodes, dependent_nodes); { /* The first n_children_in_a go into node a. * That means that the first n_children_in_a-1 keys go into node a. * The splitter key is key number n_children_in_a */ - int i; - - for (i=n_children_in_a; i<old_n_children; i++) { + for (int i = n_children_in_a; i<old_n_children; i++) { int targchild = i-n_children_in_a; // TODO: Figure out better way to handle this // the problem is that create_new_ftnode_with_dep_nodes for B creates @@ -1010,26 +990,15 @@ ft_nonleaf_split( // now move the bp over B->bp[targchild] = node->bp[i]; memset(&node->bp[i], 0, sizeof(node->bp[0])); - - // Delete a child, removing the preceeding pivot key. 
The child number must be > 0 - { - paranoid_invariant(i>0); - if (i>n_children_in_a) { - toku_copy_dbt(&B->childkeys[targchild-1], node->childkeys[i-1]); - B->totalchildkeylens += node->childkeys[i-1].size; - node->totalchildkeylens -= node->childkeys[i-1].size; - toku_init_dbt(&node->childkeys[i-1]); - } - } } - node->n_children=n_children_in_a; + // the split key for our parent is the rightmost pivot key in node + node->pivotkeys.split_at(n_children_in_a, &B->pivotkeys); + toku_clone_dbt(splitk, node->pivotkeys.get_pivot(n_children_in_a - 1)); + node->pivotkeys.delete_at(n_children_in_a - 1); - toku_copy_dbt(splitk, node->childkeys[n_children_in_a-1]); - node->totalchildkeylens -= node->childkeys[n_children_in_a-1].size; - - REALLOC_N(n_children_in_a, node->bp); - REALLOC_N(n_children_in_a-1, node->childkeys); + node->n_children = n_children_in_a; + REALLOC_N(node->n_children, node->bp); } ftnode_finalize_split(node, B, max_msn_applied_to_node); @@ -1047,7 +1016,7 @@ ft_nonleaf_split( // static void ft_split_child( - FT h, + FT ft, FTNODE node, int childnum, FTNODE child, @@ -1066,12 +1035,12 @@ ft_split_child( dep_nodes[0] = node; dep_nodes[1] = child; if (child->height==0) { - ftleaf_split(h, child, &nodea, &nodeb, &splitk, true, split_mode, 2, dep_nodes); + ftleaf_split(ft, child, &nodea, &nodeb, &splitk, true, split_mode, 2, dep_nodes); } else { - ft_nonleaf_split(h, child, &nodea, &nodeb, &splitk, 2, dep_nodes); + ft_nonleaf_split(ft, child, &nodea, &nodeb, &splitk, 2, dep_nodes); } // printf("%s:%d child did split\n", __FILE__, __LINE__); - handle_split_of_child (node, childnum, nodea, nodeb, &splitk); + handle_split_of_child (ft, node, childnum, nodea, nodeb, &splitk); // for test call_flusher_thread_callback(flt_flush_during_split); @@ -1080,42 +1049,44 @@ ft_split_child( // now we need to unlock node, // and possibly continue // flushing one of the children - int picked_child = fa->pick_child_after_split(h, node, childnum, childnum + 1, fa->extra); - toku_unpin_ftnode(h, node); + int picked_child = fa->pick_child_after_split(ft, node, childnum, childnum + 1, fa->extra); + toku_unpin_ftnode(ft, node); if (picked_child == childnum || (picked_child < 0 && nodea->height > 0 && fa->should_recursively_flush(nodea, fa->extra))) { - toku_unpin_ftnode(h, nodeb); - toku_ft_flush_some_child(h, nodea, fa); + toku_unpin_ftnode(ft, nodeb); + toku_ft_flush_some_child(ft, nodea, fa); } else if (picked_child == childnum + 1 || (picked_child < 0 && nodeb->height > 0 && fa->should_recursively_flush(nodeb, fa->extra))) { - toku_unpin_ftnode(h, nodea); - toku_ft_flush_some_child(h, nodeb, fa); + toku_unpin_ftnode(ft, nodea); + toku_ft_flush_some_child(ft, nodeb, fa); } else { - toku_unpin_ftnode(h, nodea); - toku_unpin_ftnode(h, nodeb); + toku_unpin_ftnode(ft, nodea); + toku_unpin_ftnode(ft, nodeb); } + + toku_destroy_dbt(&splitk); } static void bring_node_fully_into_memory(FTNODE node, FT ft) { - if (!is_entire_node_in_memory(node)) { - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft); + if (!toku_ftnode_fully_in_memory(node)) { + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); toku_cachetable_pf_pinned_pair( node, toku_ftnode_pf_callback, &bfe, ft->cf, - node->thisnodename, - toku_cachetable_hash(ft->cf, node->thisnodename) + node->blocknum, + toku_cachetable_hash(ft->cf, node->blocknum) ); } } static void flush_this_child( - FT h, + FT ft, FTNODE node, FTNODE child, int childnum, @@ -1123,14 +1094,14 @@ flush_this_child( // Effect: Push everything in the CHILDNUMth buffer 
of node down into the child. { update_flush_status(child, 0); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); if (fa->should_destroy_basement_nodes(fa)) { - maybe_destroy_child_blbs(node, child, h); + maybe_destroy_child_blbs(node, child, ft); } - bring_node_fully_into_memory(child, h); - toku_assert_entire_node_in_memory(child); + bring_node_fully_into_memory(child, ft); + toku_ftnode_assert_fully_in_memory(child); paranoid_invariant(node->height>0); - paranoid_invariant(child->thisnodename.b!=0); + paranoid_invariant(child->blocknum.b!=0); // VERIFY_NODE does not work off client thread as of now //VERIFY_NODE(t, child); node->dirty = 1; @@ -1142,7 +1113,7 @@ flush_this_child( // now we have a bnc to flush to the child. pass down the parent's // oldest known referenced xid as we flush down to the child. - toku_bnc_flush_to_child(h, bnc, child, node->oldest_referenced_xid_known); + toku_bnc_flush_to_child(ft, bnc, child, node->oldest_referenced_xid_known); destroy_nonleaf_childinfo(bnc); } @@ -1150,8 +1121,8 @@ static void merge_leaf_nodes(FTNODE a, FTNODE b) { STATUS_VALUE(FT_FLUSHER_MERGE_LEAF)++; - toku_assert_entire_node_in_memory(a); - toku_assert_entire_node_in_memory(b); + toku_ftnode_assert_fully_in_memory(a); + toku_ftnode_assert_fully_in_memory(b); paranoid_invariant(a->height == 0); paranoid_invariant(b->height == 0); paranoid_invariant(a->n_children > 0); @@ -1173,52 +1144,47 @@ merge_leaf_nodes(FTNODE a, FTNODE b) // of a gets eliminated because we do not have a pivot to store for it (because it has no elements) const bool a_has_tail = a_last_bd->num_klpairs() > 0; - // move each basement node from b to a - // move the pivots, adding one of what used to be max(a) - // move the estimates int num_children = a->n_children + b->n_children; if (!a_has_tail) { - uint lastchild = a->n_children-1; + int lastchild = a->n_children - 1; BASEMENTNODE bn = BLB(a, lastchild); - { - // verify that last basement in a is empty, then destroy mempool - size_t used_space = a_last_bd->get_disk_size(); - invariant_zero(used_space); - } + + // verify that last basement in a is empty, then destroy mempool + size_t used_space = a_last_bd->get_disk_size(); + invariant_zero(used_space); destroy_basement_node(bn); - set_BNULL(a, a->n_children-1); + set_BNULL(a, lastchild); num_children--; - } - - //realloc pivots and basement nodes in a - REALLOC_N(num_children, a->bp); - REALLOC_N(num_children-1, a->childkeys); - - // fill in pivot for what used to be max of node 'a', if it is needed - if (a_has_tail) { + if (lastchild < a->pivotkeys.num_pivots()) { + a->pivotkeys.delete_at(lastchild); + } + } else { + // fill in pivot for what used to be max of node 'a', if it is needed uint32_t keylen; void *key; - int rr = a_last_bd->fetch_key_and_len(a_last_bd->num_klpairs() - 1, &keylen, &key); - invariant_zero(rr); - toku_memdup_dbt(&a->childkeys[a->n_children-1], key, keylen); - a->totalchildkeylens += keylen; + int r = a_last_bd->fetch_key_and_len(a_last_bd->num_klpairs() - 1, &keylen, &key); + invariant_zero(r); + DBT pivotkey; + toku_fill_dbt(&pivotkey, key, keylen); + a->pivotkeys.replace_at(&pivotkey, a->n_children - 1); } + // realloc basement nodes in `a' + REALLOC_N(num_children, a->bp); + + // move each basement node from b to a uint32_t offset = a_has_tail ? 
a->n_children : a->n_children - 1; for (int i = 0; i < b->n_children; i++) { - a->bp[i+offset] = b->bp[i]; - memset(&b->bp[i],0,sizeof(b->bp[0])); - if (i < (b->n_children-1)) { - toku_copy_dbt(&a->childkeys[i+offset], b->childkeys[i]); - toku_init_dbt(&b->childkeys[i]); - } + a->bp[i + offset] = b->bp[i]; + memset(&b->bp[i], 0, sizeof(b->bp[0])); } - a->totalchildkeylens += b->totalchildkeylens; - a->n_children = num_children; + + // append b's pivots to a's pivots + a->pivotkeys.append(b->pivotkeys); // now that all the data has been moved from b to a, we can destroy the data in b - // b can remain untouched, as it will be destroyed later - b->totalchildkeylens = 0; + a->n_children = num_children; + b->pivotkeys.destroy(); b->n_children = 0; } @@ -1242,7 +1208,7 @@ static void maybe_merge_pinned_leaf_nodes( FTNODE a, FTNODE b, - DBT *parent_splitk, + const DBT *parent_splitk, bool *did_merge, bool *did_rebalance, DBT *splitk, @@ -1255,7 +1221,7 @@ maybe_merge_pinned_leaf_nodes( { unsigned int sizea = toku_serialize_ftnode_size(a); unsigned int sizeb = toku_serialize_ftnode_size(b); - uint32_t num_leafentries = get_leaf_num_entries(a) + get_leaf_num_entries(b); + uint32_t num_leafentries = toku_ftnode_leaf_num_entries(a) + toku_ftnode_leaf_num_entries(b); if (num_leafentries > 1 && (sizea + sizeb)*4 > (nodesize*3)) { // the combined size is more than 3/4 of a node, so don't merge them. *did_merge = false; @@ -1266,7 +1232,6 @@ maybe_merge_pinned_leaf_nodes( return; } // one is less than 1/4 of a node, and together they are more than 3/4 of a node. - toku_destroy_dbt(parent_splitk); // We don't need the parent_splitk any more. If we need a splitk (if we don't merge) we'll malloc a new one. *did_rebalance = true; balance_leaf_nodes(a, b, splitk); } else { @@ -1274,7 +1239,6 @@ maybe_merge_pinned_leaf_nodes( *did_merge = true; *did_rebalance = false; toku_init_dbt(splitk); - toku_destroy_dbt(parent_splitk); // if we are merging, the splitk gets freed. 
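The size test above drives the leaf-merge decision: with more than one leafentry, a combined serialized size over 3/4 of a node means the pair is not merged (and, per the comment, is rebalanced when one side has shrunk below 1/4 of a node), while pairs that together fit comfortably are merged outright. A hedged sketch of that decision, keeping the threshold arithmetic in integer form as the original does; the names and the exact branch structure of the elided lines are illustrative:

enum class leaf_action { none, rebalance, merge };

// sizea/sizeb are serialized leaf sizes, nodesize is the target node size.
static leaf_action pick_leaf_action(unsigned sizea, unsigned sizeb,
                                    unsigned num_leafentries, unsigned nodesize) {
    if (num_leafentries > 1 && (sizea + sizeb) * 4 > nodesize * 3) {
        // Combined size is more than 3/4 of a node: too big to merge.
        if (sizea * 4 < nodesize || sizeb * 4 < nodesize) {
            // One side is under 1/4 of a node: shift entries between the two.
            return leaf_action::rebalance;
        }
        return leaf_action::none;
    }
    return leaf_action::merge;   // small enough to combine into one leaf
}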
merge_leaf_nodes(a, b); } } @@ -1288,28 +1252,20 @@ maybe_merge_pinned_nonleaf_nodes( bool *did_rebalance, DBT *splitk) { - toku_assert_entire_node_in_memory(a); - toku_assert_entire_node_in_memory(b); - paranoid_invariant(parent_splitk->data); + toku_ftnode_assert_fully_in_memory(a); + toku_ftnode_assert_fully_in_memory(b); + invariant_notnull(parent_splitk->data); + int old_n_children = a->n_children; int new_n_children = old_n_children + b->n_children; + XREALLOC_N(new_n_children, a->bp); - memcpy(a->bp + old_n_children, - b->bp, - b->n_children*sizeof(b->bp[0])); - memset(b->bp,0,b->n_children*sizeof(b->bp[0])); - - XREALLOC_N(new_n_children-1, a->childkeys); - toku_copy_dbt(&a->childkeys[old_n_children-1], *parent_splitk); - a->totalchildkeylens += parent_splitk->size; - for (int i = 0; i < b->n_children - 1; ++i) { - toku_copy_dbt(&a->childkeys[old_n_children + i], b->childkeys[i]); - a->totalchildkeylens += b->childkeys[i].size; - toku_init_dbt(&b->childkeys[i]); - } - a->n_children = new_n_children; + memcpy(a->bp + old_n_children, b->bp, b->n_children * sizeof(b->bp[0])); + memset(b->bp, 0, b->n_children * sizeof(b->bp[0])); - b->totalchildkeylens = 0; + a->pivotkeys.insert_at(parent_splitk, old_n_children - 1); + a->pivotkeys.append(b->pivotkeys); + a->n_children = new_n_children; b->n_children = 0; a->dirty = 1; @@ -1325,7 +1281,7 @@ maybe_merge_pinned_nonleaf_nodes( static void maybe_merge_pinned_nodes( FTNODE parent, - DBT *parent_splitk, + const DBT *parent_splitk, FTNODE a, FTNODE b, bool *did_merge, @@ -1353,9 +1309,9 @@ maybe_merge_pinned_nodes( { MSN msn_max; paranoid_invariant(a->height == b->height); - toku_assert_entire_node_in_memory(parent); - toku_assert_entire_node_in_memory(a); - toku_assert_entire_node_in_memory(b); + toku_ftnode_assert_fully_in_memory(parent); + toku_ftnode_assert_fully_in_memory(a); + toku_ftnode_assert_fully_in_memory(b); parent->dirty = 1; // just to make sure { MSN msna = a->max_msn_applied_to_node_on_disk; @@ -1376,13 +1332,9 @@ maybe_merge_pinned_nodes( } } -static void merge_remove_key_callback( - BLOCKNUM *bp, - bool for_checkpoint, - void *extra) -{ - FT h = (FT) extra; - toku_free_blocknum(h->blocktable, bp, h, for_checkpoint); +static void merge_remove_key_callback(BLOCKNUM *bp, bool for_checkpoint, void *extra) { + FT ft = (FT) extra; + ft->blocktable.free_blocknum(bp, ft, for_checkpoint); } // @@ -1391,7 +1343,7 @@ static void merge_remove_key_callback( // static void ft_merge_child( - FT h, + FT ft, FTNODE node, int childnum_to_merge, bool *did_react, @@ -1400,7 +1352,7 @@ ft_merge_child( // this function should not be called // if the child is not mergable paranoid_invariant(node->n_children > 1); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); int childnuma,childnumb; if (childnum_to_merge > 0) { @@ -1422,10 +1374,10 @@ ft_merge_child( FTNODE childa, childb; { - uint32_t childfullhash = compute_child_fullhash(h->cf, node, childnuma); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, h); - toku_pin_ftnode_with_dep_nodes(h, BP_BLOCKNUM(node, childnuma), childfullhash, &bfe, PL_WRITE_EXPENSIVE, 1, &node, &childa, true); + uint32_t childfullhash = compute_child_fullhash(ft->cf, node, childnuma); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); + toku_pin_ftnode_with_dep_nodes(ft, BP_BLOCKNUM(node, childnuma), childfullhash, &bfe, PL_WRITE_EXPENSIVE, 1, &node, &childa, true); } // for test call_flusher_thread_callback(flt_flush_before_pin_second_node_for_merge); @@ -1433,17 
+1385,17 @@ ft_merge_child( FTNODE dep_nodes[2]; dep_nodes[0] = node; dep_nodes[1] = childa; - uint32_t childfullhash = compute_child_fullhash(h->cf, node, childnumb); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, h); - toku_pin_ftnode_with_dep_nodes(h, BP_BLOCKNUM(node, childnumb), childfullhash, &bfe, PL_WRITE_EXPENSIVE, 2, dep_nodes, &childb, true); + uint32_t childfullhash = compute_child_fullhash(ft->cf, node, childnumb); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); + toku_pin_ftnode_with_dep_nodes(ft, BP_BLOCKNUM(node, childnumb), childfullhash, &bfe, PL_WRITE_EXPENSIVE, 2, dep_nodes, &childb, true); } if (toku_bnc_n_entries(BNC(node,childnuma))>0) { - flush_this_child(h, node, childa, childnuma, fa); + flush_this_child(ft, node, childa, childnuma, fa); } if (toku_bnc_n_entries(BNC(node,childnumb))>0) { - flush_this_child(h, node, childb, childnumb, fa); + flush_this_child(ft, node, childb, childnumb, fa); } // now we have both children pinned in main memory, and cachetable locked, @@ -1453,26 +1405,14 @@ ft_merge_child( { DBT splitk; toku_init_dbt(&splitk); - DBT *old_split_key = &node->childkeys[childnuma]; - unsigned int deleted_size = old_split_key->size; - maybe_merge_pinned_nodes(node, &node->childkeys[childnuma], childa, childb, &did_merge, &did_rebalance, &splitk, h->h->nodesize); - if (childa->height>0) { - for (int i=0; i+1<childa->n_children; i++) { - paranoid_invariant(childa->childkeys[i].data); - } - } + const DBT old_split_key = node->pivotkeys.get_pivot(childnuma); + maybe_merge_pinned_nodes(node, &old_split_key, childa, childb, &did_merge, &did_rebalance, &splitk, ft->h->nodesize); //toku_verify_estimates(t,childa); // the tree did react if a merge (did_merge) or rebalance (new spkit key) occurred *did_react = (bool)(did_merge || did_rebalance); - if (did_merge) { - paranoid_invariant(!splitk.data); - } else { - paranoid_invariant(splitk.data); - } - - node->totalchildkeylens -= deleted_size; // The key was free()'d inside the maybe_merge_pinned_nodes. if (did_merge) { + invariant_null(splitk.data); NONLEAF_CHILDINFO remaining_bnc = BNC(node, childnuma); NONLEAF_CHILDINFO merged_bnc = BNC(node, childnumb); for (unsigned int i = 0; i < (sizeof remaining_bnc->flow) / (sizeof remaining_bnc->flow[0]); ++i) { @@ -1485,11 +1425,16 @@ ft_merge_child( &node->bp[childnumb+1], (node->n_children-childnumb)*sizeof(node->bp[0])); REALLOC_N(node->n_children, node->bp); - memmove(&node->childkeys[childnuma], - &node->childkeys[childnuma+1], - (node->n_children-childnumb)*sizeof(node->childkeys[0])); - REALLOC_N(node->n_children-1, node->childkeys); - paranoid_invariant(BP_BLOCKNUM(node, childnuma).b == childa->thisnodename.b); + node->pivotkeys.delete_at(childnuma); + + // Handle a merge of the rightmost leaf node. + if (did_merge && childb->blocknum.b == ft->rightmost_blocknum.b) { + invariant(childb->blocknum.b != ft->h->root_blocknum.b); + toku_ftnode_swap_pair_values(childa, childb); + BP_BLOCKNUM(node, childnuma) = childa->blocknum; + } + + paranoid_invariant(BP_BLOCKNUM(node, childnuma).b == childa->blocknum.b); childa->dirty = 1; // just to make sure childb->dirty = 1; // just to make sure } else { @@ -1498,10 +1443,11 @@ ft_merge_child( // pretty far down the tree) // If we didn't merge the nodes, then we need the correct pivot. 
- toku_copy_dbt(&node->childkeys[childnuma], splitk); - node->totalchildkeylens += node->childkeys[childnuma].size; + invariant_notnull(splitk.data); + node->pivotkeys.replace_at(&splitk, childnuma); node->dirty = 1; } + toku_destroy_dbt(&splitk); } // // now we possibly flush the children @@ -1512,10 +1458,10 @@ ft_merge_child( // merge_remove_key_callback will free the blocknum int rrb = toku_cachetable_unpin_and_remove( - h->cf, + ft->cf, childb->ct_pair, merge_remove_key_callback, - h + ft ); assert_zero(rrb); @@ -1524,7 +1470,7 @@ ft_merge_child( // unlock the parent paranoid_invariant(node->dirty); - toku_unpin_ftnode(h, node); + toku_unpin_ftnode(ft, node); } else { // for test @@ -1532,14 +1478,14 @@ ft_merge_child( // unlock the parent paranoid_invariant(node->dirty); - toku_unpin_ftnode(h, node); - toku_unpin_ftnode(h, childb); + toku_unpin_ftnode(ft, node); + toku_unpin_ftnode(ft, childb); } if (childa->height > 0 && fa->should_recursively_flush(childa, fa->extra)) { - toku_ft_flush_some_child(h, childa, fa); + toku_ft_flush_some_child(ft, childa, fa); } else { - toku_unpin_ftnode(h, childa); + toku_unpin_ftnode(ft, childa); } } @@ -1556,7 +1502,7 @@ void toku_ft_flush_some_child(FT ft, FTNODE parent, struct flusher_advice *fa) int dirtied = 0; NONLEAF_CHILDINFO bnc = NULL; paranoid_invariant(parent->height>0); - toku_assert_entire_node_in_memory(parent); + toku_ftnode_assert_fully_in_memory(parent); TXNID parent_oldest_referenced_xid_known = parent->oldest_referenced_xid_known; // pick the child we want to flush to @@ -1567,13 +1513,13 @@ void toku_ft_flush_some_child(FT ft, FTNODE parent, struct flusher_advice *fa) // get the child into memory BLOCKNUM targetchild = BP_BLOCKNUM(parent, childnum); - toku_verify_blocknum_allocated(ft->blocktable, targetchild); + ft->blocktable.verify_blocknum_allocated(targetchild); uint32_t childfullhash = compute_child_fullhash(ft->cf, parent, childnum); FTNODE child; - struct ftnode_fetch_extra bfe; + ftnode_fetch_extra bfe; // Note that we don't read the entire node into memory yet. // The idea is let's try to do the minimum work before releasing the parent lock - fill_bfe_for_min_read(&bfe, ft); + bfe.create_for_min_read(ft); toku_pin_ftnode_with_dep_nodes(ft, targetchild, childfullhash, &bfe, PL_WRITE_EXPENSIVE, 1, &parent, &child, true); // for test @@ -1587,9 +1533,9 @@ void toku_ft_flush_some_child(FT ft, FTNODE parent, struct flusher_advice *fa) // Let's do a quick check to see if the child may be reactive // If the child cannot be reactive, then we can safely unlock // the parent before finishing reading in the entire child node. 
- bool may_child_be_reactive = may_node_be_reactive(ft, child); + bool may_child_be_reactive = ft_ftnode_may_be_reactive(ft, child); - paranoid_invariant(child->thisnodename.b!=0); + paranoid_invariant(child->blocknum.b!=0); // only do the following work if there is a flush to perform if (toku_bnc_n_entries(BNC(parent, childnum)) > 0 || parent->height == 1) { @@ -1628,7 +1574,7 @@ void toku_ft_flush_some_child(FT ft, FTNODE parent, struct flusher_advice *fa) // we wont be splitting/merging child // and we have already replaced the bnc // for the root with a fresh one - enum reactivity child_re = get_node_reactivity(ft, child); + enum reactivity child_re = toku_ftnode_get_reactivity(ft, child); if (parent && child_re == RE_STABLE) { toku_unpin_ftnode(ft, parent); parent = NULL; @@ -1658,7 +1604,7 @@ void toku_ft_flush_some_child(FT ft, FTNODE parent, struct flusher_advice *fa) // let's get the reactivity of the child again, // it is possible that the flush got rid of some values // and now the parent is no longer reactive - child_re = get_node_reactivity(ft, child); + child_re = toku_ftnode_get_reactivity(ft, child); // if the parent has been unpinned above, then // this is our only option, even if the child is not stable // if the child is not stable, we'll handle it the next @@ -1703,6 +1649,78 @@ void toku_ft_flush_some_child(FT ft, FTNODE parent, struct flusher_advice *fa) } } +void toku_bnc_flush_to_child(FT ft, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID parent_oldest_referenced_xid_known) { + paranoid_invariant(bnc); + + TOKULOGGER logger = toku_cachefile_logger(ft->cf); + TXN_MANAGER txn_manager = logger != nullptr ? toku_logger_get_txn_manager(logger) : nullptr; + TXNID oldest_referenced_xid_for_simple_gc = TXNID_NONE; + + txn_manager_state txn_state_for_gc(txn_manager); + bool do_garbage_collection = child->height == 0 && txn_manager != nullptr; + if (do_garbage_collection) { + txn_state_for_gc.init(); + oldest_referenced_xid_for_simple_gc = toku_txn_manager_get_oldest_referenced_xid_estimate(txn_manager); + } + txn_gc_info gc_info(&txn_state_for_gc, + oldest_referenced_xid_for_simple_gc, + child->oldest_referenced_xid_known, + true); + struct flush_msg_fn { + FT ft; + FTNODE child; + NONLEAF_CHILDINFO bnc; + txn_gc_info *gc_info; + + STAT64INFO_S stats_delta; + size_t remaining_memsize = bnc->msg_buffer.buffer_size_in_use(); + + flush_msg_fn(FT t, FTNODE n, NONLEAF_CHILDINFO nl, txn_gc_info *g) : + ft(t), child(n), bnc(nl), gc_info(g), remaining_memsize(bnc->msg_buffer.buffer_size_in_use()) { + stats_delta = { 0, 0 }; + } + int operator()(const ft_msg &msg, bool is_fresh) { + size_t flow_deltas[] = { 0, 0 }; + size_t memsize_in_buffer = message_buffer::msg_memsize_in_buffer(msg); + if (remaining_memsize <= bnc->flow[0]) { + // this message is in the current checkpoint's worth of + // the end of the message buffer + flow_deltas[0] = memsize_in_buffer; + } else if (remaining_memsize <= bnc->flow[0] + bnc->flow[1]) { + // this message is in the last checkpoint's worth of the + // end of the message buffer + flow_deltas[1] = memsize_in_buffer; + } + toku_ftnode_put_msg( + ft->cmp, + ft->update_fun, + child, + -1, + msg, + is_fresh, + gc_info, + flow_deltas, + &stats_delta + ); + remaining_memsize -= memsize_in_buffer; + return 0; + } + } flush_fn(ft, child, bnc, &gc_info); + bnc->msg_buffer.iterate(flush_fn); + + child->oldest_referenced_xid_known = parent_oldest_referenced_xid_known; + + invariant(flush_fn.remaining_memsize == 0); + if (flush_fn.stats_delta.numbytes || 
flush_fn.stats_delta.numrows) { + toku_ft_update_stats(&ft->in_memory_stats, flush_fn.stats_delta); + } + if (do_garbage_collection) { + size_t buffsize = bnc->msg_buffer.buffer_size_in_use(); + // may be misleading if there's a broadcast message in there + toku_ft_status_note_msg_bytes_out(buffsize); + } +} + static void update_cleaner_status( FTNODE node, @@ -1820,11 +1838,11 @@ toku_ftnode_cleaner_callback( void *extraargs) { FTNODE node = (FTNODE) ftnode_pv; - invariant(node->thisnodename.b == blocknum.b); + invariant(node->blocknum.b == blocknum.b); invariant(node->fullhash == fullhash); invariant(node->height > 0); // we should never pick a leaf node (for now at least) - FT h = (FT) extraargs; - bring_node_fully_into_memory(node, h); + FT ft = (FT) extraargs; + bring_node_fully_into_memory(node, ft); int childnum = find_heaviest_child(node); update_cleaner_status(node, childnum); @@ -1832,16 +1850,16 @@ toku_ftnode_cleaner_callback( if (toku_bnc_nbytesinbuf(BNC(node, childnum)) > 0) { struct flusher_advice fa; struct flush_status_update_extra fste; - ct_flusher_advice_init(&fa, &fste, h->h->nodesize); - toku_ft_flush_some_child(h, node, &fa); + ct_flusher_advice_init(&fa, &fste, ft->h->nodesize); + toku_ft_flush_some_child(ft, node, &fa); } else { - toku_unpin_ftnode(h, node); + toku_unpin_ftnode(ft, node); } return 0; } struct flusher_extra { - FT h; + FT ft; FTNODE node; NONLEAF_CHILDINFO bnc; TXNID parent_oldest_referenced_xid_known; @@ -1866,12 +1884,12 @@ static void flush_node_fun(void *fe_v) // destroyed its basement nodes if necessary, so we now need to either // read them back in, or just do the regular partial fetch. If we // don't, that means fe->node is a parent, so we need to do this anyway. - bring_node_fully_into_memory(fe->node,fe->h); + bring_node_fully_into_memory(fe->node,fe->ft); fe->node->dirty = 1; struct flusher_advice fa; struct flush_status_update_extra fste; - flt_flusher_advice_init(&fa, &fste, fe->h->h->nodesize); + flt_flusher_advice_init(&fa, &fste, fe->ft->h->nodesize); if (fe->bnc) { // In this case, we have a bnc to flush to a node @@ -1880,7 +1898,7 @@ static void flush_node_fun(void *fe_v) call_flusher_thread_callback(flt_flush_before_applying_inbox); toku_bnc_flush_to_child( - fe->h, + fe->ft, fe->bnc, fe->node, fe->parent_oldest_referenced_xid_known @@ -1891,11 +1909,11 @@ static void flush_node_fun(void *fe_v) // If so, call toku_ft_flush_some_child on the node (because this flush intends to // pass a meaningful oldest referenced xid for simple garbage collection), and it is the // responsibility of the flush to unlock the node. otherwise, we unlock it here. - if (fe->node->height > 0 && toku_ft_nonleaf_is_gorged(fe->node, fe->h->h->nodesize)) { - toku_ft_flush_some_child(fe->h, fe->node, &fa); + if (fe->node->height > 0 && toku_ftnode_nonleaf_is_gorged(fe->node, fe->ft->h->nodesize)) { + toku_ft_flush_some_child(fe->ft, fe->node, &fa); } else { - toku_unpin_ftnode(fe->h,fe->node); + toku_unpin_ftnode(fe->ft,fe->node); } } else { @@ -1903,25 +1921,25 @@ static void flush_node_fun(void *fe_v) // bnc, which means we are tasked with flushing some // buffer in the node. 
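toku_bnc_flush_to_child above drains a buffer with a local functor: flush_msg_fn captures the tree, child and GC state, its operator() is invoked once per buffered message by msg_buffer.iterate, and it classifies each message into a checkpoint "flow" bucket by how much of the buffer remains before applying it and accumulating a stats delta. A stripped-down sketch of that iterate-with-a-functor shape; the message type, buffer and apply step here are placeholders, not the TokuFT ones:

#include <cstddef>
#include <cstdint>
#include <vector>

struct msg { size_t memsize; };                  // placeholder message

template <typename F>
void iterate_buffer(const std::vector<msg> &buf, F &fn) {
    for (const msg &m : buf) fn(m);              // visit in insertion order
}

struct flush_fn_sketch {
    size_t remaining;                            // bytes of buffer not yet applied
    size_t checkpoint_current, checkpoint_previous;
    size_t flow_current = 0, flow_previous = 0;  // per-checkpoint accounting
    uint64_t bytes_applied = 0;                  // stats-delta stand-in

    flush_fn_sketch(size_t total, size_t cur, size_t prev)
        : remaining(total), checkpoint_current(cur), checkpoint_previous(prev) {}

    void operator()(const msg &m) {
        if (remaining <= checkpoint_current) {
            flow_current += m.memsize;           // in the newest checkpoint's tail
        } else if (remaining <= checkpoint_current + checkpoint_previous) {
            flow_previous += m.memsize;          // in the previous checkpoint's tail
        }
        bytes_applied += m.memsize;              // "apply message to child" stand-in
        remaining -= m.memsize;
    }
};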
// It is the responsibility of flush some child to unlock the node - toku_ft_flush_some_child(fe->h, fe->node, &fa); + toku_ft_flush_some_child(fe->ft, fe->node, &fa); } - remove_background_job_from_cf(fe->h->cf); + remove_background_job_from_cf(fe->ft->cf); toku_free(fe); } static void place_node_and_bnc_on_background_thread( - FT h, + FT ft, FTNODE node, NONLEAF_CHILDINFO bnc, TXNID parent_oldest_referenced_xid_known) { struct flusher_extra *XMALLOC(fe); - fe->h = h; + fe->ft = ft; fe->node = node; fe->bnc = bnc; fe->parent_oldest_referenced_xid_known = parent_oldest_referenced_xid_known; - cachefile_kibbutz_enq(h->cf, flush_node_fun, fe); + cachefile_kibbutz_enq(ft->cf, flush_node_fun, fe); } // @@ -1937,7 +1955,7 @@ place_node_and_bnc_on_background_thread( // child needs to be split/merged), then we place the parent on the background thread. // The parent will be unlocked on the background thread // -void toku_ft_flush_node_on_background_thread(FT h, FTNODE parent) +void toku_ft_flush_node_on_background_thread(FT ft, FTNODE parent) { toku::context flush_ctx(CTX_FLUSH); TXNID parent_oldest_referenced_xid_known = parent->oldest_referenced_xid_known; @@ -1951,24 +1969,24 @@ void toku_ft_flush_node_on_background_thread(FT h, FTNODE parent) // see if we can pin the child // FTNODE child; - uint32_t childfullhash = compute_child_fullhash(h->cf, parent, childnum); - int r = toku_maybe_pin_ftnode_clean(h, BP_BLOCKNUM(parent, childnum), childfullhash, PL_WRITE_EXPENSIVE, &child); + uint32_t childfullhash = compute_child_fullhash(ft->cf, parent, childnum); + int r = toku_maybe_pin_ftnode_clean(ft, BP_BLOCKNUM(parent, childnum), childfullhash, PL_WRITE_EXPENSIVE, &child); if (r != 0) { // In this case, we could not lock the child, so just place the parent on the background thread // In the callback, we will use toku_ft_flush_some_child, which checks to // see if we should blow away the old basement nodes. - place_node_and_bnc_on_background_thread(h, parent, NULL, parent_oldest_referenced_xid_known); + place_node_and_bnc_on_background_thread(ft, parent, NULL, parent_oldest_referenced_xid_known); } else { // // successfully locked child // - bool may_child_be_reactive = may_node_be_reactive(h, child); + bool may_child_be_reactive = ft_ftnode_may_be_reactive(ft, child); if (!may_child_be_reactive) { // We're going to unpin the parent, so before we do, we must // check to see if we need to blow away the basement nodes to // keep the MSN invariants intact. - maybe_destroy_child_blbs(parent, child, h); + maybe_destroy_child_blbs(parent, child, ft); // // can detach buffer and unpin root here @@ -1986,17 +2004,17 @@ void toku_ft_flush_node_on_background_thread(FT h, FTNODE parent) // so, because we know for sure the child is not // reactive, we can unpin the parent // - place_node_and_bnc_on_background_thread(h, child, bnc, parent_oldest_referenced_xid_known); - toku_unpin_ftnode(h, parent); + place_node_and_bnc_on_background_thread(ft, child, bnc, parent_oldest_referenced_xid_known); + toku_unpin_ftnode(ft, parent); } else { // because the child may be reactive, we need to // put parent on background thread. // As a result, we unlock the child here. - toku_unpin_ftnode(h, child); + toku_unpin_ftnode(ft, child); // Again, we'll have the parent on the background thread, so // we don't need to destroy the basement nodes yet. 
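The hand-off in toku_ft_flush_node_on_background_thread packages the pinned node (and, when the child is not reactive, a detached buffer) into a heap-allocated flusher_extra, enqueues it with cachefile_kibbutz_enq, and lets flush_node_fun finish the flush, drop the background-job reference and free the struct on the worker thread. A minimal analogue of that ownership hand-off, using a plain detached thread instead of the kibbutz pool; all names here are illustrative:

#include <thread>

struct work_item {
    int node_id;              // stand-in for the pinned FTNODE / bnc pointers
};

static void flush_on_worker(work_item *w) {
    // ... perform the flush using *w ...
    delete w;                 // worker owns and frees the item, as flush_node_fun does
}

static void enqueue_flush(int node_id) {
    work_item *w = new work_item{node_id};       // allocated by the enqueuing thread
    std::thread(flush_on_worker, w).detach();    // hand ownership to the background worker
}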
- place_node_and_bnc_on_background_thread(h, parent, NULL, parent_oldest_referenced_xid_known); + place_node_and_bnc_on_background_thread(ft, parent, NULL, parent_oldest_referenced_xid_known); } } } diff --git a/storage/tokudb/ft-index/ft/ft-flusher.h b/storage/tokudb/ft-index/ft/ft-flusher.h index 0788bf665d3..47bf4e7cf77 100644 --- a/storage/tokudb/ft-index/ft/ft-flusher.h +++ b/storage/tokudb/ft-index/ft/ft-flusher.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef FT_FLUSHER_H -#define FT_FLUSHER_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,11 +86,12 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -// This must be first to make the 64-bit file mode work right in Linux -#include "fttypes.h" +#include "ft/ft-internal.h" typedef enum { FT_FLUSHER_CLEANER_TOTAL_NODES = 0, // total number of nodes whose buffers are potentially flushed by cleaner thread @@ -152,10 +151,31 @@ toku_flusher_thread_set_callback( * Puts a workitem on the flusher thread queue, scheduling the node to be * flushed by toku_ft_flush_some_child. */ -void -toku_ft_flush_node_on_background_thread( +void toku_ft_flush_node_on_background_thread(FT ft, FTNODE parent); + +enum split_mode { + SPLIT_EVENLY, + SPLIT_LEFT_HEAVY, + SPLIT_RIGHT_HEAVY +}; + + +// Given pinned node and pinned child, split child into two +// and update node with information about its new child. +void toku_ft_split_child( FT ft, - FTNODE parent + FTNODE node, + int childnum, + FTNODE child, + enum split_mode split_mode + ); + +// Given pinned node, merge childnum with a neighbor and update node with +// information about the change +void toku_ft_merge_child( + FT ft, + FTNODE node, + int childnum ); /** @@ -166,9 +186,10 @@ toku_ft_flush_node_on_background_thread( * nodea is the left node that results from the split * splitk is the right-most key of nodea */ +// TODO: Rename toku_ft_leaf_split void ftleaf_split( - FT h, + FT ft, FTNODE node, FTNODE *nodea, FTNODE *nodeb, @@ -189,8 +210,9 @@ ftleaf_split( * but it does not guarantee that the resulting nodes are smaller than nodesize. */ void +// TODO: Rename toku_ft_nonleaf_split ft_nonleaf_split( - FT h, + FT ft, FTNODE node, FTNODE *nodea, FTNODE *nodeb, @@ -199,8 +221,6 @@ ft_nonleaf_split( FTNODE* dependent_nodes ); - - /************************************************************************ * HOT optimize, should perhaps be factored out to its own header file * ************************************************************************ @@ -231,7 +251,5 @@ void toku_ft_hot_get_status(FT_HOT_STATUS); */ int toku_ft_hot_optimize(FT_HANDLE ft_h, DBT* left, DBT* right, - int (*progress_callback)(void *extra, float progress), - void *progress_extra, uint64_t* loops_run); - -#endif // End of header guardian. 
+ int (*progress_callback)(void *extra, float progress), + void *progress_extra, uint64_t* loops_run); diff --git a/storage/tokudb/ft-index/ft/ft-hot-flusher.cc b/storage/tokudb/ft-index/ft/ft-hot-flusher.cc index 066e075ee0e..55230e75da0 100644 --- a/storage/tokudb/ft-index/ft/ft-hot-flusher.cc +++ b/storage/tokudb/ft-index/ft/ft-hot-flusher.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,14 +89,15 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <ft-flusher.h> -#include <ft-flusher-internal.h> -#include <ft-cachetable-wrappers.h> -#include <ft-internal.h> -#include <ft.h> -#include <portability/toku_atomic.h> -#include <util/status.h> -#include <util/context.h> +#include "ft/ft.h" +#include "ft/ft-cachetable-wrappers.h" +#include "ft/ft-flusher.h" +#include "ft/ft-flusher-internal.h" +#include "ft/ft-internal.h" +#include "ft/node.h" +#include "portability/toku_atomic.h" +#include "util/context.h" +#include "util/status.h" // Member Descirption: // 1. highest_pivot_key - this is the key that corresponds to the @@ -119,7 +120,7 @@ struct hot_flusher_extra { static FT_HOT_STATUS_S hot_status; -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(hot_status, k, c, t, "hot: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(hot_status, k, c, t, "hot: " l, inc) #define STATUS_VALUE(x) hot_status.status[x].value.num @@ -168,7 +169,7 @@ hot_set_start_key(struct hot_flusher_extra *flusher, const DBT* start) } static int -hot_just_pick_child(FT h, +hot_just_pick_child(FT ft, FTNODE parent, struct hot_flusher_extra *flusher) { @@ -183,10 +184,7 @@ hot_just_pick_child(FT h, childnum = 0; } else { // Find the pivot boundary. - childnum = toku_ftnode_hot_next_child(parent, - &flusher->highest_pivot_key, - &h->cmp_descriptor, - h->compare_fun); + childnum = toku_ftnode_hot_next_child(parent, &flusher->highest_pivot_key, ft->cmp); } return childnum; @@ -201,19 +199,19 @@ hot_update_flusher_keys(FTNODE parent, // child node. if (childnum < (parent->n_children - 1)) { toku_destroy_dbt(&flusher->max_current_key); - toku_clone_dbt(&flusher->max_current_key, parent->childkeys[childnum]); + toku_clone_dbt(&flusher->max_current_key, parent->pivotkeys.get_pivot(childnum)); } } // Picks which child toku_ft_flush_some_child will use for flushing and // recursion. static int -hot_pick_child(FT h, +hot_pick_child(FT ft, FTNODE parent, void *extra) { struct hot_flusher_extra *flusher = (struct hot_flusher_extra *) extra; - int childnum = hot_just_pick_child(h, parent, flusher); + int childnum = hot_just_pick_child(ft, parent, flusher); // Now we determine the percentage of the tree flushed so far. @@ -243,14 +241,14 @@ hot_update_status(FTNODE UU(child), // one to flush into. This gives it a chance to do that, and update the // keys it maintains. 
static int -hot_pick_child_after_split(FT h, +hot_pick_child_after_split(FT ft, FTNODE parent, int childnuma, int childnumb, void *extra) { struct hot_flusher_extra *flusher = (struct hot_flusher_extra *) extra; - int childnum = hot_just_pick_child(h, parent, flusher); + int childnum = hot_just_pick_child(ft, parent, flusher); assert(childnum == childnuma || childnum == childnumb); hot_update_flusher_keys(parent, childnum, flusher); if (parent->height == 1) { @@ -330,8 +328,8 @@ toku_ft_hot_optimize(FT_HANDLE ft_handle, DBT* left, DBT* right, // Get root node (the first parent of each successive HOT // call.) toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft_handle->ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft_handle->ft); toku_pin_ftnode(ft_handle->ft, (BLOCKNUM) root_key, fullhash, @@ -339,7 +337,7 @@ toku_ft_hot_optimize(FT_HANDLE ft_handle, DBT* left, DBT* right, PL_WRITE_EXPENSIVE, &root, true); - toku_assert_entire_node_in_memory(root); + toku_ftnode_assert_fully_in_memory(root); } // Prepare HOT diagnostics. @@ -385,8 +383,7 @@ toku_ft_hot_optimize(FT_HANDLE ft_handle, DBT* left, DBT* right, else if (right) { // if we have flushed past the bounds set for us, // set rightmost_leaf_seen so we exit - FAKE_DB(db, &ft_handle->ft->cmp_descriptor); - int cmp = ft_handle->ft->compare_fun(&db, &flusher.max_current_key, right); + int cmp = ft_handle->ft->cmp(&flusher.max_current_key, right); if (cmp > 0) { flusher.rightmost_leaf_seen = 1; } diff --git a/storage/tokudb/ft-index/ft/ft-internal.h b/storage/tokudb/ft-index/ft/ft-internal.h index 42d27638330..3cd39705571 100644 --- a/storage/tokudb/ft-index/ft/ft-internal.h +++ b/storage/tokudb/ft-index/ft/ft-internal.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef FT_INTERNAL_H -#define FT_INTERNAL_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,11 +87,22 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <portability/toku_config.h> -#include <toku_race_tools.h> +#include "portability/toku_config.h" +#include "portability/toku_list.h" +#include "portability/toku_race_tools.h" + +#include "ft/cachetable/cachetable.h" +#include "ft/comparator.h" +#include "ft/ft.h" +#include "ft/ft-ops.h" +#include "ft/node.h" +#include "ft/serialize/block_table.h" +#include "ft/txn/rollback.h" // Symbol TOKUDB_REVISION is not defined by fractal-tree makefiles, so // BUILD_ID of 1000 indicates development build of main, not a release build. 
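The change just above, where FAKE_DB plus ft_handle->ft->compare_fun(&db, ...) becomes ft_handle->ft->cmp(...), reflects the new toku::comparator member introduced further down in ft-internal.h: the comparison function is bound to cmp_descriptor once, so call sites no longer build a fake DB per comparison. A rough standalone sketch of that idea, with simplified stand-in types rather than the real toku::comparator:

struct DBT_stub { const void *data; unsigned int size; };   // stand-in for DBT
struct descriptor_stub { const void *dbt_data; };            // stand-in for DESCRIPTOR
typedef int (*compare_fn)(const descriptor_stub *desc, const DBT_stub *a, const DBT_stub *b);

// Binds the user comparison function to its descriptor once, so call sites
// can simply write cmp(a, b) instead of building a fake DB on every compare.
class comparator_stub {
    compare_fn _fn;
    const descriptor_stub *_desc;
public:
    void create(compare_fn fn, const descriptor_stub *desc) { _fn = fn; _desc = desc; }
    int operator()(const DBT_stub *a, const DBT_stub *b) const { return _fn(_desc, a, b); }
};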
@@ -103,361 +112,24 @@ PATENT RIGHTS GRANT: #error #endif -#include "ft_layout_version.h" -#include "block_allocator.h" -#include "cachetable.h" -#include "fifo.h" -#include "ft-ops.h" -#include "toku_list.h" -#include <util/omt.h> -#include "leafentry.h" -#include "block_table.h" -#include "compress.h" -#include <util/mempool.h> -#include <util/omt.h> -#include "bndata.h" - -enum { KEY_VALUE_OVERHEAD = 8 }; /* Must store the two lengths. */ -enum { FT_MSG_OVERHEAD = (2 + sizeof(MSN)) }; // the type plus freshness plus MSN +struct ft_search; + enum { FT_DEFAULT_FANOUT = 16 }; enum { FT_DEFAULT_NODE_SIZE = 4 * 1024 * 1024 }; enum { FT_DEFAULT_BASEMENT_NODE_SIZE = 128 * 1024 }; -// -// Field in ftnode_fetch_extra that tells the -// partial fetch callback what piece of the node -// is needed by the ydb -// -enum ftnode_fetch_type { - ftnode_fetch_none=1, // no partitions needed. - ftnode_fetch_subset, // some subset of partitions needed - ftnode_fetch_prefetch, // this is part of a prefetch call - ftnode_fetch_all, // every partition is needed - ftnode_fetch_keymatch, // one child is needed if it holds both keys -}; - -static bool is_valid_ftnode_fetch_type(enum ftnode_fetch_type type) UU(); -static bool is_valid_ftnode_fetch_type(enum ftnode_fetch_type type) { - switch (type) { - case ftnode_fetch_none: - case ftnode_fetch_subset: - case ftnode_fetch_prefetch: - case ftnode_fetch_all: - case ftnode_fetch_keymatch: - return true; - default: - return false; - } -} - -// -// An extra parameter passed to cachetable functions -// That is used in all types of fetch callbacks. -// The contents help the partial fetch and fetch -// callbacks retrieve the pieces of a node necessary -// for the ensuing operation (flush, query, ...) -// -struct ftnode_fetch_extra { - enum ftnode_fetch_type type; - // needed for reading a node off disk - FT h; - // used in the case where type == ftnode_fetch_subset - // parameters needed to find out which child needs to be decompressed (so it can be read) - ft_search_t* search; - DBT range_lock_left_key, range_lock_right_key; - bool left_is_neg_infty, right_is_pos_infty; - // states if we should try to aggressively fetch basement nodes - // that are not specifically needed for current query, - // but may be needed for other cursor operations user is doing - // For example, if we have not disabled prefetching, - // and the user is doing a dictionary wide scan, then - // even though a query may only want one basement node, - // we fetch all basement nodes in a leaf node. - bool disable_prefetching; - // this value will be set during the fetch_callback call by toku_ftnode_fetch_callback or toku_ftnode_pf_req_callback - // thi callbacks need to evaluate this anyway, so we cache it here so the search code does not reevaluate it - int child_to_read; - // when we read internal nodes, we want to read all the data off disk in one I/O - // then we'll treat it as normal and only decompress the needed partitions etc. - - bool read_all_partitions; - // Accounting: How many bytes were read, and how much time did we spend doing I/O? - uint64_t bytes_read; - tokutime_t io_time; - tokutime_t decompress_time; - tokutime_t deserialize_time; -}; +// We optimize for a sequential insert pattern if 100 consecutive injections +// happen into the rightmost leaf node due to promotion. 
+enum { FT_SEQINSERT_SCORE_THRESHOLD = 100 }; -struct toku_fifo_entry_key_msn_heaviside_extra { - DESCRIPTOR desc; - ft_compare_func cmp; - FIFO fifo; - const DBT *key; - MSN msn; -}; - -// comparison function for inserting messages into a -// ftnode_nonleaf_childinfo's message_tree -int -toku_fifo_entry_key_msn_heaviside(const int32_t &v, const struct toku_fifo_entry_key_msn_heaviside_extra &extra); - -struct toku_fifo_entry_key_msn_cmp_extra { - DESCRIPTOR desc; - ft_compare_func cmp; - FIFO fifo; -}; - -// same thing for qsort_r -int -toku_fifo_entry_key_msn_cmp(const struct toku_fifo_entry_key_msn_cmp_extra &extrap, const int &a, const int &b); - -typedef toku::omt<int32_t> off_omt_t; -typedef toku::omt<int32_t, int32_t, true> marked_off_omt_t; - -// data of an available partition of a nonleaf ftnode -struct ftnode_nonleaf_childinfo { - FIFO buffer; - off_omt_t broadcast_list; - marked_off_omt_t fresh_message_tree; - off_omt_t stale_message_tree; - uint64_t flow[2]; // current and last checkpoint -}; - -unsigned int toku_bnc_nbytesinbuf(NONLEAF_CHILDINFO bnc); -int toku_bnc_n_entries(NONLEAF_CHILDINFO bnc); -long toku_bnc_memory_size(NONLEAF_CHILDINFO bnc); -long toku_bnc_memory_used(NONLEAF_CHILDINFO bnc); -void toku_bnc_insert_msg(NONLEAF_CHILDINFO bnc, const void *key, ITEMLEN keylen, const void *data, ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, DESCRIPTOR desc, ft_compare_func cmp); -void toku_bnc_empty(NONLEAF_CHILDINFO bnc); -void toku_bnc_flush_to_child(FT h, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID parent_oldest_referenced_xid_known); -bool toku_bnc_should_promote(FT ft, NONLEAF_CHILDINFO bnc) __attribute__((const, nonnull)); -bool toku_ft_nonleaf_is_gorged(FTNODE node, uint32_t nodesize); - -enum reactivity get_nonleaf_reactivity(FTNODE node, unsigned int fanout); -enum reactivity get_node_reactivity(FT ft, FTNODE node); -uint32_t get_leaf_num_entries(FTNODE node); - -// data of an available partition of a leaf ftnode -struct ftnode_leaf_basement_node { - bn_data data_buffer; - unsigned int seqinsert; // number of sequential inserts to this leaf - MSN max_msn_applied; // max message sequence number applied - bool stale_ancestor_messages_applied; - STAT64INFO_S stat64_delta; // change in stat64 counters since basement was last written to disk -}; - -enum pt_state { // declare this to be packed so that when used below it will only take 1 byte. - PT_INVALID = 0, - PT_ON_DISK = 1, - PT_COMPRESSED = 2, - PT_AVAIL = 3}; - -enum ftnode_child_tag { - BCT_INVALID = 0, - BCT_NULL, - BCT_SUBBLOCK, - BCT_LEAF, - BCT_NONLEAF -}; - -typedef struct ftnode_child_pointer { - union { - struct sub_block *subblock; - struct ftnode_nonleaf_childinfo *nonleaf; - struct ftnode_leaf_basement_node *leaf; - } u; - enum ftnode_child_tag tag; -} FTNODE_CHILD_POINTER; - - -struct ftnode_disk_data { - // - // stores the offset to the beginning of the partition on disk from the ftnode, and the length, needed to read a partition off of disk - // the value is only meaningful if the node is clean. If the node is dirty, then the value is meaningless - // The START is the distance from the end of the compressed node_info data, to the beginning of the compressed partition - // The SIZE is the size of the compressed partition. - // Rationale: We cannot store the size from the beginning of the node since we don't know how big the header will be. - // However, later when we are doing aligned writes, we won't be able to store the size from the end since we want things to align. 
- uint32_t start; - uint32_t size; -}; -#define BP_START(node_dd,i) ((node_dd)[i].start) -#define BP_SIZE(node_dd,i) ((node_dd)[i].size) - - -// a ftnode partition, associated with a child of a node -struct ftnode_partition { - // the following three variables are used for nonleaf nodes - // for leaf nodes, they are meaningless - BLOCKNUM blocknum; // blocknum of child - - // How many bytes worth of work was performed by messages in each buffer. - uint64_t workdone; - - // - // pointer to the partition. Depending on the state, they may be different things - // if state == PT_INVALID, then the node was just initialized and ptr == NULL - // if state == PT_ON_DISK, then ptr == NULL - // if state == PT_COMPRESSED, then ptr points to a struct sub_block* - // if state == PT_AVAIL, then ptr is: - // a struct ftnode_nonleaf_childinfo for internal nodes, - // a struct ftnode_leaf_basement_node for leaf nodes - // - struct ftnode_child_pointer ptr; - // - // at any time, the partitions may be in one of the following three states (stored in pt_state): - // PT_INVALID - means that the partition was just initialized - // PT_ON_DISK - means that the partition is not in memory and needs to be read from disk. To use, must read off disk and decompress - // PT_COMPRESSED - means that the partition is compressed in memory. To use, must decompress - // PT_AVAIL - means the partition is decompressed and in memory - // - enum pt_state state; // make this an enum to make debugging easier. - - // clock count used to for pe_callback to determine if a node should be evicted or not - // for now, saturating the count at 1 - uint8_t clock_count; -}; - -struct ftnode { - MSN max_msn_applied_to_node_on_disk; // max_msn_applied that will be written to disk - unsigned int flags; - BLOCKNUM thisnodename; // Which block number is this node? - int layout_version; // What version of the data structure? - int layout_version_original; // different (<) from layout_version if upgraded from a previous version (useful for debugging) - int layout_version_read_from_disk; // transient, not serialized to disk, (useful for debugging) - uint32_t build_id; // build_id (svn rev number) of software that wrote this node to disk - int height; /* height is always >= 0. 0 for leaf, >0 for nonleaf. */ - int dirty; - uint32_t fullhash; - int n_children; //for internal nodes, if n_children==fanout+1 then the tree needs to be rebalanced. - // for leaf nodes, represents number of basement nodes - unsigned int totalchildkeylens; - DBT *childkeys; /* Pivot keys. Child 0's keys are <= childkeys[0]. Child 1's keys are <= childkeys[1]. - Child 1's keys are > childkeys[0]. */ - - // What's the oldest referenced xid that this node knows about? The real oldest - // referenced xid might be younger, but this is our best estimate. We use it - // as a heuristic to transition provisional mvcc entries from provisional to - // committed (from implicity committed to really committed). - // - // A better heuristic would be the oldest live txnid, but we use this since it - // still works well most of the time, and its readily available on the inject - // code path. 
- TXNID oldest_referenced_xid_known; - - // array of size n_children, consisting of ftnode partitions - // each one is associated with a child - // for internal nodes, the ith partition corresponds to the ith message buffer - // for leaf nodes, the ith partition corresponds to the ith basement node - struct ftnode_partition *bp; - PAIR ct_pair; -}; - -// ftnode partition macros -// BP stands for ftnode_partition -#define BP_BLOCKNUM(node,i) ((node)->bp[i].blocknum) -#define BP_STATE(node,i) ((node)->bp[i].state) -#define BP_WORKDONE(node, i)((node)->bp[i].workdone) - -// -// macros for managing a node's clock -// Should be managed by ft-ops.c, NOT by serialize/deserialize -// +uint32_t compute_child_fullhash (CACHEFILE cf, FTNODE node, int childnum); -// -// BP_TOUCH_CLOCK uses a compare and swap because multiple threads -// that have a read lock on an internal node may try to touch the clock -// simultaneously -// -#define BP_TOUCH_CLOCK(node, i) ((node)->bp[i].clock_count = 1) -#define BP_SWEEP_CLOCK(node, i) ((node)->bp[i].clock_count = 0) -#define BP_SHOULD_EVICT(node, i) ((node)->bp[i].clock_count == 0) -// not crazy about having these two here, one is for the case where we create new -// nodes, such as in splits and creating new roots, and the other is for when -// we are deserializing a node and not all bp's are touched -#define BP_INIT_TOUCHED_CLOCK(node, i) ((node)->bp[i].clock_count = 1) -#define BP_INIT_UNTOUCHED_CLOCK(node, i) ((node)->bp[i].clock_count = 0) - -// internal node macros -static inline void set_BNULL(FTNODE node, int i) { - paranoid_invariant(i >= 0); - paranoid_invariant(i < node->n_children); - node->bp[i].ptr.tag = BCT_NULL; -} -static inline bool is_BNULL (FTNODE node, int i) { - paranoid_invariant(i >= 0); - paranoid_invariant(i < node->n_children); - return node->bp[i].ptr.tag == BCT_NULL; -} -static inline NONLEAF_CHILDINFO BNC(FTNODE node, int i) { - paranoid_invariant(i >= 0); - paranoid_invariant(i < node->n_children); - FTNODE_CHILD_POINTER p = node->bp[i].ptr; - paranoid_invariant(p.tag==BCT_NONLEAF); - return p.u.nonleaf; -} -static inline void set_BNC(FTNODE node, int i, NONLEAF_CHILDINFO nl) { - paranoid_invariant(i >= 0); - paranoid_invariant(i < node->n_children); - FTNODE_CHILD_POINTER *p = &node->bp[i].ptr; - p->tag = BCT_NONLEAF; - p->u.nonleaf = nl; -} - -static inline BASEMENTNODE BLB(FTNODE node, int i) { - paranoid_invariant(i >= 0); - // The optimizer really doesn't like it when we compare - // i to n_children as signed integers. So we assert that - // n_children is in fact positive before doing a comparison - // on the values forcibly cast to unsigned ints. 
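The BP_TOUCH_CLOCK / BP_SWEEP_CLOCK / BP_SHOULD_EVICT macros being moved out of this header implement a one-bit clock (second-chance) policy per node partition: readers set the bit, the partial-eviction pass clears it, and a partition whose bit is already clear is the eviction candidate. A self-contained sketch of the same policy over a hypothetical partition array (stand-in names, not the real ftnode_partition):

#include <cstdint>
#include <vector>

struct partition_stub { uint8_t clock_count; bool in_memory; };

// Reader touched the partition: give it a second chance (BP_TOUCH_CLOCK).
static void touch(partition_stub &p) { p.clock_count = 1; }

// Partial-eviction pass: a partition whose bit is already clear gets evicted,
// otherwise its bit is cleared so it becomes a candidate on the next pass.
static void sweep(std::vector<partition_stub> &parts) {
    for (partition_stub &p : parts) {
        if (!p.in_memory) continue;
        if (p.clock_count == 0) {
            p.in_memory = false;   // evict: compress or drop the partition
        } else {
            p.clock_count = 0;     // BP_SWEEP_CLOCK
        }
    }
}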
- paranoid_invariant(node->n_children > 0); - paranoid_invariant((unsigned) i < (unsigned) node->n_children); - FTNODE_CHILD_POINTER p = node->bp[i].ptr; - paranoid_invariant(p.tag==BCT_LEAF); - return p.u.leaf; -} -static inline void set_BLB(FTNODE node, int i, BASEMENTNODE bn) { - paranoid_invariant(i >= 0); - paranoid_invariant(i < node->n_children); - FTNODE_CHILD_POINTER *p = &node->bp[i].ptr; - p->tag = BCT_LEAF; - p->u.leaf = bn; -} - -static inline SUB_BLOCK BSB(FTNODE node, int i) { - paranoid_invariant(i >= 0); - paranoid_invariant(i < node->n_children); - FTNODE_CHILD_POINTER p = node->bp[i].ptr; - paranoid_invariant(p.tag==BCT_SUBBLOCK); - return p.u.subblock; -} -static inline void set_BSB(FTNODE node, int i, SUB_BLOCK sb) { - paranoid_invariant(i >= 0); - paranoid_invariant(i < node->n_children); - FTNODE_CHILD_POINTER *p = &node->bp[i].ptr; - p->tag = BCT_SUBBLOCK; - p->u.subblock = sb; -} - -// ftnode leaf basementnode macros, -#define BLB_MAX_MSN_APPLIED(node,i) (BLB(node,i)->max_msn_applied) -#define BLB_MAX_DSN_APPLIED(node,i) (BLB(node,i)->max_dsn_applied) -#define BLB_DATA(node,i) (&(BLB(node,i)->data_buffer)) -#define BLB_NBYTESINDATA(node,i) (BLB_DATA(node,i)->get_disk_size()) -#define BLB_SEQINSERT(node,i) (BLB(node,i)->seqinsert) - -/* pivot flags (must fit in 8 bits) */ -enum { - FT_PIVOT_TRUNC = 4, - FT_PIVOT_FRONT_COMPRESS = 8, +enum ft_type { + FT_CURRENT = 1, + FT_CHECKPOINT_INPROGRESS }; -uint32_t compute_child_fullhash (CACHEFILE cf, FTNODE node, int childnum); - // The ft_header is not managed by the cachetable. Instead, it hangs off the cachefile as userdata. - -enum ft_type {FT_CURRENT=1, FT_CHECKPOINT_INPROGRESS}; - struct ft_header { enum ft_type type; @@ -470,7 +142,7 @@ struct ft_header { // LSN of creation of "checkpoint-begin" record in log. LSN checkpoint_lsn; - // see ft_layout_version.h. maybe don't need this if we assume + // see serialize/ft_layout_version.h. maybe don't need this if we assume // it's always the current version after deserializing const int layout_version; // different (<) from layout_version if upgraded from a previous @@ -525,6 +197,7 @@ struct ft_header { STAT64INFO_S on_disk_stats; }; +typedef struct ft_header *FT_HEADER; // ft_header is always the current version. struct ft { @@ -536,20 +209,23 @@ struct ft { CACHEFILE cf; // unique id for dictionary DICTIONARY_ID dict_id; - ft_compare_func compare_fun; - ft_update_func update_fun; // protected by locktree DESCRIPTOR_S descriptor; - // protected by locktree and user. User - // makes sure this is only changed - // when no activity on tree + + // protected by locktree and user. + // User makes sure this is only changed when no activity on tree DESCRIPTOR_S cmp_descriptor; + // contains a pointer to cmp_descriptor (above) - their lifetimes are bound + toku::comparator cmp; + + // the update function always utilizes the cmp_descriptor, not the regular one + ft_update_func update_fun; // These are not read-only: // protected by blocktable lock - BLOCK_TABLE blocktable; + block_table blocktable; // protected by atomic builtins STAT64INFO_S in_memory_stats; @@ -572,13 +248,29 @@ struct ft { // is this ft a blackhole? if so, all messages are dropped. bool blackhole; + + // The blocknum of the rightmost leaf node in the tree. Stays constant through splits + // and merges using pair-swapping (like the root node, see toku_ftnode_swap_pair_values()) + // + // This field only transitions from RESERVED_BLOCKNUM_NULL to non-null, never back. 
+ // We initialize it when promotion inserts into a non-root leaf node on the right extreme. + // We use the blocktable lock to protect the initialize transition, though it's not really + // necessary since all threads should be setting it to the same value. We maintain that invariant + // on first initialization, see ft_set_or_verify_rightmost_blocknum() + BLOCKNUM rightmost_blocknum; + + // sequential access pattern heuristic + // - when promotion pushes a message directly into the rightmost leaf, the score goes up. + // - if the score is high enough, we optimistically attempt to insert directly into the rightmost leaf + // - if our attempt fails because the key was not in range of the rightmost leaf, we reset the score back to 0 + uint32_t seqinsert_score; }; // Allocate a DB struct off the stack and only set its comparison // descriptor. We don't bother setting any other fields because // the comparison function doesn't need it, and we would like to // reduce the CPU work done per comparison. -#define FAKE_DB(db, desc) struct __toku_db db; do { db.cmp_descriptor = desc; } while (0) +#define FAKE_DB(db, desc) struct __toku_db db; do { db.cmp_descriptor = const_cast<DESCRIPTOR>(desc); } while (0) struct ft_options { unsigned int nodesize; @@ -586,6 +278,7 @@ struct ft_options { enum toku_compression_method compression_method; unsigned int fanout; unsigned int flags; + uint8_t memcmp_magic; ft_compare_func compare_fun; ft_update_func update_fun; }; @@ -605,439 +298,173 @@ struct ft_handle { PAIR_ATTR make_ftnode_pair_attr(FTNODE node); PAIR_ATTR make_invalid_pair_attr(void); -/* serialization code */ -void -toku_create_compressed_partition_from_available( - FTNODE node, - int childnum, - enum toku_compression_method compression_method, - SUB_BLOCK sb - ); -void rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize); -int toku_serialize_ftnode_to_memory (FTNODE node, - FTNODE_DISK_DATA* ndd, - unsigned int basementnodesize, - enum toku_compression_method compression_method, - bool do_rebalancing, - bool in_parallel, - /*out*/ size_t *n_bytes_to_write, - /*out*/ size_t *n_uncompressed_bytes, - /*out*/ char **bytes_to_write); -int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT h, bool for_checkpoint); -int toku_serialize_rollback_log_to (int fd, ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized_log, bool is_serialized, - FT h, bool for_checkpoint); -void toku_serialize_rollback_log_to_memory_uncompressed(ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized); -int toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT h); -int toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, struct ftnode_fetch_extra* bfe); -int toku_deserialize_bp_from_compressed(FTNODE node, int childnum, struct ftnode_fetch_extra *bfe); -int toku_deserialize_ftnode_from (int fd, BLOCKNUM off, uint32_t /*fullhash*/, FTNODE *ftnode, FTNODE_DISK_DATA* ndd, struct ftnode_fetch_extra* bfe); - -// <CER> For verifying old, non-upgraded nodes (versions 13 and 14). 
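The seqinsert_score and rightmost_blocknum fields added to struct ft above drive the new rightmost-leaf shortcut: consecutive injections that land in the rightmost leaf raise the score, and once it reaches FT_SEQINSERT_SCORE_THRESHOLD (100) inserts optimistically target that leaf directly, resetting the score whenever a key falls out of its range. A simplified standalone sketch of just the scoring logic (hypothetical names, not the real promotion code):

#include <cstdint>

enum { SEQINSERT_SCORE_THRESHOLD = 100 };  // mirrors FT_SEQINSERT_SCORE_THRESHOLD

struct seqinsert_state {
    uint32_t score = 0;

    // Promotion delivered a message into the rightmost leaf: raise the score.
    void note_rightmost_insert() {
        if (score < SEQINSERT_SCORE_THRESHOLD) score++;
    }
    // High enough score: the next insert may try the rightmost leaf directly.
    bool try_shortcut() const { return score >= SEQINSERT_SCORE_THRESHOLD; }

    // The optimistic attempt found the key out of range: reset and fall back.
    void note_shortcut_failed() { score = 0; }
};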
-int -decompress_from_raw_block_into_rbuf(uint8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum); -// - -//////////////// <CER> TODO: Move these function declarations -int -deserialize_ft_from_fd_into_rbuf(int fd, - toku_off_t offset_of_header, - struct rbuf *rb, - uint64_t *checkpoint_count, - LSN *checkpoint_lsn, - uint32_t * version_p); +// +// Field in ftnode_fetch_extra that tells the +// partial fetch callback what piece of the node +// is needed by the ydb +// +enum ftnode_fetch_type { + ftnode_fetch_none = 1, // no partitions needed. + ftnode_fetch_subset, // some subset of partitions needed + ftnode_fetch_prefetch, // this is part of a prefetch call + ftnode_fetch_all, // every partition is needed + ftnode_fetch_keymatch, // one child is needed if it holds both keys +}; -int -deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ft, uint32_t version); +// Info passed to cachetable fetch callbacks to say which parts of a node +// should be fetched (perhaps a subset, perhaps the whole thing, depending +// on operation) +class ftnode_fetch_extra { +public: + // Used when the whole node must be in memory, such as for flushes. + void create_for_full_read(FT ft); -void read_block_from_fd_into_rbuf( - int fd, - BLOCKNUM blocknum, - FT h, - struct rbuf *rb - ); + // A subset of children are necessary. Used by point queries. + void create_for_subset_read(FT ft, ft_search *search, const DBT *left, const DBT *right, + bool left_is_neg_infty, bool right_is_pos_infty, + bool disable_prefetching, bool read_all_partitions); -int -read_compressed_sub_block(struct rbuf *rb, struct sub_block *sb); + // No partitions are necessary - only pivots and/or subtree estimates. + // Currently used for stat64. + void create_for_min_read(FT ft); -int -verify_ftnode_sub_block (struct sub_block *sb); + // Used to prefetch partitions that fall within the bounds given by the cursor. + void create_for_prefetch(FT ft, struct ft_cursor *cursor); -void -just_decompress_sub_block(struct sub_block *sb); + // Only a portion of the node (within a keyrange) is required. + // Used by keysrange when the left and right key are in the same basement node. + void create_for_keymatch(FT ft, const DBT *left, const DBT *right, + bool disable_prefetching, bool read_all_partitions); -/* Beginning of ft-node-deserialize.c helper functions. */ -void initialize_ftnode(FTNODE node, BLOCKNUM blocknum); -int read_and_check_magic(struct rbuf *rb); -int read_and_check_version(FTNODE node, struct rbuf *rb); -void read_node_info(FTNODE node, struct rbuf *rb, int version); -void allocate_and_read_partition_offsets(FTNODE node, struct rbuf *rb, FTNODE_DISK_DATA *ndd); -int check_node_info_checksum(struct rbuf *rb); -void read_legacy_node_info(FTNODE node, struct rbuf *rb, int version); -int check_legacy_end_checksum(struct rbuf *rb); -/* End of ft-node-deserialization.c helper functions. */ + void destroy(void); -unsigned int toku_serialize_ftnode_size(FTNODE node); /* How much space will it take? 
*/ + // return: true if a specific childnum is required to be in memory + bool wants_child_available(int childnum) const; -void toku_verify_or_set_counts(FTNODE); + // return: the childnum of the leftmost child that is required to be in memory + int leftmost_child_wanted(FTNODE node) const; -size_t toku_serialize_ft_size (FT_HEADER h); -void toku_serialize_ft_to (int fd, FT_HEADER h, BLOCK_TABLE blocktable, CACHEFILE cf); -void toku_serialize_ft_to_wbuf ( - struct wbuf *wbuf, - FT_HEADER h, - DISKOFF translation_location_on_disk, - DISKOFF translation_size_on_disk - ); -int toku_deserialize_ft_from (int fd, LSN max_acceptable_lsn, FT *ft); -void toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISKOFF offset); -void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const DESCRIPTOR desc); -BASEMENTNODE toku_create_empty_bn(void); -BASEMENTNODE toku_create_empty_bn_no_buffer(void); // create a basement node with a null buffer. -NONLEAF_CHILDINFO toku_clone_nl(NONLEAF_CHILDINFO orig_childinfo); -BASEMENTNODE toku_clone_bn(BASEMENTNODE orig_bn); -NONLEAF_CHILDINFO toku_create_empty_nl(void); -// FIXME needs toku prefix -void destroy_basement_node (BASEMENTNODE bn); -// FIXME needs toku prefix -void destroy_nonleaf_childinfo (NONLEAF_CHILDINFO nl); -void toku_destroy_ftnode_internals(FTNODE node); -void toku_ftnode_free (FTNODE *node); -bool is_entire_node_in_memory(FTNODE node); -void toku_assert_entire_node_in_memory(FTNODE node); + // return: the childnum of the rightmost child that is required to be in memory + int rightmost_child_wanted(FTNODE node) const; -// append a child node to a parent node -void toku_ft_nonleaf_append_child(FTNODE node, FTNODE child, const DBT *pivotkey); + // needed for reading a node off disk + FT ft; -// append a message to a nonleaf node child buffer -void toku_ft_append_to_child_buffer(ft_compare_func compare_fun, DESCRIPTOR desc, FTNODE node, int childnum, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const DBT *key, const DBT *val); + enum ftnode_fetch_type type; -STAT64INFO_S toku_get_and_clear_basement_stats(FTNODE leafnode); + // used in the case where type == ftnode_fetch_subset + // parameters needed to find out which child needs to be decompressed (so it can be read) + ft_search *search; + DBT range_lock_left_key, range_lock_right_key; + bool left_is_neg_infty, right_is_pos_infty; -//#define SLOW -#ifdef SLOW -#define VERIFY_NODE(t,n) (toku_verify_or_set_counts(n), toku_verify_estimates(t,n)) -#else -#define VERIFY_NODE(t,n) ((void)0) -#endif + // states if we should try to aggressively fetch basement nodes + // that are not specifically needed for current query, + // but may be needed for other cursor operations user is doing + // For example, if we have not disabled prefetching, + // and the user is doing a dictionary wide scan, then + // even though a query may only want one basement node, + // we fetch all basement nodes in a leaf node. 
+ bool disable_prefetching; -void toku_ft_status_update_pivot_fetch_reason(struct ftnode_fetch_extra *bfe); -void toku_ft_status_update_flush_reason(FTNODE node, uint64_t uncompressed_bytes_flushed, uint64_t bytes_written, tokutime_t write_time, bool for_checkpoint); -void toku_ft_status_update_serialize_times(FTNODE node, tokutime_t serialize_time, tokutime_t compress_time); -void toku_ft_status_update_deserialize_times(FTNODE node, tokutime_t deserialize_time, tokutime_t decompress_time); + // this value will be set during the fetch_callback call by toku_ftnode_fetch_callback or toku_ftnode_pf_req_callback + // thi callbacks need to evaluate this anyway, so we cache it here so the search code does not reevaluate it + int child_to_read; + // when we read internal nodes, we want to read all the data off disk in one I/O + // then we'll treat it as normal and only decompress the needed partitions etc. + bool read_all_partitions; + + // Accounting: How many bytes were read, and how much time did we spend doing I/O? + uint64_t bytes_read; + tokutime_t io_time; + tokutime_t decompress_time; + tokutime_t deserialize_time; + +private: + void _create_internal(FT ft_); +}; + +// Only exported for tests. +// Cachetable callbacks for ftnodes. void toku_ftnode_clone_callback(void* value_data, void** cloned_value_data, long* clone_size, PAIR_ATTR* new_attr, bool for_checkpoint, void* write_extraargs); void toku_ftnode_checkpoint_complete_callback(void *value_data); -void toku_ftnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, void *ftnode_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool is_clone); -int toku_ftnode_fetch_callback (CACHEFILE cachefile, PAIR p, int fd, BLOCKNUM nodename, uint32_t fullhash, void **ftnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int*dirty, void*extraargs); +void toku_ftnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM blocknum, void *ftnode_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool is_clone); +int toku_ftnode_fetch_callback (CACHEFILE cachefile, PAIR p, int fd, BLOCKNUM blocknum, uint32_t fullhash, void **ftnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int*dirty, void*extraargs); void toku_ftnode_pe_est_callback(void* ftnode_pv, void* disk_data, long* bytes_freed_estimate, enum partial_eviction_cost *cost, void* write_extraargs); int toku_ftnode_pe_callback(void *ftnode_pv, PAIR_ATTR old_attr, void *extraargs, void (*finalize)(PAIR_ATTR new_attr, void *extra), void *finalize_extra); bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs); int toku_ftnode_pf_callback(void* ftnode_pv, void* UU(disk_data), void* read_extraargs, int fd, PAIR_ATTR* sizep); int toku_ftnode_cleaner_callback( void *ftnode_pv, BLOCKNUM blocknum, uint32_t fullhash, void *extraargs); -void toku_evict_bn_from_memory(FTNODE node, int childnum, FT h); -BASEMENTNODE toku_detach_bn(FTNODE node, int childnum); - -// Given pinned node and pinned child, split child into two -// and update node with information about its new child. 
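With the fill_bfe_* helpers becoming ftnode_fetch_extra member functions, call sites follow the pattern already visible in toku_ft_hot_optimize earlier in this diff. The snippet below paraphrases that call site; root_blocknum and fullhash stand in for values obtained from toku_calculate_root_offset_pointer, and error handling and unpinning are omitted:

FTNODE node;
ftnode_fetch_extra bfe;
bfe.create_for_full_read(ft_handle->ft);              // whole node needed, e.g. for a flush
toku_pin_ftnode(ft_handle->ft, root_blocknum, fullhash,
                &bfe, PL_WRITE_EXPENSIVE, &node, true);
// ... work on the fully in-memory node, then unpin it ...
bfe.destroy();    // frees any cloned range-lock keys (a no-op for a full read)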
-void toku_ft_split_child( - FT h, - FTNODE node, - int childnum, - FTNODE child, - enum split_mode split_mode - ); -// Given pinned node, merge childnum with a neighbor and update node with -// information about the change -void toku_ft_merge_child( - FT ft, - FTNODE node, - int childnum - ); -static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_node(FT h) { - CACHETABLE_WRITE_CALLBACK wc; - wc.flush_callback = toku_ftnode_flush_callback; - wc.pe_est_callback = toku_ftnode_pe_est_callback; - wc.pe_callback = toku_ftnode_pe_callback; - wc.cleaner_callback = toku_ftnode_cleaner_callback; - wc.clone_callback = toku_ftnode_clone_callback; - wc.checkpoint_complete_callback = toku_ftnode_checkpoint_complete_callback; - wc.write_extraargs = h; - return wc; -} - -static const FTNODE null_ftnode=0; - -/* an ft cursor is represented as a kv pair in a tree */ -struct ft_cursor { - struct toku_list cursors_link; - FT_HANDLE ft_handle; - DBT key, val; // The key-value pair that the cursor currently points to - DBT range_lock_left_key, range_lock_right_key; - bool prefetching; - bool left_is_neg_infty, right_is_pos_infty; - bool is_snapshot_read; // true if query is read_committed, false otherwise - bool is_leaf_mode; - bool disable_prefetching; - bool is_temporary; - int out_of_range_error; - int direction; - TOKUTXN ttxn; - FT_CHECK_INTERRUPT_CALLBACK interrupt_cb; - void *interrupt_cb_extra; -}; -// -// Helper function to fill a ftnode_fetch_extra with data -// that will tell the fetch callback that the entire node is -// necessary. Used in cases where the entire node -// is required, such as for flushes. -// -static inline void fill_bfe_for_full_read(struct ftnode_fetch_extra *bfe, FT h) { - bfe->type = ftnode_fetch_all; - bfe->h = h; - bfe->search = NULL; - toku_init_dbt(&bfe->range_lock_left_key); - toku_init_dbt(&bfe->range_lock_right_key); - bfe->left_is_neg_infty = false; - bfe->right_is_pos_infty = false; - bfe->child_to_read = -1; - bfe->disable_prefetching = false; - bfe->read_all_partitions = false; - bfe->bytes_read = 0; - bfe->io_time = 0; - bfe->deserialize_time = 0; - bfe->decompress_time = 0; -} +CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_node(FT ft); -// -// Helper function to fill a ftnode_fetch_extra with data -// that will tell the fetch callback that an explicit range of children is -// necessary. Used in cases where the portion of the node that is required -// is known in advance, e.g. for keysrange when the left and right key -// are in the same basement node. -// -static inline void fill_bfe_for_keymatch( - struct ftnode_fetch_extra *bfe, - FT h, - const DBT *left, - const DBT *right, - bool disable_prefetching, - bool read_all_partitions - ) -{ - paranoid_invariant(h->h->type == FT_CURRENT); - bfe->type = ftnode_fetch_keymatch; - bfe->h = h; - bfe->search = nullptr; - toku_init_dbt(&bfe->range_lock_left_key); - toku_init_dbt(&bfe->range_lock_right_key); - if (left) { - toku_copyref_dbt(&bfe->range_lock_left_key, *left); - } - - if (right) { - toku_copyref_dbt(&bfe->range_lock_right_key, *right); - } - bfe->left_is_neg_infty = left == nullptr; - bfe->right_is_pos_infty = right == nullptr; - bfe->child_to_read = -1; - bfe->disable_prefetching = disable_prefetching; - bfe->read_all_partitions = read_all_partitions; - bfe->bytes_read = 0; - bfe->io_time = 0; - bfe->deserialize_time = 0; - bfe->decompress_time = 0; -} +// This is only exported for tests. 
+// append a child node to a parent node +void toku_ft_nonleaf_append_child(FTNODE node, FTNODE child, const DBT *pivotkey); -// -// Helper function to fill a ftnode_fetch_extra with data -// that will tell the fetch callback that some subset of the node -// necessary. Used in cases where some of the node is required -// such as for a point query. -// -static inline void fill_bfe_for_subset_read( - struct ftnode_fetch_extra *bfe, - FT h, - ft_search_t* search, - const DBT *left, - const DBT *right, - bool left_is_neg_infty, - bool right_is_pos_infty, - bool disable_prefetching, - bool read_all_partitions - ) -{ - paranoid_invariant(h->h->type == FT_CURRENT); - bfe->type = ftnode_fetch_subset; - bfe->h = h; - bfe->search = search; - toku_init_dbt(&bfe->range_lock_left_key); - toku_init_dbt(&bfe->range_lock_right_key); - if (left) { - toku_copyref_dbt(&bfe->range_lock_left_key, *left); - } - if (right) { - toku_copyref_dbt(&bfe->range_lock_right_key, *right); - } - bfe->left_is_neg_infty = left_is_neg_infty; - bfe->right_is_pos_infty = right_is_pos_infty; - bfe->child_to_read = -1; - bfe->disable_prefetching = disable_prefetching; - bfe->read_all_partitions = read_all_partitions; - bfe->bytes_read = 0; - bfe->io_time = 0; - bfe->deserialize_time = 0; - bfe->decompress_time = 0; -} +// This is only exported for tests. +// append a message to a nonleaf node child buffer +void toku_ft_append_to_child_buffer(const toku::comparator &cmp, FTNODE node, int childnum, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const DBT *key, const DBT *val); -// -// Helper function to fill a ftnode_fetch_extra with data -// that will tell the fetch callback that no partitions are -// necessary, only the pivots and/or subtree estimates. -// Currently used for stat64. 
-// -static inline void fill_bfe_for_min_read(struct ftnode_fetch_extra *bfe, FT h) { - paranoid_invariant(h->h->type == FT_CURRENT); - bfe->type = ftnode_fetch_none; - bfe->h = h; - bfe->search = NULL; - toku_init_dbt(&bfe->range_lock_left_key); - toku_init_dbt(&bfe->range_lock_right_key); - bfe->left_is_neg_infty = false; - bfe->right_is_pos_infty = false; - bfe->child_to_read = -1; - bfe->disable_prefetching = false; - bfe->read_all_partitions = false; - bfe->bytes_read = 0; - bfe->io_time = 0; - bfe->deserialize_time = 0; - bfe->decompress_time = 0; -} - -static inline void destroy_bfe_for_prefetch(struct ftnode_fetch_extra *bfe) { - paranoid_invariant(bfe->type == ftnode_fetch_prefetch); - toku_destroy_dbt(&bfe->range_lock_left_key); - toku_destroy_dbt(&bfe->range_lock_right_key); -} - -// this is in a strange place because it needs the cursor struct to be defined -static inline void fill_bfe_for_prefetch(struct ftnode_fetch_extra *bfe, - FT h, - FT_CURSOR c) { - paranoid_invariant(h->h->type == FT_CURRENT); - bfe->type = ftnode_fetch_prefetch; - bfe->h = h; - bfe->search = NULL; - toku_init_dbt(&bfe->range_lock_left_key); - toku_init_dbt(&bfe->range_lock_right_key); - const DBT *left = &c->range_lock_left_key; - if (left->data) { - toku_clone_dbt(&bfe->range_lock_left_key, *left); - } - const DBT *right = &c->range_lock_right_key; - if (right->data) { - toku_clone_dbt(&bfe->range_lock_right_key, *right); - } - bfe->left_is_neg_infty = c->left_is_neg_infty; - bfe->right_is_pos_infty = c->right_is_pos_infty; - bfe->child_to_read = -1; - bfe->disable_prefetching = c->disable_prefetching; - bfe->read_all_partitions = false; - bfe->bytes_read = 0; - bfe->io_time = 0; - bfe->deserialize_time = 0; - bfe->decompress_time = 0; -} - -struct ancestors { - FTNODE node; // This is the root node if next is NULL. - int childnum; // which buffer holds messages destined to the node whose ancestors this list represents. - ANCESTORS next; // Parent of this node (so next->node.(next->childnum) refers to this node). -}; -struct pivot_bounds { - const DBT * const lower_bound_exclusive; - const DBT * const upper_bound_inclusive; // NULL to indicate negative or positive infinity (which are in practice exclusive since there are now transfinite keys in messages). 
-}; +STAT64INFO_S toku_get_and_clear_basement_stats(FTNODE leafnode); -__attribute__((nonnull)) -void toku_move_ftnode_messages_to_stale(FT ft, FTNODE node); -void toku_apply_ancestors_messages_to_node (FT_HANDLE t, FTNODE node, ANCESTORS ancestors, struct pivot_bounds const * const bounds, bool* msgs_applied, int child_to_read); -__attribute__((nonnull)) -bool toku_ft_leaf_needs_ancestors_messages(FT ft, FTNODE node, ANCESTORS ancestors, struct pivot_bounds const * const bounds, MSN *const max_msn_in_path, int child_to_read); -__attribute__((nonnull)) -void toku_ft_bn_update_max_msn(FTNODE node, MSN max_msn_applied, int child_to_read); +//#define SLOW +#ifdef SLOW +#define VERIFY_NODE(t,n) (toku_verify_or_set_counts(n), toku_verify_estimates(t,n)) +#else +#define VERIFY_NODE(t,n) ((void)0) +#endif -__attribute__((const,nonnull)) -size_t toku_ft_msg_memsize_in_fifo(FT_MSG msg); +void toku_verify_or_set_counts(FTNODE); -int -toku_ft_search_which_child( - DESCRIPTOR desc, - ft_compare_func cmp, - FTNODE node, - ft_search_t *search - ); +// TODO: consider moving this to ft/pivotkeys.cc +class pivot_bounds { +public: + pivot_bounds(const DBT &lbe_dbt, const DBT &ubi_dbt); -bool -toku_bfe_wants_child_available (struct ftnode_fetch_extra* bfe, int childnum); + pivot_bounds next_bounds(FTNODE node, int childnum) const; -int -toku_bfe_leftmost_child_wanted(struct ftnode_fetch_extra *bfe, FTNODE node); -int -toku_bfe_rightmost_child_wanted(struct ftnode_fetch_extra *bfe, FTNODE node); + const DBT *lbe() const; + const DBT *ubi() const; + + static pivot_bounds infinite_bounds(); + +private: + DBT _prepivotkey(FTNODE node, int childnum, const DBT &lbe_dbt) const; + DBT _postpivotkey(FTNODE node, int childnum, const DBT &ubi_dbt) const; + + // if toku_dbt_is_empty() is true for either bound, then it represents + // negative or positive infinity (which are exclusive in practice) + const DBT _lower_bound_exclusive; + const DBT _upper_bound_inclusive; +}; // allocate a block number // allocate and initialize a ftnode // put the ftnode into the cache table -void toku_create_new_ftnode (FT_HANDLE t, FTNODE *result, int height, int n_children); - -// Effect: Fill in N as an empty ftnode. -void toku_initialize_empty_ftnode (FTNODE n, BLOCKNUM nodename, int height, int num_children, - int layout_version, unsigned int flags); - -int toku_ftnode_which_child(FTNODE node, const DBT *k, - DESCRIPTOR desc, ft_compare_func cmp) - __attribute__((__warn_unused_result__)); - -/** - * Finds the next child for HOT to flush to, given that everything up to - * and including k has been flattened. - * - * If k falls between pivots in node, then we return the childnum where k - * lies. - * - * If k is equal to some pivot, then we return the next (to the right) - * childnum. - */ -int toku_ftnode_hot_next_child(FTNODE node, - const DBT *k, - DESCRIPTOR desc, - ft_compare_func cmp); +void toku_create_new_ftnode(FT_HANDLE ft_handle, FTNODE *result, int height, int n_children); /* Stuff for testing */ // toku_testsetup_initialize() must be called before any other test_setup_xxx() functions are called. 
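The new pivot_bounds class above replaces the old struct of two raw DBT pointers: an empty DBT now stands for an infinite bound, and next_bounds() narrows the interval to the pivots surrounding one child. A simplified standalone analogue over string keys, only to show the narrowing step (hypothetical, not the real DBT-based implementation):

#include <string>
#include <vector>

// An empty string plays the role of the empty DBT (an infinite bound).
struct bounds_stub {
    std::string lbe;   // lower bound, exclusive ("" = -infinity)
    std::string ubi;   // upper bound, inclusive ("" = +infinity)

    // Narrow to the interval covered by child `childnum` of a node whose
    // pivot keys are given (pivots.size() == n_children - 1), mirroring
    // pivot_bounds::next_bounds() / _prepivotkey() / _postpivotkey().
    bounds_stub next_bounds(const std::vector<std::string> &pivots, int childnum) const {
        bounds_stub r;
        r.lbe = (childnum == 0) ? lbe : pivots[childnum - 1];
        r.ubi = (childnum == (int)pivots.size()) ? ubi : pivots[childnum];
        return r;
    }
};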
void toku_testsetup_initialize(void); int toku_testsetup_leaf(FT_HANDLE ft_h, BLOCKNUM *blocknum, int n_children, char **keys, int *keylens); -int toku_testsetup_nonleaf (FT_HANDLE ft_h, int height, BLOCKNUM *diskoff, int n_children, BLOCKNUM *children, char **keys, int *keylens); +int toku_testsetup_nonleaf (FT_HANDLE ft_h, int height, BLOCKNUM *blocknum, int n_children, BLOCKNUM *children, char **keys, int *keylens); int toku_testsetup_root(FT_HANDLE ft_h, BLOCKNUM); int toku_testsetup_get_sersize(FT_HANDLE ft_h, BLOCKNUM); // Return the size on disk. int toku_testsetup_insert_to_leaf (FT_HANDLE ft_h, BLOCKNUM, const char *key, int keylen, const char *val, int vallen); int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_h, BLOCKNUM, enum ft_msg_type, const char *key, int keylen, const char *val, int vallen); void toku_pin_node_with_min_bfe(FTNODE* node, BLOCKNUM b, FT_HANDLE t); -void toku_ft_root_put_msg(FT h, FT_MSG msg, txn_gc_info *gc_info); +void toku_ft_root_put_msg(FT ft, const ft_msg &msg, txn_gc_info *gc_info); -void -toku_get_node_for_verify( - BLOCKNUM blocknum, - FT_HANDLE ft_h, - FTNODE* nodep - ); +// TODO: Rename +void toku_get_node_for_verify(BLOCKNUM blocknum, FT_HANDLE ft_h, FTNODE* nodep); int toku_verify_ftnode (FT_HANDLE ft_h, - MSN rootmsn, MSN parentmsn, bool messages_exist_above, + MSN rootmsn, MSN parentmsn_with_messages, bool messages_exist_above, FTNODE node, int height, const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) const DBT *greatereq_pivot, // Everything in the subtree should be <= lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) @@ -1186,6 +613,9 @@ typedef enum { FT_PRO_NUM_DIDNT_WANT_PROMOTE, FT_BASEMENT_DESERIALIZE_FIXED_KEYSIZE, // how many basement nodes were deserialized with a fixed keysize FT_BASEMENT_DESERIALIZE_VARIABLE_KEYSIZE, // how many basement nodes were deserialized with a variable keysize + FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS, + FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS, + FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE, FT_STATUS_NUM_ROWS } ft_status_entry; @@ -1194,61 +624,37 @@ typedef struct { TOKU_ENGINE_STATUS_ROW_S status[FT_STATUS_NUM_ROWS]; } FT_STATUS_S, *FT_STATUS; -void toku_ft_get_status(FT_STATUS); +void toku_ft_status_update_pivot_fetch_reason(ftnode_fetch_extra *bfe); +void toku_ft_status_update_flush_reason(FTNODE node, uint64_t uncompressed_bytes_flushed, uint64_t bytes_written, tokutime_t write_time, bool for_checkpoint); +void toku_ft_status_update_serialize_times(FTNODE node, tokutime_t serialize_time, tokutime_t compress_time); +void toku_ft_status_update_deserialize_times(FTNODE node, tokutime_t deserialize_time, tokutime_t decompress_time); +void toku_ft_status_note_msn_discard(void); +void toku_ft_status_note_update(bool broadcast); +void toku_ft_status_note_msg_bytes_out(size_t buffsize); +void toku_ft_status_note_ftnode(int height, bool created); // created = false means destroyed -void -toku_ft_bn_apply_msg_once( - BASEMENTNODE bn, - const FT_MSG msg, - uint32_t idx, - LEAFENTRY le, - txn_gc_info *gc_info, - uint64_t *workdonep, - STAT64INFO stats_to_update - ); - -void -toku_ft_bn_apply_msg( - ft_compare_func compare_fun, - ft_update_func update_fun, - DESCRIPTOR desc, - BASEMENTNODE bn, - FT_MSG msg, - txn_gc_info *gc_info, - uint64_t *workdone, - STAT64INFO stats_to_update - ); - -void -toku_ft_leaf_apply_msg( - ft_compare_func compare_fun, - ft_update_func update_fun, - DESCRIPTOR desc, - FTNODE node, - int 
target_childnum, - FT_MSG msg, - txn_gc_info *gc_info, - uint64_t *workdone, - STAT64INFO stats_to_update - ); - -void -toku_ft_node_put_msg( - ft_compare_func compare_fun, - ft_update_func update_fun, - DESCRIPTOR desc, - FTNODE node, - int target_childnum, - FT_MSG msg, - bool is_fresh, - txn_gc_info *gc_info, - size_t flow_deltas[], - STAT64INFO stats_to_update - ); +void toku_ft_get_status(FT_STATUS); void toku_flusher_thread_set_callback(void (*callback_f)(int, void*), void* extra); -int toku_upgrade_subtree_estimates_to_stat64info(int fd, FT h) __attribute__((nonnull)); -int toku_upgrade_msn_from_root_to_header(int fd, FT h) __attribute__((nonnull)); - -#endif +// For upgrade +int toku_upgrade_subtree_estimates_to_stat64info(int fd, FT ft) __attribute__((nonnull)); +int toku_upgrade_msn_from_root_to_header(int fd, FT ft) __attribute__((nonnull)); + +// A callback function is invoked with the key, and the data. +// The pointers (to the bytevecs) must not be modified. The data must be copied out before the callback function returns. +// Note: In the thread-safe version, the ftnode remains locked while the callback function runs. So return soon, and don't call the ft code from the callback function. +// If the callback function returns a nonzero value (an error code), then that error code is returned from the get function itself. +// The cursor object will have been updated (so that if result==0 the current value is the value being passed) +// (If r!=0 then the cursor won't have been updated.) +// If r!=0, it's up to the callback function to return that value of r. +// A 'key' pointer of NULL means that element is not found (effectively infinity or +// -infinity depending on direction) +// When lock_only is false, the callback does optional lock tree locking and then processes the key and val. +// When lock_only is true, the callback only does optional lock tree locking. +typedef int (*FT_GET_CALLBACK_FUNCTION)(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only); + +typedef bool (*FT_CHECK_INTERRUPT_CALLBACK)(void *extra); + +struct ft_cursor; +int toku_ft_search(FT_HANDLE ft_handle, ft_search *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, struct ft_cursor *ftcursor, bool can_bulk_fetch); diff --git a/storage/tokudb/ft-index/ft/ft-ops.cc b/storage/tokudb/ft-index/ft/ft-ops.cc index 64b6b498c9a..bf845d2c38d 100644 --- a/storage/tokudb/ft-index/ft/ft-ops.cc +++ b/storage/tokudb/ft-index/ft/ft-ops.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -168,7 +168,7 @@ Split_or_merge (node, childnum) { return; If the child needs to be merged (it's a leaf with too little stuff (less than 1/4 full) or a nonleaf with too little fanout (less than 1/4) fetch node, the child and a sibling of the child into main memory. - move all messages from the node to the two children (so that the FIFOs are empty) + move all messages from the node to the two children (so that the message buffers are empty) If the two siblings together fit into one node then merge the two siblings. 
fixup the node to point at one child @@ -200,20 +200,24 @@ basement nodes, bulk fetch, and partial fetch: */ -#include "checkpoint.h" -#include "ft.h" -#include "ft-cachetable-wrappers.h" -#include "ft-flusher.h" -#include "ft-internal.h" -#include "ft_layout_version.h" -#include "key.h" -#include "log-internal.h" -#include "sub_block.h" -#include "txn_manager.h" -#include "leafentry.h" -#include "xids.h" -#include "ft_msg.h" -#include "ule.h" +#include "ft/cachetable/checkpoint.h" +#include "ft/cursor.h" +#include "ft/ft.h" +#include "ft/ft-cachetable-wrappers.h" +#include "ft/ft-flusher.h" +#include "ft/ft-internal.h" +#include "ft/msg.h" +#include "ft/leafentry.h" +#include "ft/logger/log-internal.h" +#include "ft/node.h" +#include "ft/serialize/block_table.h" +#include "ft/serialize/sub_block.h" +#include "ft/serialize/ft-serialize.h" +#include "ft/serialize/ft_layout_version.h" +#include "ft/serialize/ft_node-serialize.h" +#include "ft/txn/txn_manager.h" +#include "ft/ule.h" +#include "ft/txn/xids.h" #include <toku_race_tools.h> @@ -233,7 +237,7 @@ basement nodes, bulk fetch, and partial fetch: */ static FT_STATUS_S ft_status; -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ft_status, k, c, t, "ft: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ft_status, k, c, t, "ft: " l, inc) static toku_mutex_t ft_open_close_lock; @@ -367,6 +371,9 @@ status_init(void) STATUS_INIT(FT_PRO_NUM_DIDNT_WANT_PROMOTE, PROMOTION_STOPPED_AFTER_LOCKING_CHILD, PARCOUNT, "promotion: stopped anyway, after locking the child", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(FT_BASEMENT_DESERIALIZE_FIXED_KEYSIZE, BASEMENT_DESERIALIZATION_FIXED_KEY, PARCOUNT, "basement nodes deserialized with fixed-keysize", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); STATUS_INIT(FT_BASEMENT_DESERIALIZE_VARIABLE_KEYSIZE, BASEMENT_DESERIALIZATION_VARIABLE_KEY, PARCOUNT, "basement nodes deserialized with variable-keysize", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS, nullptr, PARCOUNT, "promotion: succeeded in using the rightmost leaf shortcut", TOKU_ENGINE_STATUS); + STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS, nullptr, PARCOUNT, "promotion: tried the rightmost leaf shorcut but failed (out-of-bounds)", TOKU_ENGINE_STATUS); + STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE,nullptr, PARCOUNT, "promotion: tried the rightmost leaf shorcut but failed (child reactive)", TOKU_ENGINE_STATUS); ft_status.initialized = true; } @@ -419,6 +426,7 @@ toku_ft_get_status(FT_STATUS s) { } \ } while (0) + void toku_note_deserialized_basement_node(bool fixed_key_size) { if (fixed_key_size) { STATUS_INC(FT_BASEMENT_DESERIALIZE_FIXED_KEYSIZE, 1); @@ -427,172 +435,73 @@ void toku_note_deserialized_basement_node(bool fixed_key_size) { } } -bool is_entire_node_in_memory(FTNODE node) { - for (int i = 0; i < node->n_children; i++) { - if(BP_STATE(node,i) != PT_AVAIL) { - return false; - } - } - return true; -} - -void -toku_assert_entire_node_in_memory(FTNODE UU() node) { - paranoid_invariant(is_entire_node_in_memory(node)); -} - -uint32_t -get_leaf_num_entries(FTNODE node) { - uint32_t result = 0; - int i; - toku_assert_entire_node_in_memory(node); - for ( i = 0; i < node->n_children; i++) { - result += BLB_DATA(node, i)->num_klpairs(); - } - return result; +static void ft_verify_flags(FT UU(ft), FTNODE UU(node)) { + paranoid_invariant(ft->h->flags == node->flags); } -static enum reactivity -get_leaf_reactivity (FTNODE node, uint32_t nodesize) { - enum reactivity re = 
RE_STABLE; - toku_assert_entire_node_in_memory(node); - paranoid_invariant(node->height==0); - unsigned int size = toku_serialize_ftnode_size(node); - if (size > nodesize && get_leaf_num_entries(node) > 1) { - re = RE_FISSIBLE; - } - else if ((size*4) < nodesize && !BLB_SEQINSERT(node, node->n_children-1)) { - re = RE_FUSIBLE; - } - return re; -} +int toku_ft_debug_mode = 0; -enum reactivity -get_nonleaf_reactivity(FTNODE node, unsigned int fanout) { +uint32_t compute_child_fullhash (CACHEFILE cf, FTNODE node, int childnum) { paranoid_invariant(node->height>0); - int n_children = node->n_children; - if (n_children > (int) fanout) return RE_FISSIBLE; - if (n_children*4 < (int) fanout) return RE_FUSIBLE; - return RE_STABLE; + paranoid_invariant(childnum<node->n_children); + return toku_cachetable_hash(cf, BP_BLOCKNUM(node, childnum)); } -enum reactivity -get_node_reactivity(FT ft, FTNODE node) { - toku_assert_entire_node_in_memory(node); - if (node->height==0) - return get_leaf_reactivity(node, ft->h->nodesize); - else - return get_nonleaf_reactivity(node, ft->h->fanout); -} +// +// pivot bounds +// TODO: move me to ft/node.cc? +// -unsigned int -toku_bnc_nbytesinbuf(NONLEAF_CHILDINFO bnc) -{ - return toku_fifo_buffer_size_in_use(bnc->buffer); +pivot_bounds::pivot_bounds(const DBT &lbe_dbt, const DBT &ubi_dbt) : + _lower_bound_exclusive(lbe_dbt), _upper_bound_inclusive(ubi_dbt) { } -// return true if the size of the buffers plus the amount of work done is large enough. (But return false if there is nothing to be flushed (the buffers empty)). -bool -toku_ft_nonleaf_is_gorged (FTNODE node, uint32_t nodesize) { - uint64_t size = toku_serialize_ftnode_size(node); - - bool buffers_are_empty = true; - toku_assert_entire_node_in_memory(node); - // - // the nonleaf node is gorged if the following holds true: - // - the buffers are non-empty - // - the total workdone by the buffers PLUS the size of the buffers - // is greater than nodesize (which as of Maxwell should be - // 4MB) - // - paranoid_invariant(node->height > 0); - for (int child = 0; child < node->n_children; ++child) { - size += BP_WORKDONE(node, child); - } - for (int child = 0; child < node->n_children; ++child) { - if (toku_bnc_nbytesinbuf(BNC(node, child)) > 0) { - buffers_are_empty = false; - break; - } - } - return ((size > nodesize) - && - (!buffers_are_empty)); -} +pivot_bounds pivot_bounds::infinite_bounds() { + DBT dbt; + toku_init_dbt(&dbt); -static void ft_verify_flags(FT UU(ft), FTNODE UU(node)) { - paranoid_invariant(ft->h->flags == node->flags); + // infinity is represented by an empty dbt + invariant(toku_dbt_is_empty(&dbt)); + return pivot_bounds(dbt, dbt); } -int toku_ft_debug_mode = 0; - -uint32_t compute_child_fullhash (CACHEFILE cf, FTNODE node, int childnum) { - paranoid_invariant(node->height>0); - paranoid_invariant(childnum<node->n_children); - return toku_cachetable_hash(cf, BP_BLOCKNUM(node, childnum)); +const DBT *pivot_bounds::lbe() const { + return &_lower_bound_exclusive; } -int -toku_bnc_n_entries(NONLEAF_CHILDINFO bnc) -{ - return toku_fifo_n_entries(bnc->buffer); +const DBT *pivot_bounds::ubi() const { + return &_upper_bound_inclusive; } -static const DBT *prepivotkey (FTNODE node, int childnum, const DBT * const lower_bound_exclusive) { - if (childnum==0) - return lower_bound_exclusive; - else { - return &node->childkeys[childnum-1]; +DBT pivot_bounds::_prepivotkey(FTNODE node, int childnum, const DBT &lbe_dbt) const { + if (childnum == 0) { + return lbe_dbt; + } else { + return 
node->pivotkeys.get_pivot(childnum - 1); } } -static const DBT *postpivotkey (FTNODE node, int childnum, const DBT * const upper_bound_inclusive) { - if (childnum+1 == node->n_children) - return upper_bound_inclusive; - else { - return &node->childkeys[childnum]; +DBT pivot_bounds::_postpivotkey(FTNODE node, int childnum, const DBT &ubi_dbt) const { + if (childnum + 1 == node->n_children) { + return ubi_dbt; + } else { + return node->pivotkeys.get_pivot(childnum); } } -static struct pivot_bounds next_pivot_keys (FTNODE node, int childnum, struct pivot_bounds const * const old_pb) { - struct pivot_bounds pb = {.lower_bound_exclusive = prepivotkey(node, childnum, old_pb->lower_bound_exclusive), - .upper_bound_inclusive = postpivotkey(node, childnum, old_pb->upper_bound_inclusive)}; - return pb; -} -// how much memory does this child buffer consume? -long -toku_bnc_memory_size(NONLEAF_CHILDINFO bnc) -{ - return (sizeof(*bnc) + - toku_fifo_memory_footprint(bnc->buffer) + - bnc->fresh_message_tree.memory_size() + - bnc->stale_message_tree.memory_size() + - bnc->broadcast_list.memory_size()); +pivot_bounds pivot_bounds::next_bounds(FTNODE node, int childnum) const { + return pivot_bounds(_prepivotkey(node, childnum, _lower_bound_exclusive), + _postpivotkey(node, childnum, _upper_bound_inclusive)); } -// how much memory in this child buffer holds useful data? -// originally created solely for use by test program(s). -long -toku_bnc_memory_used(NONLEAF_CHILDINFO bnc) -{ - return (sizeof(*bnc) + - toku_fifo_memory_size_in_use(bnc->buffer) + - bnc->fresh_message_tree.memory_size() + - bnc->stale_message_tree.memory_size() + - bnc->broadcast_list.memory_size()); -} +//////////////////////////////////////////////////////////////////////////////// -static long -get_avail_internal_node_partition_size(FTNODE node, int i) -{ +static long get_avail_internal_node_partition_size(FTNODE node, int i) { paranoid_invariant(node->height > 0); return toku_bnc_memory_size(BNC(node, i)); } - -static long -ftnode_cachepressure_size(FTNODE node) -{ +static long ftnode_cachepressure_size(FTNODE node) { long retval = 0; bool totally_empty = true; if (node->height == 0) { @@ -633,8 +542,7 @@ ftnode_memory_size (FTNODE node) int n_children = node->n_children; retval += sizeof(*node); retval += (n_children)*(sizeof(node->bp[0])); - retval += (n_children > 0 ? n_children-1 : 0)*(sizeof(node->childkeys[0])); - retval += node->totalchildkeylens; + retval += node->pivotkeys.total_size(); // now calculate the sizes of the partitions for (int i = 0; i < n_children; i++) { @@ -700,42 +608,133 @@ next_dict_id(void) { return d; } -// -// Given a bfe and a childnum, returns whether the query that constructed the bfe -// wants the child available. 
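
The pivot_bounds class added above replaces the old prepivotkey/postpivotkey helpers: as the tree is descended, the interval of keys that can live under the chosen child is narrowed using the parent's pivots, with an empty DBT standing in for infinity at either edge. Below is a minimal standalone sketch of that narrowing rule, using plain integers and std::optional in place of DBTs and FTNODE; every name in it is illustrative, not a TokuFT type.

#include <cassert>
#include <optional>
#include <vector>

// std::nullopt plays the role of the "empty DBT" that represents -inf / +inf.
struct Bounds {
    std::optional<int> lower_exclusive;   // -inf when empty
    std::optional<int> upper_inclusive;   // +inf when empty
};

struct Node {
    std::vector<int> pivots;              // n_children - 1 pivot keys
    int n_children() const { return (int)pivots.size() + 1; }
};

// Mirrors pivot_bounds::next_bounds(): child 0 keeps the parent's lower bound,
// the last child keeps the parent's upper bound, every other child is
// bracketed by two adjacent pivots.
static Bounds next_bounds(const Node &node, int childnum, const Bounds &pb) {
    Bounds out;
    out.lower_exclusive = (childnum == 0) ? pb.lower_exclusive
                                          : std::optional<int>(node.pivots[childnum - 1]);
    out.upper_inclusive = (childnum + 1 == node.n_children()) ? pb.upper_inclusive
                                                              : std::optional<int>(node.pivots[childnum]);
    return out;
}

int main() {
    Node node{{10, 20, 30}};              // 4 children
    Bounds infinite{};                    // (-inf, +inf], like infinite_bounds()
    Bounds b1 = next_bounds(node, 1, infinite);
    assert(b1.lower_exclusive == 10 && b1.upper_inclusive == 20);  // keys in (10, 20]
    Bounds b3 = next_bounds(node, 3, infinite);
    assert(b3.lower_exclusive == 30 && !b3.upper_inclusive);       // keys above the last pivot
    return 0;
}
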
-// Requires: bfe->child_to_read to have been set -// -bool -toku_bfe_wants_child_available (struct ftnode_fetch_extra* bfe, int childnum) -{ - return bfe->type == ftnode_fetch_all || - (bfe->child_to_read == childnum && - (bfe->type == ftnode_fetch_subset || bfe->type == ftnode_fetch_keymatch)); +// TODO: This isn't so pretty +void ftnode_fetch_extra::_create_internal(FT ft_) { + ft = ft_; + type = ftnode_fetch_none; + search = nullptr; + + toku_init_dbt(&range_lock_left_key); + toku_init_dbt(&range_lock_right_key); + left_is_neg_infty = false; + right_is_pos_infty = false; + + // -1 means 'unknown', which is the correct default state + child_to_read = -1; + disable_prefetching = false; + read_all_partitions = false; + + bytes_read = 0; + io_time = 0; + deserialize_time = 0; + decompress_time = 0; } -int -toku_bfe_leftmost_child_wanted(struct ftnode_fetch_extra *bfe, FTNODE node) -{ - paranoid_invariant(bfe->type == ftnode_fetch_subset || bfe->type == ftnode_fetch_prefetch || bfe->type == ftnode_fetch_keymatch); - if (bfe->left_is_neg_infty) { +void ftnode_fetch_extra::create_for_full_read(FT ft_) { + _create_internal(ft_); + + type = ftnode_fetch_all; +} + +void ftnode_fetch_extra::create_for_keymatch(FT ft_, const DBT *left, const DBT *right, + bool disable_prefetching_, bool read_all_partitions_) { + _create_internal(ft_); + invariant(ft->h->type == FT_CURRENT); + + type = ftnode_fetch_keymatch; + if (left != nullptr) { + toku_copyref_dbt(&range_lock_left_key, *left); + } + if (right != nullptr) { + toku_copyref_dbt(&range_lock_right_key, *right); + } + left_is_neg_infty = left == nullptr; + right_is_pos_infty = right == nullptr; + disable_prefetching = disable_prefetching_; + read_all_partitions = read_all_partitions_; +} + +void ftnode_fetch_extra::create_for_subset_read(FT ft_, ft_search *search_, + const DBT *left, const DBT *right, + bool left_is_neg_infty_, bool right_is_pos_infty_, + bool disable_prefetching_, bool read_all_partitions_) { + _create_internal(ft_); + invariant(ft->h->type == FT_CURRENT); + + type = ftnode_fetch_subset; + search = search_; + if (left != nullptr) { + toku_copyref_dbt(&range_lock_left_key, *left); + } + if (right != nullptr) { + toku_copyref_dbt(&range_lock_right_key, *right); + } + left_is_neg_infty = left_is_neg_infty_; + right_is_pos_infty = right_is_pos_infty_; + disable_prefetching = disable_prefetching_; + read_all_partitions = read_all_partitions_; +} + +void ftnode_fetch_extra::create_for_min_read(FT ft_) { + _create_internal(ft_); + invariant(ft->h->type == FT_CURRENT); + + type = ftnode_fetch_none; +} + +void ftnode_fetch_extra::create_for_prefetch(FT ft_, struct ft_cursor *cursor) { + _create_internal(ft_); + invariant(ft->h->type == FT_CURRENT); + + type = ftnode_fetch_prefetch; + const DBT *left = &cursor->range_lock_left_key; + if (left->data) { + toku_clone_dbt(&range_lock_left_key, *left); + } + const DBT *right = &cursor->range_lock_right_key; + if (right->data) { + toku_clone_dbt(&range_lock_right_key, *right); + } + left_is_neg_infty = cursor->left_is_neg_infty; + right_is_pos_infty = cursor->right_is_pos_infty; + disable_prefetching = cursor->disable_prefetching; +} + +void ftnode_fetch_extra::destroy(void) { + toku_destroy_dbt(&range_lock_left_key); + toku_destroy_dbt(&range_lock_right_key); +} + +// Requires: child_to_read to have been set +bool ftnode_fetch_extra::wants_child_available(int childnum) const { + return type == ftnode_fetch_all || + (child_to_read == childnum && + (type == ftnode_fetch_subset || type == 
ftnode_fetch_keymatch)); +} + +int ftnode_fetch_extra::leftmost_child_wanted(FTNODE node) const { + paranoid_invariant(type == ftnode_fetch_subset || + type == ftnode_fetch_prefetch || + type == ftnode_fetch_keymatch); + if (left_is_neg_infty) { return 0; - } else if (bfe->range_lock_left_key.data == nullptr) { + } else if (range_lock_left_key.data == nullptr) { return -1; } else { - return toku_ftnode_which_child(node, &bfe->range_lock_left_key, &bfe->h->cmp_descriptor, bfe->h->compare_fun); + return toku_ftnode_which_child(node, &range_lock_left_key, ft->cmp); } } -int -toku_bfe_rightmost_child_wanted(struct ftnode_fetch_extra *bfe, FTNODE node) -{ - paranoid_invariant(bfe->type == ftnode_fetch_subset || bfe->type == ftnode_fetch_prefetch || bfe->type == ftnode_fetch_keymatch); - if (bfe->right_is_pos_infty) { +int ftnode_fetch_extra::rightmost_child_wanted(FTNODE node) const { + paranoid_invariant(type == ftnode_fetch_subset || + type == ftnode_fetch_prefetch || + type == ftnode_fetch_keymatch); + if (right_is_pos_infty) { return node->n_children - 1; - } else if (bfe->range_lock_right_key.data == nullptr) { + } else if (range_lock_right_key.data == nullptr) { return -1; } else { - return toku_ftnode_which_child(node, &bfe->range_lock_right_key, &bfe->h->cmp_descriptor, bfe->h->compare_fun); + return toku_ftnode_which_child(node, &range_lock_right_key, ft->cmp); } } @@ -747,7 +746,7 @@ ft_cursor_rightmost_child_wanted(FT_CURSOR cursor, FT_HANDLE ft_handle, FTNODE n } else if (cursor->range_lock_right_key.data == nullptr) { return -1; } else { - return toku_ftnode_which_child(node, &cursor->range_lock_right_key, &ft_handle->ft->cmp_descriptor, ft_handle->ft->compare_fun); + return toku_ftnode_which_child(node, &cursor->range_lock_right_key, ft_handle->ft->cmp); } } @@ -798,45 +797,6 @@ void toku_ft_status_update_flush_reason(FTNODE node, } } -static void ftnode_update_disk_stats( - FTNODE ftnode, - FT ft, - bool for_checkpoint - ) -{ - STAT64INFO_S deltas = ZEROSTATS; - // capture deltas before rebalancing basements for serialization - deltas = toku_get_and_clear_basement_stats(ftnode); - // locking not necessary here with respect to checkpointing - // in Clayface (because of the pending lock and cachetable lock - // in toku_cachetable_begin_checkpoint) - // essentially, if we are dealing with a for_checkpoint - // parameter in a function that is called by the flush_callback, - // then the cachetable needs to ensure that this is called in a safe - // manner that does not interfere with the beginning - // of a checkpoint, which it does with the cachetable lock - // and pending lock - toku_ft_update_stats(&ft->h->on_disk_stats, deltas); - if (for_checkpoint) { - toku_ft_update_stats(&ft->checkpoint_header->on_disk_stats, deltas); - } -} - -static void ftnode_clone_partitions(FTNODE node, FTNODE cloned_node) { - for (int i = 0; i < node->n_children; i++) { - BP_BLOCKNUM(cloned_node,i) = BP_BLOCKNUM(node,i); - paranoid_invariant(BP_STATE(node,i) == PT_AVAIL); - BP_STATE(cloned_node,i) = PT_AVAIL; - BP_WORKDONE(cloned_node, i) = BP_WORKDONE(node, i); - if (node->height == 0) { - set_BLB(cloned_node, i, toku_clone_bn(BLB(node,i))); - } - else { - set_BNC(cloned_node, i, toku_clone_nl(BNC(node,i))); - } - } -} - void toku_ftnode_checkpoint_complete_callback(void *value_data) { FTNODE node = static_cast<FTNODE>(value_data); if (node->height > 0) { @@ -860,20 +820,20 @@ void toku_ftnode_clone_callback( ) { FTNODE node = static_cast<FTNODE>(value_data); - toku_assert_entire_node_in_memory(node); + 
toku_ftnode_assert_fully_in_memory(node); FT ft = static_cast<FT>(write_extraargs); FTNODE XCALLOC(cloned_node); if (node->height == 0) { // set header stats, must be done before rebalancing - ftnode_update_disk_stats(node, ft, for_checkpoint); + toku_ftnode_update_disk_stats(node, ft, for_checkpoint); // rebalance the leaf node - rebalance_ftnode_leaf(node, ft->h->basementnodesize); + toku_ftnode_leaf_rebalance(node, ft->h->basementnodesize); } cloned_node->oldest_referenced_xid_known = node->oldest_referenced_xid_known; cloned_node->max_msn_applied_to_node_on_disk = node->max_msn_applied_to_node_on_disk; cloned_node->flags = node->flags; - cloned_node->thisnodename = node->thisnodename; + cloned_node->blocknum = node->blocknum; cloned_node->layout_version = node->layout_version; cloned_node->layout_version_original = node->layout_version_original; cloned_node->layout_version_read_from_disk = node->layout_version_read_from_disk; @@ -882,16 +842,17 @@ void toku_ftnode_clone_callback( cloned_node->dirty = node->dirty; cloned_node->fullhash = node->fullhash; cloned_node->n_children = node->n_children; - cloned_node->totalchildkeylens = node->totalchildkeylens; - XMALLOC_N(node->n_children-1, cloned_node->childkeys); XMALLOC_N(node->n_children, cloned_node->bp); // clone pivots - for (int i = 0; i < node->n_children-1; i++) { - toku_clone_dbt(&cloned_node->childkeys[i], node->childkeys[i]); + cloned_node->pivotkeys.create_from_pivot_keys(node->pivotkeys); + if (node->height > 0) { + // need to move messages here so that we don't serialize stale + // messages to the fresh tree - ft verify code complains otherwise. + toku_move_ftnode_messages_to_stale(ft, node); } // clone partition - ftnode_clone_partitions(node, cloned_node); + toku_ftnode_clone_partitions(node, cloned_node); // clear dirty bit node->dirty = 0; @@ -908,12 +869,10 @@ void toku_ftnode_clone_callback( *cloned_value_data = cloned_node; } -static void ft_leaf_run_gc(FT ft, FTNODE node); - void toku_ftnode_flush_callback( CACHEFILE UU(cachefile), int fd, - BLOCKNUM nodename, + BLOCKNUM blocknum, void *ftnode_v, void** disk_data, void *extraargs, @@ -925,20 +884,23 @@ void toku_ftnode_flush_callback( bool is_clone ) { - FT h = (FT) extraargs; + FT ft = (FT) extraargs; FTNODE ftnode = (FTNODE) ftnode_v; FTNODE_DISK_DATA* ndd = (FTNODE_DISK_DATA*)disk_data; - assert(ftnode->thisnodename.b==nodename.b); + assert(ftnode->blocknum.b == blocknum.b); int height = ftnode->height; if (write_me) { - toku_assert_entire_node_in_memory(ftnode); - if (height == 0) { - ft_leaf_run_gc(h, ftnode); - } - if (height == 0 && !is_clone) { - ftnode_update_disk_stats(ftnode, h, for_checkpoint); + toku_ftnode_assert_fully_in_memory(ftnode); + if (height > 0 && !is_clone) { + // cloned nodes already had their stale messages moved, see toku_ftnode_clone_callback() + toku_move_ftnode_messages_to_stale(ft, ftnode); + } else if (height == 0) { + toku_ftnode_leaf_run_gc(ft, ftnode); + if (!is_clone) { + toku_ftnode_update_disk_stats(ftnode, ft, for_checkpoint); + } } - int r = toku_serialize_ftnode_to(fd, ftnode->thisnodename, ftnode, ndd, !is_clone, h, for_checkpoint); + int r = toku_serialize_ftnode_to(fd, ftnode->blocknum, ftnode, ndd, !is_clone, ft, for_checkpoint); assert_zero(r); ftnode->layout_version_read_from_disk = FT_LAYOUT_VERSION; } @@ -959,7 +921,7 @@ void toku_ftnode_flush_callback( for (int i = 0; i < ftnode->n_children; i++) { if (BP_STATE(ftnode,i) == PT_AVAIL) { BASEMENTNODE bn = BLB(ftnode, i); - toku_ft_decrease_stats(&h->in_memory_stats, 
bn->stat64_delta); + toku_ft_decrease_stats(&ft->in_memory_stats, bn->stat64_delta); } } } @@ -972,7 +934,7 @@ void toku_ftnode_flush_callback( } void -toku_ft_status_update_pivot_fetch_reason(struct ftnode_fetch_extra *bfe) +toku_ft_status_update_pivot_fetch_reason(ftnode_fetch_extra *bfe) { if (bfe->type == ftnode_fetch_prefetch) { STATUS_INC(FT_NUM_PIVOTS_FETCHED_PREFETCH, 1); @@ -989,17 +951,17 @@ toku_ft_status_update_pivot_fetch_reason(struct ftnode_fetch_extra *bfe) } } -int toku_ftnode_fetch_callback (CACHEFILE UU(cachefile), PAIR p, int fd, BLOCKNUM nodename, uint32_t fullhash, +int toku_ftnode_fetch_callback (CACHEFILE UU(cachefile), PAIR p, int fd, BLOCKNUM blocknum, uint32_t fullhash, void **ftnode_pv, void** disk_data, PAIR_ATTR *sizep, int *dirtyp, void *extraargs) { assert(extraargs); assert(*ftnode_pv == NULL); FTNODE_DISK_DATA* ndd = (FTNODE_DISK_DATA*)disk_data; - struct ftnode_fetch_extra *bfe = (struct ftnode_fetch_extra *)extraargs; + ftnode_fetch_extra *bfe = (ftnode_fetch_extra *)extraargs; FTNODE *node=(FTNODE*)ftnode_pv; // deserialize the node, must pass the bfe in because we cannot // evaluate what piece of the the node is necessary until we get it at // least partially into memory - int r = toku_deserialize_ftnode_from(fd, nodename, fullhash, node, ndd, bfe); + int r = toku_deserialize_ftnode_from(fd, blocknum, fullhash, node, ndd, bfe); if (r != 0) { if (r == TOKUDB_BAD_CHECKSUM) { fprintf(stderr, @@ -1080,10 +1042,7 @@ exit: } // replace the child buffer with a compressed version of itself. -// @return the old child buffer -static NONLEAF_CHILDINFO -compress_internal_node_partition(FTNODE node, int i, enum toku_compression_method compression_method) -{ +static void compress_internal_node_partition(FTNODE node, int i, enum toku_compression_method compression_method) { // if we should evict, compress the // message buffer into a sub_block assert(BP_STATE(node, i) == PT_AVAIL); @@ -1092,29 +1051,9 @@ compress_internal_node_partition(FTNODE node, int i, enum toku_compression_metho sub_block_init(sb); toku_create_compressed_partition_from_available(node, i, compression_method, sb); - // now set the state to compressed and return the old, available partition - NONLEAF_CHILDINFO bnc = BNC(node, i); + // now set the state to compressed set_BSB(node, i, sb); BP_STATE(node,i) = PT_COMPRESSED; - return bnc; -} - -void toku_evict_bn_from_memory(FTNODE node, int childnum, FT h) { - // free the basement node - assert(!node->dirty); - BASEMENTNODE bn = BLB(node, childnum); - toku_ft_decrease_stats(&h->in_memory_stats, bn->stat64_delta); - destroy_basement_node(bn); - set_BNULL(node, childnum); - BP_STATE(node, childnum) = PT_ON_DISK; -} - -BASEMENTNODE toku_detach_bn(FTNODE node, int childnum) { - assert(BP_STATE(node, childnum) == PT_AVAIL); - BASEMENTNODE bn = BLB(node, childnum); - set_BNULL(node, childnum); - BP_STATE(node, childnum) = PT_ON_DISK; - return bn; } // callback for partially evicting a node @@ -1149,18 +1088,27 @@ int toku_ftnode_pe_callback(void *ftnode_pv, PAIR_ATTR old_attr, void *write_ext for (int i = 0; i < node->n_children; i++) { if (BP_STATE(node,i) == PT_AVAIL) { if (BP_SHOULD_EVICT(node,i)) { - NONLEAF_CHILDINFO bnc; - if (ft_compress_buffers_before_eviction) { - // When partially evicting, always compress with quicklz - bnc = compress_internal_node_partition( + NONLEAF_CHILDINFO bnc = BNC(node, i); + if (ft_compress_buffers_before_eviction && + // We may not serialize and compress a partition in memory if its + // in memory layout version is 
different than what's on disk (and + // therefore requires upgrade). + // + // Auto-upgrade code assumes that if a node's layout version read + // from disk is not current, it MUST require upgrade. Breaking + // this rule would cause upgrade code to upgrade this partition + // again after we serialize it as the current version, which is bad. + node->layout_version == node->layout_version_read_from_disk) { + toku_ft_bnc_move_messages_to_stale(ft, bnc); + compress_internal_node_partition( node, i, + // Always compress with quicklz TOKU_QUICKLZ_METHOD ); } else { // We're not compressing buffers before eviction. Simply // detach the buffer and set the child's state to on-disk. - bnc = BNC(node, i); set_BNULL(node, i); BP_STATE(node, i) = PT_ON_DISK; } @@ -1268,7 +1216,7 @@ bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) { // placeholder for now bool retval = false; FTNODE node = (FTNODE) ftnode_pv; - struct ftnode_fetch_extra *bfe = (struct ftnode_fetch_extra *) read_extraargs; + ftnode_fetch_extra *bfe = (ftnode_fetch_extra *) read_extraargs; // // The three types of fetches that the ft layer may request are: // - ftnode_fetch_none: no partitions are necessary (example use: stat64) @@ -1298,11 +1246,9 @@ bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) { // we can possibly require is a single basement node // we find out what basement node the query cares about // and check if it is available - paranoid_invariant(bfe->h->compare_fun); paranoid_invariant(bfe->search); bfe->child_to_read = toku_ft_search_which_child( - &bfe->h->cmp_descriptor, - bfe->h->compare_fun, + bfe->ft->cmp, node, bfe->search ); @@ -1314,8 +1260,8 @@ bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) { // makes no sense to have prefetching disabled // and still call this function paranoid_invariant(!bfe->disable_prefetching); - int lc = toku_bfe_leftmost_child_wanted(bfe, node); - int rc = toku_bfe_rightmost_child_wanted(bfe, node); + int lc = bfe->leftmost_child_wanted(node); + int rc = bfe->rightmost_child_wanted(node); for (int i = lc; i <= rc; ++i) { if (BP_STATE(node, i) != PT_AVAIL) { retval = true; @@ -1327,10 +1273,9 @@ bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) { // we can possibly require is a single basement node // we find out what basement node the query cares about // and check if it is available - paranoid_invariant(bfe->h->compare_fun); if (node->height == 0) { - int left_child = toku_bfe_leftmost_child_wanted(bfe, node); - int right_child = toku_bfe_rightmost_child_wanted(bfe, node); + int left_child = bfe->leftmost_child_wanted(node); + int right_child = bfe->rightmost_child_wanted(node); if (left_child == right_child) { bfe->child_to_read = left_child; unsafe_touch_clock(node,bfe->child_to_read); @@ -1347,7 +1292,7 @@ bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) { static void ft_status_update_partial_fetch_reason( - struct ftnode_fetch_extra* bfe, + ftnode_fetch_extra *bfe, int childnum, enum pt_state state, bool is_leaf @@ -1446,13 +1391,41 @@ void toku_ft_status_update_deserialize_times(FTNODE node, tokutime_t deserialize } } +void toku_ft_status_note_msn_discard(void) { + STATUS_INC(FT_MSN_DISCARDS, 1); +} + +void toku_ft_status_note_update(bool broadcast) { + if (broadcast) { + STATUS_INC(FT_UPDATES_BROADCAST, 1); + } else { + STATUS_INC(FT_UPDATES, 1); + } +} + +void toku_ft_status_note_msg_bytes_out(size_t buffsize) { + STATUS_INC(FT_MSG_BYTES_OUT, buffsize); + 
STATUS_INC(FT_MSG_BYTES_CURR, -buffsize); +} +void toku_ft_status_note_ftnode(int height, bool created) { + if (created) { + if (height == 0) { + STATUS_INC(FT_CREATE_LEAF, 1); + } else { + STATUS_INC(FT_CREATE_NONLEAF, 1); + } + } else { + // created = false means destroyed + } +} + // callback for partially reading a node // could have just used toku_ftnode_fetch_callback, but wanted to separate the two cases to separate functions int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraargs, int fd, PAIR_ATTR* sizep) { int r = 0; FTNODE node = (FTNODE) ftnode_pv; FTNODE_DISK_DATA ndd = (FTNODE_DISK_DATA) disk_data; - struct ftnode_fetch_extra *bfe = (struct ftnode_fetch_extra *) read_extraargs; + ftnode_fetch_extra *bfe = (ftnode_fetch_extra *) read_extraargs; // there must be a reason this is being called. If we get a garbage type or the type is ftnode_fetch_none, // then something went wrong assert((bfe->type == ftnode_fetch_subset) || (bfe->type == ftnode_fetch_all) || (bfe->type == ftnode_fetch_prefetch) || (bfe->type == ftnode_fetch_keymatch)); @@ -1462,8 +1435,8 @@ int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraar (bfe->type == ftnode_fetch_subset || bfe->type == ftnode_fetch_prefetch) ) { - lc = toku_bfe_leftmost_child_wanted(bfe, node); - rc = toku_bfe_rightmost_child_wanted(bfe, node); + lc = bfe->leftmost_child_wanted(node); + rc = bfe->rightmost_child_wanted(node); } else { lc = -1; rc = -1; @@ -1472,7 +1445,7 @@ int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraar if (BP_STATE(node,i) == PT_AVAIL) { continue; } - if ((lc <= i && i <= rc) || toku_bfe_wants_child_available(bfe, i)) { + if ((lc <= i && i <= rc) || bfe->wants_child_available(i)) { enum pt_state state = BP_STATE(node, i); if (state == PT_COMPRESSED) { r = toku_deserialize_bp_from_compressed(node, i, bfe); @@ -1487,7 +1460,7 @@ int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraar if (r == TOKUDB_BAD_CHECKSUM) { fprintf(stderr, "Checksum failure while reading node partition in file %s.\n", - toku_cachefile_fname_in_env(bfe->h->cf)); + toku_cachefile_fname_in_env(bfe->ft->cf)); } else { fprintf(stderr, "Error while reading node partition %d\n", @@ -1502,118 +1475,8 @@ int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraar return 0; } -struct msg_leafval_heaviside_extra { - ft_compare_func compare_fun; - DESCRIPTOR desc; - DBT const * const key; -}; - -//TODO: #1125 optimize -static int -toku_msg_leafval_heaviside(DBT const &kdbt, const struct msg_leafval_heaviside_extra &be) { - FAKE_DB(db, be.desc); - DBT const * const key = be.key; - return be.compare_fun(&db, &kdbt, key); -} - -static int -ft_compare_pivot(DESCRIPTOR desc, ft_compare_func cmp, const DBT *key, const DBT *pivot) -{ - int r; - FAKE_DB(db, desc); - r = cmp(&db, key, pivot); - return r; -} - - -// destroys the internals of the ftnode, but it does not free the values -// that are stored -// this is common functionality for toku_ftnode_free and rebalance_ftnode_leaf -// MUST NOT do anything besides free the structures that have been allocated -void toku_destroy_ftnode_internals(FTNODE node) -{ - for (int i=0; i<node->n_children-1; i++) { - toku_destroy_dbt(&node->childkeys[i]); - } - toku_free(node->childkeys); - node->childkeys = NULL; - - for (int i=0; i < node->n_children; i++) { - if (BP_STATE(node,i) == PT_AVAIL) { - if (node->height > 0) { - destroy_nonleaf_childinfo(BNC(node,i)); - } else { - 
destroy_basement_node(BLB(node, i)); - } - } else if (BP_STATE(node,i) == PT_COMPRESSED) { - SUB_BLOCK sb = BSB(node,i); - toku_free(sb->compressed_ptr); - toku_free(sb); - } else { - paranoid_invariant(is_BNULL(node, i)); - } - set_BNULL(node, i); - } - toku_free(node->bp); - node->bp = NULL; -} - -/* Frees a node, including all the stuff in the hash table. */ -void toku_ftnode_free(FTNODE *nodep) { - FTNODE node = *nodep; - if (node->height == 0) { - STATUS_INC(FT_DESTROY_LEAF, 1); - } else { - STATUS_INC(FT_DESTROY_NONLEAF, 1); - } - toku_destroy_ftnode_internals(node); - toku_free(node); - *nodep = nullptr; -} - -void -toku_initialize_empty_ftnode (FTNODE n, BLOCKNUM nodename, int height, int num_children, int layout_version, unsigned int flags) -// Effect: Fill in N as an empty ftnode. -{ - paranoid_invariant(layout_version != 0); - paranoid_invariant(height >= 0); - - if (height == 0) { - STATUS_INC(FT_CREATE_LEAF, 1); - } else { - STATUS_INC(FT_CREATE_NONLEAF, 1); - } - - n->max_msn_applied_to_node_on_disk = ZERO_MSN; // correct value for root node, harmless for others - n->flags = flags; - n->thisnodename = nodename; - n->layout_version = layout_version; - n->layout_version_original = layout_version; - n->layout_version_read_from_disk = layout_version; - n->height = height; - n->totalchildkeylens = 0; - n->childkeys = 0; - n->bp = 0; - n->n_children = num_children; - n->oldest_referenced_xid_known = TXNID_NONE; - - if (num_children > 0) { - XMALLOC_N(num_children-1, n->childkeys); - XMALLOC_N(num_children, n->bp); - for (int i = 0; i < num_children; i++) { - BP_BLOCKNUM(n,i).b=0; - BP_STATE(n,i) = PT_INVALID; - BP_WORKDONE(n,i) = 0; - BP_INIT_TOUCHED_CLOCK(n, i); - set_BNULL(n,i); - if (height > 0) { - set_BNC(n, i, toku_create_empty_nl()); - } else { - set_BLB(n, i, toku_create_empty_bn()); - } - } - } - n->dirty = 1; // special case exception, it's okay to mark as dirty because the basements are empty +int toku_msg_leafval_heaviside(DBT const &kdbt, const struct toku_msg_leafval_heaviside_extra &be) { + return be.cmp(&kdbt, be.key); } static void @@ -1624,14 +1487,12 @@ ft_init_new_root(FT ft, FTNODE oldroot, FTNODE *newrootp) { FTNODE newroot; - BLOCKNUM old_blocknum = oldroot->thisnodename; + BLOCKNUM old_blocknum = oldroot->blocknum; uint32_t old_fullhash = oldroot->fullhash; - PAIR old_pair = oldroot->ct_pair; int new_height = oldroot->height+1; uint32_t new_fullhash; BLOCKNUM new_blocknum; - PAIR new_pair = NULL; cachetable_put_empty_node_with_dep_nodes( ft, @@ -1641,7 +1502,6 @@ ft_init_new_root(FT ft, FTNODE oldroot, FTNODE *newrootp) &new_fullhash, &newroot ); - new_pair = newroot->ct_pair; assert(newroot); assert(new_height > 0); @@ -1653,22 +1513,18 @@ ft_init_new_root(FT ft, FTNODE oldroot, FTNODE *newrootp) ft->h->layout_version, ft->h->flags ); + newroot->fullhash = new_fullhash; MSN msna = oldroot->max_msn_applied_to_node_on_disk; newroot->max_msn_applied_to_node_on_disk = msna; BP_STATE(newroot,0) = PT_AVAIL; newroot->dirty = 1; - // now do the "switcheroo" - BP_BLOCKNUM(newroot,0) = new_blocknum; - newroot->thisnodename = old_blocknum; - newroot->fullhash = old_fullhash; - newroot->ct_pair = old_pair; - - oldroot->thisnodename = new_blocknum; - oldroot->fullhash = new_fullhash; - oldroot->ct_pair = new_pair; - - toku_cachetable_swap_pair_values(old_pair, new_pair); + // Set the first child to have the new blocknum, + // and then swap newroot with oldroot. The new root + // will inherit the hash/blocknum/pair from oldroot, + // keeping the root blocknum constant. 
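
The rewritten ft_init_new_root in the hunk below drops the hand-rolled blocknum/fullhash/ct_pair "switcheroo" in favor of a single toku_ftnode_swap_pair_values() call, but the invariant is the same: the root's block number never changes; the freshly created node takes over the root's identity and the old root moves under the new blocknum as child 0. The toy model that follows shows why swapping contents (rather than re-registering blocknums) keeps the root handle stable; the map-based "blocktable" and node layout are made up for illustration.

#include <cassert>
#include <map>
#include <memory>
#include <utility>

// Toy node: just a height and (optionally) the blocknum of its first child.
struct Node {
    int height = 0;
    int child_blocknum = -1;   // -1 == no child
};

int main() {
    std::map<int, std::shared_ptr<Node>> blocktable;       // toy stand-in for the cachetable
    const int root_blocknum = 1;                           // recorded in the header; must never change
    blocktable[root_blocknum] = std::make_shared<Node>();  // old root, a leaf of height 0

    // Make the would-be new root at a freshly allocated blocknum, pointing at
    // that same fresh blocknum as its first child ...
    const int new_blocknum = 2;
    blocktable[new_blocknum] = std::make_shared<Node>();
    blocktable[new_blocknum]->height = blocktable[root_blocknum]->height + 1;
    blocktable[new_blocknum]->child_blocknum = new_blocknum;

    // ... then swap the *contents* of the two cached objects (the role played
    // by toku_ftnode_swap_pair_values): the object at root_blocknum becomes
    // the taller new root, and the old root's contents end up where child 0
    // points, so the header's root blocknum never has to change.
    std::swap(*blocktable[root_blocknum], *blocktable[new_blocknum]);

    assert(blocktable[root_blocknum]->height == 1);
    assert(blocktable[root_blocknum]->child_blocknum == new_blocknum);
    assert(blocktable[new_blocknum]->height == 0);
    return 0;
}
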
+ BP_BLOCKNUM(newroot, 0) = new_blocknum; + toku_ftnode_swap_pair_values(newroot, oldroot); toku_ft_split_child( ft, @@ -1681,8 +1537,8 @@ ft_init_new_root(FT ft, FTNODE oldroot, FTNODE *newrootp) // ft_split_child released locks on newroot // and oldroot, so now we repin and // return to caller - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); toku_pin_ftnode( ft, old_blocknum, @@ -1694,1005 +1550,11 @@ ft_init_new_root(FT ft, FTNODE oldroot, FTNODE *newrootp) ); } -static void -init_childinfo(FTNODE node, int childnum, FTNODE child) { - BP_BLOCKNUM(node,childnum) = child->thisnodename; - BP_STATE(node,childnum) = PT_AVAIL; - BP_WORKDONE(node, childnum) = 0; - set_BNC(node, childnum, toku_create_empty_nl()); -} - -static void -init_childkey(FTNODE node, int childnum, const DBT *pivotkey) { - toku_clone_dbt(&node->childkeys[childnum], *pivotkey); - node->totalchildkeylens += pivotkey->size; -} - -// Used only by test programs: append a child node to a parent node -void -toku_ft_nonleaf_append_child(FTNODE node, FTNODE child, const DBT *pivotkey) { - int childnum = node->n_children; - node->n_children++; - XREALLOC_N(node->n_children, node->bp); - init_childinfo(node, childnum, child); - XREALLOC_N(node->n_children-1, node->childkeys); - if (pivotkey) { - invariant(childnum > 0); - init_childkey(node, childnum-1, pivotkey); - } - node->dirty = 1; -} - -void -toku_ft_bn_apply_msg_once ( - BASEMENTNODE bn, - const FT_MSG msg, - uint32_t idx, - LEAFENTRY le, - txn_gc_info *gc_info, - uint64_t *workdone, - STAT64INFO stats_to_update - ) -// Effect: Apply msg to leafentry (msn is ignored) -// Calculate work done by message on leafentry and add it to caller's workdone counter. -// idx is the location where it goes -// le is old leafentry -{ - size_t newsize=0, oldsize=0, workdone_this_le=0; - LEAFENTRY new_le=0; - int64_t numbytes_delta = 0; // how many bytes of user data (not including overhead) were added or deleted from this row - int64_t numrows_delta = 0; // will be +1 or -1 or 0 (if row was added or deleted or not) - uint32_t key_storage_size = ft_msg_get_keylen(msg) + sizeof(uint32_t); - if (le) { - oldsize = leafentry_memsize(le) + key_storage_size; - } - - // toku_le_apply_msg() may call bn_data::mempool_malloc_and_update_dmt() to allocate more space. - // That means le is guaranteed to not cause a sigsegv but it may point to a mempool that is - // no longer in use. We'll have to release the old mempool later. - toku_le_apply_msg( - msg, - le, - &bn->data_buffer, - idx, - gc_info, - &new_le, - &numbytes_delta - ); - // at this point, we cannot trust cmd->u.id.key to be valid. - // The dmt may have realloced its mempool and freed the one containing key. - - newsize = new_le ? (leafentry_memsize(new_le) + + key_storage_size) : 0; - if (le && new_le) { - workdone_this_le = (oldsize > newsize ? oldsize : newsize); // work done is max of le size before and after message application - - } else { // we did not just replace a row, so ... - if (le) { - // ... we just deleted a row ... - workdone_this_le = oldsize; - numrows_delta = -1; - } - if (new_le) { - // ... 
or we just added a row - workdone_this_le = newsize; - numrows_delta = 1; - } - } - if (workdone) { // test programs may call with NULL - *workdone += workdone_this_le; - } - - // now update stat64 statistics - bn->stat64_delta.numrows += numrows_delta; - bn->stat64_delta.numbytes += numbytes_delta; - // the only reason stats_to_update may be null is for tests - if (stats_to_update) { - stats_to_update->numrows += numrows_delta; - stats_to_update->numbytes += numbytes_delta; - } - -} - -static const uint32_t setval_tag = 0xee0ccb99; // this was gotten by doing "cat /dev/random|head -c4|od -x" to get a random number. We want to make sure that the user actually passes us the setval_extra_s that we passed in. -struct setval_extra_s { - uint32_t tag; - bool did_set_val; - int setval_r; // any error code that setval_fun wants to return goes here. - // need arguments for toku_ft_bn_apply_msg_once - BASEMENTNODE bn; - MSN msn; // captured from original message, not currently used - XIDS xids; - const DBT *key; - uint32_t idx; - LEAFENTRY le; - txn_gc_info *gc_info; - uint64_t * workdone; // set by toku_ft_bn_apply_msg_once() - STAT64INFO stats_to_update; -}; - -/* - * If new_val == NULL, we send a delete message instead of an insert. - * This happens here instead of in do_delete() for consistency. - * setval_fun() is called from handlerton, passing in svextra_v - * from setval_extra_s input arg to ft->update_fun(). - */ -static void setval_fun (const DBT *new_val, void *svextra_v) { - struct setval_extra_s *CAST_FROM_VOIDP(svextra, svextra_v); - paranoid_invariant(svextra->tag==setval_tag); - paranoid_invariant(!svextra->did_set_val); - svextra->did_set_val = true; - - { - // can't leave scope until toku_ft_bn_apply_msg_once if - // this is a delete - DBT val; - FT_MSG_S msg = { FT_NONE, svextra->msn, svextra->xids, - .u = { .id = {svextra->key, NULL} } }; - if (new_val) { - msg.type = FT_INSERT; - msg.u.id.val = new_val; - } else { - msg.type = FT_DELETE_ANY; - toku_init_dbt(&val); - msg.u.id.val = &val; - } - toku_ft_bn_apply_msg_once(svextra->bn, &msg, - svextra->idx, svextra->le, - svextra->gc_info, - svextra->workdone, svextra->stats_to_update); - svextra->setval_r = 0; - } -} - -// We are already past the msn filter (in toku_ft_bn_apply_msg(), which calls do_update()), -// so capturing the msn in the setval_extra_s is not strictly required. The alternative -// would be to put a dummy msn in the messages created by setval_fun(), but preserving -// the original msn seems cleaner and it preserves accountability at a lower layer. 
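
The do_update()/setval_fun() machinery being removed here implements a read-modify-write callback protocol: the handlerton's update function is handed the key, the current value (if any) and an extra payload, and it reports its decision by calling a set_val callback, where a real value means insert and NULL means delete; the engine then applies that result as an ordinary message. The following is a simplified, self-contained sketch of the same callback shape, using std::map and std::optional instead of basement nodes and DBTs; all names are illustrative.

#include <cassert>
#include <functional>
#include <map>
#include <optional>
#include <string>

// Toy key/value store standing in for a basement node.
using Store = std::map<std::string, std::string>;

// set_val-style callback handed to the user's update function: a value means
// "insert this", nullopt means "delete the row".
using SetVal = std::function<void(const std::optional<std::string>&)>;

// Shape of a user update function (the role of ft->update_fun): it sees the
// key, the current value if any, and an extra payload, and decides what to
// write by calling set_val -- it never touches the store directly.
using UpdateFun = std::function<int(const std::string& key,
                                    const std::optional<std::string>& old_val,
                                    const std::string& extra,
                                    const SetVal& set_val)>;

// do_update-style driver: look up the old value, invoke the callback, and
// apply whatever it chose as a plain insert or delete.
static int apply_update(Store& store, const std::string& key,
                        const std::string& extra, const UpdateFun& update_fun) {
    std::optional<std::string> old_val;
    if (auto it = store.find(key); it != store.end()) old_val = it->second;
    SetVal set_val = [&](const std::optional<std::string>& new_val) {
        if (new_val) store[key] = *new_val;   // insert/overwrite
        else store.erase(key);                // delete
    };
    return update_fun(key, old_val, extra, set_val);
}

int main() {
    Store store{{"k", "1"}};
    // Example update function: append the extra to the old value, or delete if extra is empty.
    UpdateFun append_or_delete = [](const std::string&, const std::optional<std::string>& old_val,
                                    const std::string& extra, const SetVal& set_val) {
        if (extra.empty()) set_val(std::nullopt);
        else set_val(old_val.value_or("") + extra);
        return 0;
    };
    assert(apply_update(store, "k", "+2", append_or_delete) == 0);
    assert(store["k"] == "1+2");
    assert(apply_update(store, "k", "", append_or_delete) == 0);
    assert(store.count("k") == 0);
    return 0;
}
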
-static int do_update(ft_update_func update_fun, DESCRIPTOR desc, BASEMENTNODE bn, FT_MSG msg, uint32_t idx, - LEAFENTRY le, - void* keydata, - uint32_t keylen, - txn_gc_info *gc_info, - uint64_t * workdone, - STAT64INFO stats_to_update) { - LEAFENTRY le_for_update; - DBT key; - const DBT *keyp; - const DBT *update_function_extra; - DBT vdbt; - const DBT *vdbtp; - - // the location of data depends whether this is a regular or - // broadcast update - if (msg->type == FT_UPDATE) { - // key is passed in with command (should be same as from le) - // update function extra is passed in with command - STATUS_INC(FT_UPDATES, 1); - keyp = msg->u.id.key; - update_function_extra = msg->u.id.val; - } else if (msg->type == FT_UPDATE_BROADCAST_ALL) { - // key is not passed in with broadcast, it comes from le - // update function extra is passed in with command - paranoid_invariant(le); // for broadcast updates, we just hit all leafentries - // so this cannot be null - paranoid_invariant(keydata); - paranoid_invariant(keylen); - paranoid_invariant(msg->u.id.key->size == 0); - STATUS_INC(FT_UPDATES_BROADCAST, 1); - keyp = toku_fill_dbt(&key, keydata, keylen); - update_function_extra = msg->u.id.val; - } else { - abort(); - } - - if (le && !le_latest_is_del(le)) { - // if the latest val exists, use it, and we'll use the leafentry later - uint32_t vallen; - void *valp = le_latest_val_and_len(le, &vallen); - vdbtp = toku_fill_dbt(&vdbt, valp, vallen); - } else { - // otherwise, the val and leafentry are both going to be null - vdbtp = NULL; - } - le_for_update = le; - - struct setval_extra_s setval_extra = {setval_tag, false, 0, bn, msg->msn, msg->xids, - keyp, idx, le_for_update, gc_info, - workdone, stats_to_update}; - // call handlerton's ft->update_fun(), which passes setval_extra to setval_fun() - FAKE_DB(db, desc); - int r = update_fun( - &db, - keyp, - vdbtp, - update_function_extra, - setval_fun, &setval_extra - ); - - if (r == 0) { r = setval_extra.setval_r; } - return r; -} - -// Should be renamed as something like "apply_msg_to_basement()." -void -toku_ft_bn_apply_msg ( - ft_compare_func compare_fun, - ft_update_func update_fun, - DESCRIPTOR desc, - BASEMENTNODE bn, - FT_MSG msg, - txn_gc_info *gc_info, - uint64_t *workdone, - STAT64INFO stats_to_update - ) -// Effect: -// Put a msg into a leaf. -// Calculate work done by message on leafnode and add it to caller's workdone counter. -// The leaf could end up "too big" or "too small". The caller must fix that up. 
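
One detail worth calling out in the FT_INSERT case of the removed function body below is the sequential-insert heuristic: after applying the message, the basement node's seqinsert counter is bumped only if the insertion point fell within min(32, entries/16) slots of the right edge of the leaf. Here is a distilled version of that window arithmetic, with an illustrative function name.

#include <cassert>
#include <cstdint>

// Mirrors the window rule from the FT_INSERT case: an insert counts as
// "sequential" when its insertion point falls within
// min(32, max(1, entries/16)) slots of the right edge of the basement node.
static bool insert_looks_sequential(uint32_t num_entries_after, uint32_t insert_idx) {
    uint32_t w = num_entries_after / 16;
    if (w == 0) w = 1;
    if (w > 32) w = 32;
    return num_entries_after - insert_idx <= w;
}

int main() {
    // Appending at the very end is always sequential.
    assert(insert_looks_sequential(100, 99));
    // 100 entries -> window of 6: index 94 still counts, index 90 does not.
    assert(insert_looks_sequential(100, 94));
    assert(!insert_looks_sequential(100, 90));
    // Tiny nodes use a window of at least 1.
    assert(insert_looks_sequential(3, 2));
    return 0;
}
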
-{ - LEAFENTRY storeddata; - void* key = NULL; - uint32_t keylen = 0; - - uint32_t num_klpairs; - int r; - struct msg_leafval_heaviside_extra be = {compare_fun, desc, msg->u.id.key}; - - unsigned int doing_seqinsert = bn->seqinsert; - bn->seqinsert = 0; - - switch (msg->type) { - case FT_INSERT_NO_OVERWRITE: - case FT_INSERT: { - uint32_t idx; - if (doing_seqinsert) { - idx = bn->data_buffer.num_klpairs(); - DBT kdbt; - r = bn->data_buffer.fetch_key_and_len(idx-1, &kdbt.size, &kdbt.data); - if (r != 0) goto fz; - int cmp = toku_msg_leafval_heaviside(kdbt, be); - if (cmp >= 0) goto fz; - r = DB_NOTFOUND; - } else { - fz: - r = bn->data_buffer.find_zero<decltype(be), toku_msg_leafval_heaviside>( - be, - &storeddata, - &key, - &keylen, - &idx - ); - } - if (r==DB_NOTFOUND) { - storeddata = 0; - } else { - assert_zero(r); - } - toku_ft_bn_apply_msg_once(bn, msg, idx, storeddata, gc_info, workdone, stats_to_update); - - // if the insertion point is within a window of the right edge of - // the leaf then it is sequential - // window = min(32, number of leaf entries/16) - { - uint32_t s = bn->data_buffer.num_klpairs(); - uint32_t w = s / 16; - if (w == 0) w = 1; - if (w > 32) w = 32; - - // within the window? - if (s - idx <= w) - bn->seqinsert = doing_seqinsert + 1; - } - break; - } - case FT_DELETE_ANY: - case FT_ABORT_ANY: - case FT_COMMIT_ANY: { - uint32_t idx; - // Apply to all the matches - - r = bn->data_buffer.find_zero<decltype(be), toku_msg_leafval_heaviside>( - be, - &storeddata, - &key, - &keylen, - &idx - ); - if (r == DB_NOTFOUND) break; - assert_zero(r); - toku_ft_bn_apply_msg_once(bn, msg, idx, storeddata, gc_info, workdone, stats_to_update); - - break; - } - case FT_OPTIMIZE_FOR_UPGRADE: - // fall through so that optimize_for_upgrade performs rest of the optimize logic - case FT_COMMIT_BROADCAST_ALL: - case FT_OPTIMIZE: - // Apply to all leafentries - num_klpairs = bn->data_buffer.num_klpairs(); - for (uint32_t idx = 0; idx < num_klpairs; ) { - DBT curr_keydbt; - void* curr_keyp = NULL; - uint32_t curr_keylen = 0; - r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_keyp); - assert_zero(r); - toku_fill_dbt(&curr_keydbt, curr_keyp, curr_keylen); - // because this is a broadcast message, we need - // to fill the key in the message that we pass into toku_ft_bn_apply_msg_once - msg->u.id.key = &curr_keydbt; - int deleted = 0; - if (!le_is_clean(storeddata)) { //If already clean, nothing to do. - toku_ft_bn_apply_msg_once(bn, msg, idx, storeddata, gc_info, workdone, stats_to_update); - // at this point, we cannot trust msg->u.id.key to be valid. - uint32_t new_dmt_size = bn->data_buffer.num_klpairs(); - if (new_dmt_size != num_klpairs) { - paranoid_invariant(new_dmt_size + 1 == num_klpairs); - //Item was deleted. 
- deleted = 1; - } - } - if (deleted) - num_klpairs--; - else - idx++; - } - paranoid_invariant(bn->data_buffer.num_klpairs() == num_klpairs); - - break; - case FT_COMMIT_BROADCAST_TXN: - case FT_ABORT_BROADCAST_TXN: - // Apply to all leafentries if txn is represented - num_klpairs = bn->data_buffer.num_klpairs(); - for (uint32_t idx = 0; idx < num_klpairs; ) { - DBT curr_keydbt; - void* curr_keyp = NULL; - uint32_t curr_keylen = 0; - r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_keyp); - assert_zero(r); - toku_fill_dbt(&curr_keydbt, curr_keyp, curr_keylen); - // because this is a broadcast message, we need - // to fill the key in the message that we pass into toku_ft_bn_apply_msg_once - msg->u.id.key = &curr_keydbt; - int deleted = 0; - if (le_has_xids(storeddata, msg->xids)) { - toku_ft_bn_apply_msg_once(bn, msg, idx, storeddata, gc_info, workdone, stats_to_update); - uint32_t new_dmt_size = bn->data_buffer.num_klpairs(); - if (new_dmt_size != num_klpairs) { - paranoid_invariant(new_dmt_size + 1 == num_klpairs); - //Item was deleted. - deleted = 1; - } - } - if (deleted) - num_klpairs--; - else - idx++; - } - paranoid_invariant(bn->data_buffer.num_klpairs() == num_klpairs); - - break; - case FT_UPDATE: { - uint32_t idx; - r = bn->data_buffer.find_zero<decltype(be), toku_msg_leafval_heaviside>( - be, - &storeddata, - &key, - &keylen, - &idx - ); - if (r==DB_NOTFOUND) { - { - //Point to msg's copy of the key so we don't worry about le being freed - //TODO: 46 MAYBE Get rid of this when le_apply message memory is better handled - key = msg->u.id.key->data; - keylen = msg->u.id.key->size; - } - r = do_update(update_fun, desc, bn, msg, idx, NULL, NULL, 0, gc_info, workdone, stats_to_update); - } else if (r==0) { - r = do_update(update_fun, desc, bn, msg, idx, storeddata, key, keylen, gc_info, workdone, stats_to_update); - } // otherwise, a worse error, just return it - break; - } - case FT_UPDATE_BROADCAST_ALL: { - // apply to all leafentries. - uint32_t idx = 0; - uint32_t num_leafentries_before; - while (idx < (num_leafentries_before = bn->data_buffer.num_klpairs())) { - void* curr_key = nullptr; - uint32_t curr_keylen = 0; - r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_key); - assert_zero(r); - - //TODO: 46 replace this with something better than cloning key - // TODO: (Zardosht) This may be unnecessary now, due to how the key - // is handled in the bndata. Investigate and determine - char clone_mem[curr_keylen]; // only lasts one loop, alloca would overflow (end of function) - memcpy((void*)clone_mem, curr_key, curr_keylen); - curr_key = (void*)clone_mem; - - // This is broken below. Have a compilation error checked - // in as a reminder - r = do_update(update_fun, desc, bn, msg, idx, storeddata, curr_key, curr_keylen, gc_info, workdone, stats_to_update); - assert_zero(r); - - if (num_leafentries_before == bn->data_buffer.num_klpairs()) { - // we didn't delete something, so increment the index. 
- idx++; - } - } - break; - } - case FT_NONE: break; // don't do anything - } - - return; -} - -static inline int -key_msn_cmp(const DBT *a, const DBT *b, const MSN amsn, const MSN bmsn, - DESCRIPTOR descriptor, ft_compare_func key_cmp) -{ - FAKE_DB(db, descriptor); - int r = key_cmp(&db, a, b); - if (r == 0) { - if (amsn.msn > bmsn.msn) { - r = +1; - } else if (amsn.msn < bmsn.msn) { - r = -1; - } else { - r = 0; - } - } - return r; -} - -int -toku_fifo_entry_key_msn_heaviside(const int32_t &offset, const struct toku_fifo_entry_key_msn_heaviside_extra &extra) -{ - const struct fifo_entry *query = toku_fifo_get_entry(extra.fifo, offset); - DBT qdbt; - const DBT *query_key = fill_dbt_for_fifo_entry(&qdbt, query); - const DBT *target_key = extra.key; - return key_msn_cmp(query_key, target_key, query->msn, extra.msn, - extra.desc, extra.cmp); -} - -int -toku_fifo_entry_key_msn_cmp(const struct toku_fifo_entry_key_msn_cmp_extra &extra, const int32_t &ao, const int32_t &bo) -{ - const struct fifo_entry *a = toku_fifo_get_entry(extra.fifo, ao); - const struct fifo_entry *b = toku_fifo_get_entry(extra.fifo, bo); - DBT adbt, bdbt; - const DBT *akey = fill_dbt_for_fifo_entry(&adbt, a); - const DBT *bkey = fill_dbt_for_fifo_entry(&bdbt, b); - return key_msn_cmp(akey, bkey, a->msn, b->msn, - extra.desc, extra.cmp); -} - -void toku_bnc_insert_msg(NONLEAF_CHILDINFO bnc, const void *key, ITEMLEN keylen, const void *data, ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, DESCRIPTOR desc, ft_compare_func cmp) -// Effect: Enqueue the message represented by the parameters into the -// bnc's buffer, and put it in either the fresh or stale message tree, -// or the broadcast list. -// -// This is only exported for tests. -{ - int32_t offset; - int r = toku_fifo_enq(bnc->buffer, key, keylen, data, datalen, type, msn, xids, is_fresh, &offset); - assert_zero(r); - if (ft_msg_type_applies_once(type)) { - DBT keydbt; - struct toku_fifo_entry_key_msn_heaviside_extra extra = { .desc = desc, .cmp = cmp, .fifo = bnc->buffer, .key = toku_fill_dbt(&keydbt, key, keylen), .msn = msn }; - if (is_fresh) { - r = bnc->fresh_message_tree.insert<struct toku_fifo_entry_key_msn_heaviside_extra, toku_fifo_entry_key_msn_heaviside>(offset, extra, nullptr); - assert_zero(r); - } else { - r = bnc->stale_message_tree.insert<struct toku_fifo_entry_key_msn_heaviside_extra, toku_fifo_entry_key_msn_heaviside>(offset, extra, nullptr); - assert_zero(r); - } - } else { - invariant(ft_msg_type_applies_all(type) || ft_msg_type_does_nothing(type)); - const uint32_t idx = bnc->broadcast_list.size(); - r = bnc->broadcast_list.insert_at(offset, idx); - assert_zero(r); - } -} - -// append a msg to a nonleaf node's child buffer -// should be static, but used by test programs -void toku_ft_append_to_child_buffer(ft_compare_func compare_fun, DESCRIPTOR desc, FTNODE node, int childnum, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const DBT *key, const DBT *val) { - paranoid_invariant(BP_STATE(node,childnum) == PT_AVAIL); - toku_bnc_insert_msg(BNC(node, childnum), key->data, key->size, val->data, val->size, type, msn, xids, is_fresh, desc, compare_fun); - node->dirty = 1; -} - -static void ft_nonleaf_msg_once_to_child(ft_compare_func compare_fun, DESCRIPTOR desc, FTNODE node, int target_childnum, FT_MSG msg, bool is_fresh, size_t flow_deltas[]) -// Previously we had passive aggressive promotion, but that causes a lot of I/O a the checkpoint. So now we are just putting it in the buffer here. 
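
key_msn_cmp() and the fifo-entry heaviside/cmp helpers above define how buffered messages are ordered in a child's fresh and stale message trees: primarily by key, with the MSN breaking ties so that several messages for the same key replay in the order they were injected. A small self-contained sketch of that ordering follows (toy Msg type, not the real fifo entries).

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Toy buffered message: just a key and the MSN it was assigned at injection time.
struct Msg {
    std::string key;
    uint64_t msn;
};

// Same shape as key_msn_cmp above: order by key, and for equal keys order by
// MSN, so all messages for one key come out in the order they entered the tree.
static int key_msn_cmp(const Msg &a, const Msg &b) {
    if (int c = a.key.compare(b.key); c != 0) return c < 0 ? -1 : +1;
    if (a.msn > b.msn) return +1;
    if (a.msn < b.msn) return -1;
    return 0;
}

int main() {
    std::vector<Msg> buffer = {{"b", 7}, {"a", 9}, {"b", 3}, {"a", 2}};
    std::sort(buffer.begin(), buffer.end(),
              [](const Msg &x, const Msg &y) { return key_msn_cmp(x, y) < 0; });
    // Messages group by key, and within a key older MSNs sort first.
    assert(buffer[0].key == "a" && buffer[0].msn == 2);
    assert(buffer[1].key == "a" && buffer[1].msn == 9);
    assert(buffer[2].key == "b" && buffer[2].msn == 3);
    assert(buffer[3].key == "b" && buffer[3].msn == 7);
    return 0;
}
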
-// Also we don't worry about the node getting overfull here. It's the caller's problem. -{ - unsigned int childnum = (target_childnum >= 0 - ? target_childnum - : toku_ftnode_which_child(node, msg->u.id.key, desc, compare_fun)); - toku_ft_append_to_child_buffer(compare_fun, desc, node, childnum, msg->type, msg->msn, msg->xids, is_fresh, msg->u.id.key, msg->u.id.val); - NONLEAF_CHILDINFO bnc = BNC(node, childnum); - bnc->flow[0] += flow_deltas[0]; - bnc->flow[1] += flow_deltas[1]; -} - -/* Find the leftmost child that may contain the key. - * If the key exists it will be in the child whose number - * is the return value of this function. - */ -int toku_ftnode_which_child(FTNODE node, const DBT *k, - DESCRIPTOR desc, ft_compare_func cmp) { - // a funny case of no pivots - if (node->n_children <= 1) return 0; - - // check the last key to optimize seq insertions - int n = node->n_children-1; - int c = ft_compare_pivot(desc, cmp, k, &node->childkeys[n-1]); - if (c > 0) return n; - - // binary search the pivots - int lo = 0; - int hi = n-1; // skip the last one, we checked it above - int mi; - while (lo < hi) { - mi = (lo + hi) / 2; - c = ft_compare_pivot(desc, cmp, k, &node->childkeys[mi]); - if (c > 0) { - lo = mi+1; - continue; - } - if (c < 0) { - hi = mi; - continue; - } - return mi; - } - return lo; -} - -// Used for HOT. -int -toku_ftnode_hot_next_child(FTNODE node, - const DBT *k, - DESCRIPTOR desc, - ft_compare_func cmp) { - int low = 0; - int hi = node->n_children - 1; - int mi; - while (low < hi) { - mi = (low + hi) / 2; - int r = ft_compare_pivot(desc, cmp, k, &node->childkeys[mi]); - if (r > 0) { - low = mi + 1; - } else if (r < 0) { - hi = mi; - } else { - // if they were exactly equal, then we want the sub-tree under - // the next pivot. - return mi + 1; - } - } - invariant(low == hi); - return low; -} - -// TODO Use this function to clean up other places where bits of messages are passed around -// such as toku_bnc_insert_msg() and the call stack above it. -static uint64_t -ft_msg_size(FT_MSG msg) { - size_t keyval_size = msg->u.id.key->size + msg->u.id.val->size; - size_t xids_size = xids_get_serialize_size(msg->xids); - return keyval_size + KEY_VALUE_OVERHEAD + FT_MSG_OVERHEAD + xids_size; -} - -static void -ft_nonleaf_msg_all(ft_compare_func compare_fun, DESCRIPTOR desc, FTNODE node, FT_MSG msg, bool is_fresh, size_t flow_deltas[]) -// Effect: Put the message into a nonleaf node. We put it into all children, possibly causing the children to become reactive. -// We don't do the splitting and merging. That's up to the caller after doing all the puts it wants to do. -// The re_array[i] gets set to the reactivity of any modified child i. (And there may be several such children.) -{ - for (int i = 0; i < node->n_children; i++) { - ft_nonleaf_msg_once_to_child(compare_fun, desc, node, i, msg, is_fresh, flow_deltas); - } -} - -static bool -ft_msg_applies_once(FT_MSG msg) -{ - return ft_msg_type_applies_once(msg->type); -} - -static bool -ft_msg_applies_all(FT_MSG msg) -{ - return ft_msg_type_applies_all(msg->type); -} - -static bool -ft_msg_does_nothing(FT_MSG msg) -{ - return ft_msg_type_does_nothing(msg->type); -} - -static void -ft_nonleaf_put_msg(ft_compare_func compare_fun, DESCRIPTOR desc, FTNODE node, int target_childnum, FT_MSG msg, bool is_fresh, size_t flow_deltas[]) -// Effect: Put the message into a nonleaf node. We may put it into a child, possibly causing the child to become reactive. -// We don't do the splitting and merging. 
That's up to the caller after doing all the puts it wants to do. -// The re_array[i] gets set to the reactivity of any modified child i. (And there may be several such children.) -// -{ - - // - // see comments in toku_ft_leaf_apply_msg - // to understand why we handle setting - // node->max_msn_applied_to_node_on_disk here, - // and don't do it in toku_ft_node_put_msg - // - MSN msg_msn = msg->msn; - invariant(msg_msn.msn > node->max_msn_applied_to_node_on_disk.msn); - node->max_msn_applied_to_node_on_disk = msg_msn; - - if (ft_msg_applies_once(msg)) { - ft_nonleaf_msg_once_to_child(compare_fun, desc, node, target_childnum, msg, is_fresh, flow_deltas); - } else if (ft_msg_applies_all(msg)) { - ft_nonleaf_msg_all(compare_fun, desc, node, msg, is_fresh, flow_deltas); - } else { - paranoid_invariant(ft_msg_does_nothing(msg)); - } -} - -// Garbage collect one leaf entry. -static void -ft_basement_node_gc_once(BASEMENTNODE bn, - uint32_t index, - void* keyp, - uint32_t keylen, - LEAFENTRY leaf_entry, - txn_gc_info *gc_info, - STAT64INFO_S * delta) -{ - paranoid_invariant(leaf_entry); - - // Don't run garbage collection on non-mvcc leaf entries. - if (leaf_entry->type != LE_MVCC) { - goto exit; - } - - // Don't run garbage collection if this leafentry decides it's not worth it. - if (!toku_le_worth_running_garbage_collection(leaf_entry, gc_info)) { - goto exit; - } - - LEAFENTRY new_leaf_entry; - new_leaf_entry = NULL; - - // The mempool doesn't free itself. When it allocates new memory, - // this pointer will be set to the older memory that must now be - // freed. - void * maybe_free; - maybe_free = NULL; - - // These will represent the number of bytes and rows changed as - // part of the garbage collection. - int64_t numbytes_delta; - int64_t numrows_delta; - toku_le_garbage_collect(leaf_entry, - &bn->data_buffer, - index, - keyp, - keylen, - gc_info, - &new_leaf_entry, - &numbytes_delta); - - numrows_delta = 0; - if (new_leaf_entry) { - numrows_delta = 0; - } else { - numrows_delta = -1; - } - - // If we created a new mempool buffer we must free the - // old/original buffer. - if (maybe_free) { - toku_free(maybe_free); - } - - // Update stats. - bn->stat64_delta.numrows += numrows_delta; - bn->stat64_delta.numbytes += numbytes_delta; - delta->numrows += numrows_delta; - delta->numbytes += numbytes_delta; - -exit: - return; -} - -// Garbage collect all leaf entries for a given basement node. -static void -basement_node_gc_all_les(BASEMENTNODE bn, - txn_gc_info *gc_info, - STAT64INFO_S * delta) -{ - int r = 0; - uint32_t index = 0; - uint32_t num_leafentries_before; - while (index < (num_leafentries_before = bn->data_buffer.num_klpairs())) { - void* keyp = NULL; - uint32_t keylen = 0; - LEAFENTRY leaf_entry; - r = bn->data_buffer.fetch_klpair(index, &leaf_entry, &keylen, &keyp); - assert_zero(r); - ft_basement_node_gc_once( - bn, - index, - keyp, - keylen, - leaf_entry, - gc_info, - delta - ); - // Check if the leaf entry was deleted or not. - if (num_leafentries_before == bn->data_buffer.num_klpairs()) { - ++index; - } - } -} - -// Garbage collect all leaf entires in all basement nodes. -static void -ft_leaf_gc_all_les(FT ft, FTNODE node, txn_gc_info *gc_info) -{ - toku_assert_entire_node_in_memory(node); - paranoid_invariant_zero(node->height); - // Loop through each leaf entry, garbage collecting as we go. - for (int i = 0; i < node->n_children; ++i) { - // Perform the garbage collection. 
- BASEMENTNODE bn = BLB(node, i); - STAT64INFO_S delta; - delta.numrows = 0; - delta.numbytes = 0; - basement_node_gc_all_les(bn, gc_info, &delta); - toku_ft_update_stats(&ft->in_memory_stats, delta); - } -} - -static void -ft_leaf_run_gc(FT ft, FTNODE node) { - TOKULOGGER logger = toku_cachefile_logger(ft->cf); - if (logger) { - TXN_MANAGER txn_manager = toku_logger_get_txn_manager(logger); - txn_manager_state txn_state_for_gc(txn_manager); - txn_state_for_gc.init(); - TXNID oldest_referenced_xid_for_simple_gc = toku_txn_manager_get_oldest_referenced_xid_estimate(txn_manager); - - // Perform full garbage collection. - // - // - txn_state_for_gc - // a fresh snapshot of the transaction system. - // - oldest_referenced_xid_for_simple_gc - // the oldest xid in any live list as of right now - suitible for simple gc - // - node->oldest_referenced_xid_known - // the last known oldest referenced xid for this node and any unapplied messages. - // it is a lower bound on the actual oldest referenced xid - but becasue there - // may be abort messages above us, we need to be careful to only use this value - // for implicit promotion (as opposed to the oldest referenced xid for simple gc) - // - // The node has its own oldest referenced xid because it must be careful not to implicitly promote - // provisional entries for transactions that are no longer live, but may have abort messages - // somewhere above us in the tree. - txn_gc_info gc_info(&txn_state_for_gc, - oldest_referenced_xid_for_simple_gc, - node->oldest_referenced_xid_known, - true); - ft_leaf_gc_all_les(ft, node, &gc_info); - } -} - -void toku_bnc_flush_to_child( - FT ft, - NONLEAF_CHILDINFO bnc, - FTNODE child, - TXNID parent_oldest_referenced_xid_known - ) -{ - paranoid_invariant(bnc); - STAT64INFO_S stats_delta = {0,0}; - size_t remaining_memsize = toku_fifo_buffer_size_in_use(bnc->buffer); - - TOKULOGGER logger = toku_cachefile_logger(ft->cf); - TXN_MANAGER txn_manager = logger != nullptr ? 
toku_logger_get_txn_manager(logger) : nullptr; - TXNID oldest_referenced_xid_for_simple_gc = TXNID_NONE; - - txn_manager_state txn_state_for_gc(txn_manager); - bool do_garbage_collection = child->height == 0 && txn_manager != nullptr; - if (do_garbage_collection) { - txn_state_for_gc.init(); - oldest_referenced_xid_for_simple_gc = toku_txn_manager_get_oldest_referenced_xid_estimate(txn_manager); - } - txn_gc_info gc_info(&txn_state_for_gc, - oldest_referenced_xid_for_simple_gc, - child->oldest_referenced_xid_known, - true); - FIFO_ITERATE( - bnc->buffer, key, keylen, val, vallen, type, msn, xids, is_fresh, - ({ - DBT hk,hv; - FT_MSG_S ftmsg = { type, msn, xids, .u = { .id = { toku_fill_dbt(&hk, key, keylen), - toku_fill_dbt(&hv, val, vallen) } } }; - size_t flow_deltas[] = { 0, 0 }; - if (remaining_memsize <= bnc->flow[0]) { - // this message is in the current checkpoint's worth of - // the end of the fifo - flow_deltas[0] = FIFO_CURRENT_ENTRY_MEMSIZE; - } else if (remaining_memsize <= bnc->flow[0] + bnc->flow[1]) { - // this message is in the last checkpoint's worth of the - // end of the fifo - flow_deltas[1] = FIFO_CURRENT_ENTRY_MEMSIZE; - } - toku_ft_node_put_msg( - ft->compare_fun, - ft->update_fun, - &ft->cmp_descriptor, - child, - -1, - &ftmsg, - is_fresh, - &gc_info, - flow_deltas, - &stats_delta - ); - remaining_memsize -= FIFO_CURRENT_ENTRY_MEMSIZE; - })); - child->oldest_referenced_xid_known = parent_oldest_referenced_xid_known; - - invariant(remaining_memsize == 0); - if (stats_delta.numbytes || stats_delta.numrows) { - toku_ft_update_stats(&ft->in_memory_stats, stats_delta); - } - if (do_garbage_collection) { - size_t buffsize = toku_fifo_buffer_size_in_use(bnc->buffer); - STATUS_INC(FT_MSG_BYTES_OUT, buffsize); - // may be misleading if there's a broadcast message in there - STATUS_INC(FT_MSG_BYTES_CURR, -buffsize); - } -} - -bool toku_bnc_should_promote(FT ft, NONLEAF_CHILDINFO bnc) { - static const double factor = 0.125; - const uint64_t flow_threshold = ft->h->nodesize * factor; - return bnc->flow[0] >= flow_threshold || bnc->flow[1] >= flow_threshold; -} - -void -toku_ft_node_put_msg ( - ft_compare_func compare_fun, - ft_update_func update_fun, - DESCRIPTOR desc, - FTNODE node, - int target_childnum, - FT_MSG msg, - bool is_fresh, - txn_gc_info *gc_info, - size_t flow_deltas[], - STAT64INFO stats_to_update - ) -// Effect: Push message into the subtree rooted at NODE. -// If NODE is a leaf, then -// put message into leaf, applying it to the leafentries -// If NODE is a nonleaf, then push the message into the FIFO(s) of the relevent child(ren). -// The node may become overfull. That's not our problem. -{ - toku_assert_entire_node_in_memory(node); - // - // see comments in toku_ft_leaf_apply_msg - // to understand why we don't handle setting - // node->max_msn_applied_to_node_on_disk here, - // and instead defer to these functions - // - if (node->height==0) { - toku_ft_leaf_apply_msg(compare_fun, update_fun, desc, node, target_childnum, msg, gc_info, nullptr, stats_to_update); - } else { - ft_nonleaf_put_msg(compare_fun, desc, node, target_childnum, msg, is_fresh, flow_deltas); - } -} - -static const struct pivot_bounds infinite_bounds = {.lower_bound_exclusive=NULL, - .upper_bound_inclusive=NULL}; - - -// Effect: applies the message to the leaf if the appropriate basement node is in memory. -// This function is called during message injection and/or flushing, so the entire -// node MUST be in memory. 
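
The removed toku_bnc_should_promote() above boils down to a simple threshold check: a child buffer's recent inflow is tracked in two buckets (roughly the current checkpoint's worth and the previous one's, as charged in toku_bnc_flush_to_child), and either bucket reaching 1/8 of the configured node size is the signal the promotion code consults when routing messages. A distilled version of that arithmetic, with illustrative names:

#include <cassert>
#include <cstdint>

// Same check as the removed toku_bnc_should_promote(): either flow bucket
// reaching nodesize * 0.125 trips the threshold.
static bool buffer_flow_exceeds_threshold(uint64_t flow0, uint64_t flow1, uint32_t nodesize) {
    const uint64_t flow_threshold = static_cast<uint64_t>(nodesize * 0.125);
    return flow0 >= flow_threshold || flow1 >= flow_threshold;
}

int main() {
    const uint32_t nodesize = 4 << 20;                               // 4MB node size
    assert(!buffer_flow_exceeds_threshold(0, 0, nodesize));
    assert(buffer_flow_exceeds_threshold(512 << 10, 0, nodesize));   // 512KB == 1/8 of 4MB
    assert(buffer_flow_exceeds_threshold(0, 600 << 10, nodesize));
    assert(!buffer_flow_exceeds_threshold(100 << 10, 100 << 10, nodesize));
    return 0;
}
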
-void toku_ft_leaf_apply_msg( - ft_compare_func compare_fun, - ft_update_func update_fun, - DESCRIPTOR desc, - FTNODE node, - int target_childnum, // which child to inject to, or -1 if unknown - FT_MSG msg, - txn_gc_info *gc_info, - uint64_t *workdone, - STAT64INFO stats_to_update - ) -{ - VERIFY_NODE(t, node); - toku_assert_entire_node_in_memory(node); - - // - // Because toku_ft_leaf_apply_msg is called with the intent of permanently - // applying a message to a leaf node (meaning the message is permanently applied - // and will be purged from the system after this call, as opposed to - // toku_apply_ancestors_messages_to_node, which applies a message - // for a query, but the message may still reside in the system and - // be reapplied later), we mark the node as dirty and - // take the opportunity to update node->max_msn_applied_to_node_on_disk. - // - node->dirty = 1; - - // - // we cannot blindly update node->max_msn_applied_to_node_on_disk, - // we must check to see if the msn is greater that the one already stored, - // because the message may have already been applied earlier (via - // toku_apply_ancestors_messages_to_node) to answer a query - // - // This is why we handle node->max_msn_applied_to_node_on_disk both here - // and in ft_nonleaf_put_msg, as opposed to in one location, toku_ft_node_put_msg. - // - MSN msg_msn = msg->msn; - if (msg_msn.msn > node->max_msn_applied_to_node_on_disk.msn) { - node->max_msn_applied_to_node_on_disk = msg_msn; - } - - if (ft_msg_applies_once(msg)) { - unsigned int childnum = (target_childnum >= 0 - ? target_childnum - : toku_ftnode_which_child(node, msg->u.id.key, desc, compare_fun)); - BASEMENTNODE bn = BLB(node, childnum); - if (msg->msn.msn > bn->max_msn_applied.msn) { - bn->max_msn_applied = msg->msn; - toku_ft_bn_apply_msg(compare_fun, - update_fun, - desc, - bn, - msg, - gc_info, - workdone, - stats_to_update); - } else { - STATUS_INC(FT_MSN_DISCARDS, 1); - } - } - else if (ft_msg_applies_all(msg)) { - for (int childnum=0; childnum<node->n_children; childnum++) { - if (msg->msn.msn > BLB(node, childnum)->max_msn_applied.msn) { - BLB(node, childnum)->max_msn_applied = msg->msn; - toku_ft_bn_apply_msg(compare_fun, - update_fun, - desc, - BLB(node, childnum), - msg, - gc_info, - workdone, - stats_to_update); - } else { - STATUS_INC(FT_MSN_DISCARDS, 1); - } - } - } - else if (!ft_msg_does_nothing(msg)) { - abort(); - } - VERIFY_NODE(t, node); -} - static void inject_message_in_locked_node( FT ft, FTNODE node, int childnum, - FT_MSG_S *msg, + const ft_msg &msg, size_t flow_deltas[], txn_gc_info *gc_info ) @@ -2702,7 +1564,7 @@ static void inject_message_in_locked_node( // check in frwlock. Should be possible with TOKU_PTHREAD_DEBUG, nop // otherwise. invariant(toku_ctpair_is_write_locked(node->ct_pair)); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); // Take the newer of the two oldest referenced xid values from the node and gc_info. // The gc_info usually has a newer value, because we got it at the top of this call @@ -2717,16 +1579,17 @@ static void inject_message_in_locked_node( // Get the MSN from the header. Now that we have a write lock on the // node we're injecting into, we know no other thread will get an MSN // after us and get that message into our subtree before us. 
- msg->msn.msn = toku_sync_add_and_fetch(&ft->h->max_msn_in_ft.msn, 1); - paranoid_invariant(msg->msn.msn > node->max_msn_applied_to_node_on_disk.msn); + MSN msg_msn = { .msn = toku_sync_add_and_fetch(&ft->h->max_msn_in_ft.msn, 1) }; + ft_msg msg_with_msn(msg.kdbt(), msg.vdbt(), msg.type(), msg_msn, msg.xids()); + paranoid_invariant(msg_with_msn.msn().msn > node->max_msn_applied_to_node_on_disk.msn); + STAT64INFO_S stats_delta = {0,0}; - toku_ft_node_put_msg( - ft->compare_fun, + toku_ftnode_put_msg( + ft->cmp, ft->update_fun, - &ft->cmp_descriptor, node, childnum, - msg, + msg_with_msn, true, gc_info, flow_deltas, @@ -2736,30 +1599,39 @@ static void inject_message_in_locked_node( toku_ft_update_stats(&ft->in_memory_stats, stats_delta); } // - // assumption is that toku_ft_node_put_msg will + // assumption is that toku_ftnode_put_msg will // mark the node as dirty. // enforcing invariant here. // paranoid_invariant(node->dirty != 0); - // TODO: Why not at height 0? // update some status variables if (node->height != 0) { - uint64_t msgsize = ft_msg_size(msg); + size_t msgsize = msg.total_size(); STATUS_INC(FT_MSG_BYTES_IN, msgsize); STATUS_INC(FT_MSG_BYTES_CURR, msgsize); STATUS_INC(FT_MSG_NUM, 1); - if (ft_msg_applies_all(msg)) { + if (ft_msg_type_applies_all(msg.type())) { STATUS_INC(FT_MSG_NUM_BROADCAST, 1); } } // verify that msn of latest message was captured in root node - paranoid_invariant(msg->msn.msn == node->max_msn_applied_to_node_on_disk.msn); + paranoid_invariant(msg_with_msn.msn().msn == node->max_msn_applied_to_node_on_disk.msn); + + if (node->blocknum.b == ft->rightmost_blocknum.b) { + if (ft->seqinsert_score < FT_SEQINSERT_SCORE_THRESHOLD) { + // we promoted to the rightmost leaf node and the seqinsert score has not yet saturated. + toku_sync_fetch_and_add(&ft->seqinsert_score, 1); + } + } else if (ft->seqinsert_score != 0) { + // we promoted to something other than the rightmost leaf node and the score should reset + ft->seqinsert_score = 0; + } // if we call toku_ft_flush_some_child, then that function unpins the root // otherwise, we unpin ourselves - if (node->height > 0 && toku_ft_nonleaf_is_gorged(node, ft->h->nodesize)) { + if (node->height > 0 && toku_ftnode_nonleaf_is_gorged(node, ft->h->nodesize)) { toku_ft_flush_node_on_background_thread(ft, node); } else { @@ -2784,7 +1656,7 @@ static bool process_maybe_reactive_child(FT ft, FTNODE parent, FTNODE child, int // true if relocking is needed // false otherwise { - enum reactivity re = get_node_reactivity(ft, child); + enum reactivity re = toku_ftnode_get_reactivity(ft, child); enum reactivity newre; BLOCKNUM child_blocknum; uint32_t child_fullhash; @@ -2794,14 +1666,14 @@ static bool process_maybe_reactive_child(FT ft, FTNODE parent, FTNODE child, int case RE_FISSIBLE: { // We only have a read lock on the parent. We need to drop both locks, and get write locks. 
- BLOCKNUM parent_blocknum = parent->thisnodename; + BLOCKNUM parent_blocknum = parent->blocknum; uint32_t parent_fullhash = toku_cachetable_hash(ft->cf, parent_blocknum); int parent_height = parent->height; int parent_n_children = parent->n_children; toku_unpin_ftnode_read_only(ft, child); toku_unpin_ftnode_read_only(ft, parent); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); FTNODE newparent, newchild; toku_pin_ftnode(ft, parent_blocknum, parent_fullhash, &bfe, PL_WRITE_CHEAP, &newparent, true); if (newparent->height != parent_height || newparent->n_children != parent_n_children || @@ -2818,7 +1690,7 @@ static bool process_maybe_reactive_child(FT ft, FTNODE parent, FTNODE child, int child_blocknum = BP_BLOCKNUM(newparent, childnum); child_fullhash = compute_child_fullhash(ft->cf, newparent, childnum); toku_pin_ftnode_with_dep_nodes(ft, child_blocknum, child_fullhash, &bfe, PL_WRITE_CHEAP, 1, &newparent, &newchild, true); - newre = get_node_reactivity(ft, newchild); + newre = toku_ftnode_get_reactivity(ft, newchild); if (newre == RE_FISSIBLE) { enum split_mode split_mode; if (newparent->height == 1 && (loc & LEFT_EXTREME) && childnum == 0) { @@ -2848,12 +1720,12 @@ static bool process_maybe_reactive_child(FT ft, FTNODE parent, FTNODE child, int } int parent_height = parent->height; - BLOCKNUM parent_blocknum = parent->thisnodename; + BLOCKNUM parent_blocknum = parent->blocknum; uint32_t parent_fullhash = toku_cachetable_hash(ft->cf, parent_blocknum); toku_unpin_ftnode_read_only(ft, child); toku_unpin_ftnode_read_only(ft, parent); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); FTNODE newparent, newchild; toku_pin_ftnode(ft, parent_blocknum, parent_fullhash, &bfe, PL_WRITE_CHEAP, &newparent, true); if (newparent->height != parent_height || childnum >= newparent->n_children) { @@ -2864,7 +1736,7 @@ static bool process_maybe_reactive_child(FT ft, FTNODE parent, FTNODE child, int child_blocknum = BP_BLOCKNUM(newparent, childnum); child_fullhash = compute_child_fullhash(ft->cf, newparent, childnum); toku_pin_ftnode_with_dep_nodes(ft, child_blocknum, child_fullhash, &bfe, PL_READ, 1, &newparent, &newchild, true); - newre = get_node_reactivity(ft, newchild); + newre = toku_ftnode_get_reactivity(ft, newchild); if (newre == RE_FUSIBLE && newparent->n_children >= 2) { toku_unpin_ftnode_read_only(ft, newchild); toku_ft_merge_child(ft, newparent, childnum); @@ -2887,17 +1759,17 @@ static bool process_maybe_reactive_child(FT ft, FTNODE parent, FTNODE child, int abort(); } -static void inject_message_at_this_blocknum(FT ft, CACHEKEY cachekey, uint32_t fullhash, FT_MSG_S *msg, size_t flow_deltas[], txn_gc_info *gc_info) +static void inject_message_at_this_blocknum(FT ft, CACHEKEY cachekey, uint32_t fullhash, const ft_msg &msg, size_t flow_deltas[], txn_gc_info *gc_info) // Effect: // Inject message into the node at this blocknum (cachekey). // Gets a write lock on the node for you. 
{ toku::context inject_ctx(CTX_MESSAGE_INJECTION); FTNODE node; - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); toku_pin_ftnode(ft, cachekey, fullhash, &bfe, PL_WRITE_CHEAP, &node, true); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); paranoid_invariant(node->fullhash==fullhash); ft_verify_flags(ft, node); inject_message_in_locked_node(ft, node, -1, msg, flow_deltas, gc_info); @@ -2913,11 +1785,32 @@ static inline bool should_inject_in_node(seqinsert_loc loc, int height, int dept return (height == 0 || (loc == NEITHER_EXTREME && (height <= 1 || depth >= 2))); } +static void ft_set_or_verify_rightmost_blocknum(FT ft, BLOCKNUM b) +// Given: 'b', the _definitive_ and constant rightmost blocknum of 'ft' +{ + if (ft->rightmost_blocknum.b == RESERVED_BLOCKNUM_NULL) { + toku_ft_lock(ft); + if (ft->rightmost_blocknum.b == RESERVED_BLOCKNUM_NULL) { + ft->rightmost_blocknum = b; + } + toku_ft_unlock(ft); + } + // The rightmost blocknum only transitions from RESERVED_BLOCKNUM_NULL to non-null. + // If it's already set, verify that the stored value is consistent with 'b' + invariant(ft->rightmost_blocknum.b == b.b); +} + +bool toku_bnc_should_promote(FT ft, NONLEAF_CHILDINFO bnc) { + static const double factor = 0.125; + const uint64_t flow_threshold = ft->h->nodesize * factor; + return bnc->flow[0] >= flow_threshold || bnc->flow[1] >= flow_threshold; +} + static void push_something_in_subtree( FT ft, FTNODE subtree_root, int target_childnum, - FT_MSG_S *msg, + const ft_msg &msg, size_t flow_deltas[], txn_gc_info *gc_info, int depth, @@ -2946,7 +1839,7 @@ static void push_something_in_subtree( // When the birdie is still saying we should promote, we use get_and_pin so that we wait to get the node. // If the birdie doesn't say to promote, we try maybe_get_and_pin. If we get the node cheaply, and it's dirty, we promote anyway. { - toku_assert_entire_node_in_memory(subtree_root); + toku_ftnode_assert_fully_in_memory(subtree_root); if (should_inject_in_node(loc, subtree_root->height, depth)) { switch (depth) { case 0: @@ -2960,6 +1853,14 @@ static void push_something_in_subtree( default: STATUS_INC(FT_PRO_NUM_INJECT_DEPTH_GT3, 1); break; } + // If the target node is a non-root leaf node on the right extreme, + // set the rightmost blocknum. We know there are no messages above us + // because promotion would not chose to inject directly into this leaf + // otherwise. We explicitly skip the root node because then we don't have + // to worry about changing the rightmost blocknum when the root splits. + if (subtree_root->height == 0 && loc == RIGHT_EXTREME && subtree_root->blocknum.b != ft->h->root_blocknum.b) { + ft_set_or_verify_rightmost_blocknum(ft, subtree_root->blocknum); + } inject_message_in_locked_node(ft, subtree_root, target_childnum, msg, flow_deltas, gc_info); } else { int r; @@ -2967,10 +1868,10 @@ static void push_something_in_subtree( NONLEAF_CHILDINFO bnc; // toku_ft_root_put_msg should not have called us otherwise. - paranoid_invariant(ft_msg_applies_once(msg)); + paranoid_invariant(ft_msg_type_applies_once(msg.type())); childnum = (target_childnum >= 0 ? 
target_childnum - : toku_ftnode_which_child(subtree_root, msg->u.id.key, &ft->cmp_descriptor, ft->compare_fun)); + : toku_ftnode_which_child(subtree_root, msg.kdbt(), ft->cmp)); bnc = BNC(subtree_root, childnum); if (toku_bnc_n_entries(bnc) > 0) { @@ -2996,7 +1897,7 @@ static void push_something_in_subtree( { const BLOCKNUM child_blocknum = BP_BLOCKNUM(subtree_root, childnum); - toku_verify_blocknum_allocated(ft->blocktable, child_blocknum); + ft->blocktable.verify_blocknum_allocated(child_blocknum); const uint32_t child_fullhash = toku_cachetable_hash(ft->cf, child_blocknum); FTNODE child; @@ -3014,8 +1915,8 @@ static void push_something_in_subtree( // promote and we're in the top two levels of the // tree, don't stop just because someone else has the // node locked. - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); if (lock_type == PL_WRITE_CHEAP) { // We intend to take the write lock for message injection toku::context inject_ctx(CTX_MESSAGE_INJECTION); @@ -3032,7 +1933,7 @@ static void push_something_in_subtree( STATUS_INC(FT_PRO_NUM_STOP_LOCK_CHILD, 1); goto relock_and_push_here; } - if (is_entire_node_in_memory(child)) { + if (toku_ftnode_fully_in_memory(child)) { // toku_pin_ftnode... touches the clock but toku_maybe_pin_ftnode... doesn't. // This prevents partial eviction. for (int i = 0; i < child->n_children; ++i) { @@ -3048,14 +1949,14 @@ static void push_something_in_subtree( paranoid_invariant_notnull(child); if (!just_did_split_or_merge) { - BLOCKNUM subtree_root_blocknum = subtree_root->thisnodename; + BLOCKNUM subtree_root_blocknum = subtree_root->blocknum; uint32_t subtree_root_fullhash = toku_cachetable_hash(ft->cf, subtree_root_blocknum); const bool did_split_or_merge = process_maybe_reactive_child(ft, subtree_root, child, childnum, loc); if (did_split_or_merge) { // Need to re-pin this node and try at this level again. FTNODE newparent; - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft); // should be fully in memory, we just split it + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); // should be fully in memory, we just split it toku_pin_ftnode(ft, subtree_root_blocknum, subtree_root_fullhash, &bfe, PL_READ, &newparent, true); push_something_in_subtree(ft, newparent, -1, msg, flow_deltas, gc_info, depth, loc, true); return; @@ -3084,7 +1985,7 @@ static void push_something_in_subtree( { // Right now we have a read lock on subtree_root, but we want // to inject into it so we get a write lock instead. 
- BLOCKNUM subtree_root_blocknum = subtree_root->thisnodename; + BLOCKNUM subtree_root_blocknum = subtree_root->blocknum; uint32_t subtree_root_fullhash = toku_cachetable_hash(ft->cf, subtree_root_blocknum); toku_unpin_ftnode_read_only(ft, subtree_root); switch (depth) { @@ -3106,7 +2007,7 @@ static void push_something_in_subtree( void toku_ft_root_put_msg( FT ft, - FT_MSG_S *msg, + const ft_msg &msg, txn_gc_info *gc_info ) // Effect: @@ -3141,10 +2042,10 @@ void toku_ft_root_put_msg( uint32_t fullhash; CACHEKEY root_key; toku_calculate_root_offset_pointer(ft, &root_key, &fullhash); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); - size_t flow_deltas[] = { toku_ft_msg_memsize_in_fifo(msg), 0 }; + size_t flow_deltas[] = { message_buffer::msg_memsize_in_buffer(msg), 0 }; pair_lock_type lock_type; lock_type = PL_READ; // try first for a read lock @@ -3154,7 +2055,7 @@ void toku_ft_root_put_msg( change_lock_type: // get the root node toku_pin_ftnode(ft, root_key, fullhash, &bfe, lock_type, &node, true); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); paranoid_invariant(node->fullhash==fullhash); ft_verify_flags(ft, node); @@ -3163,7 +2064,7 @@ void toku_ft_root_put_msg( // injection thread to change lock type back and forth, when only one // of them needs to in order to handle the split. That's not great, // but root splits are incredibly rare. - enum reactivity re = get_node_reactivity(ft, node); + enum reactivity re = toku_ftnode_get_reactivity(ft, node); switch (re) { case RE_STABLE: case RE_FUSIBLE: // cannot merge anything at the root @@ -3206,7 +2107,7 @@ void toku_ft_root_put_msg( // anyway. // Now, either inject here or promote. We decide based on a heuristic: - if (node->height == 0 || !ft_msg_applies_once(msg)) { + if (node->height == 0 || !ft_msg_type_applies_once(msg.type())) { // If the root's a leaf or we're injecting a broadcast, drop the read lock and inject here. toku_unpin_ftnode_read_only(ft, node); STATUS_INC(FT_PRO_NUM_ROOT_H0_INJECT, 1); @@ -3217,7 +2118,7 @@ void toku_ft_root_put_msg( } else { // The root's height 1. We may be eligible for promotion here. // On the extremes, we want to promote, in the middle, we don't. - int childnum = toku_ftnode_which_child(node, msg->u.id.key, &ft->cmp_descriptor, ft->compare_fun); + int childnum = toku_ftnode_which_child(node, msg.kdbt(), ft->cmp); if (childnum == 0 || childnum == node->n_children - 1) { // On the extremes, promote. We know which childnum we're going to, so pass that down too. push_something_in_subtree(ft, node, childnum, msg, flow_deltas, gc_info, 0, LEFT_EXTREME | RIGHT_EXTREME, false); @@ -3230,7 +2131,260 @@ void toku_ft_root_put_msg( } } -// Effect: Insert the key-val pair into ft. +// TODO: Remove me, I'm boring. +static int ft_compare_keys(FT ft, const DBT *a, const DBT *b) +// Effect: Compare two keys using the given fractal tree's comparator/descriptor +{ + return ft->cmp(a, b); +} + +static LEAFENTRY bn_get_le_and_key(BASEMENTNODE bn, int idx, DBT *key) +// Effect: Gets the i'th leafentry from the given basement node and +// fill its key in *key +// Requires: The i'th leafentry exists. 
+{ + LEAFENTRY le; + uint32_t le_len; + void *le_key; + int r = bn->data_buffer.fetch_klpair(idx, &le, &le_len, &le_key); + invariant_zero(r); + toku_fill_dbt(key, le_key, le_len); + return le; +} + +static LEAFENTRY ft_leaf_leftmost_le_and_key(FTNODE leaf, DBT *leftmost_key) +// Effect: If a leftmost key exists in the given leaf, toku_fill_dbt() +// the key into *leftmost_key +// Requires: Leaf is fully in memory and pinned for read or write. +// Return: leafentry if it exists, nullptr otherwise +{ + for (int i = 0; i < leaf->n_children; i++) { + BASEMENTNODE bn = BLB(leaf, i); + if (bn->data_buffer.num_klpairs() > 0) { + // Get the first (leftmost) leafentry and its key + return bn_get_le_and_key(bn, 0, leftmost_key); + } + } + return nullptr; +} + +static LEAFENTRY ft_leaf_rightmost_le_and_key(FTNODE leaf, DBT *rightmost_key) +// Effect: If a rightmost key exists in the given leaf, toku_fill_dbt() +// the key into *rightmost_key +// Requires: Leaf is fully in memory and pinned for read or write. +// Return: leafentry if it exists, nullptr otherwise +{ + for (int i = leaf->n_children - 1; i >= 0; i--) { + BASEMENTNODE bn = BLB(leaf, i); + size_t num_les = bn->data_buffer.num_klpairs(); + if (num_les > 0) { + // Get the last (rightmost) leafentry and its key + return bn_get_le_and_key(bn, num_les - 1, rightmost_key); + } + } + return nullptr; +} + +static int ft_leaf_get_relative_key_pos(FT ft, FTNODE leaf, const DBT *key, bool *nondeleted_key_found, int *target_childnum) +// Effect: Determines what the relative position of the given key is with +// respect to a leaf node, and if it exists. +// Requires: Leaf is fully in memory and pinned for read or write. +// Requires: target_childnum is non-null +// Return: < 0 if key is less than the leftmost key in the leaf OR the relative position is unknown, for any reason. +// 0 if key is in the bounds [leftmost_key, rightmost_key] for this leaf or the leaf is empty +// > 0 if key is greater than the rightmost key in the leaf +// *nondeleted_key_found is set (if non-null) if the target key was found and is not deleted, unmodified otherwise +// *target_childnum is set to the child that (does or would) contain the key, if calculated, unmodified otherwise +{ + DBT rightmost_key; + LEAFENTRY rightmost_le = ft_leaf_rightmost_le_and_key(leaf, &rightmost_key); + if (rightmost_le == nullptr) { + // If we can't get a rightmost key then the leaf is empty. + // In such a case, we don't have any information about what keys would be in this leaf. + // We have to assume the leaf node that would contain this key is to the left. + return -1; + } + // We have a rightmost leafentry, so it must exist in some child node + invariant(leaf->n_children > 0); + + int relative_pos = 0; + int c = ft_compare_keys(ft, key, &rightmost_key); + if (c > 0) { + relative_pos = 1; + *target_childnum = leaf->n_children - 1; + } else if (c == 0) { + if (nondeleted_key_found != nullptr && !le_latest_is_del(rightmost_le)) { + *nondeleted_key_found = true; + } + relative_pos = 0; + *target_childnum = leaf->n_children - 1; + } else { + // The key is less than the rightmost. It may still be in bounds if it's >= the leftmost. + DBT leftmost_key; + LEAFENTRY leftmost_le = ft_leaf_leftmost_le_and_key(leaf, &leftmost_key); + invariant_notnull(leftmost_le); // Must exist because a rightmost exists + c = ft_compare_keys(ft, key, &leftmost_key); + if (c > 0) { + if (nondeleted_key_found != nullptr) { + // The caller wants to know if a nondeleted key can be found. 
+                LEAFENTRY target_le;
+                int childnum = toku_ftnode_which_child(leaf, key, ft->cmp);
+                BASEMENTNODE bn = BLB(leaf, childnum);
+                struct toku_msg_leafval_heaviside_extra extra(ft->cmp, key);
+                int r = bn->data_buffer.find_zero<decltype(extra), toku_msg_leafval_heaviside>(
+                    extra,
+                    &target_le,
+                    nullptr, nullptr, nullptr
+                    );
+                *target_childnum = childnum;
+                if (r == 0 && !le_latest_is_del(leftmost_le)) {
+                    *nondeleted_key_found = true;
+                }
+            }
+            relative_pos = 0;
+        } else if (c == 0) {
+            if (nondeleted_key_found != nullptr && !le_latest_is_del(leftmost_le)) {
+                *nondeleted_key_found = true;
+            }
+            relative_pos = 0;
+            *target_childnum = 0;
+        } else {
+            relative_pos = -1;
+        }
+    }
+
+    return relative_pos;
+}
+
+static void ft_insert_directly_into_leaf(FT ft, FTNODE leaf, int target_childnum, DBT *key, DBT *val,
+                                         XIDS message_xids, enum ft_msg_type type, txn_gc_info *gc_info);
+static int getf_nothing(uint32_t, const void *, uint32_t, const void *, void *, bool);
+
+static int ft_maybe_insert_into_rightmost_leaf(FT ft, DBT *key, DBT *val, XIDS message_xids, enum ft_msg_type type,
+                                               txn_gc_info *gc_info, bool unique)
+// Effect: Pins the rightmost leaf node and attempts to do an insert.
+//         There are three reasons why we may not succeed.
+//         - The rightmost leaf is too full and needs a split.
+//         - The key to insert is not within the provable bounds of this leaf node.
+//         - The key is within bounds, but it already exists.
+// Return: 0 if this function did insert, DB_KEYEXIST if a unique key constraint exists and
+//         some nondeleted leafentry with the same key exists
+//         < 0 if this function did not insert, for a reason other than DB_KEYEXIST.
+// Note: Treat this function as a possible, but not necessary, optimization for insert.
+// Rationale: We want O(1) insertions down the rightmost path of the tree.
+{
+    int r = -1;
+
+    uint32_t rightmost_fullhash;
+    BLOCKNUM rightmost_blocknum = ft->rightmost_blocknum;
+    FTNODE rightmost_leaf = nullptr;
+
+    // Don't do the optimization if our heuristic suggests that
+    // the insertion pattern is not sequential.
+    if (ft->seqinsert_score < FT_SEQINSERT_SCORE_THRESHOLD) {
+        goto cleanup;
+    }
+
+    // We know the seqinsert score is high enough that we should
+    // attempt to directly insert into the rightmost leaf. Because
+    // the score is non-zero, the rightmost blocknum must have been
+    // set. See inject_message_in_locked_node(), which only increases
+    // the score if the target node blocknum == rightmost_blocknum
+    invariant(rightmost_blocknum.b != RESERVED_BLOCKNUM_NULL);
+
+    // Pin the rightmost leaf with a write lock.
+    rightmost_fullhash = toku_cachetable_hash(ft->cf, rightmost_blocknum);
+    ftnode_fetch_extra bfe;
+    bfe.create_for_full_read(ft);
+    toku_pin_ftnode(ft, rightmost_blocknum, rightmost_fullhash, &bfe, PL_WRITE_CHEAP, &rightmost_leaf, true);
+
+    // The rightmost blocknum never changes once it is initialized to something
+    // other than null. Verify that the pinned node has the correct blocknum.
+    invariant(rightmost_leaf->blocknum.b == rightmost_blocknum.b);
+
+    // If the rightmost leaf is reactive, bail out and let the normal promotion pass
+    // take care of it. This also ensures that if any of our ancestors are reactive,
+    // they'll be taken care of too.
+    if (toku_ftnode_get_leaf_reactivity(rightmost_leaf, ft->h->nodesize) != RE_STABLE) {
+        STATUS_INC(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE, 1);
+        goto cleanup;
+    }
+
+    // The groundwork has been laid for an insertion directly into the rightmost
We know that it is pinned for write, fully in memory, has + // no messages above it, and is not reactive. + // + // Now, two more things must be true for this insertion to actually happen: + // 1. The key to insert is within the bounds of this leafnode, or to the right. + // 2. If there is a uniqueness constraint, it passes. + bool nondeleted_key_found; + int relative_pos; + int target_childnum; + + nondeleted_key_found = false; + target_childnum = -1; + relative_pos = ft_leaf_get_relative_key_pos(ft, rightmost_leaf, key, + unique ? &nondeleted_key_found : nullptr, + &target_childnum); + if (relative_pos >= 0) { + STATUS_INC(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS, 1); + if (unique && nondeleted_key_found) { + r = DB_KEYEXIST; + } else { + ft_insert_directly_into_leaf(ft, rightmost_leaf, target_childnum, + key, val, message_xids, type, gc_info); + r = 0; + } + } else { + STATUS_INC(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS, 1); + r = -1; + } + +cleanup: + // If we did the insert, the rightmost leaf was unpinned for us. + if (r != 0 && rightmost_leaf != nullptr) { + toku_unpin_ftnode(ft, rightmost_leaf); + } + + return r; +} + +static void ft_txn_log_insert(FT ft, DBT *key, DBT *val, TOKUTXN txn, bool do_logging, enum ft_msg_type type); + +int toku_ft_insert_unique(FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool do_logging) { +// Effect: Insert a unique key-val pair into the fractal tree. +// Return: 0 on success, DB_KEYEXIST if the overwrite constraint failed + XIDS message_xids = txn != nullptr ? toku_txn_get_xids(txn) : toku_xids_get_root_xids(); + + TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h); + txn_manager_state txn_state_for_gc(txn_manager); + + TXNID oldest_referenced_xid_estimate = toku_ft_get_oldest_referenced_xid_estimate(ft_h); + txn_gc_info gc_info(&txn_state_for_gc, + oldest_referenced_xid_estimate, + // no messages above us, we can implicitly promote uxrs based on this xid + oldest_referenced_xid_estimate, + true); + int r = ft_maybe_insert_into_rightmost_leaf(ft_h->ft, key, val, message_xids, FT_INSERT, &gc_info, true); + if (r != 0 && r != DB_KEYEXIST) { + // Default to a regular unique check + insert algorithm if we couldn't + // do it based on the rightmost leaf alone. + int lookup_r = toku_ft_lookup(ft_h, key, getf_nothing, nullptr); + if (lookup_r == DB_NOTFOUND) { + toku_ft_send_insert(ft_h, key, val, message_xids, FT_INSERT, &gc_info); + r = 0; + } else { + r = DB_KEYEXIST; + } + } + + if (r == 0) { + ft_txn_log_insert(ft_h->ft, key, val, txn, do_logging, FT_INSERT); + } + return r; +} + +// Effect: Insert the key-val pair into an ft. 
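The unique-insert path above and toku_ft_maybe_insert() below both treat ft_maybe_insert_into_rightmost_leaf() as an opportunistic shortcut gated by ft->seqinsert_score: inject_message_in_locked_node() bumps the score (up to FT_SEQINSERT_SCORE_THRESHOLD) each time a message is promoted into the rightmost leaf and resets it otherwise, and the shortcut is only attempted once the score has saturated. A rough standalone model of that gating, using invented names and plain integer keys instead of the real FT/DBT types (illustration only, not TokuFT code):

// seqinsert_model.cc -- toy model of the sequential-insert shortcut gating.
#include <cstdint>
#include <iostream>

static const uint32_t SCORE_THRESHOLD = 100;  // stand-in for FT_SEQINSERT_SCORE_THRESHOLD

struct tree_model {
    uint32_t seqinsert_score = 0;    // saturating counter of consecutive rightmost injections
    uint64_t rightmost_max_key = 0;  // largest key currently in the rightmost leaf
};

// Record where an insert landed: bump the score for rightmost hits, reset otherwise.
static void note_injection(tree_model &t, bool hit_rightmost) {
    if (hit_rightmost) {
        if (t.seqinsert_score < SCORE_THRESHOLD) {
            t.seqinsert_score++;
        }
    } else {
        t.seqinsert_score = 0;
    }
}

// Returns true if the rightmost-leaf shortcut may be attempted for 'key'.
static bool try_rightmost_shortcut(const tree_model &t, uint64_t key) {
    if (t.seqinsert_score < SCORE_THRESHOLD) {
        return false;                      // pattern not sequential enough yet
    }
    return key >= t.rightmost_max_key;     // key must fall in or to the right of the leaf's bounds
}

int main() {
    tree_model t;
    for (uint64_t k = 1; k <= 200; k++) {          // ascending keys: the score saturates
        bool shortcut = try_rightmost_shortcut(t, k);
        t.rightmost_max_key = k;
        note_injection(t, true);
        if (k == 150) {
            std::cout << "key 150 via shortcut: " << shortcut << "\n";  // prints 1
        }
    }
    note_injection(t, false);                      // one out-of-order insert resets the score
    std::cout << "score after reset: " << t.seqinsert_score << "\n";    // prints 0
    return 0;
}

In the real code the fallback when the shortcut declines (score too low, key out of bounds, or a reactive leaf) is simply the normal promotion path: toku_ft_send_insert() for toku_ft_maybe_insert(), or a lookup followed by an insert for toku_ft_insert_unique().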
void toku_ft_insert (FT_HANDLE ft_handle, DBT *key, DBT *val, TOKUTXN txn) { toku_ft_maybe_insert(ft_handle, key, val, txn, false, ZERO_LSN, true, FT_INSERT); } @@ -3273,13 +2427,13 @@ void toku_ft_optimize (FT_HANDLE ft_h) { if (logger) { TXNID oldest = toku_txn_manager_get_oldest_living_xid(logger->txn_manager); - XIDS root_xids = xids_get_root_xids(); + XIDS root_xids = toku_xids_get_root_xids(); XIDS message_xids; if (oldest == TXNID_NONE_LIVING) { message_xids = root_xids; } else { - int r = xids_create_child(root_xids, &message_xids, oldest); + int r = toku_xids_create_child(root_xids, &message_xids, oldest); invariant(r == 0); } @@ -3287,7 +2441,7 @@ void toku_ft_optimize (FT_HANDLE ft_h) { DBT val; toku_init_dbt(&key); toku_init_dbt(&val); - FT_MSG_S ftmsg = { FT_OPTIMIZE, ZERO_MSN, message_xids, .u = { .id = {&key,&val} } }; + ft_msg msg(&key, &val, FT_OPTIMIZE, ZERO_MSN, message_xids); TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h); txn_manager_state txn_state_for_gc(txn_manager); @@ -3298,8 +2452,8 @@ void toku_ft_optimize (FT_HANDLE ft_h) { // no messages above us, we can implicitly promote uxrs based on this xid oldest_referenced_xid_estimate, true); - toku_ft_root_put_msg(ft_h->ft, &ftmsg, &gc_info); - xids_destroy(&message_xids); + toku_ft_root_put_msg(ft_h->ft, msg, &gc_info); + toku_xids_destroy(&message_xids); } } @@ -3356,32 +2510,38 @@ TXNID toku_ft_get_oldest_referenced_xid_estimate(FT_HANDLE ft_h) { return txn_manager != nullptr ? toku_txn_manager_get_oldest_referenced_xid_estimate(txn_manager) : TXNID_NONE; } -void toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, enum ft_msg_type type) { - paranoid_invariant(type==FT_INSERT || type==FT_INSERT_NO_OVERWRITE); - XIDS message_xids = xids_get_root_xids(); //By default use committed messages +static void ft_txn_log_insert(FT ft, DBT *key, DBT *val, TOKUTXN txn, bool do_logging, enum ft_msg_type type) { + paranoid_invariant(type == FT_INSERT || type == FT_INSERT_NO_OVERWRITE); + + //By default use committed messages TXNID_PAIR xid = toku_txn_get_txnid(txn); if (txn) { BYTESTRING keybs = {key->size, (char *) key->data}; - toku_logger_save_rollback_cmdinsert(txn, toku_cachefile_filenum(ft_h->ft->cf), &keybs); - toku_txn_maybe_note_ft(txn, ft_h->ft); - message_xids = toku_txn_get_xids(txn); + toku_logger_save_rollback_cmdinsert(txn, toku_cachefile_filenum(ft->cf), &keybs); + toku_txn_maybe_note_ft(txn, ft); } TOKULOGGER logger = toku_txn_logger(txn); if (do_logging && logger) { BYTESTRING keybs = {.len=key->size, .data=(char *) key->data}; BYTESTRING valbs = {.len=val->size, .data=(char *) val->data}; if (type == FT_INSERT) { - toku_log_enq_insert(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft_h->ft->cf), xid, keybs, valbs); + toku_log_enq_insert(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft->cf), xid, keybs, valbs); } else { - toku_log_enq_insert_no_overwrite(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft_h->ft->cf), xid, keybs, valbs); + toku_log_enq_insert_no_overwrite(logger, (LSN*)0, 0, txn, toku_cachefile_filenum(ft->cf), xid, keybs, valbs); } } +} + +void toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, enum ft_msg_type type) { + ft_txn_log_insert(ft_h->ft, key, val, txn, do_logging, type); LSN treelsn; if (oplsn_valid && oplsn.lsn <= (treelsn = toku_ft_checkpoint_lsn(ft_h->ft)).lsn) { // do nothing } else { + XIDS message_xids = txn ? 
toku_txn_get_xids(txn) : toku_xids_get_root_xids(); + TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h); txn_manager_state txn_state_for_gc(txn_manager); @@ -3391,16 +2551,28 @@ void toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool // no messages above us, we can implicitly promote uxrs based on this xid oldest_referenced_xid_estimate, txn != nullptr ? !txn->for_recovery : false); - toku_ft_send_insert(ft_h, key, val, message_xids, type, &gc_info); + int r = ft_maybe_insert_into_rightmost_leaf(ft_h->ft, key, val, message_xids, FT_INSERT, &gc_info, false); + if (r != 0) { + toku_ft_send_insert(ft_h, key, val, message_xids, type, &gc_info); + } } } +static void ft_insert_directly_into_leaf(FT ft, FTNODE leaf, int target_childnum, DBT *key, DBT *val, + XIDS message_xids, enum ft_msg_type type, txn_gc_info *gc_info) +// Effect: Insert directly into a leaf node a fractal tree. Does not do any logging. +// Requires: Leaf is fully in memory and pinned for write. +// Requires: If this insertion were to happen through the root node, the promotion +// algorithm would have selected the given leaf node as the point of injection. +// That means this function relies on the current implementation of promotion. +{ + ft_msg msg(key, val, type, ZERO_MSN, message_xids); + size_t flow_deltas[] = { 0, 0 }; + inject_message_in_locked_node(ft, leaf, target_childnum, msg, flow_deltas, gc_info); +} + static void -ft_send_update_msg(FT_HANDLE ft_h, FT_MSG_S *msg, TOKUTXN txn) { - msg->xids = (txn - ? toku_txn_get_xids(txn) - : xids_get_root_xids()); - +ft_send_update_msg(FT_HANDLE ft_h, const ft_msg &msg, TOKUTXN txn) { TXN_MANAGER txn_manager = toku_ft_get_txn_manager(ft_h); txn_manager_state txn_state_for_gc(txn_manager); @@ -3439,9 +2611,9 @@ void toku_ft_maybe_update(FT_HANDLE ft_h, const DBT *key, const DBT *update_func if (oplsn_valid && oplsn.lsn <= (treelsn = toku_ft_checkpoint_lsn(ft_h->ft)).lsn) { // do nothing } else { - FT_MSG_S msg = { FT_UPDATE, ZERO_MSN, NULL, - .u = { .id = { key, update_function_extra } } }; - ft_send_update_msg(ft_h, &msg, txn); + XIDS message_xids = txn ? toku_txn_get_xids(txn) : toku_xids_get_root_xids(); + ft_msg msg(key, update_function_extra, FT_UPDATE, ZERO_MSN, message_xids); + ft_send_update_msg(ft_h, msg, txn); } } @@ -3471,23 +2643,22 @@ void toku_ft_maybe_update_broadcast(FT_HANDLE ft_h, const DBT *update_function_e oplsn.lsn <= (treelsn = toku_ft_checkpoint_lsn(ft_h->ft)).lsn) { } else { - DBT nullkey; - const DBT *nullkeyp = toku_init_dbt(&nullkey); - FT_MSG_S msg = { FT_UPDATE_BROADCAST_ALL, ZERO_MSN, NULL, - .u = { .id = { nullkeyp, update_function_extra } } }; - ft_send_update_msg(ft_h, &msg, txn); + DBT empty_dbt; + XIDS message_xids = txn ? 
toku_txn_get_xids(txn) : toku_xids_get_root_xids(); + ft_msg msg(toku_init_dbt(&empty_dbt), update_function_extra, FT_UPDATE_BROADCAST_ALL, ZERO_MSN, message_xids); + ft_send_update_msg(ft_h, msg, txn); } } void toku_ft_send_insert(FT_HANDLE ft_handle, DBT *key, DBT *val, XIDS xids, enum ft_msg_type type, txn_gc_info *gc_info) { - FT_MSG_S ftmsg = { type, ZERO_MSN, xids, .u = { .id = { key, val } } }; - toku_ft_root_put_msg(ft_handle->ft, &ftmsg, gc_info); + ft_msg msg(key, val, type, ZERO_MSN, xids); + toku_ft_root_put_msg(ft_handle->ft, msg, gc_info); } void toku_ft_send_commit_any(FT_HANDLE ft_handle, DBT *key, XIDS xids, txn_gc_info *gc_info) { DBT val; - FT_MSG_S ftmsg = { FT_COMMIT_ANY, ZERO_MSN, xids, .u = { .id = { key, toku_init_dbt(&val) } } }; - toku_ft_root_put_msg(ft_handle->ft, &ftmsg, gc_info); + ft_msg msg(key, toku_init_dbt(&val), FT_COMMIT_ANY, ZERO_MSN, xids); + toku_ft_root_put_msg(ft_handle->ft, msg, gc_info); } void toku_ft_delete(FT_HANDLE ft_handle, DBT *key, TOKUTXN txn) { @@ -3525,7 +2696,7 @@ toku_ft_log_del_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *fts, uint32_ } void toku_ft_maybe_delete(FT_HANDLE ft_h, DBT *key, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging) { - XIDS message_xids = xids_get_root_xids(); //By default use committed messages + XIDS message_xids = toku_xids_get_root_xids(); //By default use committed messages TXNID_PAIR xid = toku_txn_get_txnid(txn); if (txn) { BYTESTRING keybs = {key->size, (char *) key->data}; @@ -3558,8 +2729,8 @@ void toku_ft_maybe_delete(FT_HANDLE ft_h, DBT *key, TOKUTXN txn, bool oplsn_vali void toku_ft_send_delete(FT_HANDLE ft_handle, DBT *key, XIDS xids, txn_gc_info *gc_info) { DBT val; toku_init_dbt(&val); - FT_MSG_S ftmsg = { FT_DELETE_ANY, ZERO_MSN, xids, .u = { .id = { key, &val } } }; - toku_ft_root_put_msg(ft_handle->ft, &ftmsg, gc_info); + ft_msg msg(key, toku_init_dbt(&val), FT_DELETE_ANY, ZERO_MSN, xids); + toku_ft_root_put_msg(ft_handle->ft, msg, gc_info); } /* ******************** open,close and create ********************** */ @@ -3603,19 +2774,20 @@ static inline int ft_open_maybe_direct(const char *filename, int oflag, int mode } } +static const mode_t file_mode = S_IRUSR+S_IWUSR+S_IRGRP+S_IWGRP+S_IROTH+S_IWOTH; + // open a file for use by the ft // Requires: File does not exist. static int ft_create_file(FT_HANDLE UU(ft_handle), const char *fname, int *fdp) { - mode_t mode = S_IRWXU|S_IRWXG|S_IRWXO; int r; int fd; int er; - fd = ft_open_maybe_direct(fname, O_RDWR | O_BINARY, mode); + fd = ft_open_maybe_direct(fname, O_RDWR | O_BINARY, file_mode); assert(fd==-1); if ((er = get_maybe_error_errno()) != ENOENT) { return er; } - fd = ft_open_maybe_direct(fname, O_RDWR | O_CREAT | O_BINARY, mode); + fd = ft_open_maybe_direct(fname, O_RDWR | O_CREAT | O_BINARY, file_mode); if (fd==-1) { r = get_error_errno(); return r; @@ -3633,9 +2805,8 @@ static int ft_create_file(FT_HANDLE UU(ft_handle), const char *fname, int *fdp) // open a file for use by the ft. 
if the file does not exist, error static int ft_open_file(const char *fname, int *fdp) { - mode_t mode = S_IRWXU|S_IRWXG|S_IRWXO; int fd; - fd = ft_open_maybe_direct(fname, O_RDWR | O_BINARY, mode); + fd = ft_open_maybe_direct(fname, O_RDWR | O_BINARY, file_mode); if (fd==-1) { return get_error_errno(); } @@ -3686,10 +2857,29 @@ toku_ft_handle_get_fanout(FT_HANDLE ft_handle, unsigned int *fanout) *fanout = ft_handle->options.fanout; } } + +// The memcmp magic byte may be set on a per fractal tree basis to communicate +// that if two keys begin with this byte, they may be compared with the builtin +// key comparison function. This greatly optimizes certain in-memory workloads, +// such as lookups by OID primary key in TokuMX. +int toku_ft_handle_set_memcmp_magic(FT_HANDLE ft_handle, uint8_t magic) { + if (magic == comparator::MEMCMP_MAGIC_NONE) { + return EINVAL; + } + if (ft_handle->ft != nullptr) { + // if the handle is already open, then we cannot set the memcmp magic + // (because it may or may not have been set by someone else already) + return EINVAL; + } + ft_handle->options.memcmp_magic = magic; + return 0; +} + static int verify_builtin_comparisons_consistent(FT_HANDLE t, uint32_t flags) { - if ((flags & TOKU_DB_KEYCMP_BUILTIN) && (t->options.compare_fun != toku_builtin_compare_fun)) + if ((flags & TOKU_DB_KEYCMP_BUILTIN) && (t->options.compare_fun != toku_builtin_compare_fun)) { return EINVAL; + } return 0; } @@ -3754,7 +2944,8 @@ toku_ft_handle_inherit_options(FT_HANDLE t, FT ft) { .compression_method = ft->h->compression_method, .fanout = ft->h->fanout, .flags = ft->h->flags, - .compare_fun = ft->compare_fun, + .memcmp_magic = ft->cmp.get_memcmp_magic(), + .compare_fun = ft->cmp.get_compare_func(), .update_fun = ft->update_fun }; t->options = options; @@ -3793,13 +2984,12 @@ ft_handle_open(FT_HANDLE ft_h, const char *fname_in_env, int is_create, int only } if (r==ENOENT && is_create) { did_create = true; - mode_t mode = S_IRWXU|S_IRWXG|S_IRWXO; if (txn) { BYTESTRING bs = { .len=(uint32_t) strlen(fname_in_env), .data = (char*)fname_in_env }; toku_logger_save_rollback_fcreate(txn, reserved_filenum, &bs); // bs is a copy of the fname relative to the environment } txn_created = (bool)(txn!=NULL); - toku_logger_log_fcreate(txn, fname_in_env, reserved_filenum, mode, ft_h->options.flags, ft_h->options.nodesize, ft_h->options.basementnodesize, ft_h->options.compression_method); + toku_logger_log_fcreate(txn, fname_in_env, reserved_filenum, file_mode, ft_h->options.flags, ft_h->options.nodesize, ft_h->options.basementnodesize, ft_h->options.compression_method); r = ft_create_file(ft_h, fname_in_cwd, &fd); if (r) { goto exit; } } @@ -3835,6 +3025,14 @@ ft_handle_open(FT_HANDLE ft_h, const char *fname_in_env, int is_create, int only r = EINVAL; goto exit; } + + // Ensure that the memcmp magic bits are consistent, if set. + if (ft->cmp.get_memcmp_magic() != toku::comparator::MEMCMP_MAGIC_NONE && + ft_h->options.memcmp_magic != toku::comparator::MEMCMP_MAGIC_NONE && + ft_h->options.memcmp_magic != ft->cmp.get_memcmp_magic()) { + r = EINVAL; + goto exit; + } toku_ft_handle_inherit_options(ft_h, ft); if (!was_already_open) { @@ -3875,10 +3073,11 @@ ft_handle_open(FT_HANDLE ft_h, const char *fname_in_env, int is_create, int only toku_txn_maybe_note_ft(txn, ft); } - //Opening an ft may restore to previous checkpoint. Truncate if necessary. + // Opening an ft may restore to previous checkpoint. + // Truncate if necessary. 
{ int fd = toku_cachefile_get_fd (ft->cf); - toku_maybe_truncate_file_on_open(ft->blocktable, fd); + ft->blocktable.maybe_truncate_file_on_open(fd); } r = 0; @@ -3986,9 +3185,8 @@ toku_ft_handle_open_with_dict_id( DICTIONARY_ID toku_ft_get_dictionary_id(FT_HANDLE ft_handle) { - FT h = ft_handle->ft; - DICTIONARY_ID dict_id = h->dict_id; - return dict_id; + FT ft = ft_handle->ft; + return ft->dict_id; } void toku_ft_set_flags(FT_HANDLE ft_handle, unsigned int flags) { @@ -4057,8 +3255,9 @@ void toku_ft_set_update(FT_HANDLE ft_handle, ft_update_func update_fun) { ft_handle->options.update_fun = update_fun; } -ft_compare_func toku_ft_get_bt_compare (FT_HANDLE ft_handle) { - return ft_handle->options.compare_fun; +const toku::comparator &toku_ft_get_comparator(FT_HANDLE ft_handle) { + invariant_notnull(ft_handle->ft); + return ft_handle->ft->cmp; } static void @@ -4067,34 +3266,31 @@ ft_remove_handle_ref_callback(FT UU(ft), void *extra) { toku_list_remove(&handle->live_ft_handle_link); } -// close an ft handle during normal operation. the underlying ft may or may not close, -// depending if there are still references. an lsn for this close will come from the logger. -void -toku_ft_handle_close(FT_HANDLE ft_handle) { - // There are error paths in the ft_handle_open that end with ft_handle->ft==NULL. +static void ft_handle_close(FT_HANDLE ft_handle, bool oplsn_valid, LSN oplsn) { FT ft = ft_handle->ft; - if (ft) { - const bool oplsn_valid = false; - toku_ft_remove_reference(ft, oplsn_valid, ZERO_LSN, ft_remove_handle_ref_callback, ft_handle); + // There are error paths in the ft_handle_open that end with ft_handle->ft == nullptr. + if (ft != nullptr) { + toku_ft_remove_reference(ft, oplsn_valid, oplsn, ft_remove_handle_ref_callback, ft_handle); } toku_free(ft_handle); } +// close an ft handle during normal operation. the underlying ft may or may not close, +// depending if there are still references. an lsn for this close will come from the logger. +void toku_ft_handle_close(FT_HANDLE ft_handle) { + ft_handle_close(ft_handle, false, ZERO_LSN); +} + // close an ft handle during recovery. the underlying ft must close, and will use the given lsn. -void -toku_ft_handle_close_recovery(FT_HANDLE ft_handle, LSN oplsn) { - FT ft = ft_handle->ft; +void toku_ft_handle_close_recovery(FT_HANDLE ft_handle, LSN oplsn) { // the ft must exist if closing during recovery. error paths during // open for recovery should close handles using toku_ft_handle_close() - assert(ft); - const bool oplsn_valid = true; - toku_ft_remove_reference(ft, oplsn_valid, oplsn, ft_remove_handle_ref_callback, ft_handle); - toku_free(ft_handle); + invariant_notnull(ft_handle->ft); + ft_handle_close(ft_handle, true, oplsn); } // TODO: remove this, callers should instead just use toku_ft_handle_close() -int -toku_close_ft_handle_nolsn (FT_HANDLE ft_handle, char** UU(error_string)) { +int toku_close_ft_handle_nolsn(FT_HANDLE ft_handle, char **UU(error_string)) { toku_ft_handle_close(ft_handle); return 0; } @@ -4114,192 +3310,21 @@ void toku_ft_handle_create(FT_HANDLE *ft_handle_ptr) { *ft_handle_ptr = ft_handle; } -/* ************* CURSORS ********************* */ +/******************************* search ***************************************/ -static inline void -ft_cursor_cleanup_dbts(FT_CURSOR c) { - toku_destroy_dbt(&c->key); - toku_destroy_dbt(&c->val); -} - -// -// This function is used by the leafentry iterators. 
-// returns TOKUDB_ACCEPT if live transaction context is allowed to read a value -// that is written by transaction with LSN of id -// live transaction context may read value if either id is the root ancestor of context, or if -// id was committed before context's snapshot was taken. -// For id to be committed before context's snapshot was taken, the following must be true: -// - id < context->snapshot_txnid64 AND id is not in context's live root transaction list -// For the above to NOT be true: -// - id > context->snapshot_txnid64 OR id is in context's live root transaction list -// -static int -does_txn_read_entry(TXNID id, TOKUTXN context) { - int rval; - TXNID oldest_live_in_snapshot = toku_get_oldest_in_live_root_txn_list(context); - if (oldest_live_in_snapshot == TXNID_NONE && id < context->snapshot_txnid64) { - rval = TOKUDB_ACCEPT; - } - else if (id < oldest_live_in_snapshot || id == context->txnid.parent_id64) { - rval = TOKUDB_ACCEPT; - } - else if (id > context->snapshot_txnid64 || toku_is_txn_in_live_root_txn_list(*context->live_root_txn_list, id)) { - rval = 0; - } - else { - rval = TOKUDB_ACCEPT; - } - return rval; -} - -static inline void -ft_cursor_extract_val(LEAFENTRY le, - FT_CURSOR cursor, - uint32_t *vallen, - void **val) { - if (toku_ft_cursor_is_leaf_mode(cursor)) { - *val = le; - *vallen = leafentry_memsize(le); - } else if (cursor->is_snapshot_read) { - int r = le_iterate_val( - le, - does_txn_read_entry, - val, - vallen, - cursor->ttxn - ); - lazy_assert_zero(r); - } else { - *val = le_latest_val_and_len(le, vallen); - } -} - -int toku_ft_cursor ( - FT_HANDLE ft_handle, - FT_CURSOR *cursorptr, - TOKUTXN ttxn, - bool is_snapshot_read, - bool disable_prefetching - ) -{ - if (is_snapshot_read) { - invariant(ttxn != NULL); - int accepted = does_txn_read_entry(ft_handle->ft->h->root_xid_that_created, ttxn); - if (accepted!=TOKUDB_ACCEPT) { - invariant(accepted==0); - return TOKUDB_MVCC_DICTIONARY_TOO_NEW; - } - } - FT_CURSOR XCALLOC(cursor); - cursor->ft_handle = ft_handle; - cursor->prefetching = false; - toku_init_dbt(&cursor->range_lock_left_key); - toku_init_dbt(&cursor->range_lock_right_key); - cursor->left_is_neg_infty = false; - cursor->right_is_pos_infty = false; - cursor->is_snapshot_read = is_snapshot_read; - cursor->is_leaf_mode = false; - cursor->ttxn = ttxn; - cursor->disable_prefetching = disable_prefetching; - cursor->is_temporary = false; - *cursorptr = cursor; - return 0; -} - -void toku_ft_cursor_remove_restriction(FT_CURSOR ftcursor) { - ftcursor->out_of_range_error = 0; - ftcursor->direction = 0; -} - -void toku_ft_cursor_set_check_interrupt_cb(FT_CURSOR ftcursor, FT_CHECK_INTERRUPT_CALLBACK cb, void *extra) { - ftcursor->interrupt_cb = cb; - ftcursor->interrupt_cb_extra = extra; -} - - -void -toku_ft_cursor_set_temporary(FT_CURSOR ftcursor) { - ftcursor->is_temporary = true; -} - -void -toku_ft_cursor_set_leaf_mode(FT_CURSOR ftcursor) { - ftcursor->is_leaf_mode = true; -} - -int -toku_ft_cursor_is_leaf_mode(FT_CURSOR ftcursor) { - return ftcursor->is_leaf_mode; -} - -void -toku_ft_cursor_set_range_lock(FT_CURSOR cursor, const DBT *left, const DBT *right, - bool left_is_neg_infty, bool right_is_pos_infty, - int out_of_range_error) -{ - // Destroy any existing keys and then clone the given left, right keys - toku_destroy_dbt(&cursor->range_lock_left_key); - if (left_is_neg_infty) { - cursor->left_is_neg_infty = true; - } else { - toku_clone_dbt(&cursor->range_lock_left_key, *left); - } - - toku_destroy_dbt(&cursor->range_lock_right_key); - if 
(right_is_pos_infty) { - cursor->right_is_pos_infty = true; - } else { - toku_clone_dbt(&cursor->range_lock_right_key, *right); +// Return true if this key is within the search bound. If there is no search bound then the tree search continues. +static bool search_continue(ft_search *search, void *key, uint32_t key_len) { + bool result = true; + if (search->direction == FT_SEARCH_LEFT && search->k_bound) { + FT_HANDLE CAST_FROM_VOIDP(ft_handle, search->context); + DBT this_key = { .data = key, .size = key_len }; + // search continues if this key <= key bound + result = (ft_handle->ft->cmp(&this_key, search->k_bound) <= 0); } - - // TOKUDB_FOUND_BUT_REJECTED is a DB_NOTFOUND with instructions to stop looking. (Faster) - cursor->out_of_range_error = out_of_range_error == DB_NOTFOUND ? TOKUDB_FOUND_BUT_REJECTED : out_of_range_error; - cursor->direction = 0; -} - -void toku_ft_cursor_close(FT_CURSOR cursor) { - ft_cursor_cleanup_dbts(cursor); - toku_destroy_dbt(&cursor->range_lock_left_key); - toku_destroy_dbt(&cursor->range_lock_right_key); - toku_free(cursor); -} - -static inline void ft_cursor_set_prefetching(FT_CURSOR cursor) { - cursor->prefetching = true; -} - -static inline bool ft_cursor_prefetching(FT_CURSOR cursor) { - return cursor->prefetching; -} - -//Return true if cursor is uninitialized. false otherwise. -static bool -ft_cursor_not_set(FT_CURSOR cursor) { - assert((cursor->key.data==NULL) == (cursor->val.data==NULL)); - return (bool)(cursor->key.data == NULL); + return result; } -// -// -// -// -// -// -// -// -// -// TODO: ask Yoni why second parameter here is not const -// -// -// -// -// -// -// -// -// -static int -heaviside_from_search_t(const DBT &kdbt, ft_search_t &search) { +static int heaviside_from_search_t(const DBT &kdbt, ft_search &search) { int cmp = search.compare(search, search.k ? &kdbt : 0); // The search->compare function returns only 0 or 1 @@ -4310,659 +3335,11 @@ heaviside_from_search_t(const DBT &kdbt, ft_search_t &search) { abort(); return 0; } - -// -// Returns true if the value that is to be read is empty. -// -static inline int -is_le_val_del(LEAFENTRY le, FT_CURSOR ftcursor) { - int rval; - if (ftcursor->is_snapshot_read) { - bool is_del; - le_iterate_is_del( - le, - does_txn_read_entry, - &is_del, - ftcursor->ttxn - ); - rval = is_del; - } - else { - rval = le_latest_is_del(le); - } - return rval; -} - -struct store_fifo_offset_extra { - int32_t *offsets; - int i; -}; - -int store_fifo_offset(const int32_t &offset, const uint32_t UU(idx), struct store_fifo_offset_extra *const extra) __attribute__((nonnull(3))); -int store_fifo_offset(const int32_t &offset, const uint32_t UU(idx), struct store_fifo_offset_extra *const extra) -{ - extra->offsets[extra->i] = offset; - extra->i++; - return 0; -} - -/** - * Given pointers to offsets within a FIFO where we can find messages, - * figure out the MSN of each message, and compare those MSNs. Returns 1, - * 0, or -1 if a is larger than, equal to, or smaller than b. 
- */ -int fifo_offset_msn_cmp(FIFO &fifo, const int32_t &ao, const int32_t &bo); -int fifo_offset_msn_cmp(FIFO &fifo, const int32_t &ao, const int32_t &bo) -{ - const struct fifo_entry *a = toku_fifo_get_entry(fifo, ao); - const struct fifo_entry *b = toku_fifo_get_entry(fifo, bo); - if (a->msn.msn > b->msn.msn) { - return +1; - } - if (a->msn.msn < b->msn.msn) { - return -1; - } - return 0; -} - -/** - * Given a fifo_entry, either decompose it into its parameters and call - * toku_ft_bn_apply_msg, or discard it, based on its MSN and the MSN of the - * basement node. - */ -static void -do_bn_apply_msg(FT_HANDLE t, BASEMENTNODE bn, struct fifo_entry *entry, txn_gc_info *gc_info, uint64_t *workdone, STAT64INFO stats_to_update) -{ - // The messages are being iterated over in (key,msn) order or just in - // msn order, so all the messages for one key, from one buffer, are in - // ascending msn order. So it's ok that we don't update the basement - // node's msn until the end. - if (entry->msn.msn > bn->max_msn_applied.msn) { - ITEMLEN keylen = entry->keylen; - ITEMLEN vallen = entry->vallen; - enum ft_msg_type type = fifo_entry_get_msg_type(entry); - MSN msn = entry->msn; - const XIDS xids = (XIDS) &entry->xids_s; - bytevec key = xids_get_end_of_array(xids); - bytevec val = (uint8_t*)key + entry->keylen; - - DBT hk; - toku_fill_dbt(&hk, key, keylen); - DBT hv; - FT_MSG_S ftmsg = { type, msn, xids, .u = { .id = { &hk, toku_fill_dbt(&hv, val, vallen) } } }; - toku_ft_bn_apply_msg( - t->ft->compare_fun, - t->ft->update_fun, - &t->ft->cmp_descriptor, - bn, - &ftmsg, - gc_info, - workdone, - stats_to_update - ); - } else { - STATUS_INC(FT_MSN_DISCARDS, 1); - } - // We must always mark entry as stale since it has been marked - // (using omt::iterate_and_mark_range) - // It is possible to call do_bn_apply_msg even when it won't apply the message because - // the node containing it could have been evicted and brought back in. - entry->is_fresh = false; -} - -struct iterate_do_bn_apply_msg_extra { - FT_HANDLE t; - BASEMENTNODE bn; - NONLEAF_CHILDINFO bnc; - txn_gc_info *gc_info; - uint64_t *workdone; - STAT64INFO stats_to_update; -}; - -int iterate_do_bn_apply_msg(const int32_t &offset, const uint32_t UU(idx), struct iterate_do_bn_apply_msg_extra *const e) __attribute__((nonnull(3))); -int iterate_do_bn_apply_msg(const int32_t &offset, const uint32_t UU(idx), struct iterate_do_bn_apply_msg_extra *const e) -{ - struct fifo_entry *entry = toku_fifo_get_entry(e->bnc->buffer, offset); - do_bn_apply_msg(e->t, e->bn, entry, e->gc_info, e->workdone, e->stats_to_update); - return 0; -} - -/** - * Given the bounds of the basement node to which we will apply messages, - * find the indexes within message_tree which contain the range of - * relevant messages. - * - * The message tree contains offsets into the buffer, where messages are - * found. The pivot_bounds are the lower bound exclusive and upper bound - * inclusive, because they come from pivot keys in the tree. We want OMT - * indices, which must have the lower bound be inclusive and the upper - * bound exclusive. We will get these by telling omt::find to look - * for something strictly bigger than each of our pivot bounds. - * - * Outputs the OMT indices in lbi (lower bound inclusive) and ube (upper - * bound exclusive). 
- */ -template<typename find_bounds_omt_t> -static void -find_bounds_within_message_tree( - DESCRIPTOR desc, /// used for cmp - ft_compare_func cmp, /// used to compare keys - const find_bounds_omt_t &message_tree, /// tree holding FIFO offsets, in which we want to look for indices - FIFO buffer, /// buffer in which messages are found - struct pivot_bounds const * const bounds, /// key bounds within the basement node we're applying messages to - uint32_t *lbi, /// (output) "lower bound inclusive" (index into message_tree) - uint32_t *ube /// (output) "upper bound exclusive" (index into message_tree) - ) -{ - int r = 0; - - if (bounds->lower_bound_exclusive) { - // By setting msn to MAX_MSN and by using direction of +1, we will - // get the first message greater than (in (key, msn) order) any - // message (with any msn) with the key lower_bound_exclusive. - // This will be a message we want to try applying, so it is the - // "lower bound inclusive" within the message_tree. - struct toku_fifo_entry_key_msn_heaviside_extra lbi_extra; - ZERO_STRUCT(lbi_extra); - lbi_extra.desc = desc; - lbi_extra.cmp = cmp; - lbi_extra.fifo = buffer; - lbi_extra.key = bounds->lower_bound_exclusive; - lbi_extra.msn = MAX_MSN; - int32_t found_lb; - r = message_tree.template find<struct toku_fifo_entry_key_msn_heaviside_extra, toku_fifo_entry_key_msn_heaviside>(lbi_extra, +1, &found_lb, lbi); - if (r == DB_NOTFOUND) { - // There is no relevant data (the lower bound is bigger than - // any message in this tree), so we have no range and we're - // done. - *lbi = 0; - *ube = 0; - return; - } - if (bounds->upper_bound_inclusive) { - // Check if what we found for lbi is greater than the upper - // bound inclusive that we have. If so, there are no relevant - // messages between these bounds. - const DBT *ubi = bounds->upper_bound_inclusive; - const int32_t offset = found_lb; - DBT found_lbidbt; - fill_dbt_for_fifo_entry(&found_lbidbt, toku_fifo_get_entry(buffer, offset)); - FAKE_DB(db, desc); - int c = cmp(&db, &found_lbidbt, ubi); - // These DBTs really are both inclusive bounds, so we need - // strict inequality in order to determine that there's - // nothing between them. If they're equal, then we actually - // need to apply the message pointed to by lbi, and also - // anything with the same key but a bigger msn. - if (c > 0) { - *lbi = 0; - *ube = 0; - return; - } - } - } else { - // No lower bound given, it's negative infinity, so we start at - // the first message in the OMT. - *lbi = 0; - } - if (bounds->upper_bound_inclusive) { - // Again, we use an msn of MAX_MSN and a direction of +1 to get - // the first thing bigger than the upper_bound_inclusive key. - // This is therefore the smallest thing we don't want to apply, - // and omt::iterate_on_range will not examine it. - struct toku_fifo_entry_key_msn_heaviside_extra ube_extra; - ZERO_STRUCT(ube_extra); - ube_extra.desc = desc; - ube_extra.cmp = cmp; - ube_extra.fifo = buffer; - ube_extra.key = bounds->upper_bound_inclusive; - ube_extra.msn = MAX_MSN; - r = message_tree.template find<struct toku_fifo_entry_key_msn_heaviside_extra, toku_fifo_entry_key_msn_heaviside>(ube_extra, +1, nullptr, ube); - if (r == DB_NOTFOUND) { - // Couldn't find anything in the buffer bigger than our key, - // so we need to look at everything up to the end of - // message_tree. - *ube = message_tree.size(); - } - } else { - // No upper bound given, it's positive infinity, so we need to go - // through the end of the OMT. 
- *ube = message_tree.size(); - } -} - -/** - * For each message in the ancestor's buffer (determined by childnum) that - * is key-wise between lower_bound_exclusive and upper_bound_inclusive, - * apply the message to the basement node. We treat the bounds as minus - * or plus infinity respectively if they are NULL. Do not mark the node - * as dirty (preserve previous state of 'dirty' bit). - */ -static void -bnc_apply_messages_to_basement_node( - FT_HANDLE t, // used for comparison function - BASEMENTNODE bn, // where to apply messages - FTNODE ancestor, // the ancestor node where we can find messages to apply - int childnum, // which child buffer of ancestor contains messages we want - struct pivot_bounds const * const bounds, // contains pivot key bounds of this basement node - txn_gc_info *gc_info, - bool* msgs_applied - ) -{ - int r; - NONLEAF_CHILDINFO bnc = BNC(ancestor, childnum); - - // Determine the offsets in the message trees between which we need to - // apply messages from this buffer - STAT64INFO_S stats_delta = {0,0}; - uint64_t workdone_this_ancestor = 0; - - uint32_t stale_lbi, stale_ube; - if (!bn->stale_ancestor_messages_applied) { - find_bounds_within_message_tree(&t->ft->cmp_descriptor, t->ft->compare_fun, bnc->stale_message_tree, bnc->buffer, bounds, &stale_lbi, &stale_ube); - } else { - stale_lbi = 0; - stale_ube = 0; - } - uint32_t fresh_lbi, fresh_ube; - find_bounds_within_message_tree(&t->ft->cmp_descriptor, t->ft->compare_fun, bnc->fresh_message_tree, bnc->buffer, bounds, &fresh_lbi, &fresh_ube); - - // We now know where all the messages we must apply are, so one of the - // following 4 cases will do the application, depending on which of - // the lists contains relevant messages: - // - // 1. broadcast messages and anything else, or a mix of fresh and stale - // 2. only fresh messages - // 3. only stale messages - if (bnc->broadcast_list.size() > 0 || - (stale_lbi != stale_ube && fresh_lbi != fresh_ube)) { - // We have messages in multiple trees, so we grab all - // the relevant messages' offsets and sort them by MSN, then apply - // them in MSN order. - const int buffer_size = ((stale_ube - stale_lbi) + (fresh_ube - fresh_lbi) + bnc->broadcast_list.size()); - toku::scoped_malloc offsets_buf(buffer_size * sizeof(int32_t)); - int32_t *offsets = reinterpret_cast<int32_t *>(offsets_buf.get()); - struct store_fifo_offset_extra sfo_extra = { .offsets = offsets, .i = 0 }; - - // Populate offsets array with offsets to stale messages - r = bnc->stale_message_tree.iterate_on_range<struct store_fifo_offset_extra, store_fifo_offset>(stale_lbi, stale_ube, &sfo_extra); - assert_zero(r); - - // Then store fresh offsets, and mark them to be moved to stale later. - r = bnc->fresh_message_tree.iterate_and_mark_range<struct store_fifo_offset_extra, store_fifo_offset>(fresh_lbi, fresh_ube, &sfo_extra); - assert_zero(r); - - // Store offsets of all broadcast messages. - r = bnc->broadcast_list.iterate<struct store_fifo_offset_extra, store_fifo_offset>(&sfo_extra); - assert_zero(r); - invariant(sfo_extra.i == buffer_size); - - // Sort by MSN. - r = toku::sort<int32_t, FIFO, fifo_offset_msn_cmp>::mergesort_r(offsets, buffer_size, bnc->buffer); - assert_zero(r); - - // Apply the messages in MSN order. 
- for (int i = 0; i < buffer_size; ++i) { - *msgs_applied = true; - struct fifo_entry *entry = toku_fifo_get_entry(bnc->buffer, offsets[i]); - do_bn_apply_msg(t, bn, entry, gc_info, &workdone_this_ancestor, &stats_delta); - } - } else if (stale_lbi == stale_ube) { - // No stale messages to apply, we just apply fresh messages, and mark them to be moved to stale later. - struct iterate_do_bn_apply_msg_extra iter_extra = { .t = t, .bn = bn, .bnc = bnc, .gc_info = gc_info, .workdone = &workdone_this_ancestor, .stats_to_update = &stats_delta }; - if (fresh_ube - fresh_lbi > 0) *msgs_applied = true; - r = bnc->fresh_message_tree.iterate_and_mark_range<struct iterate_do_bn_apply_msg_extra, iterate_do_bn_apply_msg>(fresh_lbi, fresh_ube, &iter_extra); - assert_zero(r); - } else { - invariant(fresh_lbi == fresh_ube); - // No fresh messages to apply, we just apply stale messages. - - if (stale_ube - stale_lbi > 0) *msgs_applied = true; - struct iterate_do_bn_apply_msg_extra iter_extra = { .t = t, .bn = bn, .bnc = bnc, .gc_info = gc_info, .workdone = &workdone_this_ancestor, .stats_to_update = &stats_delta }; - - r = bnc->stale_message_tree.iterate_on_range<struct iterate_do_bn_apply_msg_extra, iterate_do_bn_apply_msg>(stale_lbi, stale_ube, &iter_extra); - assert_zero(r); - } - // - // update stats - // - if (workdone_this_ancestor > 0) { - (void) toku_sync_fetch_and_add(&BP_WORKDONE(ancestor, childnum), workdone_this_ancestor); - } - if (stats_delta.numbytes || stats_delta.numrows) { - toku_ft_update_stats(&t->ft->in_memory_stats, stats_delta); - } -} - -static void -apply_ancestors_messages_to_bn( - FT_HANDLE t, - FTNODE node, - int childnum, - ANCESTORS ancestors, - struct pivot_bounds const * const bounds, - txn_gc_info *gc_info, - bool* msgs_applied - ) -{ - BASEMENTNODE curr_bn = BLB(node, childnum); - struct pivot_bounds curr_bounds = next_pivot_keys(node, childnum, bounds); - for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) { - if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > curr_bn->max_msn_applied.msn) { - paranoid_invariant(BP_STATE(curr_ancestors->node, curr_ancestors->childnum) == PT_AVAIL); - bnc_apply_messages_to_basement_node( - t, - curr_bn, - curr_ancestors->node, - curr_ancestors->childnum, - &curr_bounds, - gc_info, - msgs_applied - ); - // We don't want to check this ancestor node again if the - // next time we query it, the msn hasn't changed. - curr_bn->max_msn_applied = curr_ancestors->node->max_msn_applied_to_node_on_disk; - } - } - // At this point, we know all the stale messages above this - // basement node have been applied, and any new messages will be - // fresh, so we don't need to look at stale messages for this - // basement node, unless it gets evicted (and this field becomes - // false when it's read in again). - curr_bn->stale_ancestor_messages_applied = true; -} - -void -toku_apply_ancestors_messages_to_node ( - FT_HANDLE t, - FTNODE node, - ANCESTORS ancestors, - struct pivot_bounds const * const bounds, - bool* msgs_applied, - int child_to_read - ) -// Effect: -// Bring a leaf node up-to-date according to all the messages in the ancestors. -// If the leaf node is already up-to-date then do nothing. -// If the leaf node is not already up-to-date, then record the work done -// for that leaf in each ancestor. -// Requires: -// This is being called when pinning a leaf node for the query path. -// The entire root-to-leaf path is pinned and appears in the ancestors list. 
-{ - VERIFY_NODE(t, node); - paranoid_invariant(node->height == 0); - - TXN_MANAGER txn_manager = toku_ft_get_txn_manager(t); - txn_manager_state txn_state_for_gc(txn_manager); - - TXNID oldest_referenced_xid_for_simple_gc = toku_ft_get_oldest_referenced_xid_estimate(t); - txn_gc_info gc_info(&txn_state_for_gc, - oldest_referenced_xid_for_simple_gc, - node->oldest_referenced_xid_known, - true); - if (!node->dirty && child_to_read >= 0) { - paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL); - apply_ancestors_messages_to_bn( - t, - node, - child_to_read, - ancestors, - bounds, - &gc_info, - msgs_applied - ); - } - else { - // know we are a leaf node - // An important invariant: - // We MUST bring every available basement node for a dirty node up to date. - // flushing on the cleaner thread depends on this. This invariant - // allows the cleaner thread to just pick an internal node and flush it - // as opposed to being forced to start from the root. - for (int i = 0; i < node->n_children; i++) { - if (BP_STATE(node, i) != PT_AVAIL) { continue; } - apply_ancestors_messages_to_bn( - t, - node, - i, - ancestors, - bounds, - &gc_info, - msgs_applied - ); - } - } - VERIFY_NODE(t, node); -} - -static bool bn_needs_ancestors_messages( - FT ft, - FTNODE node, - int childnum, - struct pivot_bounds const * const bounds, - ANCESTORS ancestors, - MSN* max_msn_applied - ) -{ - BASEMENTNODE bn = BLB(node, childnum); - struct pivot_bounds curr_bounds = next_pivot_keys(node, childnum, bounds); - bool needs_ancestors_messages = false; - for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) { - if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > bn->max_msn_applied.msn) { - paranoid_invariant(BP_STATE(curr_ancestors->node, curr_ancestors->childnum) == PT_AVAIL); - NONLEAF_CHILDINFO bnc = BNC(curr_ancestors->node, curr_ancestors->childnum); - if (bnc->broadcast_list.size() > 0) { - needs_ancestors_messages = true; - goto cleanup; - } - if (!bn->stale_ancestor_messages_applied) { - uint32_t stale_lbi, stale_ube; - find_bounds_within_message_tree(&ft->cmp_descriptor, - ft->compare_fun, - bnc->stale_message_tree, - bnc->buffer, - &curr_bounds, - &stale_lbi, - &stale_ube); - if (stale_lbi < stale_ube) { - needs_ancestors_messages = true; - goto cleanup; - } - } - uint32_t fresh_lbi, fresh_ube; - find_bounds_within_message_tree(&ft->cmp_descriptor, - ft->compare_fun, - bnc->fresh_message_tree, - bnc->buffer, - &curr_bounds, - &fresh_lbi, - &fresh_ube); - if (fresh_lbi < fresh_ube) { - needs_ancestors_messages = true; - goto cleanup; - } - if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > max_msn_applied->msn) { - max_msn_applied->msn = curr_ancestors->node->max_msn_applied_to_node_on_disk.msn; - } - } - } -cleanup: - return needs_ancestors_messages; -} - -bool toku_ft_leaf_needs_ancestors_messages( - FT ft, - FTNODE node, - ANCESTORS ancestors, - struct pivot_bounds const * const bounds, - MSN *const max_msn_in_path, - int child_to_read - ) -// Effect: Determine whether there are messages in a node's ancestors -// which must be applied to it. These messages are in the correct -// keyrange for any available basement nodes, and are in nodes with the -// correct max_msn_applied_to_node_on_disk. -// Notes: -// This is an approximate query. -// Output: -// max_msn_in_path: max of "max_msn_applied_to_node_on_disk" over -// ancestors. 
This is used later to update basement nodes' -// max_msn_applied values in case we don't do the full algorithm. -// Returns: -// true if there may be some such messages -// false only if there are definitely no such messages -// Rationale: -// When we pin a node with a read lock, we want to quickly determine if -// we should exchange it for a write lock in preparation for applying -// messages. If there are no messages, we don't need the write lock. -{ - paranoid_invariant(node->height == 0); - bool needs_ancestors_messages = false; - // child_to_read may be -1 in test cases - if (!node->dirty && child_to_read >= 0) { - paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL); - needs_ancestors_messages = bn_needs_ancestors_messages( - ft, - node, - child_to_read, - bounds, - ancestors, - max_msn_in_path - ); - } - else { - for (int i = 0; i < node->n_children; ++i) { - if (BP_STATE(node, i) != PT_AVAIL) { continue; } - needs_ancestors_messages = bn_needs_ancestors_messages( - ft, - node, - i, - bounds, - ancestors, - max_msn_in_path - ); - if (needs_ancestors_messages) { - goto cleanup; - } - } - } -cleanup: - return needs_ancestors_messages; -} - -void toku_ft_bn_update_max_msn(FTNODE node, MSN max_msn_applied, int child_to_read) { - invariant(node->height == 0); - if (!node->dirty && child_to_read >= 0) { - paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL); - BASEMENTNODE bn = BLB(node, child_to_read); - if (max_msn_applied.msn > bn->max_msn_applied.msn) { - // see comment below - (void) toku_sync_val_compare_and_swap(&bn->max_msn_applied.msn, bn->max_msn_applied.msn, max_msn_applied.msn); - } - } - else { - for (int i = 0; i < node->n_children; ++i) { - if (BP_STATE(node, i) != PT_AVAIL) { continue; } - BASEMENTNODE bn = BLB(node, i); - if (max_msn_applied.msn > bn->max_msn_applied.msn) { - // This function runs in a shared access context, so to silence tools - // like DRD, we use a CAS and ignore the result. - // Any threads trying to update these basement nodes should be - // updating them to the same thing (since they all have a read lock on - // the same root-to-leaf path) so this is safe. - (void) toku_sync_val_compare_and_swap(&bn->max_msn_applied.msn, bn->max_msn_applied.msn, max_msn_applied.msn); - } - } - } -} - -struct copy_to_stale_extra { - FT ft; - NONLEAF_CHILDINFO bnc; -}; - -int copy_to_stale(const int32_t &offset, const uint32_t UU(idx), struct copy_to_stale_extra *const extra) __attribute__((nonnull(3))); -int copy_to_stale(const int32_t &offset, const uint32_t UU(idx), struct copy_to_stale_extra *const extra) -{ - struct fifo_entry *entry = toku_fifo_get_entry(extra->bnc->buffer, offset); - DBT keydbt; - DBT *key = fill_dbt_for_fifo_entry(&keydbt, entry); - struct toku_fifo_entry_key_msn_heaviside_extra heaviside_extra = { .desc = &extra->ft->cmp_descriptor, .cmp = extra->ft->compare_fun, .fifo = extra->bnc->buffer, .key = key, .msn = entry->msn }; - int r = extra->bnc->stale_message_tree.insert<struct toku_fifo_entry_key_msn_heaviside_extra, toku_fifo_entry_key_msn_heaviside>(offset, heaviside_extra, nullptr); - invariant_zero(r); - return 0; -} - -__attribute__((nonnull)) -void -toku_move_ftnode_messages_to_stale(FT ft, FTNODE node) { - invariant(node->height > 0); - for (int i = 0; i < node->n_children; ++i) { - if (BP_STATE(node, i) != PT_AVAIL) { - continue; - } - NONLEAF_CHILDINFO bnc = BNC(node, i); - // We can't delete things out of the fresh tree inside the above - // procedures because we're still looking at the fresh tree. 
Instead - // we have to move messages after we're done looking at it. - struct copy_to_stale_extra cts_extra = { .ft = ft, .bnc = bnc }; - int r = bnc->fresh_message_tree.iterate_over_marked<struct copy_to_stale_extra, copy_to_stale>(&cts_extra); - invariant_zero(r); - bnc->fresh_message_tree.delete_all_marked(); - } -} - -static int cursor_check_restricted_range(FT_CURSOR c, bytevec key, ITEMLEN keylen) { - if (c->out_of_range_error) { - FT ft = c->ft_handle->ft; - FAKE_DB(db, &ft->cmp_descriptor); - DBT found_key; - toku_fill_dbt(&found_key, key, keylen); - if ((!c->left_is_neg_infty && c->direction <= 0 && ft->compare_fun(&db, &found_key, &c->range_lock_left_key) < 0) || - (!c->right_is_pos_infty && c->direction >= 0 && ft->compare_fun(&db, &found_key, &c->range_lock_right_key) > 0)) { - invariant(c->out_of_range_error); - return c->out_of_range_error; - } - } - // Reset cursor direction to mitigate risk if some query type doesn't set the direction. - // It is always correct to check both bounds (which happens when direction==0) but it can be slower. - c->direction = 0; - return 0; -} - -static int -ft_cursor_shortcut ( - FT_CURSOR cursor, - int direction, - uint32_t index, - bn_data* bd, - FT_GET_CALLBACK_FUNCTION getf, - void *getf_v, - uint32_t *keylen, - void **key, - uint32_t *vallen, - void **val - ); - -// Return true if this key is within the search bound. If there is no search bound then the tree search continues. -static bool search_continue(ft_search *search, void *key, uint32_t key_len) { - bool result = true; - if (search->direction == FT_SEARCH_LEFT && search->k_bound) { - FT_HANDLE CAST_FROM_VOIDP(ft_handle, search->context); - FAKE_DB(db, &ft_handle->ft->cmp_descriptor); - DBT this_key = { .data = key, .size = key_len }; - // search continues if this key <= key bound - result = (ft_handle->ft->compare_fun(&db, &this_key, search->k_bound) <= 0); - } - return result; -} - // This is a bottom layer of the search functions. static int ft_search_basement_node( BASEMENTNODE bn, - ft_search_t *search, + ft_search *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, bool *doprefetch, @@ -4970,7 +3347,7 @@ ft_search_basement_node( bool can_bulk_fetch ) { - // Now we have to convert from ft_search_t to the heaviside function with a direction. What a pain... + // Now we have to convert from ft_search to the heaviside function with a direction. What a pain... int direction; switch (search->direction) { @@ -4995,7 +3372,7 @@ ok: ; if (toku_ft_cursor_is_leaf_mode(ftcursor)) goto got_a_good_value; // leaf mode cursors see all leaf entries - if (is_le_val_del(le,ftcursor)) { + if (le_val_is_del(le, ftcursor->is_snapshot_read, ftcursor->ttxn)) { // Provisionally deleted stuff is gone. // So we need to scan in the direction to see if we can find something. // Every 100 deleted leaf entries check if the leaf's key is within the search bounds. 
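A side note on the bound test above: search_continue keeps a left-to-right scan going only while the current key compares at or below the key bound under the tree's comparator, which bounds how far the scan proceeds. A minimal sketch of that predicate, assuming a plain bytewise comparator stands in for ft->compare_fun (bytewise_cmp and scan_continues are illustrative names, not from this patch):

    #include <string.h>
    #include <stdint.h>
    #include <stdbool.h>

    // Stand-in comparator: bytewise order with a length tiebreak, the same
    // order as the toku_keycompare routine added later in this patch.
    static int bytewise_cmp(const void *a, uint32_t alen, const void *b, uint32_t blen) {
        int c = memcmp(a, b, alen < blen ? alen : blen);
        if (c != 0) return c;
        return (alen < blen) ? -1 : (alen > blen) ? 1 : 0;
    }

    // For FT_SEARCH_LEFT with a key bound, the scan continues only while
    // key <= bound, mirroring the search_continue check above.
    static bool scan_continues(const void *key, uint32_t keylen,
                               const void *bound, uint32_t boundlen) {
        return bytewise_cmp(key, keylen, bound, boundlen) <= 0;
    }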
@@ -5025,7 +3402,9 @@ ok: ; } r = bn->data_buffer.fetch_klpair(idx, &le, &keylen, &key); assert_zero(r); // we just validated the index - if (!is_le_val_del(le,ftcursor)) goto got_a_good_value; + if (!le_val_is_del(le, ftcursor->is_snapshot_read, ftcursor->ttxn)) { + goto got_a_good_value; + } } } got_a_good_value: @@ -5033,42 +3412,31 @@ got_a_good_value: uint32_t vallen; void *val; - ft_cursor_extract_val(le, - ftcursor, - &vallen, - &val - ); - r = cursor_check_restricted_range(ftcursor, key, keylen); - if (r==0) { + le_extract_val(le, toku_ft_cursor_is_leaf_mode(ftcursor), + ftcursor->is_snapshot_read, ftcursor->ttxn, + &vallen, &val); + r = toku_ft_cursor_check_restricted_range(ftcursor, key, keylen); + if (r == 0) { r = getf(keylen, key, vallen, val, getf_v, false); } - if (r==0 || r == TOKUDB_CURSOR_CONTINUE) { + if (r == 0 || r == TOKUDB_CURSOR_CONTINUE) { // // IMPORTANT: bulk fetch CANNOT go past the current basement node, // because there is no guarantee that messages have been applied // to other basement nodes, as part of #5770 // if (r == TOKUDB_CURSOR_CONTINUE && can_bulk_fetch) { - r = ft_cursor_shortcut( - ftcursor, - direction, - idx, - &bn->data_buffer, - getf, - getf_v, - &keylen, - &key, - &vallen, - &val - ); + r = toku_ft_cursor_shortcut(ftcursor, direction, idx, &bn->data_buffer, + getf, getf_v, &keylen, &key, &vallen, &val); } - ft_cursor_cleanup_dbts(ftcursor); + toku_destroy_dbt(&ftcursor->key); + toku_destroy_dbt(&ftcursor->val); if (!ftcursor->is_temporary) { toku_memdup_dbt(&ftcursor->key, key, keylen); toku_memdup_dbt(&ftcursor->val, val, vallen); } - //The search was successful. Prefetching can continue. + // The search was successful. Prefetching can continue. *doprefetch = true; } } @@ -5080,7 +3448,7 @@ static int ft_search_node ( FT_HANDLE ft_handle, FTNODE node, - ft_search_t *search, + ft_search *search, int child_to_search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, @@ -5088,17 +3456,17 @@ ft_search_node ( FT_CURSOR ftcursor, UNLOCKERS unlockers, ANCESTORS, - struct pivot_bounds const * const bounds, + const pivot_bounds &bounds, bool can_bulk_fetch ); static int -ftnode_fetch_callback_and_free_bfe(CACHEFILE cf, PAIR p, int fd, BLOCKNUM nodename, uint32_t fullhash, void **ftnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int *dirtyp, void *extraargs) +ftnode_fetch_callback_and_free_bfe(CACHEFILE cf, PAIR p, int fd, BLOCKNUM blocknum, uint32_t fullhash, void **ftnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int *dirtyp, void *extraargs) { - int r = toku_ftnode_fetch_callback(cf, p, fd, nodename, fullhash, ftnode_pv, disk_data, sizep, dirtyp, extraargs); - struct ftnode_fetch_extra *CAST_FROM_VOIDP(ffe, extraargs); - destroy_bfe_for_prefetch(ffe); - toku_free(ffe); + int r = toku_ftnode_fetch_callback(cf, p, fd, blocknum, fullhash, ftnode_pv, disk_data, sizep, dirtyp, extraargs); + ftnode_fetch_extra *CAST_FROM_VOIDP(bfe, extraargs); + bfe->destroy(); + toku_free(bfe); return r; } @@ -5106,12 +3474,24 @@ static int ftnode_pf_callback_and_free_bfe(void *ftnode_pv, void* disk_data, void *read_extraargs, int fd, PAIR_ATTR *sizep) { int r = toku_ftnode_pf_callback(ftnode_pv, disk_data, read_extraargs, fd, sizep); - struct ftnode_fetch_extra *CAST_FROM_VOIDP(ffe, read_extraargs); - destroy_bfe_for_prefetch(ffe); - toku_free(ffe); + ftnode_fetch_extra *CAST_FROM_VOIDP(bfe, read_extraargs); + bfe->destroy(); + toku_free(bfe); return r; } +CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_node(FT ft) { + CACHETABLE_WRITE_CALLBACK wc; + wc.flush_callback = 
toku_ftnode_flush_callback; + wc.pe_est_callback = toku_ftnode_pe_est_callback; + wc.pe_callback = toku_ftnode_pe_callback; + wc.cleaner_callback = toku_ftnode_cleaner_callback; + wc.clone_callback = toku_ftnode_clone_callback; + wc.checkpoint_complete_callback = toku_ftnode_checkpoint_complete_callback; + wc.write_extraargs = ft; + return wc; +} + static void ft_node_maybe_prefetch(FT_HANDLE ft_handle, FTNODE node, int childnum, FT_CURSOR ftcursor, bool *doprefetch) { // the number of nodes to prefetch @@ -5119,13 +3499,13 @@ ft_node_maybe_prefetch(FT_HANDLE ft_handle, FTNODE node, int childnum, FT_CURSOR // if we want to prefetch in the tree // then prefetch the next children if there are any - if (*doprefetch && ft_cursor_prefetching(ftcursor) && !ftcursor->disable_prefetching) { + if (*doprefetch && toku_ft_cursor_prefetching(ftcursor) && !ftcursor->disable_prefetching) { int rc = ft_cursor_rightmost_child_wanted(ftcursor, ft_handle, node); for (int i = childnum + 1; (i <= childnum + num_nodes_to_prefetch) && (i <= rc); i++) { BLOCKNUM nextchildblocknum = BP_BLOCKNUM(node, i); uint32_t nextfullhash = compute_child_fullhash(ft_handle->ft->cf, node, i); - struct ftnode_fetch_extra *MALLOC(bfe); - fill_bfe_for_prefetch(bfe, ft_handle->ft, ftcursor); + ftnode_fetch_extra *XCALLOC(bfe); + bfe->create_for_prefetch(ft_handle->ft, ftcursor); bool doing_prefetch = false; toku_cachefile_prefetch( ft_handle->ft->cf, @@ -5139,7 +3519,7 @@ ft_node_maybe_prefetch(FT_HANDLE ft_handle, FTNODE node, int childnum, FT_CURSOR &doing_prefetch ); if (!doing_prefetch) { - destroy_bfe_for_prefetch(bfe); + bfe->destroy(); toku_free(bfe); } *doprefetch = false; @@ -5152,6 +3532,7 @@ struct unlock_ftnode_extra { FTNODE node; bool msgs_applied; }; + // When this is called, the cachetable lock is held static void unlock_ftnode_fun (void *v) { @@ -5171,8 +3552,8 @@ unlock_ftnode_fun (void *v) { /* search in a node's child */ static int -ft_search_child(FT_HANDLE ft_handle, FTNODE node, int childnum, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, bool *doprefetch, FT_CURSOR ftcursor, UNLOCKERS unlockers, - ANCESTORS ancestors, struct pivot_bounds const * const bounds, bool can_bulk_fetch) +ft_search_child(FT_HANDLE ft_handle, FTNODE node, int childnum, ft_search *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, bool *doprefetch, FT_CURSOR ftcursor, UNLOCKERS unlockers, + ANCESTORS ancestors, const pivot_bounds &bounds, bool can_bulk_fetch) // Effect: Search in a node's child. Searches are read-only now (at least as far as the hardcopy is concerned). { struct ancestors next_ancestors = {node, childnum, ancestors}; @@ -5184,9 +3565,8 @@ ft_search_child(FT_HANDLE ft_handle, FTNODE node, int childnum, ft_search_t *sea // If the current node's height is greater than 1, then its child is an internal node. // Therefore, to warm the cache better (#5798), we want to read all the partitions off disk in one shot. 
bool read_all_partitions = node->height > 1; - struct ftnode_fetch_extra bfe; - fill_bfe_for_subset_read( - &bfe, + ftnode_fetch_extra bfe; + bfe.create_for_subset_read( ft_handle->ft, search, &ftcursor->range_lock_left_key, @@ -5250,19 +3630,13 @@ ft_search_child(FT_HANDLE ft_handle, FTNODE node, int childnum, ft_search_t *sea } static inline int -search_which_child_cmp_with_bound(DB *db, ft_compare_func cmp, FTNODE node, int childnum, ft_search_t *search, DBT *dbt) -{ - return cmp(db, toku_copy_dbt(dbt, node->childkeys[childnum]), &search->pivot_bound); +search_which_child_cmp_with_bound(const toku::comparator &cmp, FTNODE node, int childnum, + ft_search *search, DBT *dbt) { + return cmp(toku_copyref_dbt(dbt, node->pivotkeys.get_pivot(childnum)), &search->pivot_bound); } int -toku_ft_search_which_child( - DESCRIPTOR desc, - ft_compare_func cmp, - FTNODE node, - ft_search_t *search - ) -{ +toku_ft_search_which_child(const toku::comparator &cmp, FTNODE node, ft_search *search) { if (node->n_children <= 1) return 0; DBT pivotkey; @@ -5272,7 +3646,7 @@ toku_ft_search_which_child( int mi; while (lo < hi) { mi = (lo + hi) / 2; - toku_copy_dbt(&pivotkey, node->childkeys[mi]); + node->pivotkeys.fill_pivot(mi, &pivotkey); // search->compare is really strange, and only works well with a // linear search, it makes binary search a pita. // @@ -5297,10 +3671,9 @@ toku_ft_search_which_child( // ready to return something, if the pivot is bounded, we have to move // over a bit to get away from what we've already searched if (search->pivot_bound.data != nullptr) { - FAKE_DB(db, desc); if (search->direction == FT_SEARCH_LEFT) { while (lo < node->n_children - 1 && - search_which_child_cmp_with_bound(&db, cmp, node, lo, search, &pivotkey) <= 0) { + search_which_child_cmp_with_bound(cmp, node, lo, search, &pivotkey) <= 0) { // searching left to right, if the comparison says the // current pivot (lo) is left of or equal to our bound, // don't search that child again @@ -5308,11 +3681,11 @@ toku_ft_search_which_child( } } else { while (lo > 0 && - search_which_child_cmp_with_bound(&db, cmp, node, lo - 1, search, &pivotkey) >= 0) { + search_which_child_cmp_with_bound(cmp, node, lo - 1, search, &pivotkey) >= 0) { // searching right to left, same argument as just above // (but we had to pass lo - 1 because the pivot between lo // and the thing just less than it is at that position in - // the childkeys array) + // the pivot keys array) lo--; } } @@ -5324,17 +3697,17 @@ static void maybe_search_save_bound( FTNODE node, int child_searched, - ft_search_t *search) + ft_search *search) { int p = (search->direction == FT_SEARCH_LEFT) ? child_searched : child_searched - 1; if (p >= 0 && p < node->n_children-1) { toku_destroy_dbt(&search->pivot_bound); - toku_clone_dbt(&search->pivot_bound, node->childkeys[p]); + toku_clone_dbt(&search->pivot_bound, node->pivotkeys.get_pivot(p)); } } // Returns true if there are still children left to search in this node within the search bound (if any). 
-static bool search_try_again(FTNODE node, int child_to_search, ft_search_t *search) { +static bool search_try_again(FTNODE node, int child_to_search, ft_search *search) { bool try_again = false; if (search->direction == FT_SEARCH_LEFT) { if (child_to_search < node->n_children-1) { @@ -5342,8 +3715,7 @@ static bool search_try_again(FTNODE node, int child_to_search, ft_search_t *sear // if there is a search bound and the bound is within the search pivot then continue the search if (search->k_bound) { FT_HANDLE CAST_FROM_VOIDP(ft_handle, search->context); - FAKE_DB(db, &ft_handle->ft->cmp_descriptor); - try_again = (ft_handle->ft->compare_fun(&db, search->k_bound, &search->pivot_bound) > 0); + try_again = (ft_handle->ft->cmp(search->k_bound, &search->pivot_bound) > 0); } } } else if (search->direction == FT_SEARCH_RIGHT) { @@ -5357,7 +3729,7 @@ static int ft_search_node( FT_HANDLE ft_handle, FTNODE node, - ft_search_t *search, + ft_search *search, int child_to_search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, @@ -5365,7 +3737,7 @@ ft_search_node( FT_CURSOR ftcursor, UNLOCKERS unlockers, ANCESTORS ancestors, - struct pivot_bounds const * const bounds, + const pivot_bounds &bounds, bool can_bulk_fetch ) { @@ -5377,7 +3749,7 @@ ft_search_node( // At this point, we must have the necessary partition available to continue the search // assert(BP_STATE(node,child_to_search) == PT_AVAIL); - const struct pivot_bounds next_bounds = next_pivot_keys(node, child_to_search, bounds); + const pivot_bounds next_bounds = bounds.next_bounds(node, child_to_search); if (node->height > 0) { r = ft_search_child( ft_handle, @@ -5390,7 +3762,7 @@ ft_search_node( ftcursor, unlockers, ancestors, - &next_bounds, + next_bounds, can_bulk_fetch ); } @@ -5419,12 +3791,8 @@ ft_search_node( // we have a new pivotkey if (node->height == 0) { // when we run off the end of a basement, try to lock the range up to the pivot. solves #3529 - const DBT *pivot = nullptr; - if (search->direction == FT_SEARCH_LEFT) { - pivot = next_bounds.upper_bound_inclusive; // left -> right - } else { - pivot = next_bounds.lower_bound_exclusive; // right -> left - } + const DBT *pivot = search->direction == FT_SEARCH_LEFT ? next_bounds.ubi() : // left -> right + next_bounds.lbe(); // right -> left if (pivot != nullptr) { int rr = getf(pivot->size, pivot->data, 0, nullptr, getf_v, true); if (rr != 0) { @@ -5452,8 +3820,7 @@ ft_search_node( return r; } -static int -toku_ft_search (FT_HANDLE ft_handle, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, FT_CURSOR ftcursor, bool can_bulk_fetch) +int toku_ft_search(FT_HANDLE ft_handle, ft_search *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, FT_CURSOR ftcursor, bool can_bulk_fetch) // Effect: Perform a search. Associate cursor with a leaf if possible. // All searches are performed through this function. { @@ -5483,7 +3850,7 @@ try_again: // and the partial fetch callback (in case the node is perhaps partially in memory) to the fetch the node // - This eventually calls either toku_ftnode_fetch_callback or toku_ftnode_pf_req_callback depending on whether the node is in // memory at all or not. - // - Within these functions, the "ft_search_t search" parameter is used to evaluate which child the search is interested in. + // - Within these functions, the "ft_search search" parameter is used to evaluate which child the search is interested in. 
// If the node is not in memory at all, toku_ftnode_fetch_callback will read the node and decompress only the partition for the // relevant child, be it a message buffer or basement node. If the node is in memory, then toku_ftnode_pf_req_callback // will tell the cachetable that a partial fetch is required if and only if the relevant child is not in memory. If the relevant child @@ -5493,9 +3860,8 @@ try_again: // - At this point, toku_ftnode_pin_holding_lock has returned, with bfe.child_to_read set, // - ft_search_node is called, assuming that the node and its relevant partition are in memory. // - struct ftnode_fetch_extra bfe; - fill_bfe_for_subset_read( - &bfe, + ftnode_fetch_extra bfe; + bfe.create_for_subset_read( ft, search, &ftcursor->range_lock_left_key, @@ -5530,7 +3896,7 @@ try_again: { bool doprefetch = false; //static int counter = 0; counter++; - r = ft_search_node(ft_handle, node, search, bfe.child_to_read, getf, getf_v, &doprefetch, ftcursor, &unlockers, (ANCESTORS)NULL, &infinite_bounds, can_bulk_fetch); + r = ft_search_node(ft_handle, node, search, bfe.child_to_read, getf, getf_v, &doprefetch, ftcursor, &unlockers, (ANCESTORS)NULL, pivot_bounds::infinite_bounds(), can_bulk_fetch); if (r==TOKUDB_TRY_AGAIN) { // there are two cases where we get TOKUDB_TRY_AGAIN // case 1 is when some later call to toku_pin_ftnode returned @@ -5584,355 +3950,20 @@ try_again: return r; } -struct ft_cursor_search_struct { - FT_GET_CALLBACK_FUNCTION getf; - void *getf_v; - FT_CURSOR cursor; - ft_search_t *search; -}; - -/* search for the first kv pair that matches the search object */ -static int -ft_cursor_search(FT_CURSOR cursor, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, bool can_bulk_fetch) -{ - int r = toku_ft_search(cursor->ft_handle, search, getf, getf_v, cursor, can_bulk_fetch); - return r; -} - -static inline int compare_k_x(FT_HANDLE ft_handle, const DBT *k, const DBT *x) { - FAKE_DB(db, &ft_handle->ft->cmp_descriptor); - return ft_handle->ft->compare_fun(&db, k, x); -} - -static int -ft_cursor_compare_one(const ft_search_t &search __attribute__((__unused__)), const DBT *x __attribute__((__unused__))) -{ - return 1; -} - -static int ft_cursor_compare_set(const ft_search_t &search, const DBT *x) { - FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context); - return compare_k_x(ft_handle, search.k, x) <= 0; /* return min xy: kv <= xy */ -} - -static int -ft_cursor_current_getf(ITEMLEN keylen, bytevec key, - ITEMLEN vallen, bytevec val, - void *v, bool lock_only) { - struct ft_cursor_search_struct *CAST_FROM_VOIDP(bcss, v); - int r; - if (key==NULL) { - r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, lock_only); - } else { - FT_CURSOR cursor = bcss->cursor; - DBT newkey; - toku_fill_dbt(&newkey, key, keylen); - if (compare_k_x(cursor->ft_handle, &cursor->key, &newkey) != 0) { - r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, lock_only); // This was once DB_KEYEMPTY - if (r==0) r = TOKUDB_FOUND_BUT_REJECTED; - } - else - r = bcss->getf(keylen, key, vallen, val, bcss->getf_v, lock_only); - } - return r; -} - -int -toku_ft_cursor_current(FT_CURSOR cursor, int op, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) -{ - if (ft_cursor_not_set(cursor)) - return EINVAL; - cursor->direction = 0; - if (op == DB_CURRENT) { - struct ft_cursor_search_struct bcss = {getf, getf_v, cursor, 0}; - ft_search_t search; - ft_search_init(&search, ft_cursor_compare_set, FT_SEARCH_LEFT, &cursor->key, nullptr, cursor->ft_handle); - int r = toku_ft_search(cursor->ft_handle, &search, 
ft_cursor_current_getf, &bcss, cursor, false); - ft_search_finish(&search); - return r; - } - return getf(cursor->key.size, cursor->key.data, cursor->val.size, cursor->val.data, getf_v, false); // ft_cursor_copyout(cursor, outkey, outval); -} - -int -toku_ft_cursor_first(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) -{ - cursor->direction = 0; - ft_search_t search; - ft_search_init(&search, ft_cursor_compare_one, FT_SEARCH_LEFT, nullptr, nullptr, cursor->ft_handle); - int r = ft_cursor_search(cursor, &search, getf, getf_v, false); - ft_search_finish(&search); - return r; -} - -int -toku_ft_cursor_last(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) -{ - cursor->direction = 0; - ft_search_t search; - ft_search_init(&search, ft_cursor_compare_one, FT_SEARCH_RIGHT, nullptr, nullptr, cursor->ft_handle); - int r = ft_cursor_search(cursor, &search, getf, getf_v, false); - ft_search_finish(&search); - return r; -} - -static int ft_cursor_compare_next(const ft_search_t &search, const DBT *x) { - FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context); - return compare_k_x(ft_handle, search.k, x) < 0; /* return min xy: kv < xy */ -} - -static int -ft_cursor_shortcut ( - FT_CURSOR cursor, - int direction, - uint32_t index, - bn_data* bd, - FT_GET_CALLBACK_FUNCTION getf, - void *getf_v, - uint32_t *keylen, - void **key, - uint32_t *vallen, - void **val - ) -{ - int r = 0; - // if we are searching towards the end, limit is last element - // if we are searching towards the beginning, limit is the first element - uint32_t limit = (direction > 0) ? (bd->num_klpairs() - 1) : 0; - - //Starting with the prev, find the first real (non-provdel) leafentry. - while (index != limit) { - index += direction; - LEAFENTRY le; - void* foundkey = NULL; - uint32_t foundkeylen = 0; - - r = bd->fetch_klpair(index, &le, &foundkeylen, &foundkey); - invariant_zero(r); - - if (toku_ft_cursor_is_leaf_mode(cursor) || !is_le_val_del(le, cursor)) { - ft_cursor_extract_val( - le, - cursor, - vallen, - val - ); - *key = foundkey; - *keylen = foundkeylen; - - cursor->direction = direction; - r = cursor_check_restricted_range(cursor, *key, *keylen); - if (r!=0) { - paranoid_invariant(r == cursor->out_of_range_error); - // We already got at least one entry from the bulk fetch. - // Return 0 (instead of out of range error). 
- r = 0; - break; - } - r = getf(*keylen, *key, *vallen, *val, getf_v, false); - if (r == TOKUDB_CURSOR_CONTINUE) { - continue; - } - else { - break; - } - } - } - - return r; -} - -int -toku_ft_cursor_next(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) -{ - cursor->direction = +1; - ft_search_t search; - ft_search_init(&search, ft_cursor_compare_next, FT_SEARCH_LEFT, &cursor->key, nullptr, cursor->ft_handle); - int r = ft_cursor_search(cursor, &search, getf, getf_v, true); - ft_search_finish(&search); - if (r == 0) ft_cursor_set_prefetching(cursor); - return r; -} - -static int -ft_cursor_search_eq_k_x_getf(ITEMLEN keylen, bytevec key, - ITEMLEN vallen, bytevec val, - void *v, bool lock_only) { - struct ft_cursor_search_struct *CAST_FROM_VOIDP(bcss, v); - int r; - if (key==NULL) { - r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, false); - } else { - FT_CURSOR cursor = bcss->cursor; - DBT newkey; - toku_fill_dbt(&newkey, key, keylen); - if (compare_k_x(cursor->ft_handle, bcss->search->k, &newkey) == 0) { - r = bcss->getf(keylen, key, vallen, val, bcss->getf_v, lock_only); - } else { - r = bcss->getf(0, NULL, 0, NULL, bcss->getf_v, lock_only); - if (r==0) r = TOKUDB_FOUND_BUT_REJECTED; - } - } - return r; -} - -/* search for the kv pair that matches the search object and is equal to k */ -static int -ft_cursor_search_eq_k_x(FT_CURSOR cursor, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) -{ - struct ft_cursor_search_struct bcss = {getf, getf_v, cursor, search}; - int r = toku_ft_search(cursor->ft_handle, search, ft_cursor_search_eq_k_x_getf, &bcss, cursor, false); - return r; -} - -static int ft_cursor_compare_prev(const ft_search_t &search, const DBT *x) { - FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context); - return compare_k_x(ft_handle, search.k, x) > 0; /* return max xy: kv > xy */ -} - -int -toku_ft_cursor_prev(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) -{ - cursor->direction = -1; - ft_search_t search; - ft_search_init(&search, ft_cursor_compare_prev, FT_SEARCH_RIGHT, &cursor->key, nullptr, cursor->ft_handle); - int r = ft_cursor_search(cursor, &search, getf, getf_v, true); - ft_search_finish(&search); - return r; -} - -static int ft_cursor_compare_set_range(const ft_search_t &search, const DBT *x) { - FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context); - return compare_k_x(ft_handle, search.k, x) <= 0; /* return kv <= xy */ -} - -int -toku_ft_cursor_set(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) -{ - cursor->direction = 0; - ft_search_t search; - ft_search_init(&search, ft_cursor_compare_set_range, FT_SEARCH_LEFT, key, nullptr, cursor->ft_handle); - int r = ft_cursor_search_eq_k_x(cursor, &search, getf, getf_v); - ft_search_finish(&search); - return r; -} - -int -toku_ft_cursor_set_range(FT_CURSOR cursor, DBT *key, DBT *key_bound, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) -{ - cursor->direction = 0; - ft_search_t search; - ft_search_init(&search, ft_cursor_compare_set_range, FT_SEARCH_LEFT, key, key_bound, cursor->ft_handle); - int r = ft_cursor_search(cursor, &search, getf, getf_v, false); - ft_search_finish(&search); - return r; -} - -static int ft_cursor_compare_set_range_reverse(const ft_search_t &search, const DBT *x) { - FT_HANDLE CAST_FROM_VOIDP(ft_handle, search.context); - return compare_k_x(ft_handle, search.k, x) >= 0; /* return kv >= xy */ -} - -int -toku_ft_cursor_set_range_reverse(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) -{ - cursor->direction = 
0; - ft_search_t search; - ft_search_init(&search, ft_cursor_compare_set_range_reverse, FT_SEARCH_RIGHT, key, nullptr, cursor->ft_handle); - int r = ft_cursor_search(cursor, &search, getf, getf_v, false); - ft_search_finish(&search); - return r; -} - - -//TODO: When tests have been rewritten, get rid of this function. -//Only used by tests. -int -toku_ft_cursor_get (FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, int get_flags) -{ - int op = get_flags & DB_OPFLAGS_MASK; - if (get_flags & ~DB_OPFLAGS_MASK) - return EINVAL; - - switch (op) { - case DB_CURRENT: - case DB_CURRENT_BINDING: - return toku_ft_cursor_current(cursor, op, getf, getf_v); - case DB_FIRST: - return toku_ft_cursor_first(cursor, getf, getf_v); - case DB_LAST: - return toku_ft_cursor_last(cursor, getf, getf_v); - case DB_NEXT: - if (ft_cursor_not_set(cursor)) { - return toku_ft_cursor_first(cursor, getf, getf_v); - } else { - return toku_ft_cursor_next(cursor, getf, getf_v); - } - case DB_PREV: - if (ft_cursor_not_set(cursor)) { - return toku_ft_cursor_last(cursor, getf, getf_v); - } else { - return toku_ft_cursor_prev(cursor, getf, getf_v); - } - case DB_SET: - return toku_ft_cursor_set(cursor, key, getf, getf_v); - case DB_SET_RANGE: - return toku_ft_cursor_set_range(cursor, key, nullptr, getf, getf_v); - default: ;// Fall through - } - return EINVAL; -} - -void -toku_ft_cursor_peek(FT_CURSOR cursor, const DBT **pkey, const DBT **pval) -// Effect: Retrieves a pointer to the DBTs for the current key and value. -// Requires: The caller may not modify the DBTs or the memory at which they points. -// Requires: The caller must be in the context of a -// FT_GET_(STRADDLE_)CALLBACK_FUNCTION -{ - *pkey = &cursor->key; - *pval = &cursor->val; -} - -bool toku_ft_cursor_uninitialized(FT_CURSOR c) { - return ft_cursor_not_set(c); -} - - -/* ********************************* lookup **************************************/ - -int -toku_ft_lookup (FT_HANDLE ft_handle, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) -{ - int r, rr; - FT_CURSOR cursor; - - rr = toku_ft_cursor(ft_handle, &cursor, NULL, false, false); - if (rr != 0) return rr; - - int op = DB_SET; - r = toku_ft_cursor_get(cursor, k, getf, getf_v, op); - - toku_ft_cursor_close(cursor); - - return r; -} - /* ********************************* delete **************************************/ static int -getf_nothing (ITEMLEN UU(keylen), bytevec UU(key), ITEMLEN UU(vallen), bytevec UU(val), void *UU(pair_v), bool UU(lock_only)) { +getf_nothing (uint32_t UU(keylen), const void *UU(key), uint32_t UU(vallen), const void *UU(val), void *UU(pair_v), bool UU(lock_only)) { return 0; } -int -toku_ft_cursor_delete(FT_CURSOR cursor, int flags, TOKUTXN txn) { +int toku_ft_cursor_delete(FT_CURSOR cursor, int flags, TOKUTXN txn) { int r; int unchecked_flags = flags; bool error_if_missing = (bool) !(flags&DB_DELETE_ANY); unchecked_flags &= ~DB_DELETE_ANY; if (unchecked_flags!=0) r = EINVAL; - else if (ft_cursor_not_set(cursor)) r = EINVAL; + else if (toku_ft_cursor_not_set(cursor)) r = EINVAL; else { r = 0; if (error_if_missing) { @@ -5947,17 +3978,14 @@ toku_ft_cursor_delete(FT_CURSOR cursor, int flags, TOKUTXN txn) { /* ********************* keyrange ************************ */ - struct keyrange_compare_s { FT ft; const DBT *key; }; -static int -keyrange_compare (DBT const &kdbt, const struct keyrange_compare_s &s) { - // TODO: maybe put a const fake_db in the header - FAKE_DB(db, &s.ft->cmp_descriptor); - return s.ft->compare_fun(&db, &kdbt, s.key); +// TODO: 
Remove me, I'm boring +static int keyrange_compare(DBT const &kdbt, const struct keyrange_compare_s &s) { + return s.ft->cmp(&kdbt, s.key); } static void @@ -6020,17 +4048,17 @@ toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node, uint64_t* less, uint64_t* equal_left, uint64_t* middle, uint64_t* equal_right, uint64_t* greater, bool* single_basement_node, uint64_t estimated_num_rows, - struct ftnode_fetch_extra *min_bfe, // set up to read a minimal read. - struct ftnode_fetch_extra *match_bfe, // set up to read a basement node iff both keys in it - struct unlockers *unlockers, ANCESTORS ancestors, struct pivot_bounds const * const bounds) + ftnode_fetch_extra *min_bfe, // set up to read a minimal read. + ftnode_fetch_extra *match_bfe, // set up to read a basement node iff both keys in it + struct unlockers *unlockers, ANCESTORS ancestors, const pivot_bounds &bounds) // Implementation note: Assign values to less, equal, and greater, and then on the way out (returning up the stack) we add more values in. { int r = 0; // if KEY is NULL then use the leftmost key. - int left_child_number = key_left ? toku_ftnode_which_child (node, key_left, &ft_handle->ft->cmp_descriptor, ft_handle->ft->compare_fun) : 0; + int left_child_number = key_left ? toku_ftnode_which_child (node, key_left, ft_handle->ft->cmp) : 0; int right_child_number = node->n_children; // Sentinel that does not equal left_child_number. if (may_find_right) { - right_child_number = key_right ? toku_ftnode_which_child (node, key_right, &ft_handle->ft->cmp_descriptor, ft_handle->ft->compare_fun) : node->n_children - 1; + right_child_number = key_right ? toku_ftnode_which_child (node, key_right, ft_handle->ft->cmp) : node->n_children - 1; } uint64_t rows_per_child = estimated_num_rows / node->n_children; @@ -6070,11 +4098,11 @@ toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node, struct unlock_ftnode_extra unlock_extra = {ft_handle,childnode,false}; struct unlockers next_unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, unlockers}; - const struct pivot_bounds next_bounds = next_pivot_keys(node, left_child_number, bounds); + const pivot_bounds next_bounds = bounds.next_bounds(node, left_child_number); r = toku_ft_keysrange_internal(ft_handle, childnode, key_left, key_right, child_may_find_right, less, equal_left, middle, equal_right, greater, single_basement_node, - rows_per_child, min_bfe, match_bfe, &next_unlockers, &next_ancestors, &next_bounds); + rows_per_child, min_bfe, match_bfe, &next_unlockers, &next_ancestors, next_bounds); if (r != TOKUDB_TRY_AGAIN) { assert_zero(r); @@ -6118,10 +4146,10 @@ void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint6 return; } paranoid_invariant(!(!key_left && key_right)); - struct ftnode_fetch_extra min_bfe; - struct ftnode_fetch_extra match_bfe; - fill_bfe_for_min_read(&min_bfe, ft_handle->ft); // read pivot keys but not message buffers - fill_bfe_for_keymatch(&match_bfe, ft_handle->ft, key_left, key_right, false, false); // read basement node only if both keys in it. + ftnode_fetch_extra min_bfe; + ftnode_fetch_extra match_bfe; + min_bfe.create_for_min_read(ft_handle->ft); // read pivot keys but not message buffers + match_bfe.create_for_keymatch(ft_handle->ft, key_left, key_right, false, false); // read basement node only if both keys in it. 
try_again: { uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0; @@ -6153,7 +4181,7 @@ try_again: r = toku_ft_keysrange_internal (ft_handle, node, key_left, key_right, true, &less, &equal_left, &middle, &equal_right, &greater, &single_basement_node, numrows, - &min_bfe, &match_bfe, &unlockers, (ANCESTORS)NULL, &infinite_bounds); + &min_bfe, &match_bfe, &unlockers, (ANCESTORS)NULL, pivot_bounds::infinite_bounds()); assert(r == 0 || r == TOKUDB_TRY_AGAIN); if (r == TOKUDB_TRY_AGAIN) { assert(!unlockers.locked); @@ -6169,7 +4197,7 @@ try_again: r = toku_ft_keysrange_internal (ft_handle, node, key_right, nullptr, false, &less2, &equal_left2, &middle2, &equal_right2, &greater2, &ignore, numrows, - &min_bfe, &match_bfe, &unlockers, (ANCESTORS)nullptr, &infinite_bounds); + &min_bfe, &match_bfe, &unlockers, (ANCESTORS)nullptr, pivot_bounds::infinite_bounds()); assert(r == 0 || r == TOKUDB_TRY_AGAIN); if (r == TOKUDB_TRY_AGAIN) { assert(!unlockers.locked); @@ -6256,9 +4284,9 @@ static int get_key_after_bytes_in_basementnode(FT ft, BASEMENTNODE bn, const DBT return r; } -static int get_key_after_bytes_in_subtree(FT_HANDLE ft_h, FT ft, FTNODE node, UNLOCKERS unlockers, ANCESTORS ancestors, PIVOT_BOUNDS bounds, FTNODE_FETCH_EXTRA bfe, ft_search_t *search, uint64_t subtree_bytes, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *, uint64_t, void *), void *cb_extra, uint64_t *skipped); +static int get_key_after_bytes_in_subtree(FT_HANDLE ft_h, FT ft, FTNODE node, UNLOCKERS unlockers, ANCESTORS ancestors, const pivot_bounds &bounds, ftnode_fetch_extra *bfe, ft_search *search, uint64_t subtree_bytes, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *, uint64_t, void *), void *cb_extra, uint64_t *skipped); -static int get_key_after_bytes_in_child(FT_HANDLE ft_h, FT ft, FTNODE node, UNLOCKERS unlockers, ANCESTORS ancestors, PIVOT_BOUNDS bounds, FTNODE_FETCH_EXTRA bfe, ft_search_t *search, int childnum, uint64_t subtree_bytes, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *, uint64_t, void *), void *cb_extra, uint64_t *skipped) { +static int get_key_after_bytes_in_child(FT_HANDLE ft_h, FT ft, FTNODE node, UNLOCKERS unlockers, ANCESTORS ancestors, const pivot_bounds &bounds, ftnode_fetch_extra *bfe, ft_search *search, int childnum, uint64_t subtree_bytes, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *, uint64_t, void *), void *cb_extra, uint64_t *skipped) { int r; struct ancestors next_ancestors = {node, childnum, ancestors}; BLOCKNUM childblocknum = BP_BLOCKNUM(node, childnum); @@ -6273,13 +4301,13 @@ static int get_key_after_bytes_in_child(FT_HANDLE ft_h, FT ft, FTNODE node, UNLO assert_zero(r); struct unlock_ftnode_extra unlock_extra = {ft_h, child, false}; struct unlockers next_unlockers = {true, unlock_ftnode_fun, (void *) &unlock_extra, unlockers}; - const struct pivot_bounds next_bounds = next_pivot_keys(node, childnum, bounds); - return get_key_after_bytes_in_subtree(ft_h, ft, child, &next_unlockers, &next_ancestors, &next_bounds, bfe, search, subtree_bytes, start_key, skip_len, callback, cb_extra, skipped); + const pivot_bounds next_bounds = bounds.next_bounds(node, childnum); + return get_key_after_bytes_in_subtree(ft_h, ft, child, &next_unlockers, &next_ancestors, next_bounds, bfe, search, subtree_bytes, start_key, skip_len, callback, cb_extra, skipped); } -static int get_key_after_bytes_in_subtree(FT_HANDLE ft_h, FT ft, FTNODE node, UNLOCKERS unlockers, ANCESTORS ancestors, PIVOT_BOUNDS 
bounds, FTNODE_FETCH_EXTRA bfe, ft_search_t *search, uint64_t subtree_bytes, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *, uint64_t, void *), void *cb_extra, uint64_t *skipped) { +static int get_key_after_bytes_in_subtree(FT_HANDLE ft_h, FT ft, FTNODE node, UNLOCKERS unlockers, ANCESTORS ancestors, const pivot_bounds &bounds, ftnode_fetch_extra *bfe, ft_search *search, uint64_t subtree_bytes, const DBT *start_key, uint64_t skip_len, void (*callback)(const DBT *, uint64_t, void *), void *cb_extra, uint64_t *skipped) { int r; - int childnum = toku_ft_search_which_child(&ft->cmp_descriptor, ft->compare_fun, node, search); + int childnum = toku_ft_search_which_child(ft->cmp, node, search); const uint64_t child_subtree_bytes = subtree_bytes / node->n_children; if (node->height == 0) { r = DB_NOTFOUND; @@ -6295,7 +4323,8 @@ static int get_key_after_bytes_in_subtree(FT_HANDLE ft_h, FT ft, FTNODE node, UN } else { *skipped += child_subtree_bytes; if (*skipped >= skip_len && i < node->n_children - 1) { - callback(&node->childkeys[i], *skipped, cb_extra); + DBT pivot; + callback(node->pivotkeys.fill_pivot(i, &pivot), *skipped, cb_extra); r = 0; } // Otherwise, r is still DB_NOTFOUND. If this is the last @@ -6340,8 +4369,8 @@ int toku_ft_get_key_after_bytes(FT_HANDLE ft_h, const DBT *start_key, uint64_t s // an error code otherwise { FT ft = ft_h->ft; - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, ft); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(ft); while (true) { FTNODE root; { @@ -6352,8 +4381,8 @@ int toku_ft_get_key_after_bytes(FT_HANDLE ft_h, const DBT *start_key, uint64_t s } struct unlock_ftnode_extra unlock_extra = {ft_h, root, false}; struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS) nullptr}; - ft_search_t search; - ft_search_init(&search, (start_key == nullptr ? ft_cursor_compare_one : ft_cursor_compare_set_range), FT_SEARCH_LEFT, start_key, nullptr, ft_h); + ft_search search; + ft_search_init(&search, (start_key == nullptr ? toku_ft_cursor_compare_one : toku_ft_cursor_compare_set_range), FT_SEARCH_LEFT, start_key, nullptr, ft_h); int r; // We can't do this because of #5768, there may be dictionaries in the wild that have negative stats. 
This won't affect mongo so it's ok: @@ -6363,7 +4392,7 @@ int toku_ft_get_key_after_bytes(FT_HANDLE ft_h, const DBT *start_key, uint64_t s numbytes = 0; } uint64_t skipped = 0; - r = get_key_after_bytes_in_subtree(ft_h, ft, root, &unlockers, nullptr, &infinite_bounds, &bfe, &search, (uint64_t) numbytes, start_key, skip_len, callback, cb_extra, &skipped); + r = get_key_after_bytes_in_subtree(ft_h, ft, root, &unlockers, nullptr, pivot_bounds::infinite_bounds(), &bfe, &search, (uint64_t) numbytes, start_key, skip_len, callback, cb_extra, &skipped); assert(!unlockers.locked); if (r != TOKUDB_TRY_AGAIN) { if (r == DB_NOTFOUND) { @@ -6404,8 +4433,8 @@ toku_dump_ftnode (FILE *file, FT_HANDLE ft_handle, BLOCKNUM blocknum, int depth, toku_get_node_for_verify(blocknum, ft_handle, &node); result=toku_verify_ftnode(ft_handle, ft_handle->ft->h->max_msn_in_ft, ft_handle->ft->h->max_msn_in_ft, false, node, -1, lorange, hirange, NULL, NULL, 0, 1, 0); uint32_t fullhash = toku_cachetable_hash(ft_handle->ft->cf, blocknum); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft_handle->ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft_handle->ft); toku_pin_ftnode( ft_handle->ft, blocknum, @@ -6424,20 +4453,27 @@ toku_dump_ftnode (FILE *file, FT_HANDLE ft_handle, BLOCKNUM blocknum, int depth, int i; for (i=0; i+1< node->n_children; i++) { fprintf(file, "%*spivotkey %d =", depth+1, "", i); - toku_print_BYTESTRING(file, node->childkeys[i].size, (char *) node->childkeys[i].data); + toku_print_BYTESTRING(file, node->pivotkeys.get_pivot(i).size, (char *) node->pivotkeys.get_pivot(i).data); fprintf(file, "\n"); } for (i=0; i< node->n_children; i++) { if (node->height > 0) { NONLEAF_CHILDINFO bnc = BNC(node, i); fprintf(file, "%*schild %d buffered (%d entries):", depth+1, "", i, toku_bnc_n_entries(bnc)); - FIFO_ITERATE(bnc->buffer, key, keylen, data, datalen, type, msn, xids, UU(is_fresh), - { - data=data; datalen=datalen; keylen=keylen; - fprintf(file, "%*s xid=%" PRIu64 " %u (type=%d) msn=0x%" PRIu64 "\n", depth+2, "", xids_get_innermost_xid(xids), (unsigned)toku_dtoh32(*(int*)key), type, msn.msn); - //assert(strlen((char*)key)+1==keylen); - //assert(strlen((char*)data)+1==datalen); - }); + struct print_msg_fn { + FILE *file; + int depth; + print_msg_fn(FILE *f, int d) : file(f), depth(d) { } + int operator()(const ft_msg &msg, bool UU(is_fresh)) { + fprintf(file, "%*s xid=%" PRIu64 " %u (type=%d) msn=0x%" PRIu64 "\n", + depth+2, "", + toku_xids_get_innermost_xid(msg.xids()), + static_cast<unsigned>(toku_dtoh32(*(int*)msg.kdbt()->data)), + msg.type(), msg.msn().msn); + return 0; + } + } print_fn(file, depth); + bnc->msg_buffer.iterate(print_fn); } else { int size = BLB_DATA(node, i)->num_klpairs(); @@ -6459,12 +4495,13 @@ toku_dump_ftnode (FILE *file, FT_HANDLE ft_handle, BLOCKNUM blocknum, int depth, for (i=0; i<node->n_children; i++) { fprintf(file, "%*schild %d\n", depth, "", i); if (i>0) { - char *CAST_FROM_VOIDP(key, node->childkeys[i-1].data); - fprintf(file, "%*spivot %d len=%u %u\n", depth+1, "", i-1, node->childkeys[i-1].size, (unsigned)toku_dtoh32(*(int*)key)); + char *CAST_FROM_VOIDP(key, node->pivotkeys.get_pivot(i - 1).data); + fprintf(file, "%*spivot %d len=%u %u\n", depth+1, "", i-1, node->pivotkeys.get_pivot(i - 1).size, (unsigned)toku_dtoh32(*(int*)key)); } + DBT x, y; toku_dump_ftnode(file, ft_handle, BP_BLOCKNUM(node, i), depth+4, - (i==0) ? lorange : &node->childkeys[i-1], - (i==node->n_children-1) ? hirange : &node->childkeys[i]); + (i==0) ? 
lorange : node->pivotkeys.fill_pivot(i - 1, &x), + (i==node->n_children-1) ? hirange : node->pivotkeys.fill_pivot(i, &y)); } } } @@ -6472,17 +4509,15 @@ toku_dump_ftnode (FILE *file, FT_HANDLE ft_handle, BLOCKNUM blocknum, int depth, return result; } -int toku_dump_ft (FILE *f, FT_HANDLE ft_handle) { - int r; - assert(ft_handle->ft); - toku_dump_translation_table(f, ft_handle->ft->blocktable); - { - uint32_t fullhash = 0; - CACHEKEY root_key; - toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash); - r = toku_dump_ftnode(f, ft_handle, root_key, 0, 0, 0); - } - return r; +int toku_dump_ft(FILE *f, FT_HANDLE ft_handle) { + FT ft = ft_handle->ft; + invariant_notnull(ft); + ft->blocktable.dump_translation_table(f); + + uint32_t fullhash = 0; + CACHEKEY root_key; + toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash); + return toku_dump_ftnode(f, ft_handle, root_key, 0, 0, 0); } int toku_ft_layer_init(void) { @@ -6574,18 +4609,15 @@ void toku_ft_unlink(FT_HANDLE handle) { toku_cachefile_unlink_on_close(cf); } -int -toku_ft_get_fragmentation(FT_HANDLE ft_handle, TOKU_DB_FRAGMENTATION report) { - int r; - +int toku_ft_get_fragmentation(FT_HANDLE ft_handle, TOKU_DB_FRAGMENTATION report) { int fd = toku_cachefile_get_fd(ft_handle->ft->cf); toku_ft_lock(ft_handle->ft); int64_t file_size; - r = toku_os_get_file_size(fd, &file_size); - if (r==0) { + int r = toku_os_get_file_size(fd, &file_size); + if (r == 0) { report->file_size_bytes = file_size; - toku_block_table_get_fragmentation_unlocked(ft_handle->ft->blocktable, report); + ft_handle->ft->blocktable.get_fragmentation_unlocked(report); } toku_ft_unlock(ft_handle->ft); return r; @@ -6601,8 +4633,8 @@ static bool is_empty_fast_iter (FT_HANDLE ft_handle, FTNODE node) { { BLOCKNUM childblocknum = BP_BLOCKNUM(node,childnum); uint32_t fullhash = compute_child_fullhash(ft_handle->ft->cf, node, childnum); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft_handle->ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft_handle->ft); // don't need to pass in dependent nodes as we are not // modifying nodes we are pinning toku_pin_ftnode( @@ -6640,8 +4672,8 @@ bool toku_ft_is_empty_fast (FT_HANDLE ft_handle) { CACHEKEY root_key; toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft_handle->ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft_handle->ft); toku_pin_ftnode( ft_handle->ft, root_key, @@ -6677,6 +4709,26 @@ int toku_ft_strerror_r(int error, char *buf, size_t buflen) } } +int toku_keycompare(const void *key1, uint32_t key1len, const void *key2, uint32_t key2len) { + int comparelen = key1len < key2len ? 
key1len : key2len; + int c = memcmp(key1, key2, comparelen); + if (__builtin_expect(c != 0, 1)) { + return c; + } else { + if (key1len < key2len) { + return -1; + } else if (key1len > key2len) { + return 1; + } else { + return 0; + } + } +} + +int toku_builtin_compare_fun(DB *db __attribute__((__unused__)), const DBT *a, const DBT*b) { + return toku_keycompare(a->data, a->size, b->data, b->size); +} + #include <toku_race_tools.h> void __attribute__((__constructor__)) toku_ft_helgrind_ignore(void); void diff --git a/storage/tokudb/ft-index/ft/ft-ops.h b/storage/tokudb/ft-index/ft/ft-ops.h index b482d2b8206..c45e0c71ef5 100644 --- a/storage/tokudb/ft-index/ft/ft-ops.h +++ b/storage/tokudb/ft-index/ft/ft-ops.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef FT_OPS_H -#define FT_OPS_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,33 +86,22 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." // This must be first to make the 64-bit file mode work right in Linux #define _FILE_OFFSET_BITS 64 -#include "fttypes.h" -#include "ybt.h" + #include <db.h> -#include "cachetable.h" -#include "log.h" -#include "ft-search.h" -#include "compress.h" - -// A callback function is invoked with the key, and the data. -// The pointers (to the bytevecs) must not be modified. The data must be copied out before the callback function returns. -// Note: In the thread-safe version, the ftnode remains locked while the callback function runs. So return soon, and don't call the ft code from the callback function. -// If the callback function returns a nonzero value (an error code), then that error code is returned from the get function itself. -// The cursor object will have been updated (so that if result==0 the current value is the value being passed) -// (If r!=0 then the cursor won't have been updated.) -// If r!=0, it's up to the callback function to return that value of r. -// A 'key' bytevec of NULL means that element is not found (effectively infinity or -// -infinity depending on direction) -// When lock_only is false, the callback does optional lock tree locking and then processes the key and val. -// When lock_only is true, the callback only does optional lock tree locking. 
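The hunk above introduces toku_keycompare and toku_builtin_compare_fun: keys are ordered by memcmp over the shared prefix, and on a tie the shorter key sorts first (the patch wraps the memcmp result in __builtin_expect because differing prefixes are the common case). A minimal stand-alone sketch of that ordering; the function name and the test driver are illustrative, not part of the patch:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Same ordering as the new toku_keycompare: memcmp over the common prefix,
// then the shorter key wins ties.
static int keycompare_sketch(const void *key1, uint32_t key1len,
                             const void *key2, uint32_t key2len) {
    uint32_t comparelen = key1len < key2len ? key1len : key2len;
    int c = memcmp(key1, key2, comparelen);
    if (c != 0) {
        return c;
    }
    if (key1len < key2len) return -1;
    if (key1len > key2len) return 1;
    return 0;
}

int main(void) {
    // "abc" is a proper prefix of "abcd", so it must sort first.
    printf("%d\n", keycompare_sketch("abc", 3, "abcd", 4));   // negative
    printf("%d\n", keycompare_sketch("abd", 3, "abcd", 4));   // positive
    printf("%d\n", keycompare_sketch("abc", 3, "abc", 3));    // zero
    return 0;
}

toku_builtin_compare_fun in the hunk simply forwards each DBT's data/size pair into this routine.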
-typedef int(*FT_GET_CALLBACK_FUNCTION)(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool lock_only); - -typedef bool(*FT_CHECK_INTERRUPT_CALLBACK)(void* extra); + +#include "ft/cachetable/cachetable.h" +#include "ft/comparator.h" +#include "ft/msg.h" +#include "util/dbt.h" + +typedef struct ft_handle *FT_HANDLE; int toku_open_ft_handle (const char *fname, int is_create, FT_HANDLE *, int nodesize, int basementnodesize, enum toku_compression_method compression_method, CACHETABLE, TOKUTXN, int(*)(DB *,const DBT*,const DBT*)) __attribute__ ((warn_unused_result)); @@ -125,7 +112,7 @@ int toku_open_ft_handle (const char *fname, int is_create, FT_HANDLE *, int node // ANY operations. to update the cmp descriptor after any operations have already happened, all handles // and transactions must close and reopen before the change, then you can update the cmp descriptor void toku_ft_change_descriptor(FT_HANDLE t, const DBT* old_descriptor, const DBT* new_descriptor, bool do_log, TOKUTXN txn, bool update_cmp_descriptor); -uint32_t toku_serialize_descriptor_size(const DESCRIPTOR desc); +uint32_t toku_serialize_descriptor_size(DESCRIPTOR desc); void toku_ft_handle_create(FT_HANDLE *ft); void toku_ft_set_flags(FT_HANDLE, unsigned int flags); @@ -139,11 +126,13 @@ void toku_ft_handle_set_compression_method(FT_HANDLE, enum toku_compression_meth void toku_ft_handle_get_compression_method(FT_HANDLE, enum toku_compression_method *); void toku_ft_handle_set_fanout(FT_HANDLE, unsigned int fanout); void toku_ft_handle_get_fanout(FT_HANDLE, unsigned int *fanout); +int toku_ft_handle_set_memcmp_magic(FT_HANDLE, uint8_t magic); -void toku_ft_set_bt_compare(FT_HANDLE, ft_compare_func); -ft_compare_func toku_ft_get_bt_compare (FT_HANDLE ft_h); +void toku_ft_set_bt_compare(FT_HANDLE ft_handle, ft_compare_func cmp_func); +const toku::comparator &toku_ft_get_comparator(FT_HANDLE ft_handle); -void toku_ft_set_redirect_callback(FT_HANDLE ft_h, on_redirect_callback redir_cb, void* extra); +typedef void (*on_redirect_callback)(FT_HANDLE ft_handle, void *extra); +void toku_ft_set_redirect_callback(FT_HANDLE ft_handle, on_redirect_callback cb, void *extra); // How updates (update/insert/deletes) work: // There are two flavers of upsertdels: Singleton and broadcast. @@ -181,6 +170,9 @@ void toku_ft_set_redirect_callback(FT_HANDLE ft_h, on_redirect_callback redir_cb // Implementation note: Acquires a write lock on the entire database. // This function works by sending an BROADCAST-UPDATE message containing // the key and the extra. +typedef int (*ft_update_func)(DB *db, const DBT *key, const DBT *old_val, const DBT *extra, + void (*set_val)(const DBT *new_val, void *set_extra), + void *set_extra); void toku_ft_set_update(FT_HANDLE ft_h, ft_update_func update_fun); int toku_ft_handle_open(FT_HANDLE, const char *fname_in_env, @@ -197,9 +189,17 @@ void toku_ft_handle_close(FT_HANDLE ft_handle); // close an ft handle during recovery. the underlying ft must close, and will use the given lsn. void toku_ft_handle_close_recovery(FT_HANDLE ft_handle, LSN oplsn); +// At the ydb layer, a DICTIONARY_ID uniquely identifies an open dictionary. 
+// With the introduction of the loader (ticket 2216), it is possible for the file that holds +// an open dictionary to change, so these are now separate and independent unique identifiers (see FILENUM) +struct DICTIONARY_ID { + uint64_t dictid; +}; +static const DICTIONARY_ID DICTIONARY_ID_NONE = { .dictid = 0 }; + int toku_ft_handle_open_with_dict_id( - FT_HANDLE t, + FT_HANDLE ft_h, const char *fname_in_env, int is_create, int only_create, @@ -208,11 +208,12 @@ toku_ft_handle_open_with_dict_id( DICTIONARY_ID use_dictionary_id ) __attribute__ ((warn_unused_result)); -int toku_ft_lookup (FT_HANDLE ft_h, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); - // Effect: Insert a key and data pair into an ft void toku_ft_insert (FT_HANDLE ft_h, DBT *k, DBT *v, TOKUTXN txn); +// Returns: 0 if the key was inserted, DB_KEYEXIST if the key already exists +int toku_ft_insert_unique(FT_HANDLE ft, DBT *k, DBT *v, TOKUTXN txn, bool do_logging); + // Effect: Optimize the ft void toku_ft_optimize (FT_HANDLE ft_h); @@ -244,8 +245,9 @@ void toku_ft_delete (FT_HANDLE ft_h, DBT *k, TOKUTXN txn); void toku_ft_maybe_delete (FT_HANDLE ft_h, DBT *k, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging); TXNID toku_ft_get_oldest_referenced_xid_estimate(FT_HANDLE ft_h); -TXN_MANAGER toku_ft_get_txn_manager(FT_HANDLE ft_h); +struct txn_manager *toku_ft_get_txn_manager(FT_HANDLE ft_h); +struct txn_gc_info; void toku_ft_send_insert(FT_HANDLE ft_h, DBT *key, DBT *val, XIDS xids, enum ft_msg_type type, txn_gc_info *gc_info); void toku_ft_send_delete(FT_HANDLE ft_h, DBT *key, XIDS xids, txn_gc_info *gc_info); void toku_ft_send_commit_any(FT_HANDLE ft_h, DBT *key, XIDS xids, txn_gc_info *gc_info); @@ -258,37 +260,6 @@ extern int toku_ft_debug_mode; int toku_verify_ft (FT_HANDLE ft_h) __attribute__ ((warn_unused_result)); int toku_verify_ft_with_progress (FT_HANDLE ft_h, int (*progress_callback)(void *extra, float progress), void *extra, int verbose, int keep_going) __attribute__ ((warn_unused_result)); -typedef struct ft_cursor *FT_CURSOR; -int toku_ft_cursor (FT_HANDLE, FT_CURSOR*, TOKUTXN, bool, bool) __attribute__ ((warn_unused_result)); -void toku_ft_cursor_set_leaf_mode(FT_CURSOR); -// Sets a boolean on the ft cursor that prevents uncessary copying of -// the cursor duing a one query. 
-void toku_ft_cursor_set_temporary(FT_CURSOR); -void toku_ft_cursor_remove_restriction(FT_CURSOR); -void toku_ft_cursor_set_check_interrupt_cb(FT_CURSOR ftcursor, FT_CHECK_INTERRUPT_CALLBACK cb, void *extra); -int toku_ft_cursor_is_leaf_mode(FT_CURSOR); -void toku_ft_cursor_set_range_lock(FT_CURSOR, const DBT *, const DBT *, bool, bool, int); - -// get is deprecated in favor of the individual functions below -int toku_ft_cursor_get (FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, int get_flags) __attribute__ ((warn_unused_result)); - -int toku_ft_cursor_first(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); -int toku_ft_cursor_last(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); -int toku_ft_cursor_next(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); -int toku_ft_cursor_prev(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); -int toku_ft_cursor_current(FT_CURSOR cursor, int op, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); -int toku_ft_cursor_set(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); -int toku_ft_cursor_set_range(FT_CURSOR cursor, DBT *key, DBT *key_bound, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); -int toku_ft_cursor_set_range_reverse(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); -int toku_ft_cursor_get_both_range(FT_CURSOR cursor, DBT *key, DBT *val, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); -int toku_ft_cursor_get_both_range_reverse(FT_CURSOR cursor, DBT *key, DBT *val, FT_GET_CALLBACK_FUNCTION getf, void *getf_v) __attribute__ ((warn_unused_result)); - -int toku_ft_cursor_delete(FT_CURSOR cursor, int flags, TOKUTXN) __attribute__ ((warn_unused_result)); -void toku_ft_cursor_close (FT_CURSOR curs); -bool toku_ft_cursor_uninitialized(FT_CURSOR c) __attribute__ ((warn_unused_result)); - -void toku_ft_cursor_peek(FT_CURSOR cursor, const DBT **pkey, const DBT **pval); - DICTIONARY_ID toku_ft_get_dictionary_id(FT_HANDLE); enum ft_flags { @@ -350,7 +321,7 @@ bool toku_ft_is_empty_fast (FT_HANDLE ft_h) __attribute__ ((warn_unused_result)) int toku_ft_strerror_r(int error, char *buf, size_t buflen); // Effect: LIke the XSI-compliant strerorr_r, extended to db_strerror(). // If error>=0 then the result is to do strerror_r(error, buf, buflen), that is fill buf with a descriptive error message. -// If error<0 then return a TokuDB-specific error code. For unknown cases, we return -1 and set errno=EINVAL, even for cases that *should* be known. (Not all DB errors are known by this function which is a bug.) +// If error<0 then return a TokuFT-specific error code. For unknown cases, we return -1 and set errno=EINVAL, even for cases that *should* be known. (Not all DB errors are known by this function which is a bug.) 
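A little above, ft-ops.h now carries the ft_update_func typedef inline: an update callback receives the key, the old value (or none), and caller-supplied extra bytes, and publishes its result by calling set_val. A hedged, self-contained sketch of a callback with that shape, an "add a delta to a counter" upsert; the toy DB/DBT stand-ins, the function names, and the driver are all illustrative (the real callback uses the DB/DBT types from <db.h>):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Toy stand-ins so the sketch compiles on its own.
struct DBT_toy { const void *data; uint32_t size; };
struct DB_toy {};

// Shape of the ft_update_func callback: compute a new value from
// (key, old_val, extra) and hand it back through set_val().
static int add_to_counter(DB_toy *, const DBT_toy *key, const DBT_toy *old_val,
                          const DBT_toy *extra,
                          void (*set_val)(const DBT_toy *new_val, void *set_extra),
                          void *set_extra) {
    (void) key;
    uint64_t counter = 0;
    if (old_val != nullptr && old_val->size == sizeof(counter)) {
        memcpy(&counter, old_val->data, sizeof(counter));
    }
    uint64_t delta = 0;
    if (extra != nullptr && extra->size == sizeof(delta)) {
        memcpy(&delta, extra->data, sizeof(delta));
    }
    counter += delta;
    DBT_toy new_val = { &counter, sizeof(counter) };
    set_val(&new_val, set_extra);   // the real API can also signal a delete; not exercised here
    return 0;
}

// Tiny driver: applies the callback once and captures the produced value.
static void capture_val(const DBT_toy *new_val, void *set_extra) {
    memcpy(set_extra, new_val->data, new_val->size);
}

int main(void) {
    uint64_t old_bytes = 40, delta_bytes = 2, result = 0;
    DBT_toy key = { "k", 1 }, oldv = { &old_bytes, sizeof(old_bytes) },
            extra = { &delta_bytes, sizeof(delta_bytes) };
    add_to_counter(nullptr, &key, &oldv, &extra, capture_val, &result);
    printf("%llu\n", (unsigned long long) result);   // 42
    return 0;
}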
extern bool garbage_collection_debug; @@ -359,5 +330,3 @@ void toku_ft_set_direct_io(bool direct_io_on); void toku_ft_set_compress_buffers_before_eviction(bool compress_buffers); void toku_note_deserialized_basement_node(bool fixed_key_size); - -#endif diff --git a/storage/tokudb/ft-index/ft/ft-test-helpers.cc b/storage/tokudb/ft-index/ft/ft-test-helpers.cc index 575bd69ab7e..dc0b77099fa 100644 --- a/storage/tokudb/ft-index/ft/ft-test-helpers.cc +++ b/storage/tokudb/ft-index/ft/ft-test-helpers.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,12 +89,13 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "ft-cachetable-wrappers.h" -#include "ft-flusher.h" -#include "ft-internal.h" -#include "ft.h" -#include "fttypes.h" -#include "ule.h" +#include "ft/ft.h" +#include "ft/ft-cachetable-wrappers.h" +#include "ft/ft-internal.h" +#include "ft/ft-flusher.h" +#include "ft/serialize/ft_node-serialize.h" +#include "ft/node.h" +#include "ft/ule.h" // dummymsn needed to simulate msn because messages are injected at a lower level than toku_ft_root_put_msg() #define MIN_DUMMYMSN ((MSN) {(uint64_t)1 << 62}) @@ -123,17 +124,21 @@ int toku_testsetup_leaf(FT_HANDLE ft_handle, BLOCKNUM *blocknum, int n_children, FTNODE node; assert(testsetup_initialized); toku_create_new_ftnode(ft_handle, &node, 0, n_children); - int i; - for (i=0; i<n_children; i++) { - BP_STATE(node,i) = PT_AVAIL; + for (int i = 0; i < n_children; i++) { + BP_STATE(node, i) = PT_AVAIL; } - for (i=0; i+1<n_children; i++) { - toku_memdup_dbt(&node->childkeys[i], keys[i], keylens[i]); - node->totalchildkeylens += keylens[i]; + DBT *XMALLOC_N(n_children - 1, pivotkeys); + for (int i = 0; i + 1 < n_children; i++) { + toku_memdup_dbt(&pivotkeys[i], keys[i], keylens[i]); + } + node->pivotkeys.create_from_dbts(pivotkeys, n_children - 1); + for (int i = 0; i + 1 < n_children; i++) { + toku_destroy_dbt(&pivotkeys[i]); } + toku_free(pivotkeys); - *blocknum = node->thisnodename; + *blocknum = node->blocknum; toku_unpin_ftnode(ft_handle->ft, node); return 0; } @@ -143,16 +148,21 @@ int toku_testsetup_nonleaf (FT_HANDLE ft_handle, int height, BLOCKNUM *blocknum, FTNODE node; assert(testsetup_initialized); toku_create_new_ftnode(ft_handle, &node, height, n_children); - int i; - for (i=0; i<n_children; i++) { + for (int i = 0; i < n_children; i++) { BP_BLOCKNUM(node, i) = children[i]; BP_STATE(node,i) = PT_AVAIL; } - for (i=0; i+1<n_children; i++) { - toku_memdup_dbt(&node->childkeys[i], keys[i], keylens[i]); - node->totalchildkeylens += keylens[i]; + DBT *XMALLOC_N(n_children - 1, pivotkeys); + for (int i = 0; i + 1 < n_children; i++) { + toku_memdup_dbt(&pivotkeys[i], keys[i], keylens[i]); } - *blocknum = node->thisnodename; + node->pivotkeys.create_from_dbts(pivotkeys, n_children - 1); + for (int i = 0; i + 1 < n_children; i++) { + toku_destroy_dbt(&pivotkeys[i]); + } + toku_free(pivotkeys); + + *blocknum = node->blocknum; toku_unpin_ftnode(ft_handle->ft, node); return 0; } @@ -167,8 +177,8 @@ int 
toku_testsetup_get_sersize(FT_HANDLE ft_handle, BLOCKNUM diskoff) // Return { assert(testsetup_initialized); void *node_v; - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft_handle->ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft_handle->ft); int r = toku_cachetable_get_and_pin( ft_handle->ft->cf, diskoff, toku_cachetable_hash(ft_handle->ft->cf, diskoff), @@ -194,8 +204,8 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, const assert(testsetup_initialized); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft_handle->ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft_handle->ft); r = toku_cachetable_get_and_pin( ft_handle->ft->cf, blocknum, @@ -214,26 +224,22 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, const toku_verify_or_set_counts(node); assert(node->height==0); - DBT keydbt,valdbt; - MSN msn = next_dummymsn(); - FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), - .u = { .id = { toku_fill_dbt(&keydbt, key, keylen), - toku_fill_dbt(&valdbt, val, vallen) } } }; + DBT kdbt, vdbt; + ft_msg msg(toku_fill_dbt(&kdbt, key, keylen), toku_fill_dbt(&vdbt, val, vallen), + FT_INSERT, next_dummymsn(), toku_xids_get_root_xids()); static size_t zero_flow_deltas[] = { 0, 0 }; txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, true); - toku_ft_node_put_msg ( - ft_handle->ft->compare_fun, - ft_handle->ft->update_fun, - &ft_handle->ft->cmp_descriptor, - node, - -1, - &msg, - true, - &gc_info, - zero_flow_deltas, - NULL - ); + toku_ftnode_put_msg(ft_handle->ft->cmp, + ft_handle->ft->update_fun, + node, + -1, + msg, + true, + &gc_info, + zero_flow_deltas, + NULL + ); toku_verify_or_set_counts(node); @@ -252,8 +258,8 @@ testhelper_string_key_cmp(DB *UU(e), const DBT *a, const DBT *b) void toku_pin_node_with_min_bfe(FTNODE* node, BLOCKNUM b, FT_HANDLE t) { - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, t->ft); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(t->ft); toku_pin_ftnode( t->ft, b, @@ -271,8 +277,8 @@ int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, en assert(testsetup_initialized); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft_handle->ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft_handle->ft); r = toku_cachetable_get_and_pin( ft_handle->ft->cf, blocknum, @@ -291,13 +297,14 @@ int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, en assert(node->height>0); DBT k; - int childnum = toku_ftnode_which_child(node, - toku_fill_dbt(&k, key, keylen), - &ft_handle->ft->cmp_descriptor, ft_handle->ft->compare_fun); + int childnum = toku_ftnode_which_child(node, toku_fill_dbt(&k, key, keylen), ft_handle->ft->cmp); - XIDS xids_0 = xids_get_root_xids(); + XIDS xids_0 = toku_xids_get_root_xids(); MSN msn = next_dummymsn(); - toku_bnc_insert_msg(BNC(node, childnum), key, keylen, val, vallen, msgtype, msn, xids_0, true, NULL, testhelper_string_key_cmp); + toku::comparator cmp; + cmp.create(testhelper_string_key_cmp, nullptr); + toku_bnc_insert_msg(BNC(node, childnum), key, keylen, val, vallen, msgtype, msn, xids_0, true, cmp); + cmp.destroy(); // Hack to get the test working. The problem is that this test // is directly queueing something in a FIFO instead of // using ft APIs. 
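Throughout this patch the old FIFO_ITERATE macro is replaced by iterating a message buffer with a caller-supplied functor (print_msg_fn in toku_dump_ftnode above, verify_msg_fn in the ft-verify.cc hunks that follow): the functor's operator() receives each message plus its freshness flag and returns nonzero to stop the walk. A stripped-down sketch of that pattern over a plain vector, with all types and names illustrative:

#include <cstdio>
#include <string>
#include <vector>

// Illustrative stand-in for a buffered message: a key and a freshness bit.
struct msg_toy { std::string key; bool is_fresh; };

struct msg_buffer_toy {
    std::vector<msg_toy> msgs;
    // Mirrors the iterate() shape used by the patch: call fn(msg, is_fresh)
    // for each entry and propagate the first nonzero return value.
    template <typename F> int iterate(F &fn) const {
        for (const msg_toy &m : msgs) {
            int r = fn(m, m.is_fresh);
            if (r != 0) return r;
        }
        return 0;
    }
};

// Counterpart of print_msg_fn: a small functor carrying its own state.
struct print_fn_toy {
    FILE *file;
    int printed;
    int operator()(const msg_toy &m, bool is_fresh) {
        fprintf(file, "key=%s fresh=%d\n", m.key.c_str(), (int) is_fresh);
        printed++;
        return 0;   // returning nonzero would abort the iteration
    }
};

int main(void) {
    msg_buffer_toy buf;
    buf.msgs = { {"a", true}, {"b", false} };
    print_fn_toy fn{stdout, 0};
    buf.iterate(fn);
    printf("printed %d messages\n", fn.printed);
    return 0;
}

The functor keeps the per-iteration state (file handle, counters, last MSN seen) in named members instead of captured macro locals, which is why the dump and verify code can now reuse the same buffer-walking entry point.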
diff --git a/storage/tokudb/ft-index/ft/ft-verify.cc b/storage/tokudb/ft-index/ft/ft-verify.cc index 506a54a07a0..cbb5159e276 100644 --- a/storage/tokudb/ft-index/ft/ft-verify.cc +++ b/storage/tokudb/ft-index/ft/ft-verify.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -97,31 +97,30 @@ PATENT RIGHTS GRANT: * For each nonleaf node: All the messages have keys that are between the associated pivot keys ( left_pivot_key < message <= right_pivot_key) */ -#include "ft-cachetable-wrappers.h" -#include "ft-internal.h" -#include "ft.h" +#include "ft/serialize/block_table.h" +#include "ft/ft.h" +#include "ft/ft-cachetable-wrappers.h" +#include "ft/ft-internal.h" +#include "ft/node.h" static int compare_pairs (FT_HANDLE ft_handle, const DBT *a, const DBT *b) { - FAKE_DB(db, &ft_handle->ft->cmp_descriptor); - int cmp = ft_handle->ft->compare_fun(&db, a, b); - return cmp; + return ft_handle->ft->cmp(a, b); } static int -compare_pair_to_key (FT_HANDLE ft_handle, const DBT *a, bytevec key, ITEMLEN keylen) { +compare_pair_to_key (FT_HANDLE ft_handle, const DBT *a, const void *key, uint32_t keylen) { DBT y; - FAKE_DB(db, &ft_handle->ft->cmp_descriptor); - int cmp = ft_handle->ft->compare_fun(&db, a, toku_fill_dbt(&y, key, keylen)); - return cmp; + return ft_handle->ft->cmp(a, toku_fill_dbt(&y, key, keylen)); } static int -verify_msg_in_child_buffer(FT_HANDLE ft_handle, enum ft_msg_type type, MSN msn, bytevec key, ITEMLEN keylen, bytevec UU(data), ITEMLEN UU(datalen), XIDS UU(xids), const DBT *lesser_pivot, const DBT *greatereq_pivot) +verify_msg_in_child_buffer(FT_HANDLE ft_handle, enum ft_msg_type type, MSN msn, const void *key, uint32_t keylen, const void *UU(data), uint32_t UU(datalen), XIDS UU(xids), const DBT *lesser_pivot, const DBT *greatereq_pivot) __attribute__((warn_unused_result)); +UU() static int -verify_msg_in_child_buffer(FT_HANDLE ft_handle, enum ft_msg_type type, MSN msn, bytevec key, ITEMLEN keylen, bytevec UU(data), ITEMLEN UU(datalen), XIDS UU(xids), const DBT *lesser_pivot, const DBT *greatereq_pivot) { +verify_msg_in_child_buffer(FT_HANDLE ft_handle, enum ft_msg_type type, MSN msn, const void *key, uint32_t keylen, const void *UU(data), uint32_t UU(datalen), XIDS UU(xids), const DBT *lesser_pivot, const DBT *greatereq_pivot) { int result = 0; if (msn.msn == ZERO_MSN.msn) result = EINVAL; @@ -159,7 +158,8 @@ get_ith_key_dbt (BASEMENTNODE bn, int i) { #define VERIFY_ASSERTION(predicate, i, string) ({ \ if(!(predicate)) { \ - if (verbose) { \ + (void) verbose; \ + if (true) { \ fprintf(stderr, "%s:%d: Looking at child %d of block %" PRId64 ": %s\n", __FILE__, __LINE__, i, blocknum.b, string); \ } \ result = TOKUDB_NEEDS_REPAIR; \ @@ -169,7 +169,7 @@ get_ith_key_dbt (BASEMENTNODE bn, int i) { struct count_msgs_extra { int count; MSN msn; - FIFO fifo; + message_buffer *msg_buffer; }; // template-only function, but must be extern @@ -177,15 +177,16 @@ int count_msgs(const int32_t &offset, const uint32_t UU(idx), struct count_msgs_ __attribute__((nonnull(3))); int count_msgs(const int32_t &offset, const uint32_t UU(idx), struct count_msgs_extra *const e) { - const struct fifo_entry *entry = toku_fifo_get_entry(e->fifo, offset); - if (entry->msn.msn == e->msn.msn) { + MSN msn; + e->msg_buffer->get_message_key_msn(offset, nullptr, &msn); + if (msn.msn == e->msn.msn) { e->count++; } return 0; } struct verify_message_tree_extra { 
- FIFO fifo; + message_buffer *msg_buffer; bool broadcast; bool is_fresh; int i; @@ -202,20 +203,22 @@ int verify_message_tree(const int32_t &offset, const uint32_t UU(idx), struct ve BLOCKNUM blocknum = e->blocknum; int keep_going_on_failure = e->keep_going_on_failure; int result = 0; - const struct fifo_entry *entry = toku_fifo_get_entry(e->fifo, offset); + DBT k, v; + ft_msg msg = e->msg_buffer->get_message(offset, &k, &v); + bool is_fresh = e->msg_buffer->get_freshness(offset); if (e->broadcast) { - VERIFY_ASSERTION(ft_msg_type_applies_all((enum ft_msg_type) entry->type) || ft_msg_type_does_nothing((enum ft_msg_type) entry->type), + VERIFY_ASSERTION(ft_msg_type_applies_all((enum ft_msg_type) msg.type()) || ft_msg_type_does_nothing((enum ft_msg_type) msg.type()), e->i, "message found in broadcast list that is not a broadcast"); } else { - VERIFY_ASSERTION(ft_msg_type_applies_once((enum ft_msg_type) entry->type), + VERIFY_ASSERTION(ft_msg_type_applies_once((enum ft_msg_type) msg.type()), e->i, "message found in fresh or stale message tree that does not apply once"); if (e->is_fresh) { if (e->messages_have_been_moved) { - VERIFY_ASSERTION(entry->is_fresh, + VERIFY_ASSERTION(is_fresh, e->i, "message found in fresh message tree that is not fresh"); } } else { - VERIFY_ASSERTION(!entry->is_fresh, + VERIFY_ASSERTION(!is_fresh, e->i, "message found in stale message tree that is fresh"); } } @@ -235,15 +238,15 @@ int verify_marked_messages(const int32_t &offset, const uint32_t UU(idx), struct BLOCKNUM blocknum = e->blocknum; int keep_going_on_failure = e->keep_going_on_failure; int result = 0; - const struct fifo_entry *entry = toku_fifo_get_entry(e->fifo, offset); - VERIFY_ASSERTION(!entry->is_fresh, e->i, "marked message found in the fresh message tree that is fresh"); + bool is_fresh = e->msg_buffer->get_freshness(offset); + VERIFY_ASSERTION(!is_fresh, e->i, "marked message found in the fresh message tree that is fresh"); done: return result; } template<typename verify_omt_t> static int -verify_sorted_by_key_msn(FT_HANDLE ft_handle, FIFO fifo, const verify_omt_t &mt) { +verify_sorted_by_key_msn(FT_HANDLE ft_handle, message_buffer *msg_buffer, const verify_omt_t &mt) { int result = 0; size_t last_offset = 0; for (uint32_t i = 0; i < mt.size(); i++) { @@ -251,12 +254,8 @@ verify_sorted_by_key_msn(FT_HANDLE ft_handle, FIFO fifo, const verify_omt_t &mt) int r = mt.fetch(i, &offset); assert_zero(r); if (i > 0) { - struct toku_fifo_entry_key_msn_cmp_extra extra; - ZERO_STRUCT(extra); - extra.desc = &ft_handle->ft->cmp_descriptor; - extra.cmp = ft_handle->ft->compare_fun; - extra.fifo = fifo; - if (toku_fifo_entry_key_msn_cmp(extra, last_offset, offset) >= 0) { + struct toku_msg_buffer_key_msn_cmp_extra extra(ft_handle->ft->cmp, msg_buffer); + if (toku_msg_buffer_key_msn_cmp(extra, last_offset, offset) >= 0) { result = TOKUDB_NEEDS_REPAIR; break; } @@ -268,15 +267,9 @@ verify_sorted_by_key_msn(FT_HANDLE ft_handle, FIFO fifo, const verify_omt_t &mt) template<typename count_omt_t> static int -count_eq_key_msn(FT_HANDLE ft_handle, FIFO fifo, const count_omt_t &mt, const DBT *key, MSN msn) { - struct toku_fifo_entry_key_msn_heaviside_extra extra; - ZERO_STRUCT(extra); - extra.desc = &ft_handle->ft->cmp_descriptor; - extra.cmp = ft_handle->ft->compare_fun; - extra.fifo = fifo; - extra.key = key; - extra.msn = msn; - int r = mt.template find_zero<struct toku_fifo_entry_key_msn_heaviside_extra, toku_fifo_entry_key_msn_heaviside>(extra, nullptr, nullptr); +count_eq_key_msn(FT_HANDLE ft_handle, 
message_buffer *msg_buffer, const count_omt_t &mt, const DBT *key, MSN msn) { + struct toku_msg_buffer_key_msn_heaviside_extra extra(ft_handle->ft->cmp, msg_buffer, key, msn); + int r = mt.template find_zero<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(extra, nullptr, nullptr); int count; if (r == 0) { count = 1; @@ -295,8 +288,8 @@ toku_get_node_for_verify( ) { uint32_t fullhash = toku_cachetable_hash(ft_handle->ft->cf, blocknum); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft_handle->ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft_handle->ft); toku_pin_ftnode( ft_handle->ft, blocknum, @@ -308,9 +301,83 @@ toku_get_node_for_verify( ); } +struct verify_msg_fn { + FT_HANDLE ft_handle; + NONLEAF_CHILDINFO bnc; + const DBT *curr_less_pivot; + const DBT *curr_geq_pivot; + BLOCKNUM blocknum; + MSN this_msn; + int verbose; + int keep_going_on_failure; + bool messages_have_been_moved; + + MSN last_msn; + int msg_i; + int result = 0; // needed by VERIFY_ASSERTION + + verify_msg_fn(FT_HANDLE handle, NONLEAF_CHILDINFO nl, const DBT *less, const DBT *geq, + BLOCKNUM b, MSN tmsn, int v, int k, bool m) : + ft_handle(handle), bnc(nl), curr_less_pivot(less), curr_geq_pivot(geq), + blocknum(b), this_msn(tmsn), verbose(v), keep_going_on_failure(k), messages_have_been_moved(m), last_msn(ZERO_MSN), msg_i(0) { + } + + int operator()(const ft_msg &msg, bool is_fresh) { + enum ft_msg_type type = (enum ft_msg_type) msg.type(); + MSN msn = msg.msn(); + XIDS xid = msg.xids(); + const void *key = msg.kdbt()->data; + const void *data = msg.vdbt()->data; + uint32_t keylen = msg.kdbt()->size; + uint32_t datalen = msg.vdbt()->size; + + int r = verify_msg_in_child_buffer(ft_handle, type, msn, key, keylen, data, datalen, xid, + curr_less_pivot, + curr_geq_pivot); + VERIFY_ASSERTION(r == 0, msg_i, "A message in the buffer is out of place"); + VERIFY_ASSERTION((msn.msn > last_msn.msn), msg_i, "msn per msg must be monotonically increasing toward newer messages in buffer"); + VERIFY_ASSERTION((msn.msn <= this_msn.msn), msg_i, "all messages must have msn within limit of this node's max_msn_applied_to_node_in_memory"); + if (ft_msg_type_applies_once(type)) { + int count; + DBT keydbt; + toku_fill_dbt(&keydbt, key, keylen); + int total_count = 0; + count = count_eq_key_msn(ft_handle, &bnc->msg_buffer, bnc->fresh_message_tree, toku_fill_dbt(&keydbt, key, keylen), msn); + total_count += count; + if (is_fresh) { + VERIFY_ASSERTION(count == 1, msg_i, "a fresh message was not found in the fresh message tree"); + } else if (messages_have_been_moved) { + VERIFY_ASSERTION(count == 0, msg_i, "a stale message was found in the fresh message tree"); + } + VERIFY_ASSERTION(count <= 1, msg_i, "a message was found multiple times in the fresh message tree"); + count = count_eq_key_msn(ft_handle, &bnc->msg_buffer, bnc->stale_message_tree, &keydbt, msn); + + total_count += count; + if (is_fresh) { + VERIFY_ASSERTION(count == 0, msg_i, "a fresh message was found in the stale message tree"); + } else if (messages_have_been_moved) { + VERIFY_ASSERTION(count == 1, msg_i, "a stale message was not found in the stale message tree"); + } + VERIFY_ASSERTION(count <= 1, msg_i, "a message was found multiple times in the stale message tree"); + + VERIFY_ASSERTION(total_count <= 1, msg_i, "a message was found in both message trees (or more than once in a single tree)"); + VERIFY_ASSERTION(total_count >= 1, msg_i, "a message was not found in either message tree"); + } else { + 
VERIFY_ASSERTION(ft_msg_type_applies_all(type) || ft_msg_type_does_nothing(type), msg_i, "a message was found that does not apply either to all or to only one key"); + struct count_msgs_extra extra = { .count = 0, .msn = msn, .msg_buffer = &bnc->msg_buffer }; + bnc->broadcast_list.iterate<struct count_msgs_extra, count_msgs>(&extra); + VERIFY_ASSERTION(extra.count == 1, msg_i, "a broadcast message was not found in the broadcast list"); + } + last_msn = msn; + msg_i++; +done: + return result; + } +}; + static int toku_verify_ftnode_internal(FT_HANDLE ft_handle, - MSN rootmsn, MSN parentmsn, bool messages_exist_above, + MSN rootmsn, MSN parentmsn_with_messages, bool messages_exist_above, FTNODE node, int height, const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) const DBT *greatereq_pivot, // Everything in the subtree should be <= lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) @@ -318,88 +385,54 @@ toku_verify_ftnode_internal(FT_HANDLE ft_handle, { int result=0; MSN this_msn; - BLOCKNUM blocknum = node->thisnodename; + BLOCKNUM blocknum = node->blocknum; //printf("%s:%d pin %p\n", __FILE__, __LINE__, node_v); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); this_msn = node->max_msn_applied_to_node_on_disk; if (height >= 0) { invariant(height == node->height); // this is a bad failure if wrong } if (node->height > 0 && messages_exist_above) { - VERIFY_ASSERTION((parentmsn.msn >= this_msn.msn), 0, "node msn must be descending down tree, newest messages at top"); + VERIFY_ASSERTION((parentmsn_with_messages.msn >= this_msn.msn), 0, "node msn must be descending down tree, newest messages at top"); } // Verify that all the pivot keys are in order. for (int i = 0; i < node->n_children-2; i++) { - int compare = compare_pairs(ft_handle, &node->childkeys[i], &node->childkeys[i+1]); + DBT x, y; + int compare = compare_pairs(ft_handle, node->pivotkeys.fill_pivot(i, &x), node->pivotkeys.fill_pivot(i + 1, &y)); VERIFY_ASSERTION(compare < 0, i, "Value is >= the next value"); } // Verify that all the pivot keys are lesser_pivot < pivot <= greatereq_pivot for (int i = 0; i < node->n_children-1; i++) { + DBT x; if (lesser_pivot) { - int compare = compare_pairs(ft_handle, lesser_pivot, &node->childkeys[i]); + int compare = compare_pairs(ft_handle, lesser_pivot, node->pivotkeys.fill_pivot(i, &x)); VERIFY_ASSERTION(compare < 0, i, "Pivot is >= the lower-bound pivot"); } if (greatereq_pivot) { - int compare = compare_pairs(ft_handle, greatereq_pivot, &node->childkeys[i]); + int compare = compare_pairs(ft_handle, greatereq_pivot, node->pivotkeys.fill_pivot(i, &x)); VERIFY_ASSERTION(compare >= 0, i, "Pivot is < the upper-bound pivot"); } } for (int i = 0; i < node->n_children; i++) { - const DBT *curr_less_pivot = (i==0) ? lesser_pivot : &node->childkeys[i-1]; - const DBT *curr_geq_pivot = (i==node->n_children-1) ? greatereq_pivot : &node->childkeys[i]; + DBT x, y; + const DBT *curr_less_pivot = (i==0) ? lesser_pivot : node->pivotkeys.fill_pivot(i - 1, &x); + const DBT *curr_geq_pivot = (i==node->n_children-1) ? greatereq_pivot : node->pivotkeys.fill_pivot(i, &y); if (node->height > 0) { - MSN last_msn = ZERO_MSN; - // Verify that messages in the buffers are in the right place. 
NONLEAF_CHILDINFO bnc = BNC(node, i); - VERIFY_ASSERTION(verify_sorted_by_key_msn(ft_handle, bnc->buffer, bnc->fresh_message_tree) == 0, i, "fresh_message_tree"); - VERIFY_ASSERTION(verify_sorted_by_key_msn(ft_handle, bnc->buffer, bnc->stale_message_tree) == 0, i, "stale_message_tree"); - FIFO_ITERATE(bnc->buffer, key, keylen, data, datalen, itype, msn, xid, is_fresh, - ({ - enum ft_msg_type type = (enum ft_msg_type) itype; - int r = verify_msg_in_child_buffer(ft_handle, type, msn, key, keylen, data, datalen, xid, - curr_less_pivot, - curr_geq_pivot); - VERIFY_ASSERTION(r==0, i, "A message in the buffer is out of place"); - VERIFY_ASSERTION((msn.msn > last_msn.msn), i, "msn per msg must be monotonically increasing toward newer messages in buffer"); - VERIFY_ASSERTION((msn.msn <= this_msn.msn), i, "all messages must have msn within limit of this node's max_msn_applied_to_node_in_memory"); - if (ft_msg_type_applies_once(type)) { - int count; - DBT keydbt; - toku_fill_dbt(&keydbt, key, keylen); - int total_count = 0; - count = count_eq_key_msn(ft_handle, bnc->buffer, bnc->fresh_message_tree, toku_fill_dbt(&keydbt, key, keylen), msn); - total_count += count; - if (is_fresh) { - VERIFY_ASSERTION(count == 1, i, "a fresh message was not found in the fresh message tree"); - } else if (messages_have_been_moved) { - VERIFY_ASSERTION(count == 0, i, "a stale message was found in the fresh message tree"); - } - VERIFY_ASSERTION(count <= 1, i, "a message was found multiple times in the fresh message tree"); - count = count_eq_key_msn(ft_handle, bnc->buffer, bnc->stale_message_tree, &keydbt, msn); - - total_count += count; - if (is_fresh) { - VERIFY_ASSERTION(count == 0, i, "a fresh message was found in the stale message tree"); - } else if (messages_have_been_moved) { - VERIFY_ASSERTION(count == 1, i, "a stale message was not found in the stale message tree"); - } - VERIFY_ASSERTION(count <= 1, i, "a message was found multiple times in the stale message tree"); - - VERIFY_ASSERTION(total_count <= 1, i, "a message was found in both message trees (or more than once in a single tree)"); - VERIFY_ASSERTION(total_count >= 1, i, "a message was not found in either message tree"); - } else { - VERIFY_ASSERTION(ft_msg_type_applies_all(type) || ft_msg_type_does_nothing(type), i, "a message was found that does not apply either to all or to only one key"); - struct count_msgs_extra extra = { .count = 0, .msn = msn, .fifo = bnc->buffer }; - bnc->broadcast_list.iterate<struct count_msgs_extra, count_msgs>(&extra); - VERIFY_ASSERTION(extra.count == 1, i, "a broadcast message was not found in the broadcast list"); - } - last_msn = msn; - })); - struct verify_message_tree_extra extra = { .fifo = bnc->buffer, .broadcast = false, .is_fresh = true, .i = i, .verbose = verbose, .blocknum = node->thisnodename, .keep_going_on_failure = keep_going_on_failure, .messages_have_been_moved = messages_have_been_moved }; - int r = bnc->fresh_message_tree.iterate<struct verify_message_tree_extra, verify_message_tree>(&extra); + // Verify that messages in the buffers are in the right place. 
+ VERIFY_ASSERTION(verify_sorted_by_key_msn(ft_handle, &bnc->msg_buffer, bnc->fresh_message_tree) == 0, i, "fresh_message_tree"); + VERIFY_ASSERTION(verify_sorted_by_key_msn(ft_handle, &bnc->msg_buffer, bnc->stale_message_tree) == 0, i, "stale_message_tree"); + + verify_msg_fn verify_msg(ft_handle, bnc, curr_less_pivot, curr_geq_pivot, + blocknum, this_msn, verbose, keep_going_on_failure, messages_have_been_moved); + int r = bnc->msg_buffer.iterate(verify_msg); + if (r != 0) { result = r; goto done; } + + struct verify_message_tree_extra extra = { .msg_buffer = &bnc->msg_buffer, .broadcast = false, .is_fresh = true, .i = i, .verbose = verbose, .blocknum = node->blocknum, .keep_going_on_failure = keep_going_on_failure, .messages_have_been_moved = messages_have_been_moved }; + r = bnc->fresh_message_tree.iterate<struct verify_message_tree_extra, verify_message_tree>(&extra); if (r != 0) { result = r; goto done; } extra.is_fresh = false; r = bnc->stale_message_tree.iterate<struct verify_message_tree_extra, verify_message_tree>(&extra); @@ -450,7 +483,7 @@ done: // input is a pinned node, on exit, node is unpinned int toku_verify_ftnode (FT_HANDLE ft_handle, - MSN rootmsn, MSN parentmsn, bool messages_exist_above, + MSN rootmsn, MSN parentmsn_with_messages, bool messages_exist_above, FTNODE node, int height, const DBT *lesser_pivot, // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) const DBT *greatereq_pivot, // Everything in the subtree should be <= lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.) @@ -460,7 +493,7 @@ toku_verify_ftnode (FT_HANDLE ft_handle, MSN this_msn; //printf("%s:%d pin %p\n", __FILE__, __LINE__, node_v); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); this_msn = node->max_msn_applied_to_node_on_disk; int result = 0; @@ -469,7 +502,7 @@ toku_verify_ftnode (FT_HANDLE ft_handle, // Otherwise we'll just do the next call result = toku_verify_ftnode_internal( - ft_handle, rootmsn, parentmsn, messages_exist_above, node, height, lesser_pivot, greatereq_pivot, + ft_handle, rootmsn, parentmsn_with_messages, messages_exist_above, node, height, lesser_pivot, greatereq_pivot, verbose, keep_going_on_failure, false); if (result != 0 && (!keep_going_on_failure || result != TOKUDB_NEEDS_REPAIR)) goto done; } @@ -477,7 +510,7 @@ toku_verify_ftnode (FT_HANDLE ft_handle, toku_move_ftnode_messages_to_stale(ft_handle->ft, node); } result2 = toku_verify_ftnode_internal( - ft_handle, rootmsn, parentmsn, messages_exist_above, node, height, lesser_pivot, greatereq_pivot, + ft_handle, rootmsn, parentmsn_with_messages, messages_exist_above, node, height, lesser_pivot, greatereq_pivot, verbose, keep_going_on_failure, true); if (result == 0) { result = result2; @@ -489,12 +522,17 @@ toku_verify_ftnode (FT_HANDLE ft_handle, for (int i = 0; i < node->n_children; i++) { FTNODE child_node; toku_get_node_for_verify(BP_BLOCKNUM(node, i), ft_handle, &child_node); - int r = toku_verify_ftnode(ft_handle, rootmsn, this_msn, messages_exist_above || toku_bnc_n_entries(BNC(node, i)) > 0, - child_node, node->height-1, - (i==0) ? lesser_pivot : &node->childkeys[i-1], - (i==node->n_children-1) ? greatereq_pivot : &node->childkeys[i], - progress_callback, progress_extra, - recurse, verbose, keep_going_on_failure); + DBT x, y; + int r = toku_verify_ftnode(ft_handle, rootmsn, + (toku_bnc_n_entries(BNC(node, i)) > 0 + ? 
this_msn + : parentmsn_with_messages), + messages_exist_above || toku_bnc_n_entries(BNC(node, i)) > 0, + child_node, node->height-1, + (i==0) ? lesser_pivot : node->pivotkeys.fill_pivot(i - 1, &x), + (i==node->n_children-1) ? greatereq_pivot : node->pivotkeys.fill_pivot(i, &y), + progress_callback, progress_extra, + recurse, verbose, keep_going_on_failure); if (r) { result = r; if (!keep_going_on_failure || result != TOKUDB_NEEDS_REPAIR) goto done; diff --git a/storage/tokudb/ft-index/ft/ft.cc b/storage/tokudb/ft-index/ft/ft.cc index 5c8e439e644..fd3960b64f6 100644 --- a/storage/tokudb/ft-index/ft/ft.cc +++ b/storage/tokudb/ft-index/ft/ft.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,12 +89,15 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "ft.h" -#include "ft-internal.h" -#include "ft-cachetable-wrappers.h" -#include "log-internal.h" - -#include <ft/log_header.h> +#include "ft/serialize/block_table.h" +#include "ft/ft.h" +#include "ft/ft-cachetable-wrappers.h" +#include "ft/ft-internal.h" +#include "ft/logger/log-internal.h" +#include "ft/log_header.h" +#include "ft/node.h" +#include "ft/serialize/ft-serialize.h" +#include "ft/serialize/ft_node-serialize.h" #include <memory.h> #include <toku_assert.h> @@ -107,10 +110,10 @@ toku_reset_root_xid_that_created(FT ft, TXNID new_root_xid_that_created) { // hold lock around setting and clearing of dirty bit // (see cooperative use of dirty bit in ft_begin_checkpoint()) - toku_ft_lock (ft); + toku_ft_lock(ft); ft->h->root_xid_that_created = new_root_xid_that_created; ft->h->dirty = 1; - toku_ft_unlock (ft); + toku_ft_unlock(ft); } static void @@ -118,9 +121,10 @@ ft_destroy(FT ft) { //header and checkpoint_header have same Blocktable pointer //cannot destroy since it is still in use by CURRENT assert(ft->h->type == FT_CURRENT); - toku_blocktable_destroy(&ft->blocktable); - if (ft->descriptor.dbt.data) toku_free(ft->descriptor.dbt.data); - if (ft->cmp_descriptor.dbt.data) toku_free(ft->cmp_descriptor.dbt.data); + ft->blocktable.destroy(); + ft->cmp.destroy(); + toku_destroy_dbt(&ft->descriptor.dbt); + toku_destroy_dbt(&ft->cmp_descriptor.dbt); toku_ft_destroy_reflock(ft); toku_free(ft->h); } @@ -187,7 +191,7 @@ ft_log_fassociate_during_checkpoint (CACHEFILE cf, void *header_v) { } // Maps to cf->begin_checkpoint_userdata -// Create checkpoint-in-progress versions of header and translation (btt) (and fifo for now...). +// Create checkpoint-in-progress versions of header and translation (btt) // Has access to fd (it is protected). 
// // Not reentrant for a single FT (see ft_checkpoint) @@ -199,7 +203,7 @@ static void ft_begin_checkpoint (LSN checkpoint_lsn, void *header_v) { assert(ft->checkpoint_header == NULL); ft_copy_for_checkpoint_unlocked(ft, checkpoint_lsn); ft->h->dirty = 0; // this is only place this bit is cleared (in currentheader) - toku_block_translation_note_start_checkpoint_unlocked(ft->blocktable); + ft->blocktable.note_start_checkpoint_unlocked(); toku_ft_unlock (ft); } @@ -235,8 +239,6 @@ ft_hack_highest_unused_msn_for_upgrade_for_checkpoint(FT ft) { static void ft_checkpoint (CACHEFILE cf, int fd, void *header_v) { FT ft = (FT) header_v; FT_HEADER ch = ft->checkpoint_header; - //printf("%s:%d allocated_limit=%lu writing queue to %lu\n", __FILE__, __LINE__, - // block_allocator_allocated_limit(h->block_allocator), h->unused_blocks.b*h->nodesize); assert(ch); assert(ch->type == FT_CHECKPOINT_INPROGRESS); if (ch->dirty) { // this is only place this bit is tested (in checkpoint_header) @@ -251,16 +253,15 @@ static void ft_checkpoint (CACHEFILE cf, int fd, void *header_v) { ft_hack_highest_unused_msn_for_upgrade_for_checkpoint(ft); // write translation and header to disk (or at least to OS internal buffer) - toku_serialize_ft_to(fd, ch, ft->blocktable, ft->cf); + toku_serialize_ft_to(fd, ch, &ft->blocktable, ft->cf); ch->dirty = 0; // this is only place this bit is cleared (in checkpoint_header) // fsync the cachefile toku_cachefile_fsync(cf); ft->h->checkpoint_count++; // checkpoint succeeded, next checkpoint will save to alternate header location ft->h->checkpoint_lsn = ch->checkpoint_lsn; //Header updated. - } - else { - toku_block_translation_note_skipped_checkpoint(ft->blocktable); + } else { + ft->blocktable.note_skipped_checkpoint(); } } @@ -268,14 +269,12 @@ static void ft_checkpoint (CACHEFILE cf, int fd, void *header_v) { // free unused disk space // (i.e. tell BlockAllocator to liberate blocks used by previous checkpoint). 
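The ft.cc hunks here keep the same three-phase checkpoint protocol as before — ft_begin_checkpoint snapshots the header/translation and clears the dirty bit, ft_checkpoint serializes and fsyncs (bumping checkpoint_count on success), and ft_end_checkpoint just below releases blocks held only by the previous checkpoint — they merely route the block-table calls through the new blocktable member object. A schematic sketch of that ordering only; this is not the real cachefile callback API and every name is made up:

#include <cassert>
#include <cstdio>

// Schematic: models the ordering constraints of begin/checkpoint/end,
// not TokuFT's real data structures.
struct checkpoint_sketch {
    bool dirty = false;
    bool in_progress = false;
    int  checkpoint_count = 0;

    void begin() {                 // cf. ft_begin_checkpoint
        assert(!in_progress);      // not reentrant for a single FT
        in_progress = true;
        dirty = false;             // the only place the dirty bit is cleared
    }
    void write_and_fsync() {       // cf. ft_checkpoint
        assert(in_progress);
        // serialize header + translation, fsync the cachefile...
        checkpoint_count++;        // next checkpoint uses the alternate header slot
    }
    void end() {                   // cf. ft_end_checkpoint
        assert(in_progress);
        in_progress = false;       // blocks of the previous checkpoint may now be freed
    }
};

int main(void) {
    checkpoint_sketch ft;
    ft.dirty = true;
    ft.begin();
    ft.write_and_fsync();
    ft.end();
    printf("checkpoints taken: %d, dirty: %d\n", ft.checkpoint_count, (int) ft.dirty);
    return 0;
}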
// Must have access to fd (protected) -static void ft_end_checkpoint (CACHEFILE UU(cachefile), int fd, void *header_v) { +static void ft_end_checkpoint(CACHEFILE UU(cf), int fd, void *header_v) { FT ft = (FT) header_v; assert(ft->h->type == FT_CURRENT); - toku_block_translation_note_end_checkpoint(ft->blocktable, fd); - if (ft->checkpoint_header) { - toku_free(ft->checkpoint_header); - ft->checkpoint_header = NULL; - } + ft->blocktable.note_end_checkpoint(fd); + toku_free(ft->checkpoint_header); + ft->checkpoint_header = nullptr; } // maps to cf->close_userdata @@ -309,14 +308,16 @@ static void ft_close(CACHEFILE cachefile, int fd, void *header_v, bool oplsn_val } } if (ft->h->dirty) { // this is the only place this bit is tested (in currentheader) - if (logger) { //Rollback cachefile MUST NOT BE CLOSED DIRTY - //It can be checkpointed only via 'checkpoint' - assert(logger->rollback_cachefile != cachefile); + bool do_checkpoint = true; + if (logger && logger->rollback_cachefile == cachefile) { + do_checkpoint = false; + } + if (do_checkpoint) { + ft_begin_checkpoint(lsn, header_v); + ft_checkpoint(cachefile, fd, ft); + ft_end_checkpoint(cachefile, fd, header_v); + assert(!ft->h->dirty); // dirty bit should be cleared by begin_checkpoint and never set again (because we're closing the dictionary) } - ft_begin_checkpoint(lsn, header_v); - ft_checkpoint(cachefile, fd, ft); - ft_end_checkpoint(cachefile, fd, header_v); - assert(!ft->h->dirty); // dirty bit should be cleared by begin_checkpoint and never set again (because we're closing the dictionary) } } @@ -358,11 +359,6 @@ static void ft_note_unpin_by_checkpoint (CACHEFILE UU(cachefile), void *header_v // End of Functions that are callbacks to the cachefile ///////////////////////////////////////////////////////////////////////// -void toku_node_save_ct_pair(CACHEKEY UU(key), void *value_data, PAIR p) { - FTNODE CAST_FROM_VOIDP(node, value_data); - node->ct_pair = p; -} - static void setup_initial_ft_root_node(FT ft, BLOCKNUM blocknum) { FTNODE XCALLOC(node); toku_initialize_empty_ftnode(node, blocknum, 0, 1, ft->h->layout_version, ft->h->flags); @@ -373,7 +369,7 @@ static void setup_initial_ft_root_node(FT ft, BLOCKNUM blocknum) { toku_cachetable_put(ft->cf, blocknum, fullhash, node, make_ftnode_pair_attr(node), get_write_callbacks_for_node(ft), - toku_node_save_ct_pair); + toku_ftnode_save_ct_pair); toku_unpin_ftnode(ft, node); } @@ -384,7 +380,8 @@ static void ft_init(FT ft, FT_OPTIONS options, CACHEFILE cf) { toku_list_init(&ft->live_ft_handles); - ft->compare_fun = options->compare_fun; + // intuitively, the comparator points to the FT's cmp descriptor + ft->cmp.create(options->compare_fun, &ft->cmp_descriptor, options->memcmp_magic); ft->update_fun = options->update_fun; if (ft->cf != NULL) { @@ -405,7 +402,7 @@ static void ft_init(FT ft, FT_OPTIONS options, CACHEFILE cf) { ft_note_pin_by_checkpoint, ft_note_unpin_by_checkpoint); - toku_block_verify_no_free_blocknums(ft->blocktable); + ft->blocktable.verify_no_free_blocknums(); } @@ -449,16 +446,13 @@ void toku_ft_create(FT *ftp, FT_OPTIONS options, CACHEFILE cf, TOKUTXN txn) { invariant(ftp); FT XCALLOC(ft); - memset(&ft->descriptor, 0, sizeof(ft->descriptor)); - memset(&ft->cmp_descriptor, 0, sizeof(ft->cmp_descriptor)); - ft->h = ft_header_create(options, make_blocknum(0), (txn ? 
txn->txnid.parent_id64: TXNID_NONE)); toku_ft_init_reflock(ft); // Assign blocknum for root block, also dirty the header - toku_blocktable_create_new(&ft->blocktable); - toku_allocate_blocknum(ft->blocktable, &ft->h->root_blocknum, ft); + ft->blocktable.create(); + ft->blocktable.allocate_blocknum(&ft->h->root_blocknum, ft); ft_init(ft, options, cf); @@ -471,33 +465,29 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN // If the cachefile has not been initialized, then don't modify anything. // max_acceptable_lsn is the latest acceptable checkpointed version of the file. { - { - FT h; - if ((h = (FT) toku_cachefile_get_userdata(cf))!=0) { - *header = h; - assert(ft_handle->options.update_fun == h->update_fun); - assert(ft_handle->options.compare_fun == h->compare_fun); - return 0; - } + FT ft = nullptr; + if ((ft = (FT) toku_cachefile_get_userdata(cf)) != nullptr) { + *header = ft; + assert(ft_handle->options.update_fun == ft->update_fun); + return 0; } - FT h = nullptr; - int r; - { - int fd = toku_cachefile_get_fd(cf); - r = toku_deserialize_ft_from(fd, max_acceptable_lsn, &h); - if (r == TOKUDB_BAD_CHECKSUM) { - fprintf(stderr, "Checksum failure while reading header in file %s.\n", toku_cachefile_fname_in_env(cf)); - assert(false); // make absolutely sure we crash before doing anything else - } + + int fd = toku_cachefile_get_fd(cf); + int r = toku_deserialize_ft_from(fd, max_acceptable_lsn, &ft); + if (r == TOKUDB_BAD_CHECKSUM) { + fprintf(stderr, "Checksum failure while reading header in file %s.\n", toku_cachefile_fname_in_env(cf)); + assert(false); // make absolutely sure we crash before doing anything else + } else if (r != 0) { + return r; } - if (r!=0) return r; - // GCC 4.8 seems to get confused by the gotos in the deserialize code and think h is maybe uninitialized. - invariant_notnull(h); - h->cf = cf; - h->compare_fun = ft_handle->options.compare_fun; - h->update_fun = ft_handle->options.update_fun; + + invariant_notnull(ft); + // intuitively, the comparator points to the FT's cmp descriptor + ft->cmp.create(ft_handle->options.compare_fun, &ft->cmp_descriptor, ft_handle->options.memcmp_magic); + ft->update_fun = ft_handle->options.update_fun; + ft->cf = cf; toku_cachefile_set_userdata(cf, - (void*)h, + reinterpret_cast<void *>(ft), ft_log_fassociate_during_checkpoint, ft_close, ft_free, @@ -506,7 +496,7 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN ft_end_checkpoint, ft_note_pin_by_checkpoint, ft_note_unpin_by_checkpoint); - *header = h; + *header = ft; return 0; } @@ -548,12 +538,12 @@ void toku_ft_evict_from_memory(FT ft, bool oplsn_valid, LSN oplsn) { } // Verifies there exists exactly one ft handle and returns it. 
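Both ft_init() and toku_read_ft_and_store_in_cachefile() above now build the tree's comparator with ft->cmp.create(compare_fun, &ft->cmp_descriptor, memcmp_magic), and call sites such as compare_pairs() in ft-verify.cc simply invoke ft->cmp(a, b) instead of faking up a DB around the raw compare function. A rough, self-contained sketch of such a callable wrapper; the descriptor plumbing is simplified, the memcmp_magic fast path is only hinted at, and every type and name here is illustrative rather than the real toku::comparator API:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Toy DBT/descriptor so the sketch compiles on its own.
struct dbt_toy { const void *data; uint32_t size; };
struct descriptor_toy { dbt_toy dbt; };

typedef int (*compare_func_toy)(const descriptor_toy *desc,
                                const dbt_toy *a, const dbt_toy *b);

// Callable comparator in the spirit of toku::comparator: it bundles the user
// compare function with a pointer to the FT's cmp descriptor.
class comparator_toy {
    compare_func_toy _cmp = nullptr;
    const descriptor_toy *_desc = nullptr;
    uint8_t _memcmp_magic = 0;     // carried along; the real class can use it
                                   // to select a raw-memcmp fast path
public:
    void create(compare_func_toy cmp, const descriptor_toy *desc, uint8_t magic) {
        _cmp = cmp; _desc = desc; _memcmp_magic = magic;
    }
    void destroy() { _cmp = nullptr; _desc = nullptr; }
    int operator()(const dbt_toy *a, const dbt_toy *b) const {
        return _cmp(_desc, a, b);
    }
};

static int lexicographic_cmp(const descriptor_toy *, const dbt_toy *a, const dbt_toy *b) {
    uint32_t n = a->size < b->size ? a->size : b->size;
    int c = memcmp(a->data, b->data, n);
    if (c) return c;
    return (a->size < b->size) ? -1 : (a->size > b->size) ? 1 : 0;
}

int main(void) {
    descriptor_toy desc = { { nullptr, 0 } };
    comparator_toy cmp;
    cmp.create(lexicographic_cmp, &desc, 0);
    dbt_toy a = { "apple", 5 }, b = { "apples", 6 };
    printf("%d\n", cmp(&a, &b));   // negative: "apple" sorts before "apples"
    cmp.destroy();
    return 0;
}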
-FT_HANDLE toku_ft_get_only_existing_ft_handle(FT h) { +FT_HANDLE toku_ft_get_only_existing_ft_handle(FT ft) { FT_HANDLE ft_handle_ret = NULL; - toku_ft_grab_reflock(h); - assert(toku_list_num_elements_est(&h->live_ft_handles) == 1); - ft_handle_ret = toku_list_struct(toku_list_head(&h->live_ft_handles), struct ft_handle, live_ft_handle_link); - toku_ft_release_reflock(h); + toku_ft_grab_reflock(ft); + assert(toku_list_num_elements_est(&ft->live_ft_handles) == 1); + ft_handle_ret = toku_list_struct(toku_list_head(&ft->live_ft_handles), struct ft_handle, live_ft_handle_link); + toku_ft_release_reflock(ft); return ft_handle_ret; } @@ -618,6 +608,7 @@ toku_ft_init(FT ft, .compression_method = compression_method, .fanout = fanout, .flags = 0, + .memcmp_magic = 0, .compare_fun = NULL, .update_fun = NULL }; @@ -628,27 +619,27 @@ toku_ft_init(FT ft, // Open an ft for use by redirect. The new ft must have the same dict_id as the old_ft passed in. (FILENUM is assigned by the ft_handle_open() function.) static int -ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTXN txn, FT old_h) { - FT_HANDLE t; - assert(old_h->dict_id.dictid != DICTIONARY_ID_NONE.dictid); - toku_ft_handle_create(&t); - toku_ft_set_bt_compare(t, old_h->compare_fun); - toku_ft_set_update(t, old_h->update_fun); - toku_ft_handle_set_nodesize(t, old_h->h->nodesize); - toku_ft_handle_set_basementnodesize(t, old_h->h->basementnodesize); - toku_ft_handle_set_compression_method(t, old_h->h->compression_method); - toku_ft_handle_set_fanout(t, old_h->h->fanout); - CACHETABLE ct = toku_cachefile_get_cachetable(old_h->cf); - int r = toku_ft_handle_open_with_dict_id(t, fname_in_env, 0, 0, ct, txn, old_h->dict_id); +ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTXN txn, FT old_ft) { + FT_HANDLE ft_handle; + assert(old_ft->dict_id.dictid != DICTIONARY_ID_NONE.dictid); + toku_ft_handle_create(&ft_handle); + toku_ft_set_bt_compare(ft_handle, old_ft->cmp.get_compare_func()); + toku_ft_set_update(ft_handle, old_ft->update_fun); + toku_ft_handle_set_nodesize(ft_handle, old_ft->h->nodesize); + toku_ft_handle_set_basementnodesize(ft_handle, old_ft->h->basementnodesize); + toku_ft_handle_set_compression_method(ft_handle, old_ft->h->compression_method); + toku_ft_handle_set_fanout(ft_handle, old_ft->h->fanout); + CACHETABLE ct = toku_cachefile_get_cachetable(old_ft->cf); + int r = toku_ft_handle_open_with_dict_id(ft_handle, fname_in_env, 0, 0, ct, txn, old_ft->dict_id); if (r != 0) { goto cleanup; } - assert(t->ft->dict_id.dictid == old_h->dict_id.dictid); - *new_ftp = t; + assert(ft_handle->ft->dict_id.dictid == old_ft->dict_id.dictid); + *new_ftp = ft_handle; cleanup: if (r != 0) { - toku_ft_handle_close(t); + toku_ft_handle_close(ft_handle); } return r; } @@ -656,81 +647,81 @@ ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTX // This function performs most of the work to redirect a dictionary to different file. // It is called for redirect and to abort a redirect. (This function is almost its own inverse.) 
static int -dictionary_redirect_internal(const char *dst_fname_in_env, FT src_h, TOKUTXN txn, FT *dst_hp) { +dictionary_redirect_internal(const char *dst_fname_in_env, FT src_ft, TOKUTXN txn, FT *dst_ftp) { int r; - FILENUM src_filenum = toku_cachefile_filenum(src_h->cf); + FILENUM src_filenum = toku_cachefile_filenum(src_ft->cf); FILENUM dst_filenum = FILENUM_NONE; - FT dst_h = NULL; + FT dst_ft = NULL; struct toku_list *list; // open a dummy ft based off of // dst_fname_in_env to get the header // then we will change all the ft's to have - // their headers point to dst_h instead of src_h + // their headers point to dst_ft instead of src_ft FT_HANDLE tmp_dst_ft = NULL; - r = ft_handle_open_for_redirect(&tmp_dst_ft, dst_fname_in_env, txn, src_h); + r = ft_handle_open_for_redirect(&tmp_dst_ft, dst_fname_in_env, txn, src_ft); if (r != 0) { goto cleanup; } - dst_h = tmp_dst_ft->ft; + dst_ft = tmp_dst_ft->ft; // some sanity checks on dst_filenum - dst_filenum = toku_cachefile_filenum(dst_h->cf); + dst_filenum = toku_cachefile_filenum(dst_ft->cf); assert(dst_filenum.fileid!=FILENUM_NONE.fileid); assert(dst_filenum.fileid!=src_filenum.fileid); //Cannot be same file. - // for each live ft_handle, ft_handle->ft is currently src_h + // for each live ft_handle, ft_handle->ft is currently src_ft // we want to change it to dummy_dst - toku_ft_grab_reflock(src_h); - while (!toku_list_empty(&src_h->live_ft_handles)) { - list = src_h->live_ft_handles.next; + toku_ft_grab_reflock(src_ft); + while (!toku_list_empty(&src_ft->live_ft_handles)) { + list = src_ft->live_ft_handles.next; FT_HANDLE src_handle = NULL; src_handle = toku_list_struct(list, struct ft_handle, live_ft_handle_link); toku_list_remove(&src_handle->live_ft_handle_link); - toku_ft_note_ft_handle_open(dst_h, src_handle); + toku_ft_note_ft_handle_open(dst_ft, src_handle); if (src_handle->redirect_callback) { src_handle->redirect_callback(src_handle, src_handle->redirect_callback_extra); } } - assert(dst_h); - // making sure that we are not leaking src_h - assert(toku_ft_needed_unlocked(src_h)); - toku_ft_release_reflock(src_h); + assert(dst_ft); + // making sure that we are not leaking src_ft + assert(toku_ft_needed_unlocked(src_ft)); + toku_ft_release_reflock(src_ft); toku_ft_handle_close(tmp_dst_ft); - *dst_hp = dst_h; + *dst_ftp = dst_ft; cleanup: return r; } -//This is the 'abort redirect' function. The redirect of old_h to new_h was done -//and now must be undone, so here we redirect new_h back to old_h. +//This is the 'abort redirect' function. The redirect of old_ft to new_ft was done +//and now must be undone, so here we redirect new_ft back to old_ft. int -toku_dictionary_redirect_abort(FT old_h, FT new_h, TOKUTXN txn) { - char *old_fname_in_env = toku_cachefile_fname_in_env(old_h->cf); +toku_dictionary_redirect_abort(FT old_ft, FT new_ft, TOKUTXN txn) { + char *old_fname_in_env = toku_cachefile_fname_in_env(old_ft->cf); int r; { - FILENUM old_filenum = toku_cachefile_filenum(old_h->cf); - FILENUM new_filenum = toku_cachefile_filenum(new_h->cf); + FILENUM old_filenum = toku_cachefile_filenum(old_ft->cf); + FILENUM new_filenum = toku_cachefile_filenum(new_ft->cf); assert(old_filenum.fileid!=new_filenum.fileid); //Cannot be same file. //No living fts in old header. 
- toku_ft_grab_reflock(old_h); - assert(toku_list_empty(&old_h->live_ft_handles)); - toku_ft_release_reflock(old_h); + toku_ft_grab_reflock(old_ft); + assert(toku_list_empty(&old_ft->live_ft_handles)); + toku_ft_release_reflock(old_ft); } - FT dst_h; - // redirect back from new_h to old_h - r = dictionary_redirect_internal(old_fname_in_env, new_h, txn, &dst_h); + FT dst_ft; + // redirect back from new_ft to old_ft + r = dictionary_redirect_internal(old_fname_in_env, new_ft, txn, &dst_ft); if (r == 0) { - assert(dst_h == old_h); + assert(dst_ft == old_ft); } return r; } @@ -879,18 +870,17 @@ toku_ft_stat64 (FT ft, struct ftstat64_s *s) { s->verify_time_sec = ft->h->time_of_last_verification; } -void -toku_ft_get_fractal_tree_info64(FT ft, struct ftinfo64 *s) { - toku_blocktable_get_info64(ft->blocktable, s); +void toku_ft_get_fractal_tree_info64(FT ft, struct ftinfo64 *info) { + ft->blocktable.get_info64(info); } int toku_ft_iterate_fractal_tree_block_map(FT ft, int (*iter)(uint64_t,int64_t,int64_t,int64_t,int64_t,void*), void *iter_extra) { uint64_t this_checkpoint_count = ft->h->checkpoint_count; - return toku_blocktable_iterate_translation_tables(ft->blocktable, this_checkpoint_count, iter, iter_extra); + return ft->blocktable.iterate_translation_tables(this_checkpoint_count, iter, iter_extra); } void -toku_ft_update_descriptor(FT ft, DESCRIPTOR d) +toku_ft_update_descriptor(FT ft, DESCRIPTOR desc) // Effect: Changes the descriptor in a tree (log the change, make sure it makes it to disk eventually). // requires: the ft is fully user-opened with a valid cachefile. // descriptor updates cannot happen in parallel for an FT @@ -898,7 +888,7 @@ toku_ft_update_descriptor(FT ft, DESCRIPTOR d) { assert(ft->cf); int fd = toku_cachefile_get_fd(ft->cf); - toku_ft_update_descriptor_with_fd(ft, d, fd); + toku_ft_update_descriptor_with_fd(ft, desc, fd); } // upadate the descriptor for an ft and serialize it using @@ -907,41 +897,30 @@ toku_ft_update_descriptor(FT ft, DESCRIPTOR d) // update a descriptor before the ft is fully opened and has // a valid cachefile. 
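The descriptor hunk that follows swaps the manual free-then-toku_memdup sequence for the paired helpers toku_destroy_dbt() and toku_clone_dbt() when the in-memory descriptor and cmp descriptor are refreshed. The real helpers live in util/dbt; the following is only a hedged guess at their shape, written against a toy DBT so it stands alone:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Toy DBT so the sketch is self-contained.
struct dbt_toy { void *data; uint32_t size; };

// Plausible shape of toku_destroy_dbt(): free the payload and zero the struct,
// so destroying an already-empty DBT stays safe.
static void destroy_dbt_sketch(dbt_toy *dbt) {
    free(dbt->data);
    dbt->data = nullptr;
    dbt->size = 0;
}

// Plausible shape of toku_clone_dbt(): deep-copy src's bytes into dst.
static void clone_dbt_sketch(dbt_toy *dst, const dbt_toy &src) {
    dst->size = src.size;
    dst->data = src.size ? malloc(src.size) : nullptr;
    if (dst->data) memcpy(dst->data, src.data, src.size);
}

int main(void) {
    dbt_toy current = { nullptr, 0 }, incoming = { (void *) "v2", 2 };
    // The update path in the patch: drop the old copy, then clone the new one.
    destroy_dbt_sketch(&current);
    clone_dbt_sketch(&current, incoming);
    printf("%.*s\n", (int) current.size, (const char *) current.data);
    destroy_dbt_sketch(&current);
    return 0;
}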
void -toku_ft_update_descriptor_with_fd(FT ft, DESCRIPTOR d, int fd) { +toku_ft_update_descriptor_with_fd(FT ft, DESCRIPTOR desc, int fd) { // the checksum is four bytes, so that's where the magic number comes from // make space for the new descriptor and write it out to disk DISKOFF offset, size; - size = toku_serialize_descriptor_size(d) + 4; - toku_realloc_descriptor_on_disk(ft->blocktable, size, &offset, ft, fd); - toku_serialize_descriptor_contents_to_fd(fd, d, offset); + size = toku_serialize_descriptor_size(desc) + 4; + ft->blocktable.realloc_descriptor_on_disk(size, &offset, ft, fd); + toku_serialize_descriptor_contents_to_fd(fd, desc, offset); // cleanup the old descriptor and set the in-memory descriptor to the new one - if (ft->descriptor.dbt.data) { - toku_free(ft->descriptor.dbt.data); - } - ft->descriptor.dbt.size = d->dbt.size; - ft->descriptor.dbt.data = toku_memdup(d->dbt.data, d->dbt.size); + toku_destroy_dbt(&ft->descriptor.dbt); + toku_clone_dbt(&ft->descriptor.dbt, desc->dbt); } -void -toku_ft_update_cmp_descriptor(FT ft) { - if (ft->cmp_descriptor.dbt.data != NULL) { - toku_free(ft->cmp_descriptor.dbt.data); - } - ft->cmp_descriptor.dbt.size = ft->descriptor.dbt.size; - ft->cmp_descriptor.dbt.data = toku_xmemdup( - ft->descriptor.dbt.data, - ft->descriptor.dbt.size - ); +void toku_ft_update_cmp_descriptor(FT ft) { + // cleanup the old cmp descriptor and clone it as the in-memory descriptor + toku_destroy_dbt(&ft->cmp_descriptor.dbt); + toku_clone_dbt(&ft->cmp_descriptor.dbt, ft->descriptor.dbt); } -DESCRIPTOR -toku_ft_get_descriptor(FT_HANDLE ft_handle) { +DESCRIPTOR toku_ft_get_descriptor(FT_HANDLE ft_handle) { return &ft_handle->ft->descriptor; } -DESCRIPTOR -toku_ft_get_cmp_descriptor(FT_HANDLE ft_handle) { +DESCRIPTOR toku_ft_get_cmp_descriptor(FT_HANDLE ft_handle) { return &ft_handle->ft->cmp_descriptor; } @@ -1066,8 +1045,8 @@ garbage_helper(BLOCKNUM blocknum, int64_t UU(size), int64_t UU(address), void *e struct garbage_helper_extra *CAST_FROM_VOIDP(info, extra); FTNODE node; FTNODE_DISK_DATA ndd; - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, info->ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(info->ft); int fd = toku_cachefile_get_fd(info->ft->cf); int r = toku_deserialize_ftnode_from(fd, blocknum, 0, &node, &ndd, &bfe); if (r != 0) { @@ -1101,7 +1080,7 @@ void toku_ft_get_garbage(FT ft, uint64_t *total_space, uint64_t *used_space) { .total_space = 0, .used_space = 0 }; - toku_blocktable_iterate(ft->blocktable, TRANSLATION_CHECKPOINTED, garbage_helper, &info, true, true); + ft->blocktable.iterate(block_table::TRANSLATION_CHECKPOINTED, garbage_helper, &info, true, true); *total_space = info.total_space; *used_space = info.used_space; } @@ -1111,8 +1090,6 @@ void toku_ft_get_garbage(FT ft, uint64_t *total_space, uint64_t *used_space) { #error #endif - - #define xstr(X) str(X) #define str(X) #X #define static_version_string xstr(DB_VERSION_MAJOR) "." \ @@ -1122,10 +1099,9 @@ void toku_ft_get_garbage(FT ft, uint64_t *total_space, uint64_t *used_space) { struct toku_product_name_strings_struct toku_product_name_strings; char toku_product_name[TOKU_MAX_PRODUCT_NAME_LENGTH]; -void -tokudb_update_product_name_strings(void) { - //DO ALL STRINGS HERE.. maybe have a separate FT layer version as well - { // Version string +void tokuft_update_product_name_strings(void) { + // DO ALL STRINGS HERE.. 
maybe have a separate FT layer version as well + { int n = snprintf(toku_product_name_strings.db_version, sizeof(toku_product_name_strings.db_version), "%s %s", toku_product_name, static_version_string); @@ -1177,7 +1153,7 @@ toku_single_process_lock(const char *lock_dir, const char *which, int *lockfd) { *lockfd = toku_os_lock_file(lockfname); if (*lockfd < 0) { int e = get_error_errno(); - fprintf(stderr, "Couldn't start tokudb because some other tokudb process is using the same directory [%s] for [%s]\n", lock_dir, which); + fprintf(stderr, "Couldn't start tokuft because some other tokuft process is using the same directory [%s] for [%s]\n", lock_dir, which); return e; } return 0; @@ -1195,10 +1171,10 @@ toku_single_process_unlock(int *lockfd) { return 0; } -int tokudb_num_envs = 0; +int tokuft_num_envs = 0; int db_env_set_toku_product_name(const char *name) { - if (tokudb_num_envs > 0) { + if (tokuft_num_envs > 0) { return EINVAL; } if (!name || strlen(name) < 1) { @@ -1209,7 +1185,7 @@ db_env_set_toku_product_name(const char *name) { } if (strncmp(toku_product_name, name, sizeof(toku_product_name))) { strcpy(toku_product_name, name); - tokudb_update_product_name_strings(); + tokuft_update_product_name_strings(); } return 0; } diff --git a/storage/tokudb/ft-index/ft/ft.h b/storage/tokudb/ft-index/ft/ft.h index e536241722c..336845475cc 100644 --- a/storage/tokudb/ft-index/ft/ft.h +++ b/storage/tokudb/ft-index/ft/ft.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef FT_H -#define FT_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,17 +86,20 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "fttypes.h" -#include "ybt.h" #include <db.h> -#include "cachetable.h" -#include "log.h" -#include "ft-search.h" -#include "ft-ops.h" -#include "compress.h" + +#include "ft/cachetable/cachetable.h" +#include "ft/ft-ops.h" +#include "ft/logger/log.h" +#include "util/dbt.h" + +typedef struct ft *FT; +typedef struct ft_options *FT_OPTIONS; // unlink a ft from the filesystem with or without a txn. // if with a txn, then the unlink happens on commit. @@ -110,8 +111,11 @@ void toku_ft_destroy_reflock(FT ft); void toku_ft_grab_reflock(FT ft); void toku_ft_release_reflock(FT ft); +void toku_ft_lock(struct ft *ft); +void toku_ft_unlock(struct ft *ft); + void toku_ft_create(FT *ftp, FT_OPTIONS options, CACHEFILE cf, TOKUTXN txn); -void toku_ft_free (FT h); +void toku_ft_free (FT ft); int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_h, CACHEFILE cf, LSN max_acceptable_lsn, FT *header); void toku_ft_note_ft_handle_open(FT ft, FT_HANDLE live); @@ -123,7 +127,7 @@ bool toku_ft_has_one_reference_unlocked(FT ft); // will have to read in the ft in a new cachefile and new FT object. 
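The toku_ft_update_descriptor_with_fd()/toku_ft_update_cmp_descriptor() hunk earlier in ft.cc replaces the hand-rolled free-then-memdup pair with toku_destroy_dbt() followed by toku_clone_dbt(). A minimal sketch of that replace-a-held-copy pattern, assuming only what the diff shows about the util/dbt.h helpers (destroy frees and re-initializes, clone takes a deep copy):

    #include "util/dbt.h"

    // Replace the deep copy held in *dst with a fresh copy of src.
    // Mirrors the descriptor/cmp_descriptor updates in ft.cc above.
    static void replace_held_dbt(DBT *dst, const DBT &src) {
        toku_destroy_dbt(dst);     // free the previously held copy and reset the DBT
        toku_clone_dbt(dst, src);  // deep-copy src's bytes into dst
    }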
void toku_ft_evict_from_memory(FT ft, bool oplsn_valid, LSN oplsn); -FT_HANDLE toku_ft_get_only_existing_ft_handle(FT h); +FT_HANDLE toku_ft_get_only_existing_ft_handle(FT ft); void toku_ft_note_hot_begin(FT_HANDLE ft_h); void toku_ft_note_hot_complete(FT_HANDLE ft_h, bool success, MSN msn_at_start_of_hot); @@ -142,29 +146,29 @@ toku_ft_init( int toku_dictionary_redirect_abort(FT old_h, FT new_h, TOKUTXN txn) __attribute__ ((warn_unused_result)); int toku_dictionary_redirect (const char *dst_fname_in_env, FT_HANDLE old_ft, TOKUTXN txn); -void toku_reset_root_xid_that_created(FT h, TXNID new_root_xid_that_created); +void toku_reset_root_xid_that_created(FT ft, TXNID new_root_xid_that_created); // Reset the root_xid_that_created field to the given value. // This redefines which xid created the dictionary. -void toku_ft_add_txn_ref(FT h); -void toku_ft_remove_txn_ref(FT h); +void toku_ft_add_txn_ref(FT ft); +void toku_ft_remove_txn_ref(FT ft); -void toku_calculate_root_offset_pointer ( FT h, CACHEKEY* root_key, uint32_t *roothash); -void toku_ft_set_new_root_blocknum(FT h, CACHEKEY new_root_key); -LSN toku_ft_checkpoint_lsn(FT h) __attribute__ ((warn_unused_result)); -void toku_ft_stat64 (FT h, struct ftstat64_s *s); -void toku_ft_get_fractal_tree_info64 (FT h, struct ftinfo64 *s); +void toku_calculate_root_offset_pointer (FT ft, CACHEKEY* root_key, uint32_t *roothash); +void toku_ft_set_new_root_blocknum(FT ft, CACHEKEY new_root_key); +LSN toku_ft_checkpoint_lsn(FT ft) __attribute__ ((warn_unused_result)); +void toku_ft_stat64 (FT ft, struct ftstat64_s *s); +void toku_ft_get_fractal_tree_info64 (FT ft, struct ftinfo64 *s); int toku_ft_iterate_fractal_tree_block_map(FT ft, int (*iter)(uint64_t,int64_t,int64_t,int64_t,int64_t,void*), void *iter_extra); // unconditionally set the descriptor for an open FT. can't do this when // any operation has already occurred on the ft. // see toku_ft_change_descriptor(), which is the transactional version // used by the ydb layer. it better describes the client contract. -void toku_ft_update_descriptor(FT ft, DESCRIPTOR d); +void toku_ft_update_descriptor(FT ft, DESCRIPTOR desc); // use this version if the FT is not fully user-opened with a valid cachefile. // this is a clean hack to get deserialization code to update a descriptor // while the FT and cf are in the process of opening, for upgrade purposes -void toku_ft_update_descriptor_with_fd(FT ft, DESCRIPTOR d, int fd); +void toku_ft_update_descriptor_with_fd(FT ft, DESCRIPTOR desc, int fd); void toku_ft_update_cmp_descriptor(FT ft); // get the descriptor for a ft. 
safe to read as long as clients honor the @@ -174,9 +178,17 @@ void toku_ft_update_cmp_descriptor(FT ft); DESCRIPTOR toku_ft_get_descriptor(FT_HANDLE ft_handle); DESCRIPTOR toku_ft_get_cmp_descriptor(FT_HANDLE ft_handle); +typedef struct { + // delta versions in basements could be negative + int64_t numrows; + int64_t numbytes; +} STAT64INFO_S, *STAT64INFO; +static const STAT64INFO_S ZEROSTATS = { .numrows = 0, .numbytes = 0}; + void toku_ft_update_stats(STAT64INFO headerstats, STAT64INFO_S delta); void toku_ft_decrease_stats(STAT64INFO headerstats, STAT64INFO_S delta); +typedef void (*remove_ft_ref_callback)(FT ft, void *extra); void toku_ft_remove_reference(FT ft, bool oplsn_valid, LSN oplsn, remove_ft_ref_callback remove_ref, void *extra); @@ -189,7 +201,6 @@ void toku_ft_set_compression_method(FT ft, enum toku_compression_method method); void toku_ft_get_compression_method(FT ft, enum toku_compression_method *methodp); void toku_ft_set_fanout(FT ft, unsigned int fanout); void toku_ft_get_fanout(FT ft, unsigned int *fanout); -void toku_node_save_ct_pair(CACHEKEY UU(key), void *value_data, PAIR p); // mark the ft as a blackhole. any message injections will be a no op. void toku_ft_set_blackhole(FT_HANDLE ft_handle); @@ -198,15 +209,17 @@ void toku_ft_set_blackhole(FT_HANDLE ft_handle); // The difference between the two is MVCC garbage. void toku_ft_get_garbage(FT ft, uint64_t *total_space, uint64_t *used_space); +// TODO: Should be in portability int get_num_cores(void); + +// TODO: Use the cachetable's worker pool instead of something managed by the FT... struct toku_thread_pool *get_ft_pool(void); -void dump_bad_block(unsigned char *vp, uint64_t size); +// TODO: Should be in portability int toku_single_process_lock(const char *lock_dir, const char *which, int *lockfd); - int toku_single_process_unlock(int *lockfd); -void tokudb_update_product_name_strings(void); +void tokuft_update_product_name_strings(void); #define TOKU_MAX_PRODUCT_NAME_LENGTH (256) extern char toku_product_name[TOKU_MAX_PRODUCT_NAME_LENGTH]; @@ -219,5 +232,4 @@ struct toku_product_name_strings_struct { }; extern struct toku_product_name_strings_struct toku_product_name_strings; -extern int tokudb_num_envs; -#endif +extern int tokuft_num_envs; diff --git a/storage/tokudb/ft-index/ft/fttypes.h b/storage/tokudb/ft-index/ft/fttypes.h deleted file mode 100644 index 73e228cf6ff..00000000000 --- a/storage/tokudb/ft-index/ft/fttypes.h +++ /dev/null @@ -1,382 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef FTTYPES_H -#define FTTYPES_H - -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -#include <sys/types.h> -#ifndef _XOPEN_SOURCE -#define _XOPEN_SOURCE 500 -#endif -#define _FILE_OFFSET_BITS 64 - -#include "toku_assert.h" -#include <db.h> -#include <inttypes.h> - - -// Use the C++ bool and constants (true false), rather than BOOL, TRUE, and FALSE. 
- -typedef struct ft_handle *FT_HANDLE; -typedef struct ftnode *FTNODE; -typedef struct ftnode_disk_data *FTNODE_DISK_DATA; -typedef struct ftnode_leaf_basement_node *BASEMENTNODE; -typedef struct ftnode_nonleaf_childinfo *NONLEAF_CHILDINFO; -typedef struct sub_block *SUB_BLOCK; -typedef struct ft *FT; -typedef struct ft_header *FT_HEADER; -typedef struct ft_options *FT_OPTIONS; - -struct wbuf; -struct dbuf; - -typedef unsigned int ITEMLEN; -typedef const void *bytevec; - -typedef int64_t DISKOFF; /* Offset in a disk. -1 is the NULL pointer. */ -typedef uint64_t TXNID; - -typedef struct txnid_pair_s { - TXNID parent_id64; - TXNID child_id64; -} TXNID_PAIR; - - -#define TXNID_NONE_LIVING ((TXNID)0) -#define TXNID_NONE ((TXNID)0) -#define TXNID_MAX ((TXNID)-1) - -static const TXNID_PAIR TXNID_PAIR_NONE = { .parent_id64 = TXNID_NONE, .child_id64 = TXNID_NONE }; - -typedef struct blocknum_s { int64_t b; } BLOCKNUM; // make a struct so that we will notice type problems. -typedef struct gid_s { uint8_t *gid; } GID; // the gid is of size [DB_GID_SIZE] -typedef TOKU_XA_XID *XIDP; // this is the type that's passed to the logger code (so that we don't have to copy all 152 bytes when only a subset are even valid.) -#define ROLLBACK_NONE ((BLOCKNUM){0}) - -static inline BLOCKNUM make_blocknum(int64_t b) { BLOCKNUM result={b}; return result; } - -// This struct hold information about values stored in the cachetable. -// As one can tell from the names, we are probably violating an -// abstraction layer by placing names. -// -// The purpose of having this struct is to have a way for the -// cachetable to accumulate the some totals we are interested in. -// Breaking this abstraction layer by having these names was the -// easiest way. -// -typedef struct pair_attr_s { - long size; // size PAIR's value takes in memory - long nonleaf_size; // size if PAIR is a nonleaf node, 0 otherwise, used only for engine status - long leaf_size; // size if PAIR is a leaf node, 0 otherwise, used only for engine status - long rollback_size; // size of PAIR is a rollback node, 0 otherwise, used only for engine status - long cache_pressure_size; // amount PAIR contributes to cache pressure, is sum of buffer sizes and workdone counts - bool is_valid; -} PAIR_ATTR; - -static inline PAIR_ATTR make_pair_attr(long size) { - PAIR_ATTR result={ - .size = size, - .nonleaf_size = 0, - .leaf_size = 0, - .rollback_size = 0, - .cache_pressure_size = 0, - .is_valid = true - }; - return result; -} - -typedef struct { - uint32_t len; - char *data; -} BYTESTRING; - -/* Log Sequence Number (LSN) - * Make the LSN be a struct instead of an integer so that we get better type checking. */ -typedef struct __toku_lsn { uint64_t lsn; } LSN; -#define ZERO_LSN ((LSN){0}) -#define MAX_LSN ((LSN){UINT64_MAX}) - -/* Message Sequence Number (MSN) - * Make the MSN be a struct instead of an integer so that we get better type checking. */ -typedef struct __toku_msn { uint64_t msn; } MSN; -#define ZERO_MSN ((MSN){0}) // dummy used for message construction, to be filled in when msg is applied to tree -#define MIN_MSN ((MSN){(uint64_t)1 << 62}) // first 2^62 values reserved for messages created before Dr. No (for upgrade) -#define MAX_MSN ((MSN){UINT64_MAX}) - -typedef struct { - int64_t numrows; // delta versions in basements could be negative - int64_t numbytes; -} STAT64INFO_S, *STAT64INFO; - -static const STAT64INFO_S ZEROSTATS = {0,0}; - -/* At the ft layer, a FILENUM uniquely identifies an open file. 
- * At the ydb layer, a DICTIONARY_ID uniquely identifies an open dictionary. - * With the introduction of the loader (ticket 2216), it is possible for the file that holds - * an open dictionary to change, so these are now separate and independent unique identifiers. - */ -typedef struct {uint32_t fileid;} FILENUM; -#define FILENUM_NONE ((FILENUM){UINT32_MAX}) - -typedef struct {uint64_t dictid;} DICTIONARY_ID; -#define DICTIONARY_ID_NONE ((DICTIONARY_ID){0}) - -typedef struct { - uint32_t num; - FILENUM *filenums; -} FILENUMS; - -typedef struct tokulogger *TOKULOGGER; -typedef struct txn_manager *TXN_MANAGER; -#define NULL_LOGGER ((TOKULOGGER)0) -typedef struct tokutxn *TOKUTXN; -typedef struct txninfo *TXNINFO; -#define NULL_TXN ((TOKUTXN)0) - -struct logged_btt_pair { - DISKOFF off; - int32_t size; -}; - -typedef struct cachetable *CACHETABLE; -typedef struct cachefile *CACHEFILE; -typedef struct ctpair *PAIR; -typedef class checkpointer *CHECKPOINTER; -class bn_data; - -/* tree command types */ -enum ft_msg_type { - FT_NONE = 0, - FT_INSERT = 1, - FT_DELETE_ANY = 2, // Delete any matching key. This used to be called FT_DELETE. - //FT_DELETE_BOTH = 3, - FT_ABORT_ANY = 4, // Abort any commands on any matching key. - //FT_ABORT_BOTH = 5, // Abort commands that match both the key and the value - FT_COMMIT_ANY = 6, - //FT_COMMIT_BOTH = 7, - FT_COMMIT_BROADCAST_ALL = 8, // Broadcast to all leafentries, (commit all transactions). - FT_COMMIT_BROADCAST_TXN = 9, // Broadcast to all leafentries, (commit specific transaction). - FT_ABORT_BROADCAST_TXN = 10, // Broadcast to all leafentries, (commit specific transaction). - FT_INSERT_NO_OVERWRITE = 11, - FT_OPTIMIZE = 12, // Broadcast - FT_OPTIMIZE_FOR_UPGRADE = 13, // same as FT_OPTIMIZE, but record version number in leafnode - FT_UPDATE = 14, - FT_UPDATE_BROADCAST_ALL = 15 -}; - -static inline bool -ft_msg_type_applies_once(enum ft_msg_type type) -{ - bool ret_val; - switch (type) { - case FT_INSERT_NO_OVERWRITE: - case FT_INSERT: - case FT_DELETE_ANY: - case FT_ABORT_ANY: - case FT_COMMIT_ANY: - case FT_UPDATE: - ret_val = true; - break; - case FT_COMMIT_BROADCAST_ALL: - case FT_COMMIT_BROADCAST_TXN: - case FT_ABORT_BROADCAST_TXN: - case FT_OPTIMIZE: - case FT_OPTIMIZE_FOR_UPGRADE: - case FT_UPDATE_BROADCAST_ALL: - case FT_NONE: - ret_val = false; - break; - default: - assert(false); - } - return ret_val; -} - -static inline bool -ft_msg_type_applies_all(enum ft_msg_type type) -{ - bool ret_val; - switch (type) { - case FT_NONE: - case FT_INSERT_NO_OVERWRITE: - case FT_INSERT: - case FT_DELETE_ANY: - case FT_ABORT_ANY: - case FT_COMMIT_ANY: - case FT_UPDATE: - ret_val = false; - break; - case FT_COMMIT_BROADCAST_ALL: - case FT_COMMIT_BROADCAST_TXN: - case FT_ABORT_BROADCAST_TXN: - case FT_OPTIMIZE: - case FT_OPTIMIZE_FOR_UPGRADE: - case FT_UPDATE_BROADCAST_ALL: - ret_val = true; - break; - default: - assert(false); - } - return ret_val; -} - -static inline bool -ft_msg_type_does_nothing(enum ft_msg_type type) -{ - return (type == FT_NONE); -} - -typedef struct xids_t *XIDS; -typedef struct fifo_msg_t *FIFO_MSG; -/* tree commands */ -struct ft_msg { - enum ft_msg_type type; - MSN msn; // message sequence number - XIDS xids; - union { - /* insert or delete */ - struct ft_msg_insert_delete { - const DBT *key; // for insert, delete, upsertdel - const DBT *val; // for insert, delete, (and it is the "extra" for upsertdel, upsertdel_broadcast_all) - } id; - } u; -}; - -// Message sent into the ft to implement insert, delete, update, etc -typedef struct 
ft_msg FT_MSG_S; -typedef struct ft_msg *FT_MSG; - -typedef int (*ft_compare_func)(DB *, const DBT *, const DBT *); -typedef void (*setval_func)(const DBT *, void *); -typedef int (*ft_update_func)(DB *, const DBT *, const DBT *, const DBT *, setval_func, void *); -typedef void (*on_redirect_callback)(FT_HANDLE, void*); -typedef void (*remove_ft_ref_callback)(FT, void*); - -#define UU(x) x __attribute__((__unused__)) - -typedef struct memarena *MEMARENA; -typedef struct rollback_log_node *ROLLBACK_LOG_NODE; -typedef struct serialized_rollback_log_node *SERIALIZED_ROLLBACK_LOG_NODE; - -// -// Types of snapshots that can be taken by a tokutxn -// - TXN_SNAPSHOT_NONE: means that there is no snapshot. Reads do not use snapshot reads. -// used for SERIALIZABLE and READ UNCOMMITTED -// - TXN_SNAPSHOT_ROOT: means that all tokutxns use their root transaction's snapshot -// used for REPEATABLE READ -// - TXN_SNAPSHOT_CHILD: means that each child tokutxn creates its own snapshot -// used for READ COMMITTED -// - -typedef enum __TXN_SNAPSHOT_TYPE { - TXN_SNAPSHOT_NONE=0, - TXN_SNAPSHOT_ROOT=1, - TXN_SNAPSHOT_CHILD=2 -} TXN_SNAPSHOT_TYPE; - -typedef struct ancestors *ANCESTORS; -typedef struct pivot_bounds const * const PIVOT_BOUNDS; -typedef struct ftnode_fetch_extra *FTNODE_FETCH_EXTRA; -typedef struct unlockers *UNLOCKERS; - -enum reactivity { - RE_STABLE, - RE_FUSIBLE, - RE_FISSIBLE -}; - -enum split_mode { - SPLIT_EVENLY, - SPLIT_LEFT_HEAVY, - SPLIT_RIGHT_HEAVY -}; - -#endif diff --git a/storage/tokudb/ft-index/ft/key.h b/storage/tokudb/ft-index/ft/key.h deleted file mode 100644 index cf32e9d7249..00000000000 --- a/storage/tokudb/ft-index/ft/key.h +++ /dev/null @@ -1,104 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_KEY_H -#define TOKU_KEY_H - -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. 
- -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -#include "ybt.h" -#include "fttypes.h" - -int toku_keycompare (bytevec key1, ITEMLEN key1len, bytevec key2, ITEMLEN key2len); - -void toku_test_keycompare (void) ; - -int toku_builtin_compare_fun (DB *, const DBT *, const DBT*) __attribute__((__visibility__("default"))); - -#endif diff --git a/storage/tokudb/ft-index/ft/le-cursor.cc b/storage/tokudb/ft-index/ft/le-cursor.cc index b08fc62632c..f840c021fd2 100644 --- a/storage/tokudb/ft-index/ft/le-cursor.cc +++ b/storage/tokudb/ft-index/ft/le-cursor.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,9 +89,10 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." 
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "ft.h" -#include "ft-internal.h" -#include "le-cursor.h" +#include "ft/ft.h" +#include "ft/ft-internal.h" +#include "ft/le-cursor.h" +#include "ft/cursor.h" // A LE_CURSOR is a special purpose FT_CURSOR that: // - enables prefetching @@ -100,10 +101,6 @@ PATENT RIGHTS GRANT: // A LE_CURSOR is good for scanning a FT from beginning to end. Useful for hot indexing. struct le_cursor { - // TODO: remove DBs from the ft layer comparison function - // so this is never necessary - // use a fake db for comparisons. - struct __toku_db fake_db; FT_CURSOR ft_cursor; bool neg_infinity; // true when the le cursor is positioned at -infinity (initial setting) bool pos_infinity; // true when the le cursor is positioned at +infinity (when _next returns DB_NOTFOUND) @@ -123,8 +120,6 @@ toku_le_cursor_create(LE_CURSOR *le_cursor_result, FT_HANDLE ft_handle, TOKUTXN toku_ft_cursor_set_leaf_mode(le_cursor->ft_cursor); le_cursor->neg_infinity = false; le_cursor->pos_infinity = true; - // zero out the fake DB. this is a rare operation so it's not too slow. - memset(&le_cursor->fake_db, 0, sizeof(le_cursor->fake_db)); } } @@ -169,13 +164,9 @@ toku_le_cursor_is_key_greater_or_equal(LE_CURSOR le_cursor, const DBT *key) { } else if (le_cursor->pos_infinity) { result = false; // all keys are less than +infinity } else { - // get the comparison function and descriptor from the cursor's ft - FT_HANDLE ft_handle = le_cursor->ft_cursor->ft_handle; - ft_compare_func keycompare = toku_ft_get_bt_compare(ft_handle); - le_cursor->fake_db.cmp_descriptor = toku_ft_get_cmp_descriptor(ft_handle); + FT ft = le_cursor->ft_cursor->ft_handle->ft; // get the current position from the cursor and compare it to the given key. - DBT *cursor_key = &le_cursor->ft_cursor->key; - int r = keycompare(&le_cursor->fake_db, cursor_key, key); + int r = ft->cmp(&le_cursor->ft_cursor->key, key); if (r <= 0) { result = true; // key is right of the cursor key } else { diff --git a/storage/tokudb/ft-index/ft/le-cursor.h b/storage/tokudb/ft-index/ft/le-cursor.h index d443666492c..2fc5e09bc2c 100644 --- a/storage/tokudb/ft-index/ft/le-cursor.h +++ b/storage/tokudb/ft-index/ft/le-cursor.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,13 +86,12 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef LE_CURSOR_H -#define LE_CURSOR_H - -#include "ft-ops.h" +#include "ft/ft-internal.h" // A leaf entry cursor (LE_CURSOR) is a special type of FT_CURSOR that visits all of the leaf entries in a tree // and returns the leaf entry to the caller. 
It maintains a copy of the key that it was last positioned over to @@ -127,5 +126,3 @@ bool toku_le_cursor_is_key_greater_or_equal(LE_CURSOR le_cursor, const DBT *key) // extracts position of le_cursor into estimate. Responsibility of caller to handle // thread safety. Caller (the indexer), does so by ensuring indexer lock is held void toku_le_cursor_update_estimate(LE_CURSOR le_cursor, DBT* estimate); - -#endif diff --git a/storage/tokudb/ft-index/ft/leafentry.cc b/storage/tokudb/ft-index/ft/leafentry.cc index bcd3cf01b0c..075f29fa191 100644 --- a/storage/tokudb/ft-index/ft/leafentry.cc +++ b/storage/tokudb/ft-index/ft/leafentry.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,7 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "wbuf.h" +#include "serialize/wbuf.h" #include "leafentry.h" void wbuf_nocrc_LEAFENTRY(struct wbuf *w, LEAFENTRY le) { diff --git a/storage/tokudb/ft-index/ft/leafentry.h b/storage/tokudb/ft-index/ft/leafentry.h index 5c525db5c19..eddd49481fb 100644 --- a/storage/tokudb/ft-index/ft/leafentry.h +++ b/storage/tokudb/ft-index/ft/leafentry.h @@ -1,9 +1,6 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_LEAFENTRY_H -#define TOKU_LEAFENTRY_H - #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -33,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -90,6 +87,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -98,8 +97,9 @@ PATENT RIGHTS GRANT: #include <util/mempool.h> #include <util/omt.h> -#include "txn_manager.h" -#include "rbuf.h" +#include "ft/txn/txn_manager.h" +#include "ft/serialize/rbuf.h" +#include "ft/msg.h" /* Memory format of packed leaf entry @@ -211,6 +211,7 @@ void wbuf_nocrc_LEAFENTRY(struct wbuf *w, LEAFENTRY le); int print_klpair (FILE *outf, const void* key, uint32_t keylen, LEAFENTRY v); // Print a leafentry out in human-readable form. int le_latest_is_del(LEAFENTRY le); // Return true if it is a provisional delete. 
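In the le-cursor hunk above, the zeroed fake DB and the raw ft_compare_func are gone: the cursor now asks the ft's comparator object directly. A small sketch of that call pattern, assuming only what the diff shows -- that ft->cmp is callable on two DBT pointers (it bundles the compare function and its descriptor) and returns a memcmp-style sign; ft-internal.h is assumed to be included so that ft->cmp is visible:

    // Is the cursor's current key at or before the probe key?
    // Mirrors toku_le_cursor_is_key_greater_or_equal() after the change above.
    static bool cursor_key_at_or_before(FT ft, const DBT *cursor_key, const DBT *probe_key) {
        return ft->cmp(cursor_key, probe_key) <= 0;   // comparator object, no fake DB needed
    }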
+int le_val_is_del(LEAFENTRY le, bool is_snapshot_read, TOKUTXN txn); // Returns true if the value that is to be read is empty bool le_is_clean(LEAFENTRY le); //Return how many xids exist (0 does not count) bool le_has_xids(LEAFENTRY le, XIDS xids); // Return true transaction represented by xids is still provisional in this leafentry (le's xid stack is a superset or equal to xids) void* le_latest_val (LEAFENTRY le); // Return the latest val (return NULL for provisional deletes) @@ -227,10 +228,13 @@ uint64_t le_outermost_uncommitted_xid (LEAFENTRY le); // r|r!=0&&r!=TOKUDB_ACCEPT: Quit early, return r, because something unexpected went wrong (error case) typedef int(*LE_ITERATE_CALLBACK)(TXNID id, TOKUTXN context); -int le_iterate_is_del(LEAFENTRY le, LE_ITERATE_CALLBACK f, bool *is_empty, TOKUTXN context); - int le_iterate_val(LEAFENTRY le, LE_ITERATE_CALLBACK f, void** valpp, uint32_t *vallenp, TOKUTXN context); +void le_extract_val(LEAFENTRY le, + // should we return the entire leafentry as the val? + bool is_leaf_mode, bool is_snapshot_read, + TOKUTXN ttxn, uint32_t *vallen, void **val); + size_t leafentry_disksize_13(LEAFENTRY_13 le); @@ -241,11 +245,14 @@ toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry, // NULL if there was no stored size_t *new_leafentry_memorysize, LEAFENTRY *new_leafentry_p); +class bn_data; + void -toku_le_apply_msg(FT_MSG msg, +toku_le_apply_msg(const ft_msg &msg, LEAFENTRY old_leafentry, // NULL if there was no stored data. bn_data* data_buffer, // bn_data storing leafentry, if NULL, means there is no bn_data uint32_t idx, // index in data_buffer where leafentry is stored (and should be replaced + uint32_t old_keylen, txn_gc_info *gc_info, LEAFENTRY *new_leafentry_p, int64_t * numbytes_delta_p); @@ -261,6 +268,3 @@ toku_le_garbage_collect(LEAFENTRY old_leaf_entry, txn_gc_info *gc_info, LEAFENTRY *new_leaf_entry, int64_t * numbytes_delta_p); - -#endif /* TOKU_LEAFENTRY_H */ - diff --git a/storage/tokudb/ft-index/ft/ftloader-callback.cc b/storage/tokudb/ft-index/ft/loader/callbacks.cc index 3472d294551..40069c144f6 100644 --- a/storage/tokudb/ft-index/ft/ftloader-callback.cc +++ b/storage/tokudb/ft-index/ft/loader/callbacks.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,8 +95,8 @@ PATENT RIGHTS GRANT: #include <errno.h> #include <string.h> -#include "ftloader-internal.h" -#include "ybt.h" +#include "loader/loader-internal.h" +#include "util/dbt.h" static void error_callback_lock(ft_loader_error_callback loader_error) { toku_mutex_lock(&loader_error->mutex); diff --git a/storage/tokudb/ft-index/ft/dbufio.cc b/storage/tokudb/ft-index/ft/loader/dbufio.cc index 69b3bd8e936..c3f72e14ab1 100644 --- a/storage/tokudb/ft-index/ft/dbufio.cc +++ b/storage/tokudb/ft-index/ft/loader/dbufio.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,16 +89,17 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 
11/760379 and to the patents and/or patent applications resulting from it." -#include "dbufio.h" -#include "fttypes.h" -#include <toku_assert.h> #include <errno.h> -#include <unistd.h> -#include "memory.h" #include <string.h> -#include "ftloader-internal.h" -#include "ft-internal.h" -#include "ft.h" +#include <unistd.h> + +#include "portability/toku_assert.h" +#include "portability/memory.h" + +#include "ft/ft-internal.h" +#include "ft/serialize/ft_node-serialize.h" +#include "loader/dbufio.h" +#include "loader/loader-internal.h" struct dbufio_file { // i/o thread owns these diff --git a/storage/tokudb/ft-index/ft/dbufio.h b/storage/tokudb/ft-index/ft/loader/dbufio.h index 0762bf9a8c6..da31f22277d 100644 --- a/storage/tokudb/ft-index/ft/dbufio.h +++ b/storage/tokudb/ft-index/ft/loader/dbufio.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_DBUFIO_H -#define TOKU_DBUFIO_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,6 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." #include <toku_portability.h> @@ -108,5 +108,3 @@ int dbufio_fileset_read (DBUFIO_FILESET bfs, int filenum, void *buf_v, size_t co int panic_dbufio_fileset(DBUFIO_FILESET, int error); void dbufio_print(DBUFIO_FILESET); - -#endif diff --git a/storage/tokudb/ft-index/ft/ftloader-internal.h b/storage/tokudb/ft-index/ft/loader/loader-internal.h index be1ded59890..ea1b9c5afa3 100644 --- a/storage/tokudb/ft-index/ft/ftloader-internal.h +++ b/storage/tokudb/ft-index/ft/loader/loader-internal.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef FTLOADER_INTERNAL_H -#define FTLOADER_INTERNAL_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,27 +86,30 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." #include <db.h> -#include "fttypes.h" -#include "ftloader.h" -#include "queue.h" -#include <toku_pthread.h> -#include "dbufio.h" - -enum { EXTRACTOR_QUEUE_DEPTH = 2, - FILE_BUFFER_SIZE = 1<<24, - MIN_ROWSET_MEMORY = 1<<23, - MIN_MERGE_FANIN = 2, - FRACTAL_WRITER_QUEUE_DEPTH = 3, - FRACTAL_WRITER_ROWSETS = FRACTAL_WRITER_QUEUE_DEPTH + 2, - DBUFIO_DEPTH = 2, - TARGET_MERGE_BUF_SIZE = 1<<24, // we'd like the merge buffer to be this big. - MIN_MERGE_BUF_SIZE = 1<<20, // always use at least this much - MAX_UNCOMPRESSED_BUF = MIN_MERGE_BUF_SIZE -}; +#include "portability/toku_pthread.h" + +#include "loader/dbufio.h" +#include "loader/loader.h" +#include "util/queue.h" + +enum { + EXTRACTOR_QUEUE_DEPTH = 2, + FILE_BUFFER_SIZE = 1<<24, + MIN_ROWSET_MEMORY = 1<<23, + MIN_MERGE_FANIN = 2, + FRACTAL_WRITER_QUEUE_DEPTH = 3, + FRACTAL_WRITER_ROWSETS = FRACTAL_WRITER_QUEUE_DEPTH + 2, + DBUFIO_DEPTH = 2, + TARGET_MERGE_BUF_SIZE = 1<<24, // we'd like the merge buffer to be this big. 
+ MIN_MERGE_BUF_SIZE = 1<<20, // always use at least this much + MAX_UNCOMPRESSED_BUF = MIN_MERGE_BUF_SIZE +}; /* These functions are exported to allow the tests to compile. */ @@ -245,6 +246,7 @@ struct ft_loader_s { CACHETABLE cachetable; bool did_reserve_memory; bool compress_intermediates; + bool allow_puts; uint64_t reserved_memory; // how much memory are we allowed to use? /* To make it easier to recover from errors, we don't use FILE*, instead we use an index into the file_infos. */ @@ -346,7 +348,8 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp, TOKUTXN txn, bool reserve_memory, uint64_t reserve_memory_size, - bool compress_intermediates); + bool compress_intermediates, + bool allow_puts); void toku_ft_loader_internal_destroy (FTLOADER bl, bool is_error); @@ -360,5 +363,3 @@ int toku_ft_loader_get_error(FTLOADER bl, int *loader_errno); void ft_loader_lock_init(FTLOADER bl); void ft_loader_lock_destroy(FTLOADER bl); void ft_loader_set_fractal_workers_count_from_c(FTLOADER bl); - -#endif // FTLOADER_INTERNAL_H diff --git a/storage/tokudb/ft-index/ft/ftloader.cc b/storage/tokudb/ft-index/ft/loader/loader.cc index 2df6d0a1cda..a6f41cd6b54 100644 --- a/storage/tokudb/ft-index/ft/ftloader.cc +++ b/storage/tokudb/ft-index/ft/loader/loader.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -100,17 +100,20 @@ PATENT RIGHTS GRANT: #include <string.h> #include <fcntl.h> -#include <util/x1764.h> - -#include "ftloader-internal.h" -#include "ft-internal.h" -#include "sub_block.h" -#include "sub_block_map.h" -#include "pqueue.h" -#include "dbufio.h" -#include "leafentry.h" -#include "log-internal.h" -#include "ft.h" +#include "ft/ft.h" +#include "ft/ft-internal.h" +#include "ft/leafentry.h" +#include "ft/loader/loader-internal.h" +#include "ft/loader/pqueue.h" +#include "ft/loader/dbufio.h" +#include "ft/logger/log-internal.h" +#include "ft/node.h" +#include "ft/serialize/block_table.h" +#include "ft/serialize/ft-serialize.h" +#include "ft/serialize/ft_node-serialize.h" +#include "ft/serialize/sub_block.h" + +#include "util/x1764.h" static size_t (*os_fwrite_fun)(const void *,size_t,size_t,FILE*)=NULL; void ft_loader_set_os_fwrite (size_t (*fwrite_fun)(const void*,size_t,size_t,FILE*)) { @@ -356,6 +359,8 @@ int ft_loader_open_temp_file (FTLOADER bl, FIDX *file_idx) */ { int result = 0; + if (result) // debug hack + return result; FILE *f = NULL; int fd = -1; char *fname = toku_strdup(bl->temp_file_template); @@ -420,6 +425,10 @@ void toku_ft_loader_internal_destroy (FTLOADER bl, bool is_error) { } destroy_rowset(&bl->primary_rowset); + if (bl->primary_rowset_queue) { + toku_queue_destroy(bl->primary_rowset_queue); + bl->primary_rowset_queue = nullptr; + } for (int i=0; i<bl->N; i++) { if ( bl->fractal_queues ) { @@ -543,7 +552,8 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp, TOKUTXN txn, bool reserve_memory, uint64_t reserve_memory_size, - bool compress_intermediates) + bool compress_intermediates, + bool allow_puts) // Effect: Allocate and initialize a FTLOADER, but do not create the extractor thread. { FTLOADER CALLOC(bl); // initialized to all zeros (hence CALLOC) @@ -560,10 +570,7 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp, bl->reserved_memory = 512*1024*1024; // if no cache table use 512MB. 
} bl->compress_intermediates = compress_intermediates; - if (0) { // debug - fprintf(stderr, "%s Reserved memory=%" PRId64 "\n", __FUNCTION__, bl->reserved_memory); - } - + bl->allow_puts = allow_puts; bl->src_db = src_db; bl->N = N; bl->load_lsn = load_lsn; @@ -625,10 +632,9 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp, int r = init_rowset(&bl->primary_rowset, memory_per_rowset_during_extract(bl)); if (r!=0) { toku_ft_loader_internal_destroy(bl, true); return r; } } - { int r = queue_create(&bl->primary_rowset_queue, EXTRACTOR_QUEUE_DEPTH); + { int r = toku_queue_create(&bl->primary_rowset_queue, EXTRACTOR_QUEUE_DEPTH); if (r!=0) { toku_ft_loader_internal_destroy(bl, true); return r; } } - //printf("%s:%d toku_pthread_create\n", __FILE__, __LINE__); { ft_loader_lock_init(bl); } @@ -638,7 +644,7 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp, return 0; } -int toku_ft_loader_open (/* out */ FTLOADER *blp, +int toku_ft_loader_open (FTLOADER *blp, /* out */ CACHETABLE cachetable, generate_row_for_put_func g, DB *src_db, @@ -650,34 +656,38 @@ int toku_ft_loader_open (/* out */ FTLOADER *blp, TOKUTXN txn, bool reserve_memory, uint64_t reserve_memory_size, - bool compress_intermediates) -/* Effect: called by DB_ENV->create_loader to create an ft loader. - * Arguments: - * blp Return the ft loader here. - * g The function for generating a row - * src_db The source database. Needed by g. May be NULL if that's ok with g. - * N The number of dbs to create. - * dbs An array of open databases. Used by g. The data will be put in these database. - * new_fnames The file names (these strings are owned by the caller: we make a copy for our own purposes). - * temp_file_template A template suitable for mkstemp() - * Return value: 0 on success, an error number otherwise. - */ -{ + bool compress_intermediates, + bool allow_puts) { +// Effect: called by DB_ENV->create_loader to create an ft loader. +// Arguments: +// blp Return a ft loader ("bulk loader") here. +// g The function for generating a row +// src_db The source database. Needed by g. May be NULL if that's ok with g. +// N The number of dbs to create. +// dbs An array of open databases. Used by g. The data will be put in these database. +// new_fnames The file names (these strings are owned by the caller: we make a copy for our own purposes). +// temp_file_template A template suitable for mkstemp() +// reserve_memory Cause the loader to reserve memory for its use from the cache table. +// compress_intermediates Cause the loader to compress intermediate loader files. +// allow_puts Prepare the loader for rows to insert. When puts are disabled, the loader does not run the +// extractor or the fractal tree writer threads. +// Return value: 0 on success, an error number otherwise. 
int result = 0; { int r = toku_ft_loader_internal_init(blp, cachetable, g, src_db, - N, fts, dbs, - new_fnames_in_env, - bt_compare_functions, - temp_file_template, - load_lsn, - txn, - reserve_memory, - reserve_memory_size, - compress_intermediates); + N, fts, dbs, + new_fnames_in_env, + bt_compare_functions, + temp_file_template, + load_lsn, + txn, + reserve_memory, + reserve_memory_size, + compress_intermediates, + allow_puts); if (r!=0) result = r; } - if (result==0) { + if (result==0 && allow_puts) { FTLOADER bl = *blp; int r = toku_pthread_create(&bl->extractor_thread, NULL, extractor_thread, (void*)bl); if (r==0) { @@ -1131,7 +1141,7 @@ static void* extractor_thread (void *blv) { while (1) { void *item; { - int rq = queue_deq(bl->primary_rowset_queue, &item, NULL, NULL); + int rq = toku_queue_deq(bl->primary_rowset_queue, &item, NULL, NULL); if (rq==EOF) break; invariant(rq==0); // other errors are arbitrarily bad. } @@ -1162,7 +1172,7 @@ static void enqueue_for_extraction (FTLOADER bl) { struct rowset *XMALLOC(enqueue_me); *enqueue_me = bl->primary_rowset; zero_rowset(&bl->primary_rowset); - int r = queue_enq(bl->primary_rowset_queue, (void*)enqueue_me, 1, NULL); + int r = toku_queue_enq(bl->primary_rowset_queue, (void*)enqueue_me, 1, NULL); resource_assert_zero(r); } @@ -1199,7 +1209,7 @@ finish_extractor (FTLOADER bl) { } //printf("%s:%d please finish extraction\n", __FILE__, __LINE__); { - int r = queue_eof(bl->primary_rowset_queue); + int r = toku_queue_eof(bl->primary_rowset_queue); invariant(r==0); } //printf("%s:%d joining\n", __FILE__, __LINE__); @@ -1211,8 +1221,9 @@ finish_extractor (FTLOADER bl) { bl->extractor_live = false; } { - int r = queue_destroy(bl->primary_rowset_queue); + int r = toku_queue_destroy(bl->primary_rowset_queue); invariant(r==0); + bl->primary_rowset_queue = nullptr; } rval = ft_loader_fi_close_all(&bl->file_infos); @@ -1374,10 +1385,9 @@ int toku_ft_loader_put (FTLOADER bl, DBT *key, DBT *val) * Return value: 0 on success, an error number otherwise. 
*/ { - if (ft_loader_get_error(&bl->error_callback)) + if (!bl->allow_puts || ft_loader_get_error(&bl->error_callback)) return EINVAL; // previous panic bl->n_rows++; -// return loader_write_row(key, val, bl->fprimary_rows, &bl->fprimary_offset, bl); return loader_do_put(bl, key, val); } @@ -1875,7 +1885,7 @@ int toku_merge_some_files_using_dbufio (const bool to_q, FIDX dest_data, QUEUE q if (to_q) { if (row_wont_fit(output_rowset, keys[mini].size + vals[mini].size)) { { - int r = queue_enq(q, (void*)output_rowset, 1, NULL); + int r = toku_queue_enq(q, (void*)output_rowset, 1, NULL); if (r!=0) { result = r; break; @@ -1951,7 +1961,7 @@ int toku_merge_some_files_using_dbufio (const bool to_q, FIDX dest_data, QUEUE q } if (result==0 && to_q) { - int r = queue_enq(q, (void*)output_rowset, 1, NULL); + int r = toku_queue_enq(q, (void*)output_rowset, 1, NULL); if (r!=0) result = r; else @@ -2142,7 +2152,7 @@ int merge_files (struct merge_fileset *fs, if (result) ft_loader_set_panic(bl, result, true, which_db, nullptr, nullptr); { - int r = queue_eof(output_q); + int r = toku_queue_eof(output_q); if (r!=0 && result==0) result = r; } // It's conceivable that the progress_allocation could be nonzero (for example if bl->N==0) @@ -2212,16 +2222,16 @@ struct dbout { int64_t n_translations_limit; struct translation *translation; toku_mutex_t mutex; - FT h; + FT ft; }; -static inline void dbout_init(struct dbout *out, FT h) { +static inline void dbout_init(struct dbout *out, FT ft) { out->fd = -1; out->current_off = 0; out->n_translations = out->n_translations_limit = 0; out->translation = NULL; toku_mutex_init(&out->mutex, NULL); - out->h = h; + out->ft = ft; } static inline void dbout_destroy(struct dbout *out) { @@ -2338,12 +2348,12 @@ static struct leaf_buf *start_leaf (struct dbout *out, const DESCRIPTOR UU(desc) lbuf->nkeys = lbuf->ndata = lbuf->dsize = 0; lbuf->off = 0; - lbuf->xids = xids_get_root_xids(); + lbuf->xids = toku_xids_get_root_xids(); if (xid != TXNID_NONE) { XIDS new_xids = NULL; - int r = xids_create_child(lbuf->xids, &new_xids, xid); + int r = toku_xids_create_child(lbuf->xids, &new_xids, xid); assert(r == 0 && new_xids); - xids_destroy(&lbuf->xids); + toku_xids_destroy(&lbuf->xids); lbuf->xids = new_xids; } @@ -2364,7 +2374,7 @@ static int write_header (struct dbout *out, long long translation_location_on_di static void drain_writer_q(QUEUE q) { void *item; while (1) { - int r = queue_deq(q, &item, NULL, NULL); + int r = toku_queue_deq(q, &item, NULL, NULL); if (r == EOF) break; invariant(r == 0); @@ -2425,6 +2435,8 @@ static int toku_loader_write_ft_from_q (FTLOADER bl, if (r) { result = r; drain_writer_q(q); + r = toku_os_close(fd); + assert_zero(r); return result; } FILE *pivots_stream = toku_bl_fidx2file(bl, pivots_file); @@ -2492,7 +2504,7 @@ static int toku_loader_write_ft_from_q (FTLOADER bl, while (result == 0) { void *item; { - int rr = queue_deq(q, &item, NULL, NULL); + int rr = toku_queue_deq(q, &item, NULL, NULL); if (rr == EOF) break; if (rr != 0) { ft_loader_set_panic(bl, rr, true, which_db, nullptr, nullptr); @@ -2605,7 +2617,7 @@ static int toku_loader_write_ft_from_q (FTLOADER bl, { invariant(sts.n_subtrees==1); - out.h->h->root_blocknum = make_blocknum(sts.subtrees[0].block); + out.ft->h->root_blocknum = make_blocknum(sts.subtrees[0].block); toku_free(sts.subtrees); sts.subtrees = NULL; // write the descriptor @@ -2714,16 +2726,11 @@ static int loader_do_i (FTLOADER bl, struct rowset *rows = &(bl->rows[which_db]); invariant(rows->data==NULL); // the rows should 
be all cleaned up already - // a better allocation would be to figure out roughly how many merge passes we'll need. - int allocation_for_merge = (2*progress_allocation)/3; - progress_allocation -= allocation_for_merge; - - int r; - r = queue_create(&bl->fractal_queues[which_db], FRACTAL_WRITER_QUEUE_DEPTH); + int r = toku_queue_create(&bl->fractal_queues[which_db], FRACTAL_WRITER_QUEUE_DEPTH); if (r) goto error; { - mode_t mode = S_IRWXU|S_IRWXG|S_IRWXO; + mode_t mode = S_IRUSR+S_IWUSR + S_IRGRP+S_IWGRP; int fd = toku_os_open(new_fname, O_RDWR| O_CREAT | O_BINARY, mode); // #2621 if (fd < 0) { r = get_error_errno(); goto error; @@ -2740,49 +2747,62 @@ static int loader_do_i (FTLOADER bl, r = dest_db->get_fanout(dest_db, &target_fanout); invariant_zero(r); - // This structure must stay live until the join below. - struct fractal_thread_args fta = { bl, - descriptor, - fd, - progress_allocation, - bl->fractal_queues[which_db], - bl->extracted_datasizes[which_db], - 0, - which_db, - target_nodesize, - target_basementnodesize, - target_compression_method, - target_fanout - }; - - r = toku_pthread_create(bl->fractal_threads+which_db, NULL, fractal_thread, (void*)&fta); - if (r) { - int r2 __attribute__((__unused__)) = queue_destroy(bl->fractal_queues[which_db]); - // ignore r2, since we already have an error - goto error; - } - invariant(bl->fractal_threads_live[which_db]==false); - bl->fractal_threads_live[which_db] = true; + if (bl->allow_puts) { + // a better allocation would be to figure out roughly how many merge passes we'll need. + int allocation_for_merge = (2*progress_allocation)/3; + progress_allocation -= allocation_for_merge; + + // This structure must stay live until the join below. + struct fractal_thread_args fta = { + bl, + descriptor, + fd, + progress_allocation, + bl->fractal_queues[which_db], + bl->extracted_datasizes[which_db], + 0, + which_db, + target_nodesize, + target_basementnodesize, + target_compression_method, + target_fanout + }; + + r = toku_pthread_create(bl->fractal_threads+which_db, NULL, fractal_thread, (void*)&fta); + if (r) { + int r2 __attribute__((__unused__)) = toku_queue_destroy(bl->fractal_queues[which_db]); + // ignore r2, since we already have an error + bl->fractal_queues[which_db] = nullptr; + goto error; + } + invariant(bl->fractal_threads_live[which_db]==false); + bl->fractal_threads_live[which_db] = true; - r = merge_files(fs, bl, which_db, dest_db, compare, allocation_for_merge, bl->fractal_queues[which_db]); + r = merge_files(fs, bl, which_db, dest_db, compare, allocation_for_merge, bl->fractal_queues[which_db]); - { - void *toku_pthread_retval; - int r2 = toku_pthread_join(bl->fractal_threads[which_db], &toku_pthread_retval); - invariant(fta.bl==bl); // this is a gratuitous assertion to make sure that the fta struct is still live here. A previous bug but that struct into a C block statement. - resource_assert_zero(r2); - invariant(toku_pthread_retval==NULL); - invariant(bl->fractal_threads_live[which_db]); - bl->fractal_threads_live[which_db] = false; - if (r == 0) r = fta.errno_result; + { + void *toku_pthread_retval; + int r2 = toku_pthread_join(bl->fractal_threads[which_db], &toku_pthread_retval); + invariant(fta.bl==bl); // this is a gratuitous assertion to make sure that the fta struct is still live here. A previous bug put that struct into a C block statement. 
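One small behavioral change buried in the hunk above: the loader's dictionary file is now created with mode rw-rw---- (0660) instead of rwxrwxrwx (0777). The diff spells the new mode with `+`, which is equivalent here because the permission bits do not overlap. A sketch of the same request in the more conventional OR form, using plain open() instead of toku_os_open() only to keep the example self-contained:

    #include <fcntl.h>
    #include <sys/stat.h>

    // 0660: owner and group may read/write; no execute bits, nothing for others.
    static const mode_t loader_file_mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;

    // Hypothetical standalone equivalent of the toku_os_open() call in loader_do_i().
    static int open_loader_file(const char *fname) {
        return open(fname, O_RDWR | O_CREAT, loader_file_mode);
    }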
+ resource_assert_zero(r2); + invariant(toku_pthread_retval==NULL); + invariant(bl->fractal_threads_live[which_db]); + bl->fractal_threads_live[which_db] = false; + if (r == 0) r = fta.errno_result; + } + } else { + toku_queue_eof(bl->fractal_queues[which_db]); + r = toku_loader_write_ft_from_q(bl, descriptor, fd, progress_allocation, + bl->fractal_queues[which_db], bl->extracted_datasizes[which_db], which_db, + target_nodesize, target_basementnodesize, target_compression_method, target_fanout); } } error: // this is the cleanup code. Even if r==0 (no error) we fall through to here. - { - int r2 = queue_destroy(bl->fractal_queues[which_db]); + if (bl->fractal_queues[which_db]) { + int r2 = toku_queue_destroy(bl->fractal_queues[which_db]); invariant(r2==0); - bl->fractal_queues[which_db]=NULL; + bl->fractal_queues[which_db] = nullptr; } // if we get here we need to free up the merge_fileset and the rowset, as well as the keys @@ -2851,6 +2871,10 @@ int toku_ft_loader_close (FTLOADER bl, if (r) result = r; invariant(!bl->extractor_live); + } else { + r = finish_primary_rows(bl); + if (r) + result = r; } // check for an error during extraction @@ -2918,16 +2942,12 @@ static void add_pair_to_leafnode (struct leaf_buf *lbuf, unsigned char *key, int // #3588 TODO can do the rebalancing here and avoid a lot of work later FTNODE leafnode = lbuf->node; uint32_t idx = BLB_DATA(leafnode, 0)->num_klpairs(); - DBT thekey = { .data = key, .size = (uint32_t) keylen }; - DBT theval = { .data = val, .size = (uint32_t) vallen }; - FT_MSG_S msg = { .type = FT_INSERT, - .msn = ZERO_MSN, - .xids = lbuf->xids, - .u = { .id = { &thekey, &theval } } }; - uint64_t workdone=0; + DBT kdbt, vdbt; + ft_msg msg(toku_fill_dbt(&kdbt, key, keylen), toku_fill_dbt(&vdbt, val, vallen), FT_INSERT, ZERO_MSN, lbuf->xids); + uint64_t workdone = 0; // there's no mvcc garbage in a bulk-loaded FT, so there's no need to pass useful gc info txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, true); - toku_ft_bn_apply_msg_once(BLB(leafnode,0), &msg, idx, NULL, &gc_info, &workdone, stats_to_update); + toku_ft_bn_apply_msg_once(BLB(leafnode,0), msg, idx, keylen, NULL, &gc_info, &workdone, stats_to_update); } static int write_literal(struct dbout *out, void*data, size_t len) { @@ -2967,7 +2987,7 @@ static void finish_leafnode (struct dbout *out, struct leaf_buf *lbuf, int progr toku_free(serialized_leaf); } toku_ftnode_free(&lbuf->node); - xids_destroy(&lbuf->xids); + toku_xids_destroy(&lbuf->xids); toku_free(lbuf); //printf("Nodewrite %d (%.1f%%):", progress_allocation, 100.0*progress_allocation/PROGRESS_MAX); @@ -3015,7 +3035,7 @@ static int write_translation_table (struct dbout *out, long long *off_of_transla static int write_header (struct dbout *out, long long translation_location_on_disk, long long translation_size_on_disk) { int result = 0; - size_t size = toku_serialize_ft_size(out->h->h); + size_t size = toku_serialize_ft_size(out->ft->h); size_t alloced_size = roundup_to_multiple(512, size); struct wbuf wbuf; char *MALLOC_N_ALIGNED(512, alloced_size, buf); @@ -3023,8 +3043,8 @@ write_header (struct dbout *out, long long translation_location_on_disk, long lo result = get_error_errno(); } else { wbuf_init(&wbuf, buf, size); - out->h->h->on_disk_stats = out->h->in_memory_stats; - toku_serialize_ft_to_wbuf(&wbuf, out->h->h, translation_location_on_disk, translation_size_on_disk); + out->ft->h->on_disk_stats = out->ft->in_memory_stats; + toku_serialize_ft_to_wbuf(&wbuf, out->ft->h, translation_location_on_disk, 
translation_size_on_disk); for (size_t i=size; i<alloced_size; i++) buf[i]=0; // initialize all those unused spots to zero if (wbuf.ndone != size) result = EINVAL; @@ -3146,11 +3166,7 @@ static void write_nonleaf_node (FTLOADER bl, struct dbout *out, int64_t blocknum FTNODE XMALLOC(node); toku_initialize_empty_ftnode(node, make_blocknum(blocknum_of_new_node), height, n_children, FT_LAYOUT_VERSION, 0); - node->totalchildkeylens = 0; - for (int i=0; i<n_children-1; i++) { - toku_clone_dbt(&node->childkeys[i], pivots[i]); - node->totalchildkeylens += pivots[i].size; - } + node->pivotkeys.create_from_dbts(pivots, n_children - 1); assert(node->bp); for (int i=0; i<n_children; i++) { BP_BLOCKNUM(node,i) = make_blocknum(subtree_info[i].block); @@ -3184,14 +3200,14 @@ static void write_nonleaf_node (FTLOADER bl, struct dbout *out, int64_t blocknum for (int i=0; i<n_children-1; i++) { toku_free(pivots[i].data); - toku_free(node->childkeys[i].data); } for (int i=0; i<n_children; i++) { destroy_nonleaf_childinfo(BNC(node,i)); } toku_free(pivots); + // TODO: Should be using toku_destroy_ftnode_internals, which should be renamed to toku_ftnode_destroy toku_free(node->bp); - toku_free(node->childkeys); + node->pivotkeys.destroy(); toku_free(node); toku_free(ndd); toku_free(subtree_info); diff --git a/storage/tokudb/ft-index/ft/ftloader.h b/storage/tokudb/ft-index/ft/loader/loader.h index c3376c90e91..4ef45dea0ac 100644 --- a/storage/tokudb/ft-index/ft/ftloader.h +++ b/storage/tokudb/ft-index/ft/loader/loader.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef FTLOADER_H -#define FTLOADER_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,9 +87,16 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." +#include "ft/txn/txn.h" +#include "ft/cachetable/cachetable.h" +#include "ft/comparator.h" +#include "ft/ft-ops.h" + // The loader callbacks are C functions and need to be defined as such typedef void (*ft_loader_error_func)(DB *, int which_db, int err, DBT *key, DBT *val, void *extra); @@ -113,7 +118,8 @@ int toku_ft_loader_open (FTLOADER *bl, TOKUTXN txn, bool reserve_memory, uint64_t reserve_memory_size, - bool compress_intermediates); + bool compress_intermediates, + bool allow_puts); int toku_ft_loader_put (FTLOADER bl, DBT *key, DBT *val); @@ -130,5 +136,3 @@ void toku_ft_loader_set_size_factor (uint32_t factor); void ft_loader_set_os_fwrite (size_t (*fwrite_fun)(const void*,size_t,size_t,FILE*)); size_t ft_loader_leafentry_size(size_t key_size, size_t val_size, TXNID xid); - -#endif // FTLOADER_H diff --git a/storage/tokudb/ft-index/ft/pqueue.cc b/storage/tokudb/ft-index/ft/loader/pqueue.cc index fa76551b81f..c50664f5e45 100644 --- a/storage/tokudb/ft-index/ft/pqueue.cc +++ b/storage/tokudb/ft-index/ft/loader/pqueue.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,8 +92,8 @@ PATENT RIGHTS GRANT: #include <toku_portability.h> #include "toku_os.h" #include "ft-internal.h" -#include "ftloader-internal.h" -#include "pqueue.h" +#include "loader/loader-internal.h" +#include "loader/pqueue.h" #define pqueue_left(i) ((i) << 1) #define pqueue_right(i) (((i) << 1) + 1) diff --git a/storage/tokudb/ft-index/ft/pqueue.h b/storage/tokudb/ft-index/ft/loader/pqueue.h index cd550d70572..43df70e97ff 100644 --- a/storage/tokudb/ft-index/ft/pqueue.h +++ b/storage/tokudb/ft-index/ft/loader/pqueue.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_PQUEUE_H -#define TOKU_PQUEUE_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,6 +87,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -121,6 +121,3 @@ void pqueue_free(pqueue_t *q); size_t pqueue_size(pqueue_t *q); int pqueue_insert(pqueue_t *q, pqueue_node_t *d); int pqueue_pop(pqueue_t *q, pqueue_node_t **d); - - -#endif //TOKU_PQUEUE_H diff --git a/storage/tokudb/ft-index/ft/log-internal.h b/storage/tokudb/ft-index/ft/logger/log-internal.h index be8ab7a53da..5516cab30b6 100644 --- a/storage/tokudb/ft-index/ft/log-internal.h +++ b/storage/tokudb/ft-index/ft/logger/log-internal.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef LOG_INTERNAL_H -#define LOG_INTERNAL_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,6 +87,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
@@ -96,19 +96,18 @@ PATENT RIGHTS GRANT: #include <sys/types.h> #include <string.h> #include <dirent.h> -#include "ft-internal.h" -#include "log.h" -#include "toku_list.h" -#include "logfilemgr.h" -#include "txn.h" -#include "txn_manager.h" -#include "rollback_log_node_cache.h" -#include "txn_child_manager.h" -#include <portability/toku_pthread.h> +#include "portability/toku_list.h" +#include "portability/toku_pthread.h" +#include "ft/ft-internal.h" +#include "ft/logger/log.h" +#include "ft/logger/logfilemgr.h" +#include "ft/txn/txn.h" +#include "ft/txn/txn_manager.h" +#include "ft/txn/rollback_log_node_cache.h" -#include <util/memarena.h> -#include <util/omt.h> +#include "util/memarena.h" +#include "util/omt.h" using namespace toku; // Locking for the logger @@ -119,6 +118,7 @@ using namespace toku; #define LOGGER_MIN_BUF_SIZE (1<<24) +// TODO: Remove mylock, it has no value struct mylock { toku_mutex_t lock; }; @@ -157,7 +157,7 @@ struct tokulogger { DIR *dir; // descriptor for directory int fd; CACHETABLE ct; - int lg_max; // The size of the single file in the log. Default is 100MB in TokuDB + int lg_max; // The size of the single file in the log. Default is 100MB. // To access these, you must have the input lock LSN lsn; // the next available lsn @@ -181,8 +181,6 @@ struct tokulogger { tokutime_t time_spent_writing_to_disk; // how much tokutime did we spend writing to disk? uint64_t num_wait_buf_long; // how many times we waited >= 100ms for the in buf - void (*remove_finalize_callback) (DICTIONARY_ID, void*); // ydb-level callback to be called when a transaction that ... - void * remove_finalize_callback_extra; // ... deletes a file is committed or when one that creates a file is aborted. CACHEFILE rollback_cachefile; rollback_log_node_cache rollback_cache; TXN_MANAGER txn_manager; @@ -190,99 +188,7 @@ struct tokulogger { int toku_logger_find_next_unused_log_file(const char *directory, long long *result); int toku_logger_find_logfiles (const char *directory, char ***resultp, int *n_logfiles); - -struct txn_roll_info { - // these are number of rollback nodes and rollback entries for this txn. - // - // the current rollback node below has sequence number num_rollback_nodes - 1 - // (because they are numbered 0...num-1). often, the current rollback is - // already set to this block num, which means it exists and is available to - // log some entries. if the current rollback is NONE and the number of - // rollback nodes for this transaction is non-zero, then we will use - // the number of rollback nodes to know which sequence number to assign - // to a new one we create - uint64_t num_rollback_nodes; - uint64_t num_rollentries; - uint64_t num_rollentries_processed; - uint64_t rollentry_raw_count; // the total count of every byte in the transaction and all its children. - - // spilled rollback nodes are rollback nodes that were gorged by this - // transaction, retired, and saved in a list. - - // the spilled rollback head is the block number of the first rollback node - // that makes up the rollback log chain - BLOCKNUM spilled_rollback_head; - // the spilled rollback is the block number of the last rollback node that - // makes up the rollback log chain. - BLOCKNUM spilled_rollback_tail; - // the current rollback node block number we may use. if this is ROLLBACK_NONE, - // then we need to create one and set it here before using it. 
- BLOCKNUM current_rollback; -}; - -struct tokutxn { - // These don't change after create: - - TXNID_PAIR txnid; - - uint64_t snapshot_txnid64; // this is the lsn of the snapshot - const TXN_SNAPSHOT_TYPE snapshot_type; - const bool for_recovery; - const TOKULOGGER logger; - const TOKUTXN parent; - // The child txn is protected by the child_txn_manager lock - // and by the user contract. The user contract states (and is - // enforced at the ydb layer) that a child txn should not be created - // while another child exists. The txn_child_manager will protect - // other threads from trying to read this value while another - // thread commits/aborts the child - TOKUTXN child; - // statically allocated child manager, if this - // txn is a root txn, this manager will be used and set to - // child_manager for this transaction and all of its children - txn_child_manager child_manager_s; - // child manager for this transaction, all of its children, - // and all of its ancestors - txn_child_manager* child_manager; - // These don't change but they're created in a way that's hard to make - // strictly const. - DB_TXN *container_db_txn; // reference to DB_TXN that contains this tokutxn - xid_omt_t *live_root_txn_list; // the root txns live when the root ancestor (self if a root) started. - XIDS xids; // Represents the xid list - - TOKUTXN snapshot_next; - TOKUTXN snapshot_prev; - - bool begin_was_logged; - bool declared_read_only; // true if the txn was declared read only when began - // These are not read until a commit, prepare, or abort starts, and - // they're "monotonic" (only go false->true) during operation: - bool do_fsync; - bool force_fsync_on_commit; //This transaction NEEDS an fsync once (if) it commits. (commit means root txn) - - // Not used until commit, prepare, or abort starts: - LSN do_fsync_lsn; - TOKU_XA_XID xa_xid; // for prepared transactions - TXN_PROGRESS_POLL_FUNCTION progress_poll_fun; - void *progress_poll_fun_extra; - - toku_mutex_t txn_lock; - // Protected by the txn lock: - omt<FT> open_fts; // a collection of the fts that we touched. Indexed by filenum. - struct txn_roll_info roll_info; // Info used to manage rollback entries - - // mutex that protects the transition of the state variable - // the rest of the variables are used by the txn code and - // hot indexing to ensure that when hot indexing is processing a - // leafentry, a TOKUTXN cannot dissappear or change state out from - // underneath it - toku_mutex_t state_lock; - toku_cond_t state_cond; - TOKUTXN_STATE state; - uint32_t num_pin; // number of threads (all hot indexes) that want this - // txn to not transition to commit or abort - uint64_t client_id; -}; +void toku_logger_free_logfiles (char **logfiles, int n_logfiles); static inline int txn_has_current_rollback_log(TOKUTXN txn) { @@ -371,5 +277,3 @@ static inline char *fixup_fname(BYTESTRING *f) { fname[f->len]=0; return fname; } - -#endif diff --git a/storage/tokudb/ft-index/ft/log.h b/storage/tokudb/ft-index/ft/logger/log.h index 18ba802df6d..180f118765b 100644 --- a/storage/tokudb/ft-index/ft/log.h +++ b/storage/tokudb/ft-index/ft/logger/log.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_LOGGGER_H -#define TOKU_LOGGGER_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -89,30 +87,24 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <toku_portability.h> -#include <errno.h> #include <db.h> +#include <errno.h> -#include "fttypes.h" -#include "memory.h" -#include "logger.h" -#include "rollback.h" -#include "recover.h" -#include "txn.h" +#include "portability/memory.h" +#include "portability/toku_portability.h" + +#include "ft/logger/recover.h" +#include "ft/txn/rollback.h" +#include "ft/txn/txn.h" +#include "util/bytestring.h" struct roll_entry; -static inline int toku_copy_BYTESTRING(BYTESTRING *target, BYTESTRING val) { - target->len = val.len; - target->data = (char *) toku_memdup(val.data, (size_t)val.len); - if (target->data==0) { - return get_error_errno(); - } - return 0; -} static inline void toku_free_TXNID(TXNID txnid __attribute__((__unused__))) {} static inline void toku_free_TXNID_PAIR(TXNID_PAIR txnid __attribute__((__unused__))) {} @@ -129,6 +121,3 @@ static inline void toku_free_FILENUMS(FILENUMS val) { toku_free(val.filenums); } int toku_maybe_upgrade_log (const char *env_dir, const char *log_dir, LSN * lsn_of_clean_shutdown, bool * upgrade_in_progress); uint64_t toku_log_upgrade_get_footprint(void); - - -#endif diff --git a/storage/tokudb/ft-index/ft/log_upgrade.cc b/storage/tokudb/ft-index/ft/logger/log_upgrade.cc index e5a36a88cff..6631759fae0 100644 --- a/storage/tokudb/ft-index/ft/log_upgrade.cc +++ b/storage/tokudb/ft-index/ft/logger/log_upgrade.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -92,8 +92,8 @@ PATENT RIGHTS GRANT: #include <ft/log_header.h> #include "log-internal.h" -#include "logcursor.h" -#include "checkpoint.h" +#include "logger/logcursor.h" +#include "cachetable/checkpoint.h" static uint64_t footprint = 0; // for debug and accountability @@ -209,10 +209,7 @@ cleanup: r = toku_logcursor_destroy(&cursor); assert(r == 0); cleanup_no_logcursor: - for(int i=0;i<n_logfiles;i++) { - toku_free(logfiles[i]); - } - toku_free(logfiles); + toku_logger_free_logfiles(logfiles, n_logfiles); FOOTPRINTCAPTURE; return rval; } @@ -227,10 +224,6 @@ verify_clean_shutdown_of_log_version(const char *log_dir, uint32_t version, LSN if (version < TOKU_LOG_VERSION) { FOOTPRINT(1); r = verify_clean_shutdown_of_log_version_old(log_dir, last_lsn, last_xid, version); - if (r != 0) { - fprintf(stderr, "Cannot upgrade TokuDB version %d database.", version); - fprintf(stderr, " Previous improper shutdown detected.\n"); - } } else { FOOTPRINT(2); @@ -321,10 +314,17 @@ toku_maybe_upgrade_log(const char *env_dir, const char *log_dir, LSN * lsn_of_cl r = 0; //Logs are up to date else { FOOTPRINT(4); - LSN last_lsn; - TXNID last_xid; + LSN last_lsn = ZERO_LSN; + TXNID last_xid = TXNID_NONE; r = verify_clean_shutdown_of_log_version(log_dir, version_of_logs_on_disk, &last_lsn, &last_xid); if (r != 0) { + if (TOKU_LOG_VERSION_25 <= version_of_logs_on_disk && version_of_logs_on_disk <= TOKU_LOG_VERSION_27 + && TOKU_LOG_VERSION_27 == TOKU_LOG_VERSION) { + r = 0; // can do recovery on dirty shutdown + } else { + fprintf(stderr, "Cannot upgrade TokuFT version %d database.", version_of_logs_on_disk); + fprintf(stderr, " Previous improper shutdown detected.\n"); + } goto cleanup; } FOOTPRINT(5); diff --git a/storage/tokudb/ft-index/ft/logcursor.cc b/storage/tokudb/ft-index/ft/logger/logcursor.cc index 384582e000a..dec3c923bc3 100644 --- a/storage/tokudb/ft-index/ft/logcursor.cc +++ b/storage/tokudb/ft-index/ft/logger/logcursor.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -90,7 +90,7 @@ PATENT RIGHTS GRANT: #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
#include "log-internal.h" -#include "logcursor.h" +#include "logger/logcursor.h" #include <limits.h> #include <unistd.h> @@ -191,7 +191,7 @@ static int lc_check_lsn(TOKULOGCURSOR lc, int dir) { // int index = lc->cur_logfiles_index; // fprintf(stderr, "Bad LSN: %d %s direction = %d, lsn.lsn = %" PRIu64 ", cur_lsn.lsn=%" PRIu64 "\n", // index, lc->logfiles[index], dir, lsn.lsn, lc->cur_lsn.lsn); - if (tokudb_recovery_trace) + if (tokuft_recovery_trace) printf("DB_RUNRECOVERY: %s:%d r=%d\n", __FUNCTION__, __LINE__, 0); return LC_LSN_ERROR; } @@ -277,11 +277,7 @@ int toku_logcursor_destroy(TOKULOGCURSOR *lc) { (*lc)->entry_valid = false; } r = lc_close_cur_logfile(*lc); - int lf; - for(lf=0;lf<(*lc)->n_logfiles;lf++) { - if ( (*lc)->logfiles[lf] ) toku_free((*lc)->logfiles[lf]); - } - if ( (*lc)->logfiles ) toku_free((*lc)->logfiles); + toku_logger_free_logfiles((*lc)->logfiles, (*lc)->n_logfiles); if ( (*lc)->logdir ) toku_free((*lc)->logdir); if ( (*lc)->buffer ) toku_free((*lc)->buffer); toku_free(*lc); @@ -307,10 +303,10 @@ static int lc_log_read(TOKULOGCURSOR lc) toku_log_free_log_entry_resources(&(lc->entry)); time_t tnow = time(NULL); if (r==DB_BADFORMAT) { - fprintf(stderr, "%.24s Tokudb bad log format in %s\n", ctime(&tnow), lc->logfiles[lc->cur_logfiles_index]); + fprintf(stderr, "%.24s TokuFT bad log format in %s\n", ctime(&tnow), lc->logfiles[lc->cur_logfiles_index]); } else { - fprintf(stderr, "%.24s Tokudb unexpected log format error '%s' in %s\n", ctime(&tnow), strerror(r), lc->logfiles[lc->cur_logfiles_index]); + fprintf(stderr, "%.24s TokuFT unexpected log format error '%s' in %s\n", ctime(&tnow), strerror(r), lc->logfiles[lc->cur_logfiles_index]); } } return r; @@ -339,10 +335,10 @@ static int lc_log_read_backward(TOKULOGCURSOR lc) toku_log_free_log_entry_resources(&(lc->entry)); time_t tnow = time(NULL); if (r==DB_BADFORMAT) { - fprintf(stderr, "%.24s Tokudb bad log format in %s\n", ctime(&tnow), lc->logfiles[lc->cur_logfiles_index]); + fprintf(stderr, "%.24s TokuFT bad log format in %s\n", ctime(&tnow), lc->logfiles[lc->cur_logfiles_index]); } else { - fprintf(stderr, "%.24s Tokudb uUnexpected log format error '%s' in %s\n", ctime(&tnow), strerror(r), lc->logfiles[lc->cur_logfiles_index]); + fprintf(stderr, "%.24s TokuFT uUnexpected log format error '%s' in %s\n", ctime(&tnow), strerror(r), lc->logfiles[lc->cur_logfiles_index]); } } return r; @@ -460,10 +456,10 @@ int toku_logcursor_last(TOKULOGCURSOR lc, struct log_entry **le) { // probably a corrupted last log entry due to a crash // try scanning forward from the beginning to find the last good entry time_t tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery repairing log\n", ctime(&tnow)); + fprintf(stderr, "%.24s TokuFT recovery repairing log\n", ctime(&tnow)); r = lc_fix_bad_logfile(lc); if ( r != 0 ) { - fprintf(stderr, "%.24s Tokudb recovery repair unsuccessful\n", ctime(&tnow)); + fprintf(stderr, "%.24s TokuFT recovery repair unsuccessful\n", ctime(&tnow)); return DB_BADFORMAT; } // try reading again diff --git a/storage/tokudb/ft-index/ft/logcursor.h b/storage/tokudb/ft-index/ft/logger/logcursor.h index f374f6c2874..15774fb11d3 100644 --- a/storage/tokudb/ft-index/ft/logcursor.h +++ b/storage/tokudb/ft-index/ft/logger/logcursor.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKULOGCURSOR_H -#define TOKULOGCURSOR_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek 
Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,12 +87,13 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #include <ft/log_header.h> - struct toku_logcursor; typedef struct toku_logcursor *TOKULOGCURSOR; @@ -127,6 +126,3 @@ int toku_logcursor_last(const TOKULOGCURSOR lc, struct log_entry **le); int toku_logcursor_log_exists(const TOKULOGCURSOR lc); void toku_logcursor_print(TOKULOGCURSOR lc); - - -#endif // TOKULOGCURSOR_H diff --git a/storage/tokudb/ft-index/ft/logfilemgr.cc b/storage/tokudb/ft-index/ft/logger/logfilemgr.cc index 917760abc6c..04d091ae1bc 100644 --- a/storage/tokudb/ft-index/ft/logfilemgr.cc +++ b/storage/tokudb/ft-index/ft/logger/logfilemgr.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,9 +89,9 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "log-internal.h" -#include "logcursor.h" -#include "logfilemgr.h" +#include "logger/log-internal.h" +#include "logger/logcursor.h" +#include "logger/logfilemgr.h" // for now, implement with singlely-linked-list // first = oldest (delete from beginning) @@ -186,10 +186,7 @@ int toku_logfilemgr_init(TOKULOGFILEMGR lfm, const char *log_dir, TXNID *last_xi toku_logfilemgr_add_logfile_info(lfm, lf_info); toku_logcursor_destroy(&cursor); } - for(int i=0;i<n_logfiles;i++) { - toku_free(logfiles[i]); - } - toku_free(logfiles); + toku_logger_free_logfiles(logfiles, n_logfiles); *last_xid_if_clean_shutdown = last_xid; return 0; } diff --git a/storage/tokudb/ft-index/ft/logfilemgr.h b/storage/tokudb/ft-index/ft/logger/logfilemgr.h index de9322604bc..70e59575127 100644 --- a/storage/tokudb/ft-index/ft/logfilemgr.h +++ b/storage/tokudb/ft-index/ft/logger/logfilemgr.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKULOGFILEMGR_H -#define TOKULOGFILEMGR_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,12 +87,13 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
#include <ft/log_header.h> - // this is the basic information we need to keep per logfile struct toku_logfile_info { int64_t index; @@ -118,6 +117,3 @@ LSN toku_logfilemgr_get_last_lsn(TOKULOGFILEMGR lfm); void toku_logfilemgr_update_last_lsn(TOKULOGFILEMGR lfm, LSN lsn); void toku_logfilemgr_print(TOKULOGFILEMGR lfm); - - -#endif //TOKULOGFILEMGR_H diff --git a/storage/tokudb/ft-index/ft/logformat.cc b/storage/tokudb/ft-index/ft/logger/logformat.cc index 4d32d9f6eac..698b612c078 100644 --- a/storage/tokudb/ft-index/ft/logformat.cc +++ b/storage/tokudb/ft-index/ft/logger/logformat.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -195,7 +195,7 @@ const struct logtype logtypes[] = { {"BYTESTRING", "iname", 0}, // pathname of file {"uint8_t", "unlink_on_close", 0}, NULLFIELD}, IGNORE_LOG_BEGIN}, - //We do not use a TXNINFO struct since recovery log has + //We do not use a txninfo struct since recovery log has //FILENUMS and TOKUTXN has FTs (for open_fts) {"xstillopen", 's', FA{{"TXNID_PAIR", "xid", 0}, {"TXNID_PAIR", "parentxid", 0}, @@ -798,7 +798,7 @@ generate_rollbacks (void) { fprintf(cf, " }\n assert(0);\n return 0;\n"); fprintf(cf, "}\n"); - fprintf2(cf, hf, "int toku_parse_rollback(unsigned char *buf, uint32_t n_bytes, struct roll_entry **itemp, MEMARENA ma)"); + fprintf2(cf, hf, "int toku_parse_rollback(unsigned char *buf, uint32_t n_bytes, struct roll_entry **itemp, memarena *ma)"); fprintf(hf, ";\n"); fprintf(cf, " {\n assert(n_bytes>0);\n struct roll_entry *item;\n enum rt_cmd cmd = (enum rt_cmd)(buf[0]);\n size_t mem_needed;\n"); fprintf(cf, " struct rbuf rc = {buf, n_bytes, 1};\n"); @@ -806,7 +806,7 @@ generate_rollbacks (void) { DO_ROLLBACKS(lt, { fprintf(cf, " case RT_%s:\n", lt->name); fprintf(cf, " mem_needed = sizeof(item->u.%s) + __builtin_offsetof(struct roll_entry, u.%s);\n", lt->name, lt->name); - fprintf(cf, " CAST_FROM_VOIDP(item, toku_memarena_malloc(ma, mem_needed));\n"); + fprintf(cf, " CAST_FROM_VOIDP(item, ma->malloc_from_arena(mem_needed));\n"); fprintf(cf, " item->cmd = cmd;\n"); DO_FIELDS(field_type, lt, fprintf(cf, " rbuf_ma_%s(&rc, ma, &item->u.%s.%s);\n", field_type->type, lt->name, field_type->name)); fprintf(cf, " *itemp = item;\n"); @@ -849,15 +849,14 @@ int main (int argc, const char *const argv[]) { pf = fopen(printpath, "w"); assert(pf!=0); fprintf2(cf, hf, "/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */\n"); fprintf2(cf, hf, "// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:\n"); - fprintf(hf, "#ifndef LOG_HEADER_H\n"); - fprintf(hf, "#define LOG_HEADER_H\n"); + fprintf(hf, "#pragma once\n"); fprintf2(cf, hf, "/* Do not edit this file. This code generated by logformat.c. Copyright (c) 2007-2013 Tokutek Inc. */\n"); fprintf2(cf, hf, "#ident \"Copyright (c) 2007-2013 Tokutek Inc. 
All rights reserved.\"\n"); fprintf2(cf, pf, "#include <stdint.h>\n"); fprintf2(cf, pf, "#include <sys/time.h>\n"); - fprintf2(cf, pf, "#include <ft/fttypes.h>\n"); - fprintf2(cf, pf, "#include <ft/log-internal.h>\n"); + fprintf2(cf, pf, "#include <ft/logger/log-internal.h>\n"); fprintf(hf, "#include <ft/ft-internal.h>\n"); + fprintf(hf, "#include <util/bytestring.h>\n"); fprintf(hf, "#include <util/memarena.h>\n"); generate_enum(); generate_log_struct(); @@ -867,7 +866,6 @@ int main (int argc, const char *const argv[]) { generate_rollbacks(); generate_log_entry_functions(); generate_logprint(); - fprintf(hf, "#endif\n"); { int r=fclose(hf); assert(r==0); r=fclose(cf); assert(r==0); diff --git a/storage/tokudb/ft-index/ft/logger.cc b/storage/tokudb/ft-index/ft/logger/logger.cc index e4fd854c637..2296a2b43f8 100644 --- a/storage/tokudb/ft-index/ft/logger.cc +++ b/storage/tokudb/ft-index/ft/logger/logger.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,12 +94,13 @@ PATENT RIGHTS GRANT: #include <limits.h> #include <unistd.h> -#include "ft.h" -#include "log-internal.h" -#include "txn_manager.h" -#include "rollback_log_node_cache.h" +#include "ft/serialize/block_table.h" +#include "ft/ft.h" +#include "ft/logger/log-internal.h" +#include "ft/txn/txn_manager.h" +#include "ft/txn/rollback_log_node_cache.h" -#include <util/status.h> +#include "util/status.h" static const int log_format_version=TOKU_LOG_VERSION; @@ -170,7 +171,6 @@ int toku_logger_create (TOKULOGGER *resultp) { result->write_log_files = true; result->trim_log_files = true; result->directory=0; - result->remove_finalize_callback = NULL; // fd is uninitialized on purpose // ct is uninitialized on purpose result->lg_max = 100<<20; // 100MB default @@ -269,32 +269,30 @@ bool toku_logger_rollback_is_open (TOKULOGGER logger) { #define MAX_CACHED_ROLLBACK_NODES 4096 -void -toku_logger_initialize_rollback_cache(TOKULOGGER logger, FT ft) { - toku_free_unused_blocknums(ft->blocktable, ft->h->root_blocknum); +void toku_logger_initialize_rollback_cache(TOKULOGGER logger, FT ft) { + ft->blocktable.free_unused_blocknums(ft->h->root_blocknum); logger->rollback_cache.init(MAX_CACHED_ROLLBACK_NODES); } -int -toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, bool create) { +int toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, bool create) { assert(logger->is_open); assert(!logger->rollback_cachefile); - FT_HANDLE t = NULL; // Note, there is no DB associated with this FT. - toku_ft_handle_create(&t); - int r = toku_ft_handle_open(t, toku_product_name_strings.rollback_cachefile, create, create, cachetable, NULL_TXN); + FT_HANDLE ft_handle = nullptr; // Note, there is no DB associated with this FT. + toku_ft_handle_create(&ft_handle); + int r = toku_ft_handle_open(ft_handle, toku_product_name_strings.rollback_cachefile, create, create, cachetable, nullptr); if (r == 0) { - logger->rollback_cachefile = t->ft->cf; - toku_logger_initialize_rollback_cache(logger, t->ft); - - //Verify it is empty - //Must have no data blocks (rollback logs or otherwise). 
- toku_block_verify_no_data_blocks_except_root(t->ft->blocktable, t->ft->h->root_blocknum); - bool is_empty; - is_empty = toku_ft_is_empty_fast(t); + FT ft = ft_handle->ft; + logger->rollback_cachefile = ft->cf; + toku_logger_initialize_rollback_cache(logger, ft_handle->ft); + + // Verify it is empty + // Must have no data blocks (rollback logs or otherwise). + ft->blocktable.verify_no_data_blocks_except_root(ft->h->root_blocknum); + bool is_empty = toku_ft_is_empty_fast(ft_handle); assert(is_empty); } else { - toku_ft_handle_close(t); + toku_ft_handle_close(ft_handle); } return r; } @@ -304,26 +302,30 @@ toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, bool create) // so it will always be clean (!h->dirty) when about to be closed. // Rollback log can only be closed when there are no open transactions, // so it will always be empty (no data blocks) when about to be closed. -void toku_logger_close_rollback(TOKULOGGER logger) { +void toku_logger_close_rollback_check_empty(TOKULOGGER logger, bool clean_shutdown) { CACHEFILE cf = logger->rollback_cachefile; // stored in logger at rollback cachefile open if (cf) { FT_HANDLE ft_to_close; { //Find "ft_to_close" logger->rollback_cache.destroy(); FT CAST_FROM_VOIDP(ft, toku_cachefile_get_userdata(cf)); - //Verify it is safe to close it. - assert(!ft->h->dirty); //Must not be dirty. - toku_free_unused_blocknums(ft->blocktable, ft->h->root_blocknum); - //Must have no data blocks (rollback logs or otherwise). - toku_block_verify_no_data_blocks_except_root(ft->blocktable, ft->h->root_blocknum); - assert(!ft->h->dirty); + if (clean_shutdown) { + //Verify it is safe to close it. + assert(!ft->h->dirty); //Must not be dirty. + ft->blocktable.free_unused_blocknums(ft->h->root_blocknum); + // Must have no data blocks (rollback logs or otherwise). + ft->blocktable.verify_no_data_blocks_except_root(ft->h->root_blocknum); + assert(!ft->h->dirty); + } else { + ft->h->dirty = 0; + } ft_to_close = toku_ft_get_only_existing_ft_handle(ft); - { + if (clean_shutdown) { bool is_empty; is_empty = toku_ft_is_empty_fast(ft_to_close); assert(is_empty); + assert(!ft->h->dirty); // it should not have been dirtied by the toku_ft_is_empty test. } - assert(!ft->h->dirty); // it should not have been dirtied by the toku_ft_is_empty test. } toku_ft_handle_close(ft_to_close); @@ -332,6 +334,10 @@ void toku_logger_close_rollback(TOKULOGGER logger) { } } +void toku_logger_close_rollback(TOKULOGGER logger) { + toku_logger_close_rollback_check_empty(logger, true); +} + // No locks held on entry // No locks held on exit. // No locks are needed, since you cannot legally close the log concurrently with doing anything else. @@ -415,7 +421,7 @@ wait_till_output_available (TOKULOGGER logger) // Implementation hint: Use a pthread_cond_wait. 
// Entry: Holds the output_condition_lock (but not the inlock) // Exit: Holds the output_condition_lock and logger->output_is_available -// +// { tokutime_t t0 = toku_time_now(); while (!logger->output_is_available) { @@ -484,7 +490,7 @@ release_output (TOKULOGGER logger, LSN fsynced_lsn) toku_cond_broadcast(&logger->output_condition); toku_mutex_unlock(&logger->output_condition_lock); } - + static void swap_inbuf_outbuf (TOKULOGGER logger) // Effect: Swap the inbuf and outbuf @@ -621,7 +627,7 @@ int toku_logger_find_next_unused_log_file(const char *directory, long long *resu if (d==0) return get_error_errno(); while ((de=readdir(d))) { if (de==0) return get_error_errno(); - long long thisl; + long long thisl = -1; if ( is_a_logfile(de->d_name, &thisl) ) { if ((long long)thisl > maxf) maxf = thisl; } @@ -687,7 +693,7 @@ int toku_logger_find_logfiles (const char *directory, char ***resultp, int *n_lo while ((de=readdir(d))) { uint64_t thisl; uint32_t version_ignore; - if ( !(is_a_logfile_any_version(de->d_name, &thisl, &version_ignore)) ) continue; //#2424: Skip over files that don't match the exact logfile template + if ( !(is_a_logfile_any_version(de->d_name, &thisl, &version_ignore)) ) continue; //#2424: Skip over files that don't match the exact logfile template if (n_results+1>=result_limit) { result_limit*=2; XREALLOC_N(result_limit, result); @@ -701,7 +707,7 @@ int toku_logger_find_logfiles (const char *directory, char ***resultp, int *n_lo // which are one character longer than old log file names ("xxx.tokulog2"). The comparison function // won't look beyond the terminating NUL, so an extra character in the comparison string doesn't matter. // Allow room for terminating NUL after "xxx.tokulog13" even if result[0] is of form "xxx.tokulog2." - int width = sizeof(result[0]+2); + int width = sizeof(result[0]+2); qsort(result, n_results, width, logfilenamecompare); *resultp = result; *n_logfiles = n_results; @@ -709,6 +715,12 @@ int toku_logger_find_logfiles (const char *directory, char ***resultp, int *n_lo return d ? closedir(d) : 0; } +void toku_logger_free_logfiles(char **logfiles, int n_logfiles) { + for (int i = 0; i < n_logfiles; i++) + toku_free(logfiles[i]); + toku_free(logfiles); +} + static int open_logfile (TOKULOGGER logger) // Entry and Exit: This thread has permission to modify the output. 
{ @@ -717,7 +729,7 @@ static int open_logfile (TOKULOGGER logger) snprintf(fname, fnamelen, "%s/log%012lld.tokulog%d", logger->directory, logger->next_log_file_number, TOKU_LOG_VERSION); long long index = logger->next_log_file_number; if (logger->write_log_files) { - logger->fd = open(fname, O_CREAT+O_WRONLY+O_TRUNC+O_EXCL+O_BINARY, S_IRWXU); + logger->fd = open(fname, O_CREAT+O_WRONLY+O_TRUNC+O_EXCL+O_BINARY, S_IRUSR+S_IWUSR); if (logger->fd==-1) { return get_error_errno(); } @@ -735,7 +747,7 @@ static int open_logfile (TOKULOGGER logger) if ( logger->write_log_files ) { TOKULOGFILEINFO XMALLOC(lf_info); lf_info->index = index; - lf_info->maxlsn = logger->written_lsn; + lf_info->maxlsn = logger->written_lsn; lf_info->version = TOKU_LOG_VERSION; toku_logfilemgr_add_logfile_info(logger->logfilemgr, lf_info); } @@ -764,7 +776,7 @@ void toku_logger_maybe_trim_log(TOKULOGGER logger, LSN trim_lsn) int n_logfiles = toku_logfilemgr_num_logfiles(lfm); TOKULOGFILEINFO lf_info = NULL; - + if ( logger->write_log_files && logger->trim_log_files) { while ( n_logfiles > 1 ) { // don't delete current logfile uint32_t log_version; @@ -844,7 +856,7 @@ void toku_logger_maybe_fsync(TOKULOGGER logger, LSN lsn, int do_fsync, bool hold } static void -logger_write_buffer(TOKULOGGER logger, LSN *fsynced_lsn) +logger_write_buffer(TOKULOGGER logger, LSN *fsynced_lsn) // Entry: Holds the input lock and permission to modify output. // Exit: Holds only the permission to modify output. // Effect: Write the buffers to the output. If DO_FSYNC is true, then fsync. @@ -872,7 +884,7 @@ int toku_logger_restart(TOKULOGGER logger, LSN lastlsn) // close the log file if ( logger->write_log_files) { // fsyncs don't work to /dev/null - toku_file_fsync_without_accounting(logger->fd); + toku_file_fsync_without_accounting(logger->fd); } r = close(logger->fd); assert(r == 0); logger->fd = -1; @@ -895,7 +907,7 @@ void toku_logger_log_fcreate (TOKUTXN txn, const char *fname, FILENUM filenum, u if (txn) { BYTESTRING bs_fname = { .len = (uint32_t) strlen(fname), .data = (char *) fname }; // fsync log on fcreate - toku_log_fcreate (txn->logger, (LSN*)0, 1, txn, toku_txn_get_txnid(txn), filenum, + toku_log_fcreate (txn->logger, (LSN*)0, 1, txn, toku_txn_get_txnid(txn), filenum, bs_fname, mode, treeflags, nodesize, basementnodesize, compression_method); } } @@ -1102,7 +1114,7 @@ int toku_logprint_XIDP (FILE *outf, FILE *inf, const char *fieldname, struct x17 XIDP vp; int r = toku_fread_XIDP(inf, &vp, checksum, len); if (r!=0) return r; - fprintf(outf, "%s={formatID=0x%lx gtrid_length=%ld bqual_length=%ld data=", fieldname, vp->formatID, vp->gtrid_length, vp->bqual_length); + fprintf(outf, " %s={formatID=0x%lx gtrid_length=%ld bqual_length=%ld data=", fieldname, vp->formatID, vp->gtrid_length, vp->bqual_length); toku_print_bytes(outf, vp->gtrid_length + vp->bqual_length, vp->data); fprintf(outf, "}"); toku_free(vp); @@ -1333,7 +1345,7 @@ int toku_logger_log_archive (TOKULOGGER logger, char ***logs_p, int flags) { for (i=all_n_logs-2; i>=0; i--) { // start at all_n_logs-2 because we never archive the most recent log r = peek_at_log(logger, all_logs[i], &earliest_lsn_in_logfile); if (r!=0) continue; // In case of error, just keep going - + if (earliest_lsn_in_logfile.lsn <= save_lsn.lsn) { break; } @@ -1385,18 +1397,18 @@ void toku_logger_note_checkpoint(TOKULOGGER logger, LSN lsn) { static LOGGER_STATUS_S logger_status; -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(logger_status, k, c, t, "logger: " l, inc) +#define 
STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(logger_status, k, c, t, "logger: " l, inc) static void status_init(void) { // Note, this function initializes the keyname, type, and legend fields. // Value fields are initialized to zero by compiler. STATUS_INIT(LOGGER_NEXT_LSN, nullptr, UINT64, "next LSN", TOKU_ENGINE_STATUS); - STATUS_INIT(LOGGER_NUM_WRITES, LOGGER_WRITES, UINT64, "writes", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); - STATUS_INIT(LOGGER_BYTES_WRITTEN, LOGGER_WRITES_BYTES, UINT64, "writes (bytes)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); - STATUS_INIT(LOGGER_UNCOMPRESSED_BYTES_WRITTEN, LOGGER_WRITES_UNCOMPRESSED_BYTES, UINT64, "writes (uncompressed bytes)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); - STATUS_INIT(LOGGER_TOKUTIME_WRITES, LOGGER_WRITES_SECONDS, TOKUTIME, "writes (seconds)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); - STATUS_INIT(LOGGER_WAIT_BUF_LONG, LOGGER_WAIT_LONG, UINT64, "count", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(LOGGER_NUM_WRITES, LOGGER_WRITES, UINT64, "writes", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(LOGGER_BYTES_WRITTEN, LOGGER_WRITES_BYTES, UINT64, "writes (bytes)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(LOGGER_UNCOMPRESSED_BYTES_WRITTEN, LOGGER_WRITES_UNCOMPRESSED_BYTES, UINT64, "writes (uncompressed bytes)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(LOGGER_TOKUTIME_WRITES, LOGGER_WRITES_SECONDS, TOKUTIME, "writes (seconds)", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); + STATUS_INIT(LOGGER_WAIT_BUF_LONG, LOGGER_WAIT_LONG, UINT64, "number of long logger write operations", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); logger_status.initialized = true; } #undef STATUS_INIT @@ -1422,7 +1434,7 @@ toku_logger_get_status(TOKULOGGER logger, LOGGER_STATUS statp) { ////////////////////////////////////////////////////////////////////////////////////////////////////// -// Used for upgrade: +// Used for upgrade: // if any valid log files exist in log_dir, then // set *found_any_logs to true and set *version_found to version number of latest log int diff --git a/storage/tokudb/ft-index/ft/logger.h b/storage/tokudb/ft-index/ft/logger/logger.h index 6488ec0707d..83e6c9a7378 100644 --- a/storage/tokudb/ft-index/ft/logger.h +++ b/storage/tokudb/ft-index/ft/logger/logger.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_LOGGER_H -#define TOKU_LOGGER_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,17 +87,26 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "fttypes.h" -#include "ft_layout_version.h" +#include "ft/serialize/block_table.h" +#include "ft/serialize/ft_layout_version.h" +#include "ft/txn/txn.h" + +typedef struct tokulogger *TOKULOGGER; enum { TOKU_LOG_VERSION_1 = 1, TOKU_LOG_VERSION_2 = 2, //After 2 we linked the log version to the FT_LAYOUT VERSION. 
//So it went from 2 to 13 (3-12 do not exist) + TOKU_LOG_VERSION_24 = 24, + TOKU_LOG_VERSION_25 = 25, // change rollinclude rollback log entry + TOKU_LOG_VERSION_26 = 26, // no change from 25 + TOKU_LOG_VERSION_27 = 27, // no change from 26 TOKU_LOG_VERSION = FT_LAYOUT_VERSION, TOKU_LOG_MIN_SUPPORTED_VERSION = FT_LAYOUT_MIN_SUPPORTED_VERSION, }; @@ -109,15 +116,16 @@ int toku_logger_open (const char *directory, TOKULOGGER logger); int toku_logger_open_with_last_xid(const char *directory, TOKULOGGER logger, TXNID last_xid); void toku_logger_shutdown(TOKULOGGER logger); int toku_logger_close(TOKULOGGER *loggerp); -void toku_logger_initialize_rollback_cache(TOKULOGGER logger, FT ft); -int toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, bool create); +void toku_logger_initialize_rollback_cache(TOKULOGGER logger, struct ft *ft); +int toku_logger_open_rollback(TOKULOGGER logger, struct cachetable *ct, bool create); void toku_logger_close_rollback(TOKULOGGER logger); +void toku_logger_close_rollback_check_empty(TOKULOGGER logger, bool clean_shutdown); bool toku_logger_rollback_is_open (TOKULOGGER); // return true iff the rollback is open. void toku_logger_fsync (TOKULOGGER logger); void toku_logger_fsync_if_lsn_not_fsynced(TOKULOGGER logger, LSN lsn); int toku_logger_is_open(TOKULOGGER logger); -void toku_logger_set_cachetable (TOKULOGGER logger, CACHETABLE ct); +void toku_logger_set_cachetable (TOKULOGGER logger, struct cachetable *ct); int toku_logger_set_lg_max(TOKULOGGER logger, uint32_t lg_max); int toku_logger_get_lg_max(TOKULOGGER logger, uint32_t *lg_maxp); int toku_logger_set_lg_bsize(TOKULOGGER logger, uint32_t bsize); @@ -138,10 +146,24 @@ int toku_logger_restart(TOKULOGGER logger, LSN lastlsn); // given LSN and delete them. void toku_logger_maybe_trim_log(TOKULOGGER logger, LSN oldest_open_lsn); +// At the ft layer, a FILENUM uniquely identifies an open file. 
+struct FILENUM { + uint32_t fileid; +}; +static const FILENUM FILENUM_NONE = { .fileid = UINT32_MAX }; + +struct FILENUMS { + uint32_t num; + FILENUM *filenums; +}; + void toku_logger_log_fcreate(TOKUTXN txn, const char *fname, FILENUM filenum, uint32_t mode, uint32_t flags, uint32_t nodesize, uint32_t basementnodesize, enum toku_compression_method compression_method); void toku_logger_log_fdelete(TOKUTXN txn, FILENUM filenum); void toku_logger_log_fopen(TOKUTXN txn, const char * fname, FILENUM filenum, uint32_t treeflags); +// the log generation code requires a typedef if we want to pass by pointer +typedef TOKU_XA_XID *XIDP; + int toku_fread_uint8_t (FILE *f, uint8_t *v, struct x1764 *mm, uint32_t *len); int toku_fread_uint32_t_nocrclen (FILE *f, uint32_t *v); int toku_fread_uint32_t (FILE *f, uint32_t *v, struct x1764 *checksum, uint32_t *len); @@ -257,8 +279,63 @@ void toku_logger_get_status(TOKULOGGER logger, LOGGER_STATUS s); int toku_get_version_of_logs_on_disk(const char *log_dir, bool *found_any_logs, uint32_t *version_found); -TXN_MANAGER toku_logger_get_txn_manager(TOKULOGGER logger); - -static const TOKULOGGER NULL_logger __attribute__((__unused__)) = NULL; - -#endif /* TOKU_LOGGER_H */ +struct txn_manager *toku_logger_get_txn_manager(TOKULOGGER logger); + +// For serialize / deserialize + +#include "ft/serialize/wbuf.h" + +static inline void wbuf_nocrc_FILENUM(struct wbuf *wb, FILENUM fileid) { + wbuf_nocrc_uint(wb, fileid.fileid); +} + +static inline void wbuf_FILENUM(struct wbuf *wb, FILENUM fileid) { + wbuf_uint(wb, fileid.fileid); +} + +static inline void wbuf_nocrc_FILENUMS(struct wbuf *wb, FILENUMS v) { + wbuf_nocrc_uint(wb, v.num); + for (uint32_t i = 0; i < v.num; i++) { + wbuf_nocrc_FILENUM(wb, v.filenums[i]); + } +} + +static inline void wbuf_FILENUMS(struct wbuf *wb, FILENUMS v) { + wbuf_uint(wb, v.num); + for (uint32_t i = 0; i < v.num; i++) { + wbuf_FILENUM(wb, v.filenums[i]); + } +} + +static inline void wbuf_nocrc_XIDP (struct wbuf *w, TOKU_XA_XID *xid) { + wbuf_nocrc_uint32_t(w, xid->formatID); + wbuf_nocrc_uint8_t(w, xid->gtrid_length); + wbuf_nocrc_uint8_t(w, xid->bqual_length); + wbuf_nocrc_literal_bytes(w, xid->data, xid->gtrid_length+xid->bqual_length); +} + +#include "ft/serialize/rbuf.h" + +static inline void rbuf_FILENUM(struct rbuf *rb, FILENUM *filenum) { + filenum->fileid = rbuf_int(rb); +} +static inline void rbuf_ma_FILENUM(struct rbuf *rb, memarena *UU(ma), FILENUM *filenum) { + rbuf_FILENUM(rb, filenum); +} + +static inline void rbuf_FILENUMS(struct rbuf *rb, FILENUMS *filenums) { + filenums->num = rbuf_int(rb); + XMALLOC_N(filenums->num, filenums->filenums); + for (uint32_t i = 0; i < filenums->num; i++) { + rbuf_FILENUM(rb, &(filenums->filenums[i])); + } +} + +static inline void rbuf_ma_FILENUMS(struct rbuf *rb, memarena *ma, FILENUMS *filenums) { + rbuf_ma_uint32_t(rb, ma, &(filenums->num)); + filenums->filenums = (FILENUM *) ma->malloc_from_arena(filenums->num * sizeof(FILENUM)); + assert(filenums->filenums != NULL); + for (uint32_t i = 0; i < filenums->num; i++) { + rbuf_ma_FILENUM(rb, ma, &(filenums->filenums[i])); + } +} diff --git a/storage/tokudb/ft-index/ft/recover.cc b/storage/tokudb/ft-index/ft/logger/recover.cc index 2aac09855cd..ca284568f07 100644 --- a/storage/tokudb/ft-index/ft/recover.cc +++ b/storage/tokudb/ft-index/ft/logger/recover.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. 
Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,18 +89,17 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <ft/log_header.h> -#include "ft.h" -#include "log-internal.h" -#include "logcursor.h" -#include "cachetable.h" -#include "checkpoint.h" -#include "txn_manager.h" +#include "ft/cachetable/cachetable.h" +#include "ft/cachetable/checkpoint.h" +#include "ft/ft.h" +#include "ft/log_header.h" +#include "ft/logger/log-internal.h" +#include "ft/logger/logcursor.h" +#include "ft/txn/txn_manager.h" +#include "util/omt.h" -#include <util/omt.h> - -int tokudb_recovery_trace = 0; // turn on recovery tracing, default off. +int tokuft_recovery_trace = 0; // turn on recovery tracing, default off. //#define DO_VERIFY_COUNTS #ifdef DO_VERIFY_COUNTS @@ -318,7 +317,7 @@ static int recover_env_init (RECOVER_ENV renv, renv->cp = toku_cachetable_get_checkpointer(renv->ct); toku_dbt_array_init(&renv->dest_keys, 1); toku_dbt_array_init(&renv->dest_vals, 1); - if (tokudb_recovery_trace) + if (tokuft_recovery_trace) fprintf(stderr, "%s:%d\n", __FUNCTION__, __LINE__); return r; } @@ -345,7 +344,7 @@ static void recover_env_cleanup (RECOVER_ENV renv) { toku_dbt_array_destroy(&renv->dest_keys); toku_dbt_array_destroy(&renv->dest_vals); - if (tokudb_recovery_trace) + if (tokuft_recovery_trace) fprintf(stderr, "%s:%d\n", __FUNCTION__, __LINE__); } @@ -424,7 +423,7 @@ static int toku_recover_begin_checkpoint (struct logtype_begin_checkpoint *l, RE r = 0; // ignore it (log only has a begin checkpoint) break; default: - fprintf(stderr, "Tokudb recovery %s: %d Unknown checkpoint state %d\n", __FILE__, __LINE__, (int)renv->ss.ss); + fprintf(stderr, "TokuFT recovery %s: %d Unknown checkpoint state %d\n", __FILE__, __LINE__, (int)renv->ss.ss); abort(); break; } @@ -434,7 +433,7 @@ static int toku_recover_begin_checkpoint (struct logtype_begin_checkpoint *l, RE static int toku_recover_backward_begin_checkpoint (struct logtype_begin_checkpoint *l, RECOVER_ENV renv) { int r; time_t tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery bw_begin_checkpoint at %" PRIu64 " timestamp %" PRIu64 " (%s)\n", ctime(&tnow), l->lsn.lsn, l->timestamp, recover_state(renv)); + fprintf(stderr, "%.24s TokuFT recovery bw_begin_checkpoint at %" PRIu64 " timestamp %" PRIu64 " (%s)\n", ctime(&tnow), l->lsn.lsn, l->timestamp, recover_state(renv)); switch (renv->ss.ss) { case BACKWARD_NEWER_CHECKPOINT_END: // incomplete checkpoint, nothing to do @@ -446,13 +445,13 @@ static int toku_recover_backward_begin_checkpoint (struct logtype_begin_checkpoi renv->ss.checkpoint_begin_timestamp = l->timestamp; renv->goforward = true; tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery turning around at begin checkpoint %" PRIu64 " time %" PRIu64 "\n", + fprintf(stderr, "%.24s TokuFT recovery turning around at begin checkpoint %" PRIu64 " time %" PRIu64 "\n", ctime(&tnow), l->lsn.lsn, renv->ss.checkpoint_end_timestamp - renv->ss.checkpoint_begin_timestamp); r = 0; break; default: - fprintf(stderr, "Tokudb recovery %s: %d Unknown checkpoint state %d\n", __FILE__, __LINE__, (int)renv->ss.ss); + fprintf(stderr, "TokuFT recovery %s: %d Unknown checkpoint state %d\n", __FILE__, 
__LINE__, (int)renv->ss.ss); abort(); break; } @@ -482,7 +481,7 @@ static int toku_recover_end_checkpoint (struct logtype_end_checkpoint *l, RECOVE static int toku_recover_backward_end_checkpoint (struct logtype_end_checkpoint *l, RECOVER_ENV renv) { time_t tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery bw_end_checkpoint at %" PRIu64 " timestamp %" PRIu64 " xid %" PRIu64 " (%s)\n", ctime(&tnow), l->lsn.lsn, l->timestamp, l->lsn_begin_checkpoint.lsn, recover_state(renv)); + fprintf(stderr, "%.24s TokuFT recovery bw_end_checkpoint at %" PRIu64 " timestamp %" PRIu64 " xid %" PRIu64 " (%s)\n", ctime(&tnow), l->lsn.lsn, l->timestamp, l->lsn_begin_checkpoint.lsn, recover_state(renv)); switch (renv->ss.ss) { case BACKWARD_NEWER_CHECKPOINT_END: renv->ss.ss = BACKWARD_BETWEEN_CHECKPOINT_BEGIN_END; @@ -491,12 +490,12 @@ static int toku_recover_backward_end_checkpoint (struct logtype_end_checkpoint * renv->ss.checkpoint_end_timestamp = l->timestamp; return 0; case BACKWARD_BETWEEN_CHECKPOINT_BEGIN_END: - fprintf(stderr, "Tokudb recovery %s:%d Should not see two end_checkpoint log entries without an intervening begin_checkpoint\n", __FILE__, __LINE__); + fprintf(stderr, "TokuFT recovery %s:%d Should not see two end_checkpoint log entries without an intervening begin_checkpoint\n", __FILE__, __LINE__); abort(); default: break; } - fprintf(stderr, "Tokudb recovery %s: %d Unknown checkpoint state %d\n", __FILE__, __LINE__, (int)renv->ss.ss); + fprintf(stderr, "TokuFT recovery %s: %d Unknown checkpoint state %d\n", __FILE__, __LINE__, (int)renv->ss.ss); abort(); } @@ -833,7 +832,7 @@ static int toku_recover_fcreate (struct logtype_fcreate *l, RECOVER_ENV renv) { if (r != 0) { int er = get_error_errno(); if (er != ENOENT) { - fprintf(stderr, "Tokudb recovery %s:%d unlink %s %d\n", __FUNCTION__, __LINE__, iname, er); + fprintf(stderr, "TokuFT recovery %s:%d unlink %s %d\n", __FUNCTION__, __LINE__, iname, er); toku_free(iname); return r; } @@ -1260,7 +1259,7 @@ static int toku_recover_backward_hot_index(struct logtype_hot_index *UU(l), RECO // Effects: If there are no log files, or if there is a clean "shutdown" at // the end of the log, then we don't need recovery to run. // Returns: true if we need recovery, otherwise false. 
-int tokudb_needs_recovery(const char *log_dir, bool ignore_log_empty) { +int tokuft_needs_recovery(const char *log_dir, bool ignore_log_empty) { int needs_recovery; int r; TOKULOGCURSOR logcursor = NULL; @@ -1384,7 +1383,7 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di struct log_entry *le = NULL; time_t tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery starting in env %s\n", ctime(&tnow), env_dir); + fprintf(stderr, "%.24s TokuFT recovery starting in env %s\n", ctime(&tnow), env_dir); char org_wd[1000]; { @@ -1405,7 +1404,7 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di r = toku_logcursor_last(logcursor, &le); if (r != 0) { - if (tokudb_recovery_trace) + if (tokuft_recovery_trace) fprintf(stderr, "RUNRECOVERY: %s:%d r=%d\n", __FUNCTION__, __LINE__, r); rr = DB_RUNRECOVERY; goto errorexit; } @@ -1420,10 +1419,10 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di toku_struct_stat buf; if (toku_stat(env_dir, &buf)!=0) { rr = get_error_errno(); - fprintf(stderr, "%.24s Tokudb recovery error: directory does not exist: %s\n", ctime(&tnow), env_dir); + fprintf(stderr, "%.24s TokuFT recovery error: directory does not exist: %s\n", ctime(&tnow), env_dir); goto errorexit; } else if (!S_ISDIR(buf.st_mode)) { - fprintf(stderr, "%.24s Tokudb recovery error: this file is supposed to be a directory, but is not: %s\n", ctime(&tnow), env_dir); + fprintf(stderr, "%.24s TokuFT recovery error: this file is supposed to be a directory, but is not: %s\n", ctime(&tnow), env_dir); rr = ENOTDIR; goto errorexit; } } @@ -1432,13 +1431,13 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di tnow = time(NULL); time_t tlast; tlast = tnow; - fprintf(stderr, "%.24s Tokudb recovery scanning backward from %" PRIu64 "\n", ctime(&tnow), lastlsn.lsn); + fprintf(stderr, "%.24s TokuFT recovery scanning backward from %" PRIu64 "\n", ctime(&tnow), lastlsn.lsn); for (unsigned i=0; 1; i++) { // get the previous log entry (first time gets the last one) le = NULL; r = toku_logcursor_prev(logcursor, &le); - if (tokudb_recovery_trace) + if (tokuft_recovery_trace) recover_trace_le(__FUNCTION__, __LINE__, r, le); if (r != 0) { if (r == DB_NOTFOUND) @@ -1452,7 +1451,7 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di tnow = time(NULL); if (tnow - tlast >= TOKUDB_RECOVERY_PROGRESS_TIME) { thislsn = toku_log_entry_get_lsn(le); - fprintf(stderr, "%.24s Tokudb recovery scanning backward from %" PRIu64 " at %" PRIu64 " (%s)\n", ctime(&tnow), lastlsn.lsn, thislsn.lsn, recover_state(renv)); + fprintf(stderr, "%.24s TokuFT recovery scanning backward from %" PRIu64 " at %" PRIu64 " (%s)\n", ctime(&tnow), lastlsn.lsn, thislsn.lsn, recover_state(renv)); tlast = tnow; } } @@ -1461,10 +1460,10 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di assert(renv->ss.ss == BACKWARD_BETWEEN_CHECKPOINT_BEGIN_END || renv->ss.ss == BACKWARD_NEWER_CHECKPOINT_END); logtype_dispatch_assign(le, toku_recover_backward_, r, renv); - if (tokudb_recovery_trace) + if (tokuft_recovery_trace) recover_trace_le(__FUNCTION__, __LINE__, r, le); if (r != 0) { - if (tokudb_recovery_trace) + if (tokuft_recovery_trace) fprintf(stderr, "DB_RUNRECOVERY: %s:%d r=%d\n", __FUNCTION__, __LINE__, r); rr = DB_RUNRECOVERY; goto errorexit; @@ -1481,7 +1480,7 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di assert(le); thislsn = 
toku_log_entry_get_lsn(le); tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery starts scanning forward to %" PRIu64 " from %" PRIu64 " left %" PRIu64 " (%s)\n", ctime(&tnow), lastlsn.lsn, thislsn.lsn, lastlsn.lsn - thislsn.lsn, recover_state(renv)); + fprintf(stderr, "%.24s TokuFT recovery starts scanning forward to %" PRIu64 " from %" PRIu64 " left %" PRIu64 " (%s)\n", ctime(&tnow), lastlsn.lsn, thislsn.lsn, lastlsn.lsn - thislsn.lsn, recover_state(renv)); for (unsigned i=0; 1; i++) { @@ -1490,7 +1489,7 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di tnow = time(NULL); if (tnow - tlast >= TOKUDB_RECOVERY_PROGRESS_TIME) { thislsn = toku_log_entry_get_lsn(le); - fprintf(stderr, "%.24s Tokudb recovery scanning forward to %" PRIu64 " at %" PRIu64 " left %" PRIu64 " (%s)\n", ctime(&tnow), lastlsn.lsn, thislsn.lsn, lastlsn.lsn - thislsn.lsn, recover_state(renv)); + fprintf(stderr, "%.24s TokuFT recovery scanning forward to %" PRIu64 " at %" PRIu64 " left %" PRIu64 " (%s)\n", ctime(&tnow), lastlsn.lsn, thislsn.lsn, lastlsn.lsn - thislsn.lsn, recover_state(renv)); tlast = tnow; } } @@ -1499,10 +1498,10 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di assert(renv->ss.ss == FORWARD_BETWEEN_CHECKPOINT_BEGIN_END || renv->ss.ss == FORWARD_NEWER_CHECKPOINT_END); logtype_dispatch_assign(le, toku_recover_, r, renv); - if (tokudb_recovery_trace) + if (tokuft_recovery_trace) recover_trace_le(__FUNCTION__, __LINE__, r, le); if (r != 0) { - if (tokudb_recovery_trace) + if (tokuft_recovery_trace) fprintf(stderr, "DB_RUNRECOVERY: %s:%d r=%d\n", __FUNCTION__, __LINE__, r); rr = DB_RUNRECOVERY; goto errorexit; @@ -1511,7 +1510,7 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di // get the next log entry le = NULL; r = toku_logcursor_next(logcursor, &le); - if (tokudb_recovery_trace) + if (tokuft_recovery_trace) recover_trace_le(__FUNCTION__, __LINE__, r, le); if (r != 0) { if (r == DB_NOTFOUND) @@ -1539,7 +1538,7 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di uint32_t n = recover_get_num_live_txns(renv); if (n > 0) { tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery has %" PRIu32 " live transaction%s\n", ctime(&tnow), n, n > 1 ? "s" : ""); + fprintf(stderr, "%.24s TokuFT recovery has %" PRIu32 " live transaction%s\n", ctime(&tnow), n, n > 1 ? "s" : ""); } } recover_abort_all_live_txns(renv); @@ -1547,7 +1546,7 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di uint32_t n = recover_get_num_live_txns(renv); if (n > 0) { tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery has %" PRIu32 " prepared transaction%s\n", ctime(&tnow), n, n > 1 ? "s" : ""); + fprintf(stderr, "%.24s TokuFT recovery has %" PRIu32 " prepared transaction%s\n", ctime(&tnow), n, n > 1 ? "s" : ""); } } @@ -1556,7 +1555,7 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di n = file_map_get_num_dictionaries(&renv->fmap); if (n > 0) { tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery closing %" PRIu32 " dictionar%s\n", ctime(&tnow), n, n > 1 ? "ies" : "y"); + fprintf(stderr, "%.24s TokuFT recovery closing %" PRIu32 " dictionar%s\n", ctime(&tnow), n, n > 1 ? 
"ies" : "y"); } file_map_close_dictionaries(&renv->fmap, lastlsn); @@ -1568,17 +1567,17 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di // checkpoint tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery making a checkpoint\n", ctime(&tnow)); + fprintf(stderr, "%.24s TokuFT recovery making a checkpoint\n", ctime(&tnow)); r = toku_checkpoint(renv->cp, renv->logger, NULL, NULL, NULL, NULL, RECOVERY_CHECKPOINT); assert(r == 0); tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery done\n", ctime(&tnow)); + fprintf(stderr, "%.24s TokuFT recovery done\n", ctime(&tnow)); return 0; errorexit: tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb recovery failed %d\n", ctime(&tnow), rr); + fprintf(stderr, "%.24s TokuFT recovery failed %d\n", ctime(&tnow), rr); if (logcursor) { r = toku_logcursor_destroy(&logcursor); @@ -1603,7 +1602,7 @@ toku_recover_unlock(int lockfd) { return toku_single_process_unlock(&lockfd_copy); } -int tokudb_recover(DB_ENV *env, +int tokuft_recover(DB_ENV *env, prepared_txn_callback_t prepared_txn_callback, keep_cachetable_callback_t keep_cachetable_callback, TOKULOGGER logger, @@ -1621,7 +1620,7 @@ int tokudb_recover(DB_ENV *env, return r; int rr = 0; - if (tokudb_needs_recovery(log_dir, false)) { + if (tokuft_needs_recovery(log_dir, false)) { struct recover_env renv; r = recover_env_init(&renv, env_dir, @@ -1650,7 +1649,7 @@ int tokudb_recover(DB_ENV *env, // Return 0 if recovery log exists, ENOENT if log is missing int -tokudb_recover_log_exists(const char * log_dir) { +tokuft_recover_log_exists(const char * log_dir) { int r; TOKULOGCURSOR logcursor; diff --git a/storage/tokudb/ft-index/ft/recover.h b/storage/tokudb/ft-index/ft/logger/recover.h index 2ef84112784..f08abc44200 100644 --- a/storage/tokudb/ft-index/ft/recover.h +++ b/storage/tokudb/ft-index/ft/logger/recover.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKURECOVER_H -#define TOKURECOVER_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,55 +87,53 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#include <toku_portability.h> +#include <db.h> #include <errno.h> -#include <db.h> -#include <util/x1764.h> +#include "portability/memory.h" +#include "portability/toku_portability.h" -#include "fttypes.h" -#include "memory.h" +#include "ft/comparator.h" +#include "ft/ft-ops.h" +#include "util/x1764.h" -typedef void (*prepared_txn_callback_t)(DB_ENV*, TOKUTXN); -typedef void (*keep_cachetable_callback_t)(DB_ENV*, CACHETABLE); +typedef void (*prepared_txn_callback_t)(DB_ENV *env, struct tokutxn *txn); +typedef void (*keep_cachetable_callback_t)(DB_ENV *env, struct cachetable *ct); -// Run tokudb recovery from the log +// Run tokuft recovery from the log // Returns 0 if success -int tokudb_recover (DB_ENV *env, - prepared_txn_callback_t prepared_txn_callback, - keep_cachetable_callback_t keep_cachetable_callback, - TOKULOGGER logger, - const char *env_dir, const char *log_dir, - ft_compare_func bt_compare, - ft_update_func update_function, - generate_row_for_put_func generate_row_for_put, - generate_row_for_del_func generate_row_for_del, - size_t cachetable_size); - -// Effect: Check the tokudb logs to determine whether or not we need to run recovery. +int tokuft_recover(DB_ENV *env, + prepared_txn_callback_t prepared_txn_callback, + keep_cachetable_callback_t keep_cachetable_callback, + struct tokulogger *logger, + const char *env_dir, + const char *log_dir, + ft_compare_func bt_compare, + ft_update_func update_function, + generate_row_for_put_func generate_row_for_put, + generate_row_for_del_func generate_row_for_del, + size_t cachetable_size); + +// Effect: Check the tokuft logs to determine whether or not we need to run recovery. // If the log is empty or if there is a clean shutdown at the end of the log, then we // dont need to run recovery. // Returns: true if we need recovery, otherwise false. -int tokudb_needs_recovery(const char *logdir, bool ignore_empty_log); +int tokuft_needs_recovery(const char *logdir, bool ignore_empty_log); // Return 0 if recovery log exists, ENOENT if log is missing -int tokudb_recover_log_exists(const char * log_dir); +int tokuft_recover_log_exists(const char * log_dir); // For test only - set callbacks for recovery testing void toku_recover_set_callback (void (*)(void*), void*); void toku_recover_set_callback2 (void (*)(void*), void*); -extern int tokudb_recovery_trace; +extern int tokuft_recovery_trace; int toku_recover_lock (const char *lock_dir, int *lockfd); int toku_recover_unlock(int lockfd); - -static const prepared_txn_callback_t NULL_prepared_txn_callback __attribute__((__unused__)) = NULL; -static const keep_cachetable_callback_t NULL_keep_cachetable_callback __attribute__((__unused__)) = NULL; - - -#endif // TOKURECOVER_H diff --git a/storage/tokudb/ft-index/ft/ft_msg.h b/storage/tokudb/ft-index/ft/msg.cc index f468d7f647b..1fedbe745af 100644 --- a/storage/tokudb/ft-index/ft/ft_msg.h +++ b/storage/tokudb/ft-index/ft/msg.cc @@ -1,13 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: - -/* The purpose of this file is to provide access to the ft_msg, - * which is the ephemeral version of the fifo_msg. - */ - -#ifndef FT_MSG_H -#define FT_MSG_H - #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -37,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -95,30 +87,85 @@ PATENT RIGHTS GRANT: */ #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - - -uint32_t ft_msg_get_keylen(FT_MSG ft_msg); - -uint32_t ft_msg_get_vallen(FT_MSG ft_msg); - -XIDS ft_msg_get_xids(FT_MSG ft_msg); - -void * ft_msg_get_key(FT_MSG ft_msg); - -void * ft_msg_get_val(FT_MSG ft_msg); - -enum ft_msg_type ft_msg_get_type(FT_MSG ft_msg); - -void ft_msg_from_fifo_msg(FT_MSG ft_msg, FIFO_MSG fifo_msg); - -#if 0 - -void ft_msg_from_dbts(FT_MSG ft_msg, DBT *key, DBT *val, XIDS xids, enum ft_msg_type type); - -#endif - - -#endif // FT_MSG_H +#include "portability/toku_portability.h" + +#include "ft/msg.h" +#include "ft/txn/xids.h" +#include "util/dbt.h" + +ft_msg::ft_msg(const DBT *key, const DBT *val, enum ft_msg_type t, MSN m, XIDS x) : + _key(key ? *key : toku_empty_dbt()), + _val(val ? *val : toku_empty_dbt()), + _type(t), _msn(m), _xids(x) { +} + +ft_msg ft_msg::deserialize_from_rbuf(struct rbuf *rb, XIDS *x, bool *is_fresh) { + const void *keyp, *valp; + uint32_t keylen, vallen; + enum ft_msg_type t = (enum ft_msg_type) rbuf_char(rb); + *is_fresh = rbuf_char(rb); + MSN m = rbuf_MSN(rb); + toku_xids_create_from_buffer(rb, x); + rbuf_bytes(rb, &keyp, &keylen); + rbuf_bytes(rb, &valp, &vallen); + + DBT k, v; + return ft_msg(toku_fill_dbt(&k, keyp, keylen), toku_fill_dbt(&v, valp, vallen), t, m, *x); +} + +ft_msg ft_msg::deserialize_from_rbuf_v13(struct rbuf *rb, MSN m, XIDS *x) { + const void *keyp, *valp; + uint32_t keylen, vallen; + enum ft_msg_type t = (enum ft_msg_type) rbuf_char(rb); + toku_xids_create_from_buffer(rb, x); + rbuf_bytes(rb, &keyp, &keylen); + rbuf_bytes(rb, &valp, &vallen); + + DBT k, v; + return ft_msg(toku_fill_dbt(&k, keyp, keylen), toku_fill_dbt(&v, valp, vallen), t, m, *x); +} + +const DBT *ft_msg::kdbt() const { + return &_key; +} + +const DBT *ft_msg::vdbt() const { + return &_val; +} + +enum ft_msg_type ft_msg::type() const { + return _type; +} + +MSN ft_msg::msn() const { + return _msn; +} + +XIDS ft_msg::xids() const { + return _xids; +} + +size_t ft_msg::total_size() const { + // Must store two 4-byte lengths + static const size_t key_val_overhead = 8; + + // 1 byte type, 1 byte freshness, then 8 byte MSN + static const size_t msg_overhead = 2 + sizeof(MSN); + + static const size_t total_overhead = key_val_overhead + msg_overhead; + + const size_t keyval_size = _key.size + _val.size; + const size_t xids_size = toku_xids_get_serialize_size(xids()); + return total_overhead + keyval_size + xids_size; +} + +void ft_msg::serialize_to_wbuf(struct wbuf *wb, bool is_fresh) const { + wbuf_nocrc_char(wb, (unsigned char) _type); + wbuf_nocrc_char(wb, (unsigned char) is_fresh); + wbuf_MSN(wb, _msn); + wbuf_nocrc_xids(wb, _xids); + wbuf_nocrc_bytes(wb, _key.data, _key.size); + wbuf_nocrc_bytes(wb, _val.data, _val.size); +} diff --git a/storage/tokudb/ft-index/ft/msg.h b/storage/tokudb/ft-index/ft/msg.h new file mode 100644 index 00000000000..3a26f068399 --- /dev/null +++ b/storage/tokudb/ft-index/ft/msg.h @@ -0,0 +1,246 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* The purpose of this file is to provide access to the ft_msg, + * which 
is the ephemeral version of the messages that lives in + * a message buffer. + */ + +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#pragma once + +#include <db.h> + +#include "portability/toku_assert.h" +#include "portability/toku_stdint.h" + +#include "ft/txn/xids.h" + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." + +// Message Sequence Number (MSN) +typedef struct __toku_msn { uint64_t msn; } MSN; + +// dummy used for message construction, to be filled in when msg is applied to tree +static const MSN ZERO_MSN = { .msn = 0 }; + +// first 2^62 values reserved for messages created before Dr. No (for upgrade) +static const MSN MIN_MSN = { .msn = 1ULL << 62 }; +static const MSN MAX_MSN = { .msn = UINT64_MAX }; + +/* tree command types */ +enum ft_msg_type { + FT_NONE = 0, + FT_INSERT = 1, + FT_DELETE_ANY = 2, // Delete any matching key. This used to be called FT_DELETE. + //FT_DELETE_BOTH = 3, + FT_ABORT_ANY = 4, // Abort any commands on any matching key. + //FT_ABORT_BOTH = 5, // Abort commands that match both the key and the value + FT_COMMIT_ANY = 6, + //FT_COMMIT_BOTH = 7, + FT_COMMIT_BROADCAST_ALL = 8, // Broadcast to all leafentries, (commit all transactions). + FT_COMMIT_BROADCAST_TXN = 9, // Broadcast to all leafentries, (commit specific transaction). + FT_ABORT_BROADCAST_TXN = 10, // Broadcast to all leafentries, (commit specific transaction). 
+ FT_INSERT_NO_OVERWRITE = 11, + FT_OPTIMIZE = 12, // Broadcast + FT_OPTIMIZE_FOR_UPGRADE = 13, // same as FT_OPTIMIZE, but record version number in leafnode + FT_UPDATE = 14, + FT_UPDATE_BROADCAST_ALL = 15 +}; + +static inline bool +ft_msg_type_applies_once(enum ft_msg_type type) +{ + bool ret_val; + switch (type) { + case FT_INSERT_NO_OVERWRITE: + case FT_INSERT: + case FT_DELETE_ANY: + case FT_ABORT_ANY: + case FT_COMMIT_ANY: + case FT_UPDATE: + ret_val = true; + break; + case FT_COMMIT_BROADCAST_ALL: + case FT_COMMIT_BROADCAST_TXN: + case FT_ABORT_BROADCAST_TXN: + case FT_OPTIMIZE: + case FT_OPTIMIZE_FOR_UPGRADE: + case FT_UPDATE_BROADCAST_ALL: + case FT_NONE: + ret_val = false; + break; + default: + assert(false); + } + return ret_val; +} + +static inline bool +ft_msg_type_applies_all(enum ft_msg_type type) +{ + bool ret_val; + switch (type) { + case FT_NONE: + case FT_INSERT_NO_OVERWRITE: + case FT_INSERT: + case FT_DELETE_ANY: + case FT_ABORT_ANY: + case FT_COMMIT_ANY: + case FT_UPDATE: + ret_val = false; + break; + case FT_COMMIT_BROADCAST_ALL: + case FT_COMMIT_BROADCAST_TXN: + case FT_ABORT_BROADCAST_TXN: + case FT_OPTIMIZE: + case FT_OPTIMIZE_FOR_UPGRADE: + case FT_UPDATE_BROADCAST_ALL: + ret_val = true; + break; + default: + assert(false); + } + return ret_val; +} + +static inline bool +ft_msg_type_does_nothing(enum ft_msg_type type) +{ + return (type == FT_NONE); +} + +class ft_msg { +public: + ft_msg(const DBT *key, const DBT *val, enum ft_msg_type t, MSN m, XIDS x); + + enum ft_msg_type type() const; + + MSN msn() const; + + XIDS xids() const; + + const DBT *kdbt() const; + + const DBT *vdbt() const; + + size_t total_size() const; + + void serialize_to_wbuf(struct wbuf *wb, bool is_fresh) const; + + // deserialization goes through a static factory function so the ft msg + // API stays completely const and there's no default constructor + static ft_msg deserialize_from_rbuf(struct rbuf *rb, XIDS *xids, bool *is_fresh); + + // Version 13/14 messages did not have an msn - so `m' is the MSN + // that will be assigned to the message that gets deserialized. + static ft_msg deserialize_from_rbuf_v13(struct rbuf *rb, MSN m, XIDS *xids); + +private: + const DBT _key; + const DBT _val; + enum ft_msg_type _type; + MSN _msn; + XIDS _xids; +}; + +// For serialize / deserialize + +#include "ft/serialize/wbuf.h" + +static inline void wbuf_MSN(struct wbuf *wb, MSN msn) { + wbuf_ulonglong(wb, msn.msn); +} + +#include "ft/serialize/rbuf.h" + +static inline MSN rbuf_MSN(struct rbuf *rb) { + MSN msn = { .msn = rbuf_ulonglong(rb) }; + return msn; +} diff --git a/storage/tokudb/ft-index/ft/msg_buffer.cc b/storage/tokudb/ft-index/ft/msg_buffer.cc new file mode 100644 index 00000000000..3a72fdb7090 --- /dev/null +++ b/storage/tokudb/ft-index/ft/msg_buffer.cc @@ -0,0 +1,318 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). 
+ + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2014 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. 
+*/ + +#include "ft/msg_buffer.h" +#include "util/dbt.h" + +void message_buffer::create() { + _num_entries = 0; + _memory = nullptr; + _memory_size = 0; + _memory_used = 0; +} + +void message_buffer::clone(message_buffer *src) { + _num_entries = src->_num_entries; + _memory_used = src->_memory_used; + _memory_size = src->_memory_size; + XMALLOC_N(_memory_size, _memory); + memcpy(_memory, src->_memory, _memory_size); +} + +void message_buffer::destroy() { + if (_memory != nullptr) { + toku_free(_memory); + } +} + +void message_buffer::deserialize_from_rbuf(struct rbuf *rb, + int32_t **fresh_offsets, int32_t *nfresh, + int32_t **stale_offsets, int32_t *nstale, + int32_t **broadcast_offsets, int32_t *nbroadcast) { + // read the number of messages in this buffer + int n_in_this_buffer = rbuf_int(rb); + if (fresh_offsets != nullptr) { + XMALLOC_N(n_in_this_buffer, *fresh_offsets); + } + if (stale_offsets != nullptr) { + XMALLOC_N(n_in_this_buffer, *stale_offsets); + } + if (broadcast_offsets != nullptr) { + XMALLOC_N(n_in_this_buffer, *broadcast_offsets); + } + + _resize(rb->size + 64); // rb->size is a good hint for how big the buffer will be + + // deserialize each message individually, noting whether it was fresh + // and putting its buffer offset in the appropriate offsets array + for (int i = 0; i < n_in_this_buffer; i++) { + XIDS xids; + bool is_fresh; + const ft_msg msg = ft_msg::deserialize_from_rbuf(rb, &xids, &is_fresh); + + int32_t *dest; + if (ft_msg_type_applies_once(msg.type())) { + if (is_fresh) { + dest = fresh_offsets ? *fresh_offsets + (*nfresh)++ : nullptr; + } else { + dest = stale_offsets ? *stale_offsets + (*nstale)++ : nullptr; + } + } else { + invariant(ft_msg_type_applies_all(msg.type()) || ft_msg_type_does_nothing(msg.type())); + dest = broadcast_offsets ? *broadcast_offsets + (*nbroadcast)++ : nullptr; + } + + enqueue(msg, is_fresh, dest); + toku_xids_destroy(&xids); + } + + invariant(_num_entries == n_in_this_buffer); +} + +MSN message_buffer::deserialize_from_rbuf_v13(struct rbuf *rb, + MSN *highest_unused_msn_for_upgrade, + int32_t **fresh_offsets, int32_t *nfresh, + int32_t **broadcast_offsets, int32_t *nbroadcast) { + // read the number of messages in this buffer + int n_in_this_buffer = rbuf_int(rb); + if (fresh_offsets != nullptr) { + XMALLOC_N(n_in_this_buffer, *fresh_offsets); + } + if (broadcast_offsets != nullptr) { + XMALLOC_N(n_in_this_buffer, *broadcast_offsets); + } + + // Atomically decrement the header's MSN count by the number + // of messages in the buffer. + MSN highest_msn_in_this_buffer = { + .msn = toku_sync_sub_and_fetch(&highest_unused_msn_for_upgrade->msn, n_in_this_buffer) + }; + + // Create the message buffers from the deserialized buffer. + for (int i = 0; i < n_in_this_buffer; i++) { + XIDS xids; + // There were no stale messages at this version, so call it fresh. + const bool is_fresh = true; + + // Increment our MSN, the last message should have the + // newest/highest MSN. See above for a full explanation. + highest_msn_in_this_buffer.msn++; + const ft_msg msg = ft_msg::deserialize_from_rbuf_v13(rb, highest_msn_in_this_buffer, &xids); + + int32_t *dest; + if (ft_msg_type_applies_once(msg.type())) { + dest = fresh_offsets ? *fresh_offsets + (*nfresh)++ : nullptr; + } else { + invariant(ft_msg_type_applies_all(msg.type()) || ft_msg_type_does_nothing(msg.type())); + dest = broadcast_offsets ? 
*broadcast_offsets + (*nbroadcast)++ : nullptr; + } + + enqueue(msg, is_fresh, dest); + toku_xids_destroy(&xids); + } + + return highest_msn_in_this_buffer; +} + +void message_buffer::_resize(size_t new_size) { + XREALLOC_N(new_size, _memory); + _memory_size = new_size; +} + +static int next_power_of_two (int n) { + int r = 4096; + while (r < n) { + r*=2; + assert(r>0); + } + return r; +} + +struct message_buffer::buffer_entry *message_buffer::get_buffer_entry(int32_t offset) const { + return (struct buffer_entry *) (_memory + offset); +} + +void message_buffer::enqueue(const ft_msg &msg, bool is_fresh, int32_t *offset) { + int need_space_here = msg_memsize_in_buffer(msg); + int need_space_total = _memory_used + need_space_here; + if (_memory == nullptr || need_space_total > _memory_size) { + // resize the buffer to the next power of 2 greater than the needed space + int next_2 = next_power_of_two(need_space_total); + _resize(next_2); + } + uint32_t keylen = msg.kdbt()->size; + uint32_t datalen = msg.vdbt()->size; + struct buffer_entry *entry = get_buffer_entry(_memory_used); + entry->type = (unsigned char) msg.type(); + entry->msn = msg.msn(); + toku_xids_cpy(&entry->xids_s, msg.xids()); + entry->is_fresh = is_fresh; + unsigned char *e_key = toku_xids_get_end_of_array(&entry->xids_s); + entry->keylen = keylen; + memcpy(e_key, msg.kdbt()->data, keylen); + entry->vallen = datalen; + memcpy(e_key + keylen, msg.vdbt()->data, datalen); + if (offset) { + *offset = _memory_used; + } + _num_entries++; + _memory_used += need_space_here; +} + +void message_buffer::set_freshness(int32_t offset, bool is_fresh) { + struct buffer_entry *entry = get_buffer_entry(offset); + entry->is_fresh = is_fresh; +} + +bool message_buffer::get_freshness(int32_t offset) const { + struct buffer_entry *entry = get_buffer_entry(offset); + return entry->is_fresh; +} + +ft_msg message_buffer::get_message(int32_t offset, DBT *keydbt, DBT *valdbt) const { + struct buffer_entry *entry = get_buffer_entry(offset); + uint32_t keylen = entry->keylen; + uint32_t vallen = entry->vallen; + enum ft_msg_type type = (enum ft_msg_type) entry->type; + MSN msn = entry->msn; + const XIDS xids = (XIDS) &entry->xids_s; + const void *key = toku_xids_get_end_of_array(xids); + const void *val = (uint8_t *) key + entry->keylen; + return ft_msg(toku_fill_dbt(keydbt, key, keylen), toku_fill_dbt(valdbt, val, vallen), type, msn, xids); +} + +void message_buffer::get_message_key_msn(int32_t offset, DBT *key, MSN *msn) const { + struct buffer_entry *entry = get_buffer_entry(offset); + if (key != nullptr) { + toku_fill_dbt(key, toku_xids_get_end_of_array((XIDS) &entry->xids_s), entry->keylen); + } + if (msn != nullptr) { + *msn = entry->msn; + } +} + +int message_buffer::num_entries() const { + return _num_entries; +} + +size_t message_buffer::buffer_size_in_use() const { + return _memory_used; +} + +size_t message_buffer::memory_size_in_use() const { + return sizeof(*this) + _memory_used; +} + +size_t message_buffer::memory_footprint() const { + return sizeof(*this) + toku_memory_footprint(_memory, _memory_used); +} + +bool message_buffer::equals(message_buffer *other) const { + return (_memory_used == other->_memory_used && + memcmp(_memory, other->_memory, _memory_used) == 0); +} + +void message_buffer::serialize_to_wbuf(struct wbuf *wb) const { + wbuf_nocrc_int(wb, _num_entries); + struct msg_serialize_fn { + struct wbuf *wb; + msg_serialize_fn(struct wbuf *w) : wb(w) { } + int operator()(const ft_msg &msg, bool is_fresh) { + 
msg.serialize_to_wbuf(wb, is_fresh); + return 0; + } + } serialize_fn(wb); + iterate(serialize_fn); +} + +size_t message_buffer::msg_memsize_in_buffer(const ft_msg &msg) { + const uint32_t keylen = msg.kdbt()->size; + const uint32_t datalen = msg.vdbt()->size; + const size_t xidslen = toku_xids_get_size(msg.xids()); + return sizeof(struct buffer_entry) + keylen + datalen + xidslen - sizeof(XIDS_S); +} diff --git a/storage/tokudb/ft-index/ft/xids-internal.h b/storage/tokudb/ft-index/ft/msg_buffer.h index 6ceae6ee35e..b63b4a354b2 100644 --- a/storage/tokudb/ft-index/ft/xids-internal.h +++ b/storage/tokudb/ft-index/ft/msg_buffer.h @@ -1,10 +1,6 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef XIDS_INTERNAL_H -#define XIDS_INTERNAL_H - -#ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -33,8 +29,8 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2014 Tokutek, Inc. DISCLAIMER: @@ -90,19 +86,96 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." +#pragma once + +#include "ft/msg.h" +#include "ft/txn/xids.h" +#include "util/dbt.h" + +class message_buffer { +public: + void create(); + + void clone(message_buffer *dst); + + void destroy(); + + // effect: deserializes a message buffer from the given rbuf + // returns: *fresh_offsets (etc) malloc'd to be num_entries large and + // populated with *nfresh (etc) offsets in the message buffer + // requires: if fresh_offsets (etc) != nullptr, then nfresh != nullptr + void deserialize_from_rbuf(struct rbuf *rb, + int32_t **fresh_offsets, int32_t *nfresh, + int32_t **stale_offsets, int32_t *nstale, + int32_t **broadcast_offsets, int32_t *nbroadcast); + + // effect: deserializes a message buffer whose messages are at version 13/14 + // returns: similar to deserialize_from_rbuf(), excpet there are no stale messages + // and each message is assigned a sequential value from *highest_unused_msn_for_upgrade, + // which is modified as needed using toku_sync_fech_and_sub() + // returns: the highest MSN assigned to any message in this buffer + // requires: similar to deserialize_from_rbuf(), and highest_unused_msn_for_upgrade != nullptr + MSN deserialize_from_rbuf_v13(struct rbuf *rb, + MSN *highest_unused_msn_for_upgrade, + int32_t **fresh_offsets, int32_t *nfresh, + int32_t **broadcast_offsets, int32_t *nbroadcast); + + void enqueue(const ft_msg &msg, bool is_fresh, int32_t *offset); + + void set_freshness(int32_t offset, bool is_fresh); + + bool get_freshness(int32_t offset) const; + + ft_msg get_message(int32_t offset, DBT *keydbt, DBT *valdbt) const; + + void get_message_key_msn(int32_t offset, DBT *key, MSN *msn) const; + + int num_entries() const; + + size_t buffer_size_in_use() const; + + size_t memory_size_in_use() const; + + size_t memory_footprint() const; + + template <typename F> + int iterate(F &fn) const { + for (int32_t offset = 0; offset < _memory_used; ) { + DBT k, v; + const ft_msg msg = get_message(offset, &k, &v); + bool is_fresh = 
get_freshness(offset); + int r = fn(msg, is_fresh); + if (r != 0) { + return r; + } + offset += msg_memsize_in_buffer(msg); + } + return 0; + } + + bool equals(message_buffer *other) const; + + void serialize_to_wbuf(struct wbuf *wb) const; + + static size_t msg_memsize_in_buffer(const ft_msg &msg); + +private: + void _resize(size_t new_size); -// Variable size list of transaction ids (known in design doc as xids<>). -// ids[0] is the outermost transaction. -// ids[num_xids - 1] is the innermost transaction. -// Should only be accessed by accessor functions xids_xxx, not directly. + // If this isn't packged, the compiler aligns the xids array and we waste a lot of space + struct __attribute__((__packed__)) buffer_entry { + unsigned int keylen; + unsigned int vallen; + unsigned char type; + bool is_fresh; + MSN msn; + XIDS_S xids_s; + }; -// If the xids struct is unpacked, the compiler aligns the ids[] and we waste a lot of space -typedef struct __attribute__((__packed__)) xids_t { - uint8_t num_xids; // maximum value of MAX_TRANSACTION_RECORDS - 1 ... - // ... because transaction 0 is implicit - TXNID ids[]; -} XIDS_S; + struct buffer_entry *get_buffer_entry(int32_t offset) const; -#endif + int _num_entries; + char *_memory; // An array of bytes into which buffer entries are embedded. + int _memory_size; // How big is _memory + int _memory_used; // How many bytes are in use? +}; diff --git a/storage/tokudb/ft-index/ft/node.cc b/storage/tokudb/ft-index/ft/node.cc new file mode 100644 index 00000000000..f6a8c0bb2b3 --- /dev/null +++ b/storage/tokudb/ft-index/ft/node.cc @@ -0,0 +1,1980 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. 
+ +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." + +#include "ft/ft.h" +#include "ft/ft-internal.h" +#include "ft/serialize/ft_node-serialize.h" +#include "ft/node.h" +#include "ft/serialize/rbuf.h" +#include "ft/serialize/wbuf.h" +#include "util/scoped_malloc.h" +#include "util/sort.h" + +// Effect: Fill in N as an empty ftnode. 
+// TODO: Rename toku_ftnode_create +void toku_initialize_empty_ftnode(FTNODE n, BLOCKNUM blocknum, int height, int num_children, int layout_version, unsigned int flags) { + paranoid_invariant(layout_version != 0); + paranoid_invariant(height >= 0); + + n->max_msn_applied_to_node_on_disk = ZERO_MSN; // correct value for root node, harmless for others + n->flags = flags; + n->blocknum = blocknum; + n->layout_version = layout_version; + n->layout_version_original = layout_version; + n->layout_version_read_from_disk = layout_version; + n->height = height; + n->pivotkeys.create_empty(); + n->bp = 0; + n->n_children = num_children; + n->oldest_referenced_xid_known = TXNID_NONE; + + if (num_children > 0) { + XMALLOC_N(num_children, n->bp); + for (int i = 0; i < num_children; i++) { + BP_BLOCKNUM(n,i).b=0; + BP_STATE(n,i) = PT_INVALID; + BP_WORKDONE(n,i) = 0; + BP_INIT_TOUCHED_CLOCK(n, i); + set_BNULL(n,i); + if (height > 0) { + set_BNC(n, i, toku_create_empty_nl()); + } else { + set_BLB(n, i, toku_create_empty_bn()); + } + } + } + n->dirty = 1; // special case exception, it's okay to mark as dirty because the basements are empty + + toku_ft_status_note_ftnode(height, true); +} + +// destroys the internals of the ftnode, but it does not free the values +// that are stored +// this is common functionality for toku_ftnode_free and rebalance_ftnode_leaf +// MUST NOT do anything besides free the structures that have been allocated +void toku_destroy_ftnode_internals(FTNODE node) { + node->pivotkeys.destroy(); + for (int i = 0; i < node->n_children; i++) { + if (BP_STATE(node,i) == PT_AVAIL) { + if (node->height > 0) { + destroy_nonleaf_childinfo(BNC(node,i)); + } else { + destroy_basement_node(BLB(node, i)); + } + } else if (BP_STATE(node,i) == PT_COMPRESSED) { + SUB_BLOCK sb = BSB(node,i); + toku_free(sb->compressed_ptr); + toku_free(sb); + } else { + paranoid_invariant(is_BNULL(node, i)); + } + set_BNULL(node, i); + } + toku_free(node->bp); + node->bp = NULL; +} + +/* Frees a node, including all the stuff in the hash table. 
*/ +void toku_ftnode_free(FTNODE *nodep) { + FTNODE node = *nodep; + toku_ft_status_note_ftnode(node->height, false); + toku_destroy_ftnode_internals(node); + toku_free(node); + *nodep = nullptr; +} + +void toku_ftnode_update_disk_stats(FTNODE ftnode, FT ft, bool for_checkpoint) { + STAT64INFO_S deltas = ZEROSTATS; + // capture deltas before rebalancing basements for serialization + deltas = toku_get_and_clear_basement_stats(ftnode); + // locking not necessary here with respect to checkpointing + // in Clayface (because of the pending lock and cachetable lock + // in toku_cachetable_begin_checkpoint) + // essentially, if we are dealing with a for_checkpoint + // parameter in a function that is called by the flush_callback, + // then the cachetable needs to ensure that this is called in a safe + // manner that does not interfere with the beginning + // of a checkpoint, which it does with the cachetable lock + // and pending lock + toku_ft_update_stats(&ft->h->on_disk_stats, deltas); + if (for_checkpoint) { + toku_ft_update_stats(&ft->checkpoint_header->on_disk_stats, deltas); + } +} + +void toku_ftnode_clone_partitions(FTNODE node, FTNODE cloned_node) { + for (int i = 0; i < node->n_children; i++) { + BP_BLOCKNUM(cloned_node,i) = BP_BLOCKNUM(node,i); + paranoid_invariant(BP_STATE(node,i) == PT_AVAIL); + BP_STATE(cloned_node,i) = PT_AVAIL; + BP_WORKDONE(cloned_node, i) = BP_WORKDONE(node, i); + if (node->height == 0) { + set_BLB(cloned_node, i, toku_clone_bn(BLB(node,i))); + } else { + set_BNC(cloned_node, i, toku_clone_nl(BNC(node,i))); + } + } +} + +void toku_evict_bn_from_memory(FTNODE node, int childnum, FT ft) { + // free the basement node + assert(!node->dirty); + BASEMENTNODE bn = BLB(node, childnum); + toku_ft_decrease_stats(&ft->in_memory_stats, bn->stat64_delta); + destroy_basement_node(bn); + set_BNULL(node, childnum); + BP_STATE(node, childnum) = PT_ON_DISK; +} + +BASEMENTNODE toku_detach_bn(FTNODE node, int childnum) { + assert(BP_STATE(node, childnum) == PT_AVAIL); + BASEMENTNODE bn = BLB(node, childnum); + set_BNULL(node, childnum); + BP_STATE(node, childnum) = PT_ON_DISK; + return bn; +} + +// +// Orthopush +// + +struct store_msg_buffer_offset_extra { + int32_t *offsets; + int i; +}; + +int store_msg_buffer_offset(const int32_t &offset, const uint32_t UU(idx), struct store_msg_buffer_offset_extra *const extra) __attribute__((nonnull(3))); +int store_msg_buffer_offset(const int32_t &offset, const uint32_t UU(idx), struct store_msg_buffer_offset_extra *const extra) +{ + extra->offsets[extra->i] = offset; + extra->i++; + return 0; +} + +/** + * Given pointers to offsets within a message buffer where we can find messages, + * figure out the MSN of each message, and compare those MSNs. Returns 1, + * 0, or -1 if a is larger than, equal to, or smaller than b. + */ +int msg_buffer_offset_msn_cmp(message_buffer &msg_buffer, const int32_t &ao, const int32_t &bo); +int msg_buffer_offset_msn_cmp(message_buffer &msg_buffer, const int32_t &ao, const int32_t &bo) +{ + MSN amsn, bmsn; + msg_buffer.get_message_key_msn(ao, nullptr, &amsn); + msg_buffer.get_message_key_msn(bo, nullptr, &bmsn); + if (amsn.msn > bmsn.msn) { + return +1; + } + if (amsn.msn < bmsn.msn) { + return -1; + } + return 0; +} + +/** + * Given a message buffer and and offset, apply the message with toku_ft_bn_apply_msg, or discard it, + * based on its MSN and the MSN of the basement node. 
+ */ +static void +do_bn_apply_msg(FT_HANDLE ft_handle, BASEMENTNODE bn, message_buffer *msg_buffer, int32_t offset, + txn_gc_info *gc_info, uint64_t *workdone, STAT64INFO stats_to_update) { + DBT k, v; + ft_msg msg = msg_buffer->get_message(offset, &k, &v); + + // The messages are being iterated over in (key,msn) order or just in + // msn order, so all the messages for one key, from one buffer, are in + // ascending msn order. So it's ok that we don't update the basement + // node's msn until the end. + if (msg.msn().msn > bn->max_msn_applied.msn) { + toku_ft_bn_apply_msg( + ft_handle->ft->cmp, + ft_handle->ft->update_fun, + bn, + msg, + gc_info, + workdone, + stats_to_update + ); + } else { + toku_ft_status_note_msn_discard(); + } + + // We must always mark message as stale since it has been marked + // (using omt::iterate_and_mark_range) + // It is possible to call do_bn_apply_msg even when it won't apply the message because + // the node containing it could have been evicted and brought back in. + msg_buffer->set_freshness(offset, false); +} + + +struct iterate_do_bn_apply_msg_extra { + FT_HANDLE t; + BASEMENTNODE bn; + NONLEAF_CHILDINFO bnc; + txn_gc_info *gc_info; + uint64_t *workdone; + STAT64INFO stats_to_update; +}; + +int iterate_do_bn_apply_msg(const int32_t &offset, const uint32_t UU(idx), struct iterate_do_bn_apply_msg_extra *const e) __attribute__((nonnull(3))); +int iterate_do_bn_apply_msg(const int32_t &offset, const uint32_t UU(idx), struct iterate_do_bn_apply_msg_extra *const e) +{ + do_bn_apply_msg(e->t, e->bn, &e->bnc->msg_buffer, offset, e->gc_info, e->workdone, e->stats_to_update); + return 0; +} + +/** + * Given the bounds of the basement node to which we will apply messages, + * find the indexes within message_tree which contain the range of + * relevant messages. + * + * The message tree contains offsets into the buffer, where messages are + * found. The pivot_bounds are the lower bound exclusive and upper bound + * inclusive, because they come from pivot keys in the tree. We want OMT + * indices, which must have the lower bound be inclusive and the upper + * bound exclusive. We will get these by telling omt::find to look + * for something strictly bigger than each of our pivot bounds. + * + * Outputs the OMT indices in lbi (lower bound inclusive) and ube (upper + * bound exclusive). + */ +template<typename find_bounds_omt_t> +static void +find_bounds_within_message_tree( + const toku::comparator &cmp, + const find_bounds_omt_t &message_tree, /// tree holding message buffer offsets, in which we want to look for indices + message_buffer *msg_buffer, /// message buffer in which messages are found + const pivot_bounds &bounds, /// key bounds within the basement node we're applying messages to + uint32_t *lbi, /// (output) "lower bound inclusive" (index into message_tree) + uint32_t *ube /// (output) "upper bound exclusive" (index into message_tree) + ) +{ + int r = 0; + + if (!toku_dbt_is_empty(bounds.lbe())) { + // By setting msn to MAX_MSN and by using direction of +1, we will + // get the first message greater than (in (key, msn) order) any + // message (with any msn) with the key lower_bound_exclusive. + // This will be a message we want to try applying, so it is the + // "lower bound inclusive" within the message_tree. 
+ struct toku_msg_buffer_key_msn_heaviside_extra lbi_extra(cmp, msg_buffer, bounds.lbe(), MAX_MSN); + int32_t found_lb; + r = message_tree.template find<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(lbi_extra, +1, &found_lb, lbi); + if (r == DB_NOTFOUND) { + // There is no relevant data (the lower bound is bigger than + // any message in this tree), so we have no range and we're + // done. + *lbi = 0; + *ube = 0; + return; + } + if (!toku_dbt_is_empty(bounds.ubi())) { + // Check if what we found for lbi is greater than the upper + // bound inclusive that we have. If so, there are no relevant + // messages between these bounds. + const DBT *ubi = bounds.ubi(); + const int32_t offset = found_lb; + DBT found_lbidbt; + msg_buffer->get_message_key_msn(offset, &found_lbidbt, nullptr); + int c = cmp(&found_lbidbt, ubi); + // These DBTs really are both inclusive bounds, so we need + // strict inequality in order to determine that there's + // nothing between them. If they're equal, then we actually + // need to apply the message pointed to by lbi, and also + // anything with the same key but a bigger msn. + if (c > 0) { + *lbi = 0; + *ube = 0; + return; + } + } + } else { + // No lower bound given, it's negative infinity, so we start at + // the first message in the OMT. + *lbi = 0; + } + if (!toku_dbt_is_empty(bounds.ubi())) { + // Again, we use an msn of MAX_MSN and a direction of +1 to get + // the first thing bigger than the upper_bound_inclusive key. + // This is therefore the smallest thing we don't want to apply, + // and omt::iterate_on_range will not examine it. + struct toku_msg_buffer_key_msn_heaviside_extra ube_extra(cmp, msg_buffer, bounds.ubi(), MAX_MSN); + r = message_tree.template find<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(ube_extra, +1, nullptr, ube); + if (r == DB_NOTFOUND) { + // Couldn't find anything in the buffer bigger than our key, + // so we need to look at everything up to the end of + // message_tree. + *ube = message_tree.size(); + } + } else { + // No upper bound given, it's positive infinity, so we need to go + // through the end of the OMT. + *ube = message_tree.size(); + } +} + +/** + * For each message in the ancestor's buffer (determined by childnum) that + * is key-wise between lower_bound_exclusive and upper_bound_inclusive, + * apply the message to the basement node. We treat the bounds as minus + * or plus infinity respectively if they are NULL. Do not mark the node + * as dirty (preserve previous state of 'dirty' bit). 
+ */ +static void +bnc_apply_messages_to_basement_node( + FT_HANDLE t, // used for comparison function + BASEMENTNODE bn, // where to apply messages + FTNODE ancestor, // the ancestor node where we can find messages to apply + int childnum, // which child buffer of ancestor contains messages we want + const pivot_bounds &bounds, // contains pivot key bounds of this basement node + txn_gc_info *gc_info, + bool* msgs_applied + ) +{ + int r; + NONLEAF_CHILDINFO bnc = BNC(ancestor, childnum); + + // Determine the offsets in the message trees between which we need to + // apply messages from this buffer + STAT64INFO_S stats_delta = {0,0}; + uint64_t workdone_this_ancestor = 0; + + uint32_t stale_lbi, stale_ube; + if (!bn->stale_ancestor_messages_applied) { + find_bounds_within_message_tree(t->ft->cmp, bnc->stale_message_tree, &bnc->msg_buffer, bounds, &stale_lbi, &stale_ube); + } else { + stale_lbi = 0; + stale_ube = 0; + } + uint32_t fresh_lbi, fresh_ube; + find_bounds_within_message_tree(t->ft->cmp, bnc->fresh_message_tree, &bnc->msg_buffer, bounds, &fresh_lbi, &fresh_ube); + + // We now know where all the messages we must apply are, so one of the + // following 4 cases will do the application, depending on which of + // the lists contains relevant messages: + // + // 1. broadcast messages and anything else, or a mix of fresh and stale + // 2. only fresh messages + // 3. only stale messages + if (bnc->broadcast_list.size() > 0 || + (stale_lbi != stale_ube && fresh_lbi != fresh_ube)) { + // We have messages in multiple trees, so we grab all + // the relevant messages' offsets and sort them by MSN, then apply + // them in MSN order. + const int buffer_size = ((stale_ube - stale_lbi) + (fresh_ube - fresh_lbi) + bnc->broadcast_list.size()); + toku::scoped_malloc offsets_buf(buffer_size * sizeof(int32_t)); + int32_t *offsets = reinterpret_cast<int32_t *>(offsets_buf.get()); + struct store_msg_buffer_offset_extra sfo_extra = { .offsets = offsets, .i = 0 }; + + // Populate offsets array with offsets to stale messages + r = bnc->stale_message_tree.iterate_on_range<struct store_msg_buffer_offset_extra, store_msg_buffer_offset>(stale_lbi, stale_ube, &sfo_extra); + assert_zero(r); + + // Then store fresh offsets, and mark them to be moved to stale later. + r = bnc->fresh_message_tree.iterate_and_mark_range<struct store_msg_buffer_offset_extra, store_msg_buffer_offset>(fresh_lbi, fresh_ube, &sfo_extra); + assert_zero(r); + + // Store offsets of all broadcast messages. + r = bnc->broadcast_list.iterate<struct store_msg_buffer_offset_extra, store_msg_buffer_offset>(&sfo_extra); + assert_zero(r); + invariant(sfo_extra.i == buffer_size); + + // Sort by MSN. + toku::sort<int32_t, message_buffer, msg_buffer_offset_msn_cmp>::mergesort_r(offsets, buffer_size, bnc->msg_buffer); + + // Apply the messages in MSN order. + for (int i = 0; i < buffer_size; ++i) { + *msgs_applied = true; + do_bn_apply_msg(t, bn, &bnc->msg_buffer, offsets[i], gc_info, &workdone_this_ancestor, &stats_delta); + } + } else if (stale_lbi == stale_ube) { + // No stale messages to apply, we just apply fresh messages, and mark them to be moved to stale later. 
+ struct iterate_do_bn_apply_msg_extra iter_extra = { .t = t, .bn = bn, .bnc = bnc, .gc_info = gc_info, .workdone = &workdone_this_ancestor, .stats_to_update = &stats_delta }; + if (fresh_ube - fresh_lbi > 0) *msgs_applied = true; + r = bnc->fresh_message_tree.iterate_and_mark_range<struct iterate_do_bn_apply_msg_extra, iterate_do_bn_apply_msg>(fresh_lbi, fresh_ube, &iter_extra); + assert_zero(r); + } else { + invariant(fresh_lbi == fresh_ube); + // No fresh messages to apply, we just apply stale messages. + + if (stale_ube - stale_lbi > 0) *msgs_applied = true; + struct iterate_do_bn_apply_msg_extra iter_extra = { .t = t, .bn = bn, .bnc = bnc, .gc_info = gc_info, .workdone = &workdone_this_ancestor, .stats_to_update = &stats_delta }; + + r = bnc->stale_message_tree.iterate_on_range<struct iterate_do_bn_apply_msg_extra, iterate_do_bn_apply_msg>(stale_lbi, stale_ube, &iter_extra); + assert_zero(r); + } + // + // update stats + // + if (workdone_this_ancestor > 0) { + (void) toku_sync_fetch_and_add(&BP_WORKDONE(ancestor, childnum), workdone_this_ancestor); + } + if (stats_delta.numbytes || stats_delta.numrows) { + toku_ft_update_stats(&t->ft->in_memory_stats, stats_delta); + } +} + +static void +apply_ancestors_messages_to_bn( + FT_HANDLE t, + FTNODE node, + int childnum, + ANCESTORS ancestors, + const pivot_bounds &bounds, + txn_gc_info *gc_info, + bool* msgs_applied + ) +{ + BASEMENTNODE curr_bn = BLB(node, childnum); + const pivot_bounds curr_bounds = bounds.next_bounds(node, childnum); + for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) { + if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > curr_bn->max_msn_applied.msn) { + paranoid_invariant(BP_STATE(curr_ancestors->node, curr_ancestors->childnum) == PT_AVAIL); + bnc_apply_messages_to_basement_node( + t, + curr_bn, + curr_ancestors->node, + curr_ancestors->childnum, + curr_bounds, + gc_info, + msgs_applied + ); + // We don't want to check this ancestor node again if the + // next time we query it, the msn hasn't changed. + curr_bn->max_msn_applied = curr_ancestors->node->max_msn_applied_to_node_on_disk; + } + } + // At this point, we know all the stale messages above this + // basement node have been applied, and any new messages will be + // fresh, so we don't need to look at stale messages for this + // basement node, unless it gets evicted (and this field becomes + // false when it's read in again). + curr_bn->stale_ancestor_messages_applied = true; +} + +void +toku_apply_ancestors_messages_to_node ( + FT_HANDLE t, + FTNODE node, + ANCESTORS ancestors, + const pivot_bounds &bounds, + bool* msgs_applied, + int child_to_read + ) +// Effect: +// Bring a leaf node up-to-date according to all the messages in the ancestors. +// If the leaf node is already up-to-date then do nothing. +// If the leaf node is not already up-to-date, then record the work done +// for that leaf in each ancestor. +// Requires: +// This is being called when pinning a leaf node for the query path. +// The entire root-to-leaf path is pinned and appears in the ancestors list. 
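+// Output:
+//   msgs_applied: set to true if any message from an ancestor's buffer was
+//   applied to a basement node of this leaf.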
+{ + VERIFY_NODE(t, node); + paranoid_invariant(node->height == 0); + + TXN_MANAGER txn_manager = toku_ft_get_txn_manager(t); + txn_manager_state txn_state_for_gc(txn_manager); + + TXNID oldest_referenced_xid_for_simple_gc = toku_ft_get_oldest_referenced_xid_estimate(t); + txn_gc_info gc_info(&txn_state_for_gc, + oldest_referenced_xid_for_simple_gc, + node->oldest_referenced_xid_known, + true); + if (!node->dirty && child_to_read >= 0) { + paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL); + apply_ancestors_messages_to_bn( + t, + node, + child_to_read, + ancestors, + bounds, + &gc_info, + msgs_applied + ); + } + else { + // know we are a leaf node + // An important invariant: + // We MUST bring every available basement node for a dirty node up to date. + // flushing on the cleaner thread depends on this. This invariant + // allows the cleaner thread to just pick an internal node and flush it + // as opposed to being forced to start from the root. + for (int i = 0; i < node->n_children; i++) { + if (BP_STATE(node, i) != PT_AVAIL) { continue; } + apply_ancestors_messages_to_bn( + t, + node, + i, + ancestors, + bounds, + &gc_info, + msgs_applied + ); + } + } + VERIFY_NODE(t, node); +} + +static bool bn_needs_ancestors_messages( + FT ft, + FTNODE node, + int childnum, + const pivot_bounds &bounds, + ANCESTORS ancestors, + MSN* max_msn_applied + ) +{ + BASEMENTNODE bn = BLB(node, childnum); + const pivot_bounds curr_bounds = bounds.next_bounds(node, childnum); + bool needs_ancestors_messages = false; + for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) { + if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > bn->max_msn_applied.msn) { + paranoid_invariant(BP_STATE(curr_ancestors->node, curr_ancestors->childnum) == PT_AVAIL); + NONLEAF_CHILDINFO bnc = BNC(curr_ancestors->node, curr_ancestors->childnum); + if (bnc->broadcast_list.size() > 0) { + needs_ancestors_messages = true; + goto cleanup; + } + if (!bn->stale_ancestor_messages_applied) { + uint32_t stale_lbi, stale_ube; + find_bounds_within_message_tree(ft->cmp, + bnc->stale_message_tree, + &bnc->msg_buffer, + curr_bounds, + &stale_lbi, + &stale_ube); + if (stale_lbi < stale_ube) { + needs_ancestors_messages = true; + goto cleanup; + } + } + uint32_t fresh_lbi, fresh_ube; + find_bounds_within_message_tree(ft->cmp, + bnc->fresh_message_tree, + &bnc->msg_buffer, + curr_bounds, + &fresh_lbi, + &fresh_ube); + if (fresh_lbi < fresh_ube) { + needs_ancestors_messages = true; + goto cleanup; + } + if (curr_ancestors->node->max_msn_applied_to_node_on_disk.msn > max_msn_applied->msn) { + max_msn_applied->msn = curr_ancestors->node->max_msn_applied_to_node_on_disk.msn; + } + } + } +cleanup: + return needs_ancestors_messages; +} + +bool toku_ft_leaf_needs_ancestors_messages( + FT ft, + FTNODE node, + ANCESTORS ancestors, + const pivot_bounds &bounds, + MSN *const max_msn_in_path, + int child_to_read + ) +// Effect: Determine whether there are messages in a node's ancestors +// which must be applied to it. These messages are in the correct +// keyrange for any available basement nodes, and are in nodes with the +// correct max_msn_applied_to_node_on_disk. +// Notes: +// This is an approximate query. +// Output: +// max_msn_in_path: max of "max_msn_applied_to_node_on_disk" over +// ancestors. This is used later to update basement nodes' +// max_msn_applied values in case we don't do the full algorithm. 
+// Returns: +// true if there may be some such messages +// false only if there are definitely no such messages +// Rationale: +// When we pin a node with a read lock, we want to quickly determine if +// we should exchange it for a write lock in preparation for applying +// messages. If there are no messages, we don't need the write lock. +{ + paranoid_invariant(node->height == 0); + bool needs_ancestors_messages = false; + // child_to_read may be -1 in test cases + if (!node->dirty && child_to_read >= 0) { + paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL); + needs_ancestors_messages = bn_needs_ancestors_messages( + ft, + node, + child_to_read, + bounds, + ancestors, + max_msn_in_path + ); + } + else { + for (int i = 0; i < node->n_children; ++i) { + if (BP_STATE(node, i) != PT_AVAIL) { continue; } + needs_ancestors_messages = bn_needs_ancestors_messages( + ft, + node, + i, + bounds, + ancestors, + max_msn_in_path + ); + if (needs_ancestors_messages) { + goto cleanup; + } + } + } +cleanup: + return needs_ancestors_messages; +} + +void toku_ft_bn_update_max_msn(FTNODE node, MSN max_msn_applied, int child_to_read) { + invariant(node->height == 0); + if (!node->dirty && child_to_read >= 0) { + paranoid_invariant(BP_STATE(node, child_to_read) == PT_AVAIL); + BASEMENTNODE bn = BLB(node, child_to_read); + if (max_msn_applied.msn > bn->max_msn_applied.msn) { + // see comment below + (void) toku_sync_val_compare_and_swap(&bn->max_msn_applied.msn, bn->max_msn_applied.msn, max_msn_applied.msn); + } + } + else { + for (int i = 0; i < node->n_children; ++i) { + if (BP_STATE(node, i) != PT_AVAIL) { continue; } + BASEMENTNODE bn = BLB(node, i); + if (max_msn_applied.msn > bn->max_msn_applied.msn) { + // This function runs in a shared access context, so to silence tools + // like DRD, we use a CAS and ignore the result. + // Any threads trying to update these basement nodes should be + // updating them to the same thing (since they all have a read lock on + // the same root-to-leaf path) so this is safe. 
+                (void) toku_sync_val_compare_and_swap(&bn->max_msn_applied.msn, bn->max_msn_applied.msn, max_msn_applied.msn);
+            }
+        }
+    }
+}
+
+struct copy_to_stale_extra {
+    FT ft;
+    NONLEAF_CHILDINFO bnc;
+};
+
+int copy_to_stale(const int32_t &offset, const uint32_t UU(idx), struct copy_to_stale_extra *const extra) __attribute__((nonnull(3)));
+int copy_to_stale(const int32_t &offset, const uint32_t UU(idx), struct copy_to_stale_extra *const extra)
+{
+    MSN msn;
+    DBT key;
+    extra->bnc->msg_buffer.get_message_key_msn(offset, &key, &msn);
+    struct toku_msg_buffer_key_msn_heaviside_extra heaviside_extra(extra->ft->cmp, &extra->bnc->msg_buffer, &key, msn);
+    int r = extra->bnc->stale_message_tree.insert<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(offset, heaviside_extra, nullptr);
+    invariant_zero(r);
+    return 0;
+}
+
+void toku_ft_bnc_move_messages_to_stale(FT ft, NONLEAF_CHILDINFO bnc) {
+    struct copy_to_stale_extra cts_extra = { .ft = ft, .bnc = bnc };
+    int r = bnc->fresh_message_tree.iterate_over_marked<struct copy_to_stale_extra, copy_to_stale>(&cts_extra);
+    invariant_zero(r);
+    bnc->fresh_message_tree.delete_all_marked();
+}
+
+void toku_move_ftnode_messages_to_stale(FT ft, FTNODE node) {
+    invariant(node->height > 0);
+    for (int i = 0; i < node->n_children; ++i) {
+        if (BP_STATE(node, i) != PT_AVAIL) {
+            continue;
+        }
+        NONLEAF_CHILDINFO bnc = BNC(node, i);
+        // We can't delete things out of the fresh tree inside the above
+        // procedures because we're still looking at the fresh tree. Instead
+        // we have to move messages after we're done looking at it.
+        toku_ft_bnc_move_messages_to_stale(ft, bnc);
+    }
+}
+
+//
+// Balance // Availability // Size
+
+struct rebalance_array_info {
+    uint32_t offset;
+    LEAFENTRY *le_array;
+    uint32_t *key_sizes_array;
+    const void **key_ptr_array;
+    static int fn(const void* key, const uint32_t keylen, const LEAFENTRY &le,
+                  const uint32_t idx, struct rebalance_array_info *const ai) {
+        ai->le_array[idx+ai->offset] = le;
+        ai->key_sizes_array[idx+ai->offset] = keylen;
+        ai->key_ptr_array[idx+ai->offset] = key;
+        return 0;
+    }
+};
+
+// There must still be at least one child
+// Requires that all messages in buffers above have been applied.
+// Because all messages above have been applied, setting msn of all new basements
+// to max msn of existing basements is correct. (There cannot be any messages in
+// buffers above that still need to be applied.)
+void toku_ftnode_leaf_rebalance(FTNODE node, unsigned int basementnodesize) {
+
+    assert(node->height == 0);
+    assert(node->dirty);
+
+    uint32_t num_orig_basements = node->n_children;
+    // Count number of leaf entries in this leaf (num_le).
+    uint32_t num_le = 0;
+    for (uint32_t i = 0; i < num_orig_basements; i++) {
+        num_le += BLB_DATA(node, i)->num_klpairs();
+    }
+
+    uint32_t num_alloc = num_le ? num_le : 1;  // simplify logic below by always having at least one entry per array
+
+    // Create an array of OMTVALUE's that store all the pointers to all the data.
+    // Each element in leafpointers is a pointer to a leaf.
+    toku::scoped_malloc leafpointers_buf(sizeof(LEAFENTRY) * num_alloc);
+    LEAFENTRY *leafpointers = reinterpret_cast<LEAFENTRY *>(leafpointers_buf.get());
+    leafpointers[0] = NULL;
+
+    toku::scoped_malloc key_pointers_buf(sizeof(void *) * num_alloc);
+    const void **key_pointers = reinterpret_cast<const void **>(key_pointers_buf.get());
+    key_pointers[0] = NULL;
+
+    toku::scoped_malloc key_sizes_buf(sizeof(uint32_t) * num_alloc);
+    uint32_t *key_sizes = reinterpret_cast<uint32_t *>(key_sizes_buf.get());
+
+    // Capture pointers to old mempools' buffers (so they can be destroyed)
+    toku::scoped_malloc old_bns_buf(sizeof(BASEMENTNODE) * num_orig_basements);
+    BASEMENTNODE *old_bns = reinterpret_cast<BASEMENTNODE *>(old_bns_buf.get());
+    old_bns[0] = NULL;
+
+    uint32_t curr_le = 0;
+    for (uint32_t i = 0; i < num_orig_basements; i++) {
+        bn_data* bd = BLB_DATA(node, i);
+        struct rebalance_array_info ai {.offset = curr_le, .le_array = leafpointers, .key_sizes_array = key_sizes, .key_ptr_array = key_pointers };
+        bd->iterate<rebalance_array_info, rebalance_array_info::fn>(&ai);
+        curr_le += bd->num_klpairs();
+    }
+
+    // Create an array that will store indexes of new pivots.
+    // Each element in new_pivots is the index of a pivot key.
+    // (Allocating num_le of them is overkill, but num_le is an upper bound.)
+    toku::scoped_malloc new_pivots_buf(sizeof(uint32_t) * num_alloc);
+    uint32_t *new_pivots = reinterpret_cast<uint32_t *>(new_pivots_buf.get());
+    new_pivots[0] = 0;
+
+    // Each element in le_sizes is the size of the leafentry pointed to by leafpointers.
+    toku::scoped_malloc le_sizes_buf(sizeof(size_t) * num_alloc);
+    size_t *le_sizes = reinterpret_cast<size_t *>(le_sizes_buf.get());
+    le_sizes[0] = 0;
+
+    // Create an array that will store the size of each basement.
+    // This is the sum of the leaf sizes of all the leaves in that basement.
+    // We don't know how many basements there will be, so we use num_le as the upper bound.
+
+    // Sum of all le sizes in a single basement
+    toku::scoped_calloc bn_le_sizes_buf(sizeof(size_t) * num_alloc);
+    size_t *bn_le_sizes = reinterpret_cast<size_t *>(bn_le_sizes_buf.get());
+
+    // Sum of all key sizes in a single basement
+    toku::scoped_calloc bn_key_sizes_buf(sizeof(size_t) * num_alloc);
+    size_t *bn_key_sizes = reinterpret_cast<size_t *>(bn_key_sizes_buf.get());
+
+    // TODO 4050: All these arrays should be combined into a single array of some bn_info struct (pivot, msize, num_les).
+    // Each entry is the number of leafentries in this basement. (Again, num_le is an overkill upper bound.)
+    toku::scoped_malloc num_les_this_bn_buf(sizeof(uint32_t) * num_alloc);
+    uint32_t *num_les_this_bn = reinterpret_cast<uint32_t *>(num_les_this_bn_buf.get());
+    num_les_this_bn[0] = 0;
+
+    // Figure out the new pivots.
+    // We need the index of each pivot, and for each basement we need
+    // the number of leaves and the sum of the sizes of the leaves (memory requirement for basement).
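+    // Packing heuristic: walk the leaf entries in order, charging each entry its
+    // leafentry size plus its key size plus a uint32_t of overhead, and close off
+    // the current basement node when the next entry would push the running total
+    // past basementnodesize (as long as the basement already holds at least one entry).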
+ uint32_t curr_pivot = 0; + uint32_t num_le_in_curr_bn = 0; + uint32_t bn_size_so_far = 0; + for (uint32_t i = 0; i < num_le; i++) { + uint32_t curr_le_size = leafentry_disksize((LEAFENTRY) leafpointers[i]); + le_sizes[i] = curr_le_size; + if ((bn_size_so_far + curr_le_size + sizeof(uint32_t) + key_sizes[i] > basementnodesize) && (num_le_in_curr_bn != 0)) { + // cap off the current basement node to end with the element before i + new_pivots[curr_pivot] = i-1; + curr_pivot++; + num_le_in_curr_bn = 0; + bn_size_so_far = 0; + } + num_le_in_curr_bn++; + num_les_this_bn[curr_pivot] = num_le_in_curr_bn; + bn_le_sizes[curr_pivot] += curr_le_size; + bn_key_sizes[curr_pivot] += sizeof(uint32_t) + key_sizes[i]; // uint32_t le_offset + bn_size_so_far += curr_le_size + sizeof(uint32_t) + key_sizes[i]; + } + // curr_pivot is now the total number of pivot keys in the leaf node + int num_pivots = curr_pivot; + int num_children = num_pivots + 1; + + // now we need to fill in the new basement nodes and pivots + + // TODO: (Zardosht) this is an ugly thing right now + // Need to figure out how to properly deal with seqinsert. + // I am not happy with how this is being + // handled with basement nodes + uint32_t tmp_seqinsert = BLB_SEQINSERT(node, num_orig_basements - 1); + + // choose the max msn applied to any basement as the max msn applied to all new basements + MSN max_msn = ZERO_MSN; + for (uint32_t i = 0; i < num_orig_basements; i++) { + MSN curr_msn = BLB_MAX_MSN_APPLIED(node,i); + max_msn = (curr_msn.msn > max_msn.msn) ? curr_msn : max_msn; + } + // remove the basement node in the node, we've saved a copy + for (uint32_t i = 0; i < num_orig_basements; i++) { + // save a reference to the old basement nodes + // we will need them to ensure that the memory + // stays intact + old_bns[i] = toku_detach_bn(node, i); + } + // Now destroy the old basements, but do not destroy leaves + toku_destroy_ftnode_internals(node); + + // now reallocate pieces and start filling them in + invariant(num_children > 0); + + node->n_children = num_children; + XCALLOC_N(num_children, node->bp); // allocate pointers to basements (bp) + for (int i = 0; i < num_children; i++) { + set_BLB(node, i, toku_create_empty_bn()); // allocate empty basements and set bp pointers + } + + // now we start to fill in the data + + // first the pivots + toku::scoped_malloc pivotkeys_buf(num_pivots * sizeof(DBT)); + DBT *pivotkeys = reinterpret_cast<DBT *>(pivotkeys_buf.get()); + for (int i = 0; i < num_pivots; i++) { + uint32_t size = key_sizes[new_pivots[i]]; + const void *key = key_pointers[new_pivots[i]]; + toku_fill_dbt(&pivotkeys[i], key, size); + } + node->pivotkeys.create_from_dbts(pivotkeys, num_pivots); + + uint32_t baseindex_this_bn = 0; + // now the basement nodes + for (int i = 0; i < num_children; i++) { + // put back seqinsert + BLB_SEQINSERT(node, i) = tmp_seqinsert; + + // create start (inclusive) and end (exclusive) boundaries for data of basement node + uint32_t curr_start = (i==0) ? 0 : new_pivots[i-1]+1; // index of first leaf in basement + uint32_t curr_end = (i==num_pivots) ? 
num_le : new_pivots[i]+1; // index of first leaf in next basement + uint32_t num_in_bn = curr_end - curr_start; // number of leaves in this basement + + // create indexes for new basement + invariant(baseindex_this_bn == curr_start); + uint32_t num_les_to_copy = num_les_this_bn[i]; + invariant(num_les_to_copy == num_in_bn); + + bn_data* bd = BLB_DATA(node, i); + bd->set_contents_as_clone_of_sorted_array( + num_les_to_copy, + &key_pointers[baseindex_this_bn], + &key_sizes[baseindex_this_bn], + &leafpointers[baseindex_this_bn], + &le_sizes[baseindex_this_bn], + bn_key_sizes[i], // Total key sizes + bn_le_sizes[i] // total le sizes + ); + + BP_STATE(node,i) = PT_AVAIL; + BP_TOUCH_CLOCK(node,i); + BLB_MAX_MSN_APPLIED(node,i) = max_msn; + baseindex_this_bn += num_les_to_copy; // set to index of next bn + } + node->max_msn_applied_to_node_on_disk = max_msn; + + // destroy buffers of old mempools + for (uint32_t i = 0; i < num_orig_basements; i++) { + destroy_basement_node(old_bns[i]); + } +} + +bool toku_ftnode_fully_in_memory(FTNODE node) { + for (int i = 0; i < node->n_children; i++) { + if (BP_STATE(node,i) != PT_AVAIL) { + return false; + } + } + return true; +} + +void toku_ftnode_assert_fully_in_memory(FTNODE UU(node)) { + paranoid_invariant(toku_ftnode_fully_in_memory(node)); +} + +uint32_t toku_ftnode_leaf_num_entries(FTNODE node) { + toku_ftnode_assert_fully_in_memory(node); + uint32_t num_entries = 0; + for (int i = 0; i < node->n_children; i++) { + num_entries += BLB_DATA(node, i)->num_klpairs(); + } + return num_entries; +} + +enum reactivity toku_ftnode_get_leaf_reactivity(FTNODE node, uint32_t nodesize) { + enum reactivity re = RE_STABLE; + toku_ftnode_assert_fully_in_memory(node); + paranoid_invariant(node->height==0); + unsigned int size = toku_serialize_ftnode_size(node); + if (size > nodesize && toku_ftnode_leaf_num_entries(node) > 1) { + re = RE_FISSIBLE; + } else if ((size*4) < nodesize && !BLB_SEQINSERT(node, node->n_children-1)) { + re = RE_FUSIBLE; + } + return re; +} + +enum reactivity toku_ftnode_get_nonleaf_reactivity(FTNODE node, unsigned int fanout) { + paranoid_invariant(node->height > 0); + int n_children = node->n_children; + if (n_children > (int) fanout) { + return RE_FISSIBLE; + } + if (n_children * 4 < (int) fanout) { + return RE_FUSIBLE; + } + return RE_STABLE; +} + +enum reactivity toku_ftnode_get_reactivity(FT ft, FTNODE node) { + toku_ftnode_assert_fully_in_memory(node); + if (node->height == 0) { + return toku_ftnode_get_leaf_reactivity(node, ft->h->nodesize); + } else { + return toku_ftnode_get_nonleaf_reactivity(node, ft->h->fanout); + } +} + +unsigned int toku_bnc_nbytesinbuf(NONLEAF_CHILDINFO bnc) { + return bnc->msg_buffer.buffer_size_in_use(); +} + +// Return true if the size of the buffers plus the amount of work done is large enough. +// Return false if there is nothing to be flushed (the buffers empty). 
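+// "Large enough" means the serialized node size plus the sum of BP_WORKDONE over
+// all children exceeds nodesize, and at least one child buffer is non-empty.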
+bool toku_ftnode_nonleaf_is_gorged(FTNODE node, uint32_t nodesize) { + uint64_t size = toku_serialize_ftnode_size(node); + + bool buffers_are_empty = true; + toku_ftnode_assert_fully_in_memory(node); + // + // the nonleaf node is gorged if the following holds true: + // - the buffers are non-empty + // - the total workdone by the buffers PLUS the size of the buffers + // is greater than nodesize (which as of Maxwell should be + // 4MB) + // + paranoid_invariant(node->height > 0); + for (int child = 0; child < node->n_children; ++child) { + size += BP_WORKDONE(node, child); + } + for (int child = 0; child < node->n_children; ++child) { + if (toku_bnc_nbytesinbuf(BNC(node, child)) > 0) { + buffers_are_empty = false; + break; + } + } + return ((size > nodesize) + && + (!buffers_are_empty)); +} + +int toku_bnc_n_entries(NONLEAF_CHILDINFO bnc) { + return bnc->msg_buffer.num_entries(); +} + +// how much memory does this child buffer consume? +long toku_bnc_memory_size(NONLEAF_CHILDINFO bnc) { + return (sizeof(*bnc) + + bnc->msg_buffer.memory_footprint() + + bnc->fresh_message_tree.memory_size() + + bnc->stale_message_tree.memory_size() + + bnc->broadcast_list.memory_size()); +} + +// how much memory in this child buffer holds useful data? +// originally created solely for use by test program(s). +long toku_bnc_memory_used(NONLEAF_CHILDINFO bnc) { + return (sizeof(*bnc) + + bnc->msg_buffer.memory_size_in_use() + + bnc->fresh_message_tree.memory_size() + + bnc->stale_message_tree.memory_size() + + bnc->broadcast_list.memory_size()); +} + +// +// Garbage collection +// Message injection +// Message application +// + +// Used only by test programs: append a child node to a parent node +void toku_ft_nonleaf_append_child(FTNODE node, FTNODE child, const DBT *pivotkey) { + int childnum = node->n_children; + node->n_children++; + REALLOC_N(node->n_children, node->bp); + BP_BLOCKNUM(node,childnum) = child->blocknum; + BP_STATE(node,childnum) = PT_AVAIL; + BP_WORKDONE(node, childnum) = 0; + set_BNC(node, childnum, toku_create_empty_nl()); + if (pivotkey) { + invariant(childnum > 0); + node->pivotkeys.insert_at(pivotkey, childnum - 1); + } + node->dirty = 1; +} + +void +toku_ft_bn_apply_msg_once ( + BASEMENTNODE bn, + const ft_msg &msg, + uint32_t idx, + uint32_t le_keylen, + LEAFENTRY le, + txn_gc_info *gc_info, + uint64_t *workdone, + STAT64INFO stats_to_update + ) +// Effect: Apply msg to leafentry (msn is ignored) +// Calculate work done by message on leafentry and add it to caller's workdone counter. +// idx is the location where it goes +// le is old leafentry +{ + size_t newsize=0, oldsize=0, workdone_this_le=0; + LEAFENTRY new_le=0; + int64_t numbytes_delta = 0; // how many bytes of user data (not including overhead) were added or deleted from this row + int64_t numrows_delta = 0; // will be +1 or -1 or 0 (if row was added or deleted or not) + uint32_t key_storage_size = msg.kdbt()->size + sizeof(uint32_t); + if (le) { + oldsize = leafentry_memsize(le) + key_storage_size; + } + + // toku_le_apply_msg() may call bn_data::mempool_malloc_and_update_dmt() to allocate more space. + // That means le is guaranteed to not cause a sigsegv but it may point to a mempool that is + // no longer in use. We'll have to release the old mempool later. + toku_le_apply_msg( + msg, + le, + &bn->data_buffer, + idx, + le_keylen, + gc_info, + &new_le, + &numbytes_delta + ); + // at this point, we cannot trust cmd->u.id.key to be valid. + // The dmt may have realloced its mempool and freed the one containing key. 
+ + newsize = new_le ? (leafentry_memsize(new_le) + + key_storage_size) : 0; + if (le && new_le) { + workdone_this_le = (oldsize > newsize ? oldsize : newsize); // work done is max of le size before and after message application + + } else { // we did not just replace a row, so ... + if (le) { + // ... we just deleted a row ... + workdone_this_le = oldsize; + numrows_delta = -1; + } + if (new_le) { + // ... or we just added a row + workdone_this_le = newsize; + numrows_delta = 1; + } + } + if (workdone) { // test programs may call with NULL + *workdone += workdone_this_le; + } + + // now update stat64 statistics + bn->stat64_delta.numrows += numrows_delta; + bn->stat64_delta.numbytes += numbytes_delta; + // the only reason stats_to_update may be null is for tests + if (stats_to_update) { + stats_to_update->numrows += numrows_delta; + stats_to_update->numbytes += numbytes_delta; + } + +} + +static const uint32_t setval_tag = 0xee0ccb99; // this was gotten by doing "cat /dev/random|head -c4|od -x" to get a random number. We want to make sure that the user actually passes us the setval_extra_s that we passed in. +struct setval_extra_s { + uint32_t tag; + bool did_set_val; + int setval_r; // any error code that setval_fun wants to return goes here. + // need arguments for toku_ft_bn_apply_msg_once + BASEMENTNODE bn; + MSN msn; // captured from original message, not currently used + XIDS xids; + const DBT *key; + uint32_t idx; + uint32_t le_keylen; + LEAFENTRY le; + txn_gc_info *gc_info; + uint64_t * workdone; // set by toku_ft_bn_apply_msg_once() + STAT64INFO stats_to_update; +}; + +/* + * If new_val == NULL, we send a delete message instead of an insert. + * This happens here instead of in do_delete() for consistency. + * setval_fun() is called from handlerton, passing in svextra_v + * from setval_extra_s input arg to ft->update_fun(). + */ +static void setval_fun (const DBT *new_val, void *svextra_v) { + struct setval_extra_s *CAST_FROM_VOIDP(svextra, svextra_v); + paranoid_invariant(svextra->tag==setval_tag); + paranoid_invariant(!svextra->did_set_val); + svextra->did_set_val = true; + + { + // can't leave scope until toku_ft_bn_apply_msg_once if + // this is a delete + DBT val; + ft_msg msg(svextra->key, + new_val ? new_val : toku_init_dbt(&val), + new_val ? FT_INSERT : FT_DELETE_ANY, + svextra->msn, svextra->xids); + toku_ft_bn_apply_msg_once(svextra->bn, msg, + svextra->idx, svextra->le_keylen, svextra->le, + svextra->gc_info, + svextra->workdone, svextra->stats_to_update); + svextra->setval_r = 0; + } +} + +// We are already past the msn filter (in toku_ft_bn_apply_msg(), which calls do_update()), +// so capturing the msn in the setval_extra_s is not strictly required. The alternative +// would be to put a dummy msn in the messages created by setval_fun(), but preserving +// the original msn seems cleaner and it preserves accountability at a lower layer. 
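+// do_update() resolves the key and the current value for the row (from the message
+// for FT_UPDATE, from the leafentry for FT_UPDATE_BROADCAST_ALL) and calls the user's
+// update function, which hands its result back through setval_fun().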
+static int do_update(ft_update_func update_fun, const DESCRIPTOR_S *desc, BASEMENTNODE bn, const ft_msg &msg, uint32_t idx, + LEAFENTRY le, + void* keydata, + uint32_t keylen, + txn_gc_info *gc_info, + uint64_t * workdone, + STAT64INFO stats_to_update) { + LEAFENTRY le_for_update; + DBT key; + const DBT *keyp; + const DBT *update_function_extra; + DBT vdbt; + const DBT *vdbtp; + + // the location of data depends whether this is a regular or + // broadcast update + if (msg.type() == FT_UPDATE) { + // key is passed in with command (should be same as from le) + // update function extra is passed in with command + keyp = msg.kdbt(); + update_function_extra = msg.vdbt(); + } else { + invariant(msg.type() == FT_UPDATE_BROADCAST_ALL); + // key is not passed in with broadcast, it comes from le + // update function extra is passed in with command + paranoid_invariant(le); // for broadcast updates, we just hit all leafentries + // so this cannot be null + paranoid_invariant(keydata); + paranoid_invariant(keylen); + paranoid_invariant(msg.kdbt()->size == 0); + keyp = toku_fill_dbt(&key, keydata, keylen); + update_function_extra = msg.vdbt(); + } + toku_ft_status_note_update(msg.type() == FT_UPDATE_BROADCAST_ALL); + + if (le && !le_latest_is_del(le)) { + // if the latest val exists, use it, and we'll use the leafentry later + uint32_t vallen; + void *valp = le_latest_val_and_len(le, &vallen); + vdbtp = toku_fill_dbt(&vdbt, valp, vallen); + } else { + // otherwise, the val and leafentry are both going to be null + vdbtp = NULL; + } + le_for_update = le; + + struct setval_extra_s setval_extra = {setval_tag, false, 0, bn, msg.msn(), msg.xids(), + keyp, idx, keylen, le_for_update, gc_info, + workdone, stats_to_update}; + // call handlerton's ft->update_fun(), which passes setval_extra to setval_fun() + FAKE_DB(db, desc); + int r = update_fun( + &db, + keyp, + vdbtp, + update_function_extra, + setval_fun, &setval_extra + ); + + if (r == 0) { r = setval_extra.setval_r; } + return r; +} + +// Should be renamed as something like "apply_msg_to_basement()." +void +toku_ft_bn_apply_msg ( + const toku::comparator &cmp, + ft_update_func update_fun, + BASEMENTNODE bn, + const ft_msg &msg, + txn_gc_info *gc_info, + uint64_t *workdone, + STAT64INFO stats_to_update + ) +// Effect: +// Put a msg into a leaf. +// Calculate work done by message on leafnode and add it to caller's workdone counter. +// The leaf could end up "too big" or "too small". The caller must fix that up. 
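+//   The basement's seqinsert counter is captured and reset on entry; it is restored
+//   (and incremented) only when an insert lands within a small window of the right
+//   edge of the leaf, which is how sequential inserts are detected.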
+{ + LEAFENTRY storeddata; + void* key = NULL; + uint32_t keylen = 0; + + uint32_t num_klpairs; + int r; + struct toku_msg_leafval_heaviside_extra be(cmp, msg.kdbt()); + + unsigned int doing_seqinsert = bn->seqinsert; + bn->seqinsert = 0; + + switch (msg.type()) { + case FT_INSERT_NO_OVERWRITE: + case FT_INSERT: { + uint32_t idx; + if (doing_seqinsert) { + idx = bn->data_buffer.num_klpairs(); + DBT kdbt; + r = bn->data_buffer.fetch_key_and_len(idx-1, &kdbt.size, &kdbt.data); + if (r != 0) goto fz; + int c = toku_msg_leafval_heaviside(kdbt, be); + if (c >= 0) goto fz; + r = DB_NOTFOUND; + } else { + fz: + r = bn->data_buffer.find_zero<decltype(be), toku_msg_leafval_heaviside>( + be, + &storeddata, + &key, + &keylen, + &idx + ); + } + if (r==DB_NOTFOUND) { + storeddata = 0; + } else { + assert_zero(r); + } + toku_ft_bn_apply_msg_once(bn, msg, idx, keylen, storeddata, gc_info, workdone, stats_to_update); + + // if the insertion point is within a window of the right edge of + // the leaf then it is sequential + // window = min(32, number of leaf entries/16) + { + uint32_t s = bn->data_buffer.num_klpairs(); + uint32_t w = s / 16; + if (w == 0) w = 1; + if (w > 32) w = 32; + + // within the window? + if (s - idx <= w) + bn->seqinsert = doing_seqinsert + 1; + } + break; + } + case FT_DELETE_ANY: + case FT_ABORT_ANY: + case FT_COMMIT_ANY: { + uint32_t idx; + // Apply to all the matches + + r = bn->data_buffer.find_zero<decltype(be), toku_msg_leafval_heaviside>( + be, + &storeddata, + &key, + &keylen, + &idx + ); + if (r == DB_NOTFOUND) break; + assert_zero(r); + toku_ft_bn_apply_msg_once(bn, msg, idx, keylen, storeddata, gc_info, workdone, stats_to_update); + + break; + } + case FT_OPTIMIZE_FOR_UPGRADE: + // fall through so that optimize_for_upgrade performs rest of the optimize logic + case FT_COMMIT_BROADCAST_ALL: + case FT_OPTIMIZE: + // Apply to all leafentries + num_klpairs = bn->data_buffer.num_klpairs(); + for (uint32_t idx = 0; idx < num_klpairs; ) { + void* curr_keyp = NULL; + uint32_t curr_keylen = 0; + r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_keyp); + assert_zero(r); + int deleted = 0; + if (!le_is_clean(storeddata)) { //If already clean, nothing to do. + // message application code needs a key in order to determine how much + // work was done by this message. since this is a broadcast message, + // we have to create a new message whose key is the current le's key. + DBT curr_keydbt; + ft_msg curr_msg(toku_fill_dbt(&curr_keydbt, curr_keyp, curr_keylen), + msg.vdbt(), msg.type(), msg.msn(), msg.xids()); + toku_ft_bn_apply_msg_once(bn, curr_msg, idx, curr_keylen, storeddata, gc_info, workdone, stats_to_update); + // at this point, we cannot trust msg.kdbt to be valid. + uint32_t new_dmt_size = bn->data_buffer.num_klpairs(); + if (new_dmt_size != num_klpairs) { + paranoid_invariant(new_dmt_size + 1 == num_klpairs); + //Item was deleted. 
+ deleted = 1; + } + } + if (deleted) + num_klpairs--; + else + idx++; + } + paranoid_invariant(bn->data_buffer.num_klpairs() == num_klpairs); + + break; + case FT_COMMIT_BROADCAST_TXN: + case FT_ABORT_BROADCAST_TXN: + // Apply to all leafentries if txn is represented + num_klpairs = bn->data_buffer.num_klpairs(); + for (uint32_t idx = 0; idx < num_klpairs; ) { + void* curr_keyp = NULL; + uint32_t curr_keylen = 0; + r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_keyp); + assert_zero(r); + int deleted = 0; + if (le_has_xids(storeddata, msg.xids())) { + // message application code needs a key in order to determine how much + // work was done by this message. since this is a broadcast message, + // we have to create a new message whose key is the current le's key. + DBT curr_keydbt; + ft_msg curr_msg(toku_fill_dbt(&curr_keydbt, curr_keyp, curr_keylen), + msg.vdbt(), msg.type(), msg.msn(), msg.xids()); + toku_ft_bn_apply_msg_once(bn, curr_msg, idx, curr_keylen, storeddata, gc_info, workdone, stats_to_update); + uint32_t new_dmt_size = bn->data_buffer.num_klpairs(); + if (new_dmt_size != num_klpairs) { + paranoid_invariant(new_dmt_size + 1 == num_klpairs); + //Item was deleted. + deleted = 1; + } + } + if (deleted) + num_klpairs--; + else + idx++; + } + paranoid_invariant(bn->data_buffer.num_klpairs() == num_klpairs); + + break; + case FT_UPDATE: { + uint32_t idx; + r = bn->data_buffer.find_zero<decltype(be), toku_msg_leafval_heaviside>( + be, + &storeddata, + &key, + &keylen, + &idx + ); + if (r==DB_NOTFOUND) { + { + //Point to msg's copy of the key so we don't worry about le being freed + //TODO: 46 MAYBE Get rid of this when le_apply message memory is better handled + key = msg.kdbt()->data; + keylen = msg.kdbt()->size; + } + r = do_update(update_fun, cmp.get_descriptor(), bn, msg, idx, NULL, NULL, 0, gc_info, workdone, stats_to_update); + } else if (r==0) { + r = do_update(update_fun, cmp.get_descriptor(), bn, msg, idx, storeddata, key, keylen, gc_info, workdone, stats_to_update); + } // otherwise, a worse error, just return it + break; + } + case FT_UPDATE_BROADCAST_ALL: { + // apply to all leafentries. + uint32_t idx = 0; + uint32_t num_leafentries_before; + while (idx < (num_leafentries_before = bn->data_buffer.num_klpairs())) { + void* curr_key = nullptr; + uint32_t curr_keylen = 0; + r = bn->data_buffer.fetch_klpair(idx, &storeddata, &curr_keylen, &curr_key); + assert_zero(r); + + //TODO: 46 replace this with something better than cloning key + // TODO: (Zardosht) This may be unnecessary now, due to how the key + // is handled in the bndata. Investigate and determine + char clone_mem[curr_keylen]; // only lasts one loop, alloca would overflow (end of function) + memcpy((void*)clone_mem, curr_key, curr_keylen); + curr_key = (void*)clone_mem; + + // This is broken below. Have a compilation error checked + // in as a reminder + r = do_update(update_fun, cmp.get_descriptor(), bn, msg, idx, storeddata, curr_key, curr_keylen, gc_info, workdone, stats_to_update); + assert_zero(r); + + if (num_leafentries_before == bn->data_buffer.num_klpairs()) { + // we didn't delete something, so increment the index. 
+ idx++; + } + } + break; + } + case FT_NONE: break; // don't do anything + } + + return; +} + +static inline int +key_msn_cmp(const DBT *a, const DBT *b, const MSN amsn, const MSN bmsn, const toku::comparator &cmp) { + int r = cmp(a, b); + if (r == 0) { + if (amsn.msn > bmsn.msn) { + r = +1; + } else if (amsn.msn < bmsn.msn) { + r = -1; + } else { + r = 0; + } + } + return r; +} + +int toku_msg_buffer_key_msn_heaviside(const int32_t &offset, const struct toku_msg_buffer_key_msn_heaviside_extra &extra) { + MSN query_msn; + DBT query_key; + extra.msg_buffer->get_message_key_msn(offset, &query_key, &query_msn); + return key_msn_cmp(&query_key, extra.key, query_msn, extra.msn, extra.cmp); +} + +int toku_msg_buffer_key_msn_cmp(const struct toku_msg_buffer_key_msn_cmp_extra &extra, const int32_t &ao, const int32_t &bo) { + MSN amsn, bmsn; + DBT akey, bkey; + extra.msg_buffer->get_message_key_msn(ao, &akey, &amsn); + extra.msg_buffer->get_message_key_msn(bo, &bkey, &bmsn); + return key_msn_cmp(&akey, &bkey, amsn, bmsn, extra.cmp); +} + +// Effect: Enqueue the message represented by the parameters into the +// bnc's buffer, and put it in either the fresh or stale message tree, +// or the broadcast list. +static void bnc_insert_msg(NONLEAF_CHILDINFO bnc, const ft_msg &msg, bool is_fresh, const toku::comparator &cmp) { + int r = 0; + int32_t offset; + bnc->msg_buffer.enqueue(msg, is_fresh, &offset); + enum ft_msg_type type = msg.type(); + if (ft_msg_type_applies_once(type)) { + DBT key; + toku_fill_dbt(&key, msg.kdbt()->data, msg.kdbt()->size); + struct toku_msg_buffer_key_msn_heaviside_extra extra(cmp, &bnc->msg_buffer, &key, msg.msn()); + if (is_fresh) { + r = bnc->fresh_message_tree.insert<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(offset, extra, nullptr); + assert_zero(r); + } else { + r = bnc->stale_message_tree.insert<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(offset, extra, nullptr); + assert_zero(r); + } + } else { + invariant(ft_msg_type_applies_all(type) || ft_msg_type_does_nothing(type)); + const uint32_t idx = bnc->broadcast_list.size(); + r = bnc->broadcast_list.insert_at(offset, idx); + assert_zero(r); + } +} + +// This is only exported for tests. +void toku_bnc_insert_msg(NONLEAF_CHILDINFO bnc, const void *key, uint32_t keylen, const void *data, uint32_t datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const toku::comparator &cmp) +{ + DBT k, v; + ft_msg msg(toku_fill_dbt(&k, key, keylen), toku_fill_dbt(&v, data, datalen), type, msn, xids); + bnc_insert_msg(bnc, msg, is_fresh, cmp); +} + +// append a msg to a nonleaf node's child buffer +static void ft_append_msg_to_child_buffer(const toku::comparator &cmp, FTNODE node, + int childnum, const ft_msg &msg, bool is_fresh) { + paranoid_invariant(BP_STATE(node,childnum) == PT_AVAIL); + bnc_insert_msg(BNC(node, childnum), msg, is_fresh, cmp); + node->dirty = 1; +} + +// This is only exported for tests. 
+void toku_ft_append_to_child_buffer(const toku::comparator &cmp, FTNODE node, int childnum, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const DBT *key, const DBT *val) { + ft_msg msg(key, val, type, msn, xids); + ft_append_msg_to_child_buffer(cmp, node, childnum, msg, is_fresh); +} + +static void ft_nonleaf_msg_once_to_child(const toku::comparator &cmp, FTNODE node, int target_childnum, const ft_msg &msg, bool is_fresh, size_t flow_deltas[]) +// Previously we had passive aggressive promotion, but that causes a lot of I/O a the checkpoint. So now we are just putting it in the buffer here. +// Also we don't worry about the node getting overfull here. It's the caller's problem. +{ + unsigned int childnum = (target_childnum >= 0 + ? target_childnum + : toku_ftnode_which_child(node, msg.kdbt(), cmp)); + ft_append_msg_to_child_buffer(cmp, node, childnum, msg, is_fresh); + NONLEAF_CHILDINFO bnc = BNC(node, childnum); + bnc->flow[0] += flow_deltas[0]; + bnc->flow[1] += flow_deltas[1]; +} + +// TODO: Remove me, I'm boring. +static int ft_compare_pivot(const toku::comparator &cmp, const DBT *key, const DBT *pivot) { + return cmp(key, pivot); +} + +/* Find the leftmost child that may contain the key. + * If the key exists it will be in the child whose number + * is the return value of this function. + */ +int toku_ftnode_which_child(FTNODE node, const DBT *k, const toku::comparator &cmp) { + // a funny case of no pivots + if (node->n_children <= 1) return 0; + + DBT pivot; + + // check the last key to optimize seq insertions + int n = node->n_children-1; + int c = ft_compare_pivot(cmp, k, node->pivotkeys.fill_pivot(n - 1, &pivot)); + if (c > 0) return n; + + // binary search the pivots + int lo = 0; + int hi = n-1; // skip the last one, we checked it above + int mi; + while (lo < hi) { + mi = (lo + hi) / 2; + c = ft_compare_pivot(cmp, k, node->pivotkeys.fill_pivot(mi, &pivot)); + if (c > 0) { + lo = mi+1; + continue; + } + if (c < 0) { + hi = mi; + continue; + } + return mi; + } + return lo; +} + +// Used for HOT. +int toku_ftnode_hot_next_child(FTNODE node, const DBT *k, const toku::comparator &cmp) { + DBT pivot; + int low = 0; + int hi = node->n_children - 1; + int mi; + while (low < hi) { + mi = (low + hi) / 2; + int r = ft_compare_pivot(cmp, k, node->pivotkeys.fill_pivot(mi, &pivot)); + if (r > 0) { + low = mi + 1; + } else if (r < 0) { + hi = mi; + } else { + // if they were exactly equal, then we want the sub-tree under + // the next pivot. + return mi + 1; + } + } + invariant(low == hi); + return low; +} + +void toku_ftnode_save_ct_pair(CACHEKEY UU(key), void *value_data, PAIR p) { + FTNODE CAST_FROM_VOIDP(node, value_data); + node->ct_pair = p; +} + +static void +ft_nonleaf_msg_all(const toku::comparator &cmp, FTNODE node, const ft_msg &msg, bool is_fresh, size_t flow_deltas[]) +// Effect: Put the message into a nonleaf node. We put it into all children, possibly causing the children to become reactive. +// We don't do the splitting and merging. That's up to the caller after doing all the puts it wants to do. +// The re_array[i] gets set to the reactivity of any modified child i. (And there may be several such children.) +{ + for (int i = 0; i < node->n_children; i++) { + ft_nonleaf_msg_once_to_child(cmp, node, i, msg, is_fresh, flow_deltas); + } +} + +static void +ft_nonleaf_put_msg(const toku::comparator &cmp, FTNODE node, int target_childnum, const ft_msg &msg, bool is_fresh, size_t flow_deltas[]) +// Effect: Put the message into a nonleaf node. 
We may put it into a child, possibly causing the child to become reactive. +// We don't do the splitting and merging. That's up to the caller after doing all the puts it wants to do. +// The re_array[i] gets set to the reactivity of any modified child i. (And there may be several such children.) +// +{ + + // + // see comments in toku_ft_leaf_apply_msg + // to understand why we handle setting + // node->max_msn_applied_to_node_on_disk here, + // and don't do it in toku_ftnode_put_msg + // + MSN msg_msn = msg.msn(); + invariant(msg_msn.msn > node->max_msn_applied_to_node_on_disk.msn); + node->max_msn_applied_to_node_on_disk = msg_msn; + + if (ft_msg_type_applies_once(msg.type())) { + ft_nonleaf_msg_once_to_child(cmp, node, target_childnum, msg, is_fresh, flow_deltas); + } else if (ft_msg_type_applies_all(msg.type())) { + ft_nonleaf_msg_all(cmp, node, msg, is_fresh, flow_deltas); + } else { + paranoid_invariant(ft_msg_type_does_nothing(msg.type())); + } +} + +// Garbage collect one leaf entry. +static void +ft_basement_node_gc_once(BASEMENTNODE bn, + uint32_t index, + void* keyp, + uint32_t keylen, + LEAFENTRY leaf_entry, + txn_gc_info *gc_info, + STAT64INFO_S * delta) +{ + paranoid_invariant(leaf_entry); + + // Don't run garbage collection on non-mvcc leaf entries. + if (leaf_entry->type != LE_MVCC) { + goto exit; + } + + // Don't run garbage collection if this leafentry decides it's not worth it. + if (!toku_le_worth_running_garbage_collection(leaf_entry, gc_info)) { + goto exit; + } + + LEAFENTRY new_leaf_entry; + new_leaf_entry = NULL; + + // The mempool doesn't free itself. When it allocates new memory, + // this pointer will be set to the older memory that must now be + // freed. + void * maybe_free; + maybe_free = NULL; + + // These will represent the number of bytes and rows changed as + // part of the garbage collection. + int64_t numbytes_delta; + int64_t numrows_delta; + toku_le_garbage_collect(leaf_entry, + &bn->data_buffer, + index, + keyp, + keylen, + gc_info, + &new_leaf_entry, + &numbytes_delta); + + numrows_delta = 0; + if (new_leaf_entry) { + numrows_delta = 0; + } else { + numrows_delta = -1; + } + + // If we created a new mempool buffer we must free the + // old/original buffer. + if (maybe_free) { + toku_free(maybe_free); + } + + // Update stats. + bn->stat64_delta.numrows += numrows_delta; + bn->stat64_delta.numbytes += numbytes_delta; + delta->numrows += numrows_delta; + delta->numbytes += numbytes_delta; + +exit: + return; +} + +// Garbage collect all leaf entries for a given basement node. +static void +basement_node_gc_all_les(BASEMENTNODE bn, + txn_gc_info *gc_info, + STAT64INFO_S * delta) +{ + int r = 0; + uint32_t index = 0; + uint32_t num_leafentries_before; + while (index < (num_leafentries_before = bn->data_buffer.num_klpairs())) { + void* keyp = NULL; + uint32_t keylen = 0; + LEAFENTRY leaf_entry; + r = bn->data_buffer.fetch_klpair(index, &leaf_entry, &keylen, &keyp); + assert_zero(r); + ft_basement_node_gc_once( + bn, + index, + keyp, + keylen, + leaf_entry, + gc_info, + delta + ); + // Check if the leaf entry was deleted or not. + if (num_leafentries_before == bn->data_buffer.num_klpairs()) { + ++index; + } + } +} + +// Garbage collect all leaf entires in all basement nodes. +static void +ft_leaf_gc_all_les(FT ft, FTNODE node, txn_gc_info *gc_info) +{ + toku_ftnode_assert_fully_in_memory(node); + paranoid_invariant_zero(node->height); + // Loop through each leaf entry, garbage collecting as we go. 
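+    // Each basement node accumulates its own STAT64INFO_S delta, which is then
+    // folded into the tree's in-memory stats.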
+    for (int i = 0; i < node->n_children; ++i) {
+        // Perform the garbage collection.
+        BASEMENTNODE bn = BLB(node, i);
+        STAT64INFO_S delta;
+        delta.numrows = 0;
+        delta.numbytes = 0;
+        basement_node_gc_all_les(bn, gc_info, &delta);
+        toku_ft_update_stats(&ft->in_memory_stats, delta);
+    }
+}
+
+void toku_ftnode_leaf_run_gc(FT ft, FTNODE node) {
+    TOKULOGGER logger = toku_cachefile_logger(ft->cf);
+    if (logger) {
+        TXN_MANAGER txn_manager = toku_logger_get_txn_manager(logger);
+        txn_manager_state txn_state_for_gc(txn_manager);
+        txn_state_for_gc.init();
+        TXNID oldest_referenced_xid_for_simple_gc = toku_txn_manager_get_oldest_referenced_xid_estimate(txn_manager);
+
+        // Perform full garbage collection.
+        //
+        // - txn_state_for_gc
+        //     a fresh snapshot of the transaction system.
+        // - oldest_referenced_xid_for_simple_gc
+        //     the oldest xid in any live list as of right now - suitable for simple gc
+        // - node->oldest_referenced_xid_known
+        //     the last known oldest referenced xid for this node and any unapplied messages.
+        //     it is a lower bound on the actual oldest referenced xid - but because there
+        //     may be abort messages above us, we need to be careful to only use this value
+        //     for implicit promotion (as opposed to the oldest referenced xid for simple gc)
+        //
+        // The node has its own oldest referenced xid because it must be careful not to implicitly promote
+        // provisional entries for transactions that are no longer live, but may have abort messages
+        // somewhere above us in the tree.
+        txn_gc_info gc_info(&txn_state_for_gc,
+                            oldest_referenced_xid_for_simple_gc,
+                            node->oldest_referenced_xid_known,
+                            true);
+        ft_leaf_gc_all_les(ft, node, &gc_info);
+    }
+}
+
+void
+toku_ftnode_put_msg (
+    const toku::comparator &cmp,
+    ft_update_func update_fun,
+    FTNODE node,
+    int target_childnum,
+    const ft_msg &msg,
+    bool is_fresh,
+    txn_gc_info *gc_info,
+    size_t flow_deltas[],
+    STAT64INFO stats_to_update
+    )
+// Effect: Push message into the subtree rooted at NODE.
+//   If NODE is a leaf, then
+//   put message into leaf, applying it to the leafentries
+//   If NODE is a nonleaf, then push the message into the message buffer(s) of the relevant child(ren).
+//   The node may become overfull. That's not our problem.
+{
+    toku_ftnode_assert_fully_in_memory(node);
+    //
+    // see comments in toku_ft_leaf_apply_msg
+    // to understand why we don't handle setting
+    // node->max_msn_applied_to_node_on_disk here,
+    // and instead defer to these functions
+    //
+    if (node->height==0) {
+        toku_ft_leaf_apply_msg(cmp, update_fun, node, target_childnum, msg, gc_info, nullptr, stats_to_update);
+    } else {
+        ft_nonleaf_put_msg(cmp, node, target_childnum, msg, is_fresh, flow_deltas);
+    }
+}
+
+// Effect: applies the message to the leaf if the appropriate basement node is in memory.
+//           This function is called during message injection and/or flushing, so the entire
+//           node MUST be in memory.
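+//           workdone may be nullptr (toku_ftnode_put_msg passes nullptr).  Messages whose
+//           msn is not newer than a basement's max_msn_applied are counted as discards
+//           rather than being re-applied.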
+void toku_ft_leaf_apply_msg( + const toku::comparator &cmp, + ft_update_func update_fun, + FTNODE node, + int target_childnum, // which child to inject to, or -1 if unknown + const ft_msg &msg, + txn_gc_info *gc_info, + uint64_t *workdone, + STAT64INFO stats_to_update + ) +{ + VERIFY_NODE(t, node); + toku_ftnode_assert_fully_in_memory(node); + + // + // Because toku_ft_leaf_apply_msg is called with the intent of permanently + // applying a message to a leaf node (meaning the message is permanently applied + // and will be purged from the system after this call, as opposed to + // toku_apply_ancestors_messages_to_node, which applies a message + // for a query, but the message may still reside in the system and + // be reapplied later), we mark the node as dirty and + // take the opportunity to update node->max_msn_applied_to_node_on_disk. + // + node->dirty = 1; + + // + // we cannot blindly update node->max_msn_applied_to_node_on_disk, + // we must check to see if the msn is greater that the one already stored, + // because the message may have already been applied earlier (via + // toku_apply_ancestors_messages_to_node) to answer a query + // + // This is why we handle node->max_msn_applied_to_node_on_disk both here + // and in ft_nonleaf_put_msg, as opposed to in one location, toku_ftnode_put_msg. + // + MSN msg_msn = msg.msn(); + if (msg_msn.msn > node->max_msn_applied_to_node_on_disk.msn) { + node->max_msn_applied_to_node_on_disk = msg_msn; + } + + if (ft_msg_type_applies_once(msg.type())) { + unsigned int childnum = (target_childnum >= 0 + ? target_childnum + : toku_ftnode_which_child(node, msg.kdbt(), cmp)); + BASEMENTNODE bn = BLB(node, childnum); + if (msg.msn().msn > bn->max_msn_applied.msn) { + bn->max_msn_applied = msg.msn(); + toku_ft_bn_apply_msg(cmp, + update_fun, + bn, + msg, + gc_info, + workdone, + stats_to_update); + } else { + toku_ft_status_note_msn_discard(); + } + } + else if (ft_msg_type_applies_all(msg.type())) { + for (int childnum=0; childnum<node->n_children; childnum++) { + if (msg.msn().msn > BLB(node, childnum)->max_msn_applied.msn) { + BLB(node, childnum)->max_msn_applied = msg.msn(); + toku_ft_bn_apply_msg(cmp, + update_fun, + BLB(node, childnum), + msg, + gc_info, + workdone, + stats_to_update); + } else { + toku_ft_status_note_msn_discard(); + } + } + } + else if (!ft_msg_type_does_nothing(msg.type())) { + invariant(ft_msg_type_does_nothing(msg.type())); + } + VERIFY_NODE(t, node); +} + diff --git a/storage/tokudb/ft-index/ft/node.h b/storage/tokudb/ft-index/ft/node.h new file mode 100644 index 00000000000..7b1b4023d84 --- /dev/null +++ b/storage/tokudb/ft-index/ft/node.h @@ -0,0 +1,588 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). 
+ + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#pragma once + +#include "ft/bndata.h" +#include "ft/comparator.h" +#include "ft/ft.h" +#include "ft/msg_buffer.h" + +/* Pivot keys. + * Child 0's keys are <= pivotkeys[0]. + * Child 1's keys are <= pivotkeys[1]. + * Child 1's keys are > pivotkeys[0]. 
+ * etc + */ +class ftnode_pivot_keys { +public: + // effect: create an empty set of pivot keys + void create_empty(); + + // effect: create pivot keys by copying the given DBT array + void create_from_dbts(const DBT *keys, int n); + + // effect: create pivot keys as a clone of an existing set of pivotkeys + void create_from_pivot_keys(const ftnode_pivot_keys &pivotkeys); + + void destroy(); + + // effect: deserialize pivot keys previously serialized by serialize_to_wbuf() + void deserialize_from_rbuf(struct rbuf *rb, int n); + + // returns: unowned DBT representing the i'th pivot key + DBT get_pivot(int i) const; + + // effect: fills a DBT with the i'th pivot key + // returns: the given dbt + DBT *fill_pivot(int i, DBT *dbt) const; + + // effect: insert a pivot into the i'th position, shifting others to the right + void insert_at(const DBT *key, int i); + + // effect: append pivotkeys to the end of our own pivot keys + void append(const ftnode_pivot_keys &pivotkeys); + + // effect: replace the pivot at the i'th position + void replace_at(const DBT *key, int i); + + // effect: removes the i'th pivot key, shifting others to the left + void delete_at(int i); + + // effect: split the pivot keys, removing all pivots at position greater + // than or equal to `i' and storing them in *other + // requires: *other is empty (size == 0) + void split_at(int i, ftnode_pivot_keys *other); + + // effect: serialize pivot keys to a wbuf + // requires: wbuf has at least ftnode_pivot_keys::total_size() bytes available + void serialize_to_wbuf(struct wbuf *wb) const; + + int num_pivots() const; + + // return: the total size of this data structure + size_t total_size() const; + + // return: the sum of the keys sizes of each pivot (for serialization) + size_t serialized_size() const; + +private: + inline size_t _align4(size_t x) const { + return roundup_to_multiple(4, x); + } + + // effect: create pivot keys, in fixed key format, by copying the given key array + void _create_from_fixed_keys(const char *fixedkeys, size_t fixed_keylen, int n); + + char *_fixed_key(int i) const { + return &_fixed_keys[i * _fixed_keylen_aligned]; + } + + bool _fixed_format() const { + return _fixed_keys != nullptr; + } + + void sanity_check() const; + + void _insert_at_dbt(const DBT *key, int i); + void _append_dbt(const ftnode_pivot_keys &pivotkeys); + void _replace_at_dbt(const DBT *key, int i); + void _delete_at_dbt(int i); + void _split_at_dbt(int i, ftnode_pivot_keys *other); + + void _insert_at_fixed(const DBT *key, int i); + void _append_fixed(const ftnode_pivot_keys &pivotkeys); + void _replace_at_fixed(const DBT *key, int i); + void _delete_at_fixed(int i); + void _split_at_fixed(int i, ftnode_pivot_keys *other); + + // adds/destroys keys at a certain index (in dbt format), + // maintaining _total_size, but not _num_pivots + void _add_key_dbt(const DBT *key, int i); + void _destroy_key_dbt(int i); + + // conversions to and from packed key array format + void _convert_to_dbt_format(); + void _convert_to_fixed_format(); + + // If every key is _fixed_keylen long, then _fixed_key is a + // packed array of keys.. + char *_fixed_keys; + // The actual length of the fixed key + size_t _fixed_keylen; + // The aligned length that we use for fixed key storage + size_t _fixed_keylen_aligned; + + // ..otherwise _fixed_keys is null and we store an array of dbts, + // each representing a key. this is simpler but less cache-efficient. 
+ DBT *_dbt_keys; + + int _num_pivots; + size_t _total_size; +}; + +// TODO: class me up +struct ftnode { + MSN max_msn_applied_to_node_on_disk; // max_msn_applied that will be written to disk + unsigned int flags; + BLOCKNUM blocknum; // Which block number is this node? + int layout_version; // What version of the data structure? + int layout_version_original; // different (<) from layout_version if upgraded from a previous version (useful for debugging) + int layout_version_read_from_disk; // transient, not serialized to disk, (useful for debugging) + uint32_t build_id; // build_id (svn rev number) of software that wrote this node to disk + int height; /* height is always >= 0. 0 for leaf, >0 for nonleaf. */ + int dirty; + uint32_t fullhash; + + // for internal nodes, if n_children==fanout+1 then the tree needs to be rebalanced. + // for leaf nodes, represents number of basement nodes + int n_children; + ftnode_pivot_keys pivotkeys; + + // What's the oldest referenced xid that this node knows about? The real oldest + // referenced xid might be younger, but this is our best estimate. We use it + // as a heuristic to transition provisional mvcc entries from provisional to + // committed (from implicity committed to really committed). + // + // A better heuristic would be the oldest live txnid, but we use this since it + // still works well most of the time, and its readily available on the inject + // code path. + TXNID oldest_referenced_xid_known; + + // array of size n_children, consisting of ftnode partitions + // each one is associated with a child + // for internal nodes, the ith partition corresponds to the ith message buffer + // for leaf nodes, the ith partition corresponds to the ith basement node + struct ftnode_partition *bp; + struct ctpair *ct_pair; +}; +typedef struct ftnode *FTNODE; + +// data of an available partition of a leaf ftnode +struct ftnode_leaf_basement_node { + bn_data data_buffer; + unsigned int seqinsert; // number of sequential inserts to this leaf + MSN max_msn_applied; // max message sequence number applied + bool stale_ancestor_messages_applied; + STAT64INFO_S stat64_delta; // change in stat64 counters since basement was last written to disk +}; +typedef struct ftnode_leaf_basement_node *BASEMENTNODE; + +enum pt_state { // declare this to be packed so that when used below it will only take 1 byte. + PT_INVALID = 0, + PT_ON_DISK = 1, + PT_COMPRESSED = 2, + PT_AVAIL = 3}; + +enum ftnode_child_tag { + BCT_INVALID = 0, + BCT_NULL, + BCT_SUBBLOCK, + BCT_LEAF, + BCT_NONLEAF +}; + +typedef toku::omt<int32_t> off_omt_t; +typedef toku::omt<int32_t, int32_t, true> marked_off_omt_t; + +// data of an available partition of a nonleaf ftnode +struct ftnode_nonleaf_childinfo { + message_buffer msg_buffer; + off_omt_t broadcast_list; + marked_off_omt_t fresh_message_tree; + off_omt_t stale_message_tree; + uint64_t flow[2]; // current and last checkpoint +}; +typedef struct ftnode_nonleaf_childinfo *NONLEAF_CHILDINFO; + +typedef struct ftnode_child_pointer { + union { + struct sub_block *subblock; + struct ftnode_nonleaf_childinfo *nonleaf; + struct ftnode_leaf_basement_node *leaf; + } u; + enum ftnode_child_tag tag; +} FTNODE_CHILD_POINTER; + +struct ftnode_disk_data { + // + // stores the offset to the beginning of the partition on disk from the ftnode, and the length, needed to read a partition off of disk + // the value is only meaningful if the node is clean. 
If the node is dirty, then the value is meaningless + // The START is the distance from the end of the compressed node_info data, to the beginning of the compressed partition + // The SIZE is the size of the compressed partition. + // Rationale: We cannot store the size from the beginning of the node since we don't know how big the header will be. + // However, later when we are doing aligned writes, we won't be able to store the size from the end since we want things to align. + uint32_t start; + uint32_t size; +}; +typedef struct ftnode_disk_data *FTNODE_DISK_DATA; + +// TODO: Turn these into functions instead of macros +#define BP_START(node_dd,i) ((node_dd)[i].start) +#define BP_SIZE(node_dd,i) ((node_dd)[i].size) + +// a ftnode partition, associated with a child of a node +struct ftnode_partition { + // the following three variables are used for nonleaf nodes + // for leaf nodes, they are meaningless + BLOCKNUM blocknum; // blocknum of child + + // How many bytes worth of work was performed by messages in each buffer. + uint64_t workdone; + + // + // pointer to the partition. Depending on the state, they may be different things + // if state == PT_INVALID, then the node was just initialized and ptr == NULL + // if state == PT_ON_DISK, then ptr == NULL + // if state == PT_COMPRESSED, then ptr points to a struct sub_block* + // if state == PT_AVAIL, then ptr is: + // a struct ftnode_nonleaf_childinfo for internal nodes, + // a struct ftnode_leaf_basement_node for leaf nodes + // + struct ftnode_child_pointer ptr; + // + // at any time, the partitions may be in one of the following three states (stored in pt_state): + // PT_INVALID - means that the partition was just initialized + // PT_ON_DISK - means that the partition is not in memory and needs to be read from disk. To use, must read off disk and decompress + // PT_COMPRESSED - means that the partition is compressed in memory. To use, must decompress + // PT_AVAIL - means the partition is decompressed and in memory + // + enum pt_state state; // make this an enum to make debugging easier. + + // clock count used to for pe_callback to determine if a node should be evicted or not + // for now, saturating the count at 1 + uint8_t clock_count; +}; + +// +// TODO: Fix all these names +// Organize declarations +// Fix widespread parameter ordering inconsistencies +// +BASEMENTNODE toku_create_empty_bn(void); +BASEMENTNODE toku_create_empty_bn_no_buffer(void); // create a basement node with a null buffer. 
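The comment block on struct ftnode_partition above ties each pt_state value to what the child pointer's union tag should hold. The sketch below makes that mapping explicit as a small consistency check; it is illustrative only (partition_state_matches_tag is a hypothetical helper, not part of this commit) and assumes the declarations from ft/node.h shown above are in scope.

    // Hypothetical helper (not in the commit): returns true when a partition's
    // pt_state agrees with the tag stored in its child pointer, following the
    // state descriptions documented on struct ftnode_partition above.
    static bool partition_state_matches_tag(const struct ftnode_partition *part) {
        switch (part->state) {
        case PT_INVALID:
        case PT_ON_DISK:
            // nothing usable in memory: the pointer should not claim a payload
            return part->ptr.tag == BCT_NULL || part->ptr.tag == BCT_INVALID;
        case PT_COMPRESSED:
            // compressed in memory: payload is a struct sub_block
            return part->ptr.tag == BCT_SUBBLOCK;
        case PT_AVAIL:
            // decompressed: nonleaf childinfo for internal nodes,
            // basement node for leaf nodes
            return part->ptr.tag == BCT_NONLEAF || part->ptr.tag == BCT_LEAF;
        }
        return false;
    }

Callers normally check BP_STATE(node, i) first and only then interpret the pointer through BNC()/BLB()/BSB(), which is exactly what the inline accessors further down enforce with paranoid_invariant on the tag.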
+NONLEAF_CHILDINFO toku_clone_nl(NONLEAF_CHILDINFO orig_childinfo); +BASEMENTNODE toku_clone_bn(BASEMENTNODE orig_bn); +NONLEAF_CHILDINFO toku_create_empty_nl(void); +void destroy_basement_node (BASEMENTNODE bn); +void destroy_nonleaf_childinfo (NONLEAF_CHILDINFO nl); +void toku_destroy_ftnode_internals(FTNODE node); +void toku_ftnode_free (FTNODE *node); +bool toku_ftnode_fully_in_memory(FTNODE node); +void toku_ftnode_assert_fully_in_memory(FTNODE node); +void toku_evict_bn_from_memory(FTNODE node, int childnum, FT ft); +BASEMENTNODE toku_detach_bn(FTNODE node, int childnum); +void toku_ftnode_update_disk_stats(FTNODE ftnode, FT ft, bool for_checkpoint); +void toku_ftnode_clone_partitions(FTNODE node, FTNODE cloned_node); + +void toku_initialize_empty_ftnode(FTNODE node, BLOCKNUM blocknum, int height, int num_children, + int layout_version, unsigned int flags); + +int toku_ftnode_which_child(FTNODE node, const DBT *k, const toku::comparator &cmp); +void toku_ftnode_save_ct_pair(CACHEKEY key, void *value_data, PAIR p); + +// +// TODO: put the heaviside functions into their respective 'struct .*extra;' namespaces +// +struct toku_msg_buffer_key_msn_heaviside_extra { + const toku::comparator &cmp; + message_buffer *msg_buffer; + const DBT *key; + MSN msn; + toku_msg_buffer_key_msn_heaviside_extra(const toku::comparator &c, message_buffer *mb, const DBT *k, MSN m) : + cmp(c), msg_buffer(mb), key(k), msn(m) { + } +}; +int toku_msg_buffer_key_msn_heaviside(const int32_t &v, const struct toku_msg_buffer_key_msn_heaviside_extra &extra); + +struct toku_msg_buffer_key_msn_cmp_extra { + const toku::comparator &cmp; + message_buffer *msg_buffer; + toku_msg_buffer_key_msn_cmp_extra(const toku::comparator &c, message_buffer *mb) : + cmp(c), msg_buffer(mb) { + } +}; +int toku_msg_buffer_key_msn_cmp(const struct toku_msg_buffer_key_msn_cmp_extra &extrap, const int &a, const int &b); + +struct toku_msg_leafval_heaviside_extra { + const toku::comparator &cmp; + DBT const *const key; + toku_msg_leafval_heaviside_extra(const toku::comparator &c, const DBT *k) : + cmp(c), key(k) { + } +}; +int toku_msg_leafval_heaviside(DBT const &kdbt, const struct toku_msg_leafval_heaviside_extra &be); + +unsigned int toku_bnc_nbytesinbuf(NONLEAF_CHILDINFO bnc); +int toku_bnc_n_entries(NONLEAF_CHILDINFO bnc); +long toku_bnc_memory_size(NONLEAF_CHILDINFO bnc); +long toku_bnc_memory_used(NONLEAF_CHILDINFO bnc); +void toku_bnc_insert_msg(NONLEAF_CHILDINFO bnc, const void *key, uint32_t keylen, const void *data, uint32_t datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const toku::comparator &cmp); +void toku_bnc_empty(NONLEAF_CHILDINFO bnc); +void toku_bnc_flush_to_child(FT ft, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID parent_oldest_referenced_xid_known); +bool toku_bnc_should_promote(FT ft, NONLEAF_CHILDINFO bnc) __attribute__((const, nonnull)); + +bool toku_ftnode_nonleaf_is_gorged(FTNODE node, uint32_t nodesize); +uint32_t toku_ftnode_leaf_num_entries(FTNODE node); +void toku_ftnode_leaf_rebalance(FTNODE node, unsigned int basementnodesize); + +void toku_ftnode_leaf_run_gc(FT ft, FTNODE node); + +enum reactivity { + RE_STABLE, + RE_FUSIBLE, + RE_FISSIBLE +}; + +enum reactivity toku_ftnode_get_reactivity(FT ft, FTNODE node); +enum reactivity toku_ftnode_get_nonleaf_reactivity(FTNODE node, unsigned int fanout); +enum reactivity toku_ftnode_get_leaf_reactivity(FTNODE node, uint32_t nodesize); + +/** + * Finds the next child for HOT to flush to, given that everything up to + * and including k has been 
flattened. + * + * If k falls between pivots in node, then we return the childnum where k + * lies. + * + * If k is equal to some pivot, then we return the next (to the right) + * childnum. + */ +int toku_ftnode_hot_next_child(FTNODE node, const DBT *k, const toku::comparator &cmp); + +void toku_ftnode_put_msg(const toku::comparator &cmp, ft_update_func update_fun, + FTNODE node, int target_childnum, + const ft_msg &msg, bool is_fresh, txn_gc_info *gc_info, + size_t flow_deltas[], STAT64INFO stats_to_update); + +void toku_ft_bn_apply_msg_once(BASEMENTNODE bn, const ft_msg &msg, uint32_t idx, + uint32_t le_keylen, LEAFENTRY le, txn_gc_info *gc_info, + uint64_t *workdonep, STAT64INFO stats_to_update); + +void toku_ft_bn_apply_msg(const toku::comparator &cmp, ft_update_func update_fun, + BASEMENTNODE bn, const ft_msg &msg, txn_gc_info *gc_info, + uint64_t *workdone, STAT64INFO stats_to_update); + +void toku_ft_leaf_apply_msg(const toku::comparator &cmp, ft_update_func update_fun, + FTNODE node, int target_childnum, + const ft_msg &msg, txn_gc_info *gc_info, + uint64_t *workdone, STAT64INFO stats_to_update); + +// +// Message management for orthopush +// + +struct ancestors { + // This is the root node if next is NULL (since the root has no ancestors) + FTNODE node; + // Which buffer holds messages destined to the node whose ancestors this list represents. + int childnum; + struct ancestors *next; +}; +typedef struct ancestors *ANCESTORS; + +void toku_ft_bnc_move_messages_to_stale(FT ft, NONLEAF_CHILDINFO bnc); + +void toku_move_ftnode_messages_to_stale(FT ft, FTNODE node); + +// TODO: Should ft_handle just be FT? +class pivot_bounds; +void toku_apply_ancestors_messages_to_node(FT_HANDLE t, FTNODE node, ANCESTORS ancestors, + const pivot_bounds &bounds, + bool *msgs_applied, int child_to_read); + +bool toku_ft_leaf_needs_ancestors_messages(FT ft, FTNODE node, ANCESTORS ancestors, + const pivot_bounds &bounds, + MSN *const max_msn_in_path, int child_to_read); + +void toku_ft_bn_update_max_msn(FTNODE node, MSN max_msn_applied, int child_to_read); + +struct ft_search; +int toku_ft_search_which_child(const toku::comparator &cmp, FTNODE node, ft_search *search); + +// +// internal node inline functions +// TODO: Turn the macros into real functions +// + +static inline void set_BNULL(FTNODE node, int i) { + paranoid_invariant(i >= 0); + paranoid_invariant(i < node->n_children); + node->bp[i].ptr.tag = BCT_NULL; +} + +static inline bool is_BNULL (FTNODE node, int i) { + paranoid_invariant(i >= 0); + paranoid_invariant(i < node->n_children); + return node->bp[i].ptr.tag == BCT_NULL; +} + +static inline NONLEAF_CHILDINFO BNC(FTNODE node, int i) { + paranoid_invariant(i >= 0); + paranoid_invariant(i < node->n_children); + FTNODE_CHILD_POINTER p = node->bp[i].ptr; + paranoid_invariant(p.tag==BCT_NONLEAF); + return p.u.nonleaf; +} + +static inline void set_BNC(FTNODE node, int i, NONLEAF_CHILDINFO nl) { + paranoid_invariant(i >= 0); + paranoid_invariant(i < node->n_children); + FTNODE_CHILD_POINTER *p = &node->bp[i].ptr; + p->tag = BCT_NONLEAF; + p->u.nonleaf = nl; +} + +static inline BASEMENTNODE BLB(FTNODE node, int i) { + paranoid_invariant(i >= 0); + // The optimizer really doesn't like it when we compare + // i to n_children as signed integers. So we assert that + // n_children is in fact positive before doing a comparison + // on the values forcibly cast to unsigned ints. 
+ paranoid_invariant(node->n_children > 0); + paranoid_invariant((unsigned) i < (unsigned) node->n_children); + FTNODE_CHILD_POINTER p = node->bp[i].ptr; + paranoid_invariant(p.tag==BCT_LEAF); + return p.u.leaf; +} + +static inline void set_BLB(FTNODE node, int i, BASEMENTNODE bn) { + paranoid_invariant(i >= 0); + paranoid_invariant(i < node->n_children); + FTNODE_CHILD_POINTER *p = &node->bp[i].ptr; + p->tag = BCT_LEAF; + p->u.leaf = bn; +} + +static inline struct sub_block *BSB(FTNODE node, int i) { + paranoid_invariant(i >= 0); + paranoid_invariant(i < node->n_children); + FTNODE_CHILD_POINTER p = node->bp[i].ptr; + paranoid_invariant(p.tag==BCT_SUBBLOCK); + return p.u.subblock; +} + +static inline void set_BSB(FTNODE node, int i, struct sub_block *sb) { + paranoid_invariant(i >= 0); + paranoid_invariant(i < node->n_children); + FTNODE_CHILD_POINTER *p = &node->bp[i].ptr; + p->tag = BCT_SUBBLOCK; + p->u.subblock = sb; +} + +// ftnode partition macros +// BP stands for ftnode_partition +#define BP_BLOCKNUM(node,i) ((node)->bp[i].blocknum) +#define BP_STATE(node,i) ((node)->bp[i].state) +#define BP_WORKDONE(node, i)((node)->bp[i].workdone) + +// +// macros for managing a node's clock +// Should be managed by ft-ops.c, NOT by serialize/deserialize +// + +// +// BP_TOUCH_CLOCK uses a compare and swap because multiple threads +// that have a read lock on an internal node may try to touch the clock +// simultaneously +// +#define BP_TOUCH_CLOCK(node, i) ((node)->bp[i].clock_count = 1) +#define BP_SWEEP_CLOCK(node, i) ((node)->bp[i].clock_count = 0) +#define BP_SHOULD_EVICT(node, i) ((node)->bp[i].clock_count == 0) +// not crazy about having these two here, one is for the case where we create new +// nodes, such as in splits and creating new roots, and the other is for when +// we are deserializing a node and not all bp's are touched +#define BP_INIT_TOUCHED_CLOCK(node, i) ((node)->bp[i].clock_count = 1) +#define BP_INIT_UNTOUCHED_CLOCK(node, i) ((node)->bp[i].clock_count = 0) + +// ftnode leaf basementnode macros, +#define BLB_MAX_MSN_APPLIED(node,i) (BLB(node,i)->max_msn_applied) +#define BLB_MAX_DSN_APPLIED(node,i) (BLB(node,i)->max_dsn_applied) +#define BLB_DATA(node,i) (&(BLB(node,i)->data_buffer)) +#define BLB_NBYTESINDATA(node,i) (BLB_DATA(node,i)->get_disk_size()) +#define BLB_SEQINSERT(node,i) (BLB(node,i)->seqinsert) diff --git a/storage/tokudb/ft-index/ft/pivotkeys.cc b/storage/tokudb/ft-index/ft/pivotkeys.cc new file mode 100644 index 00000000000..cf37777d892 --- /dev/null +++ b/storage/tokudb/ft-index/ft/pivotkeys.cc @@ -0,0 +1,491 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). 
+ + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
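The pivot-keys implementation follows. As orientation, here is a minimal usage sketch of the ftnode_pivot_keys interface declared in ft/node.h above; it is illustrative only (pivotkeys_usage_sketch is a made-up name) and assumes the TokuFT headers and DBT helpers such as toku_fill_dbt, which the implementation below also relies on.

    // Illustrative only -- not part of the commit.
    #include "ft/node.h"

    static void pivotkeys_usage_sketch(void) {
        DBT keys[2];
        toku_fill_dbt(&keys[0], "apple", 5);   // two same-length keys, so the
        toku_fill_dbt(&keys[1], "mango", 5);   // packed fixed-key format is used

        ftnode_pivot_keys pk;
        pk.create_from_dbts(keys, 2);

        DBT first = pk.get_pivot(0);           // unowned view of pivot 0
        (void) first;

        ftnode_pivot_keys right;
        right.create_empty();                  // split_at requires an empty target
        pk.split_at(1, &right);                // pk keeps pivot 0, right gets pivot 1

        right.destroy();
        pk.destroy();
    }

The fixed-key path keeps all pivots in one 4-byte-aligned packed array (see _align4 and _fixed_keylen_aligned below) and falls back to an array of DBTs as soon as a key of a different length is inserted.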
+ +#include <string> + +#include "portability/memory.h" + +#include "ft/node.h" +#include "ft/serialize/rbuf.h" +#include "ft/serialize/wbuf.h" + +void ftnode_pivot_keys::create_empty() { + _num_pivots = 0; + _total_size = 0; + _fixed_keys = nullptr; + _fixed_keylen = 0; + _fixed_keylen_aligned = 0; + _dbt_keys = nullptr; +} + +void ftnode_pivot_keys::create_from_dbts(const DBT *keys, int n) { + create_empty(); + _num_pivots = n; + + // see if every key has the same length + bool keys_same_size = true; + for (int i = 1; i < _num_pivots; i++) { + if (keys[i].size != keys[i - 1].size) { + keys_same_size = false; + break; + } + } + + if (keys_same_size && _num_pivots > 0) { + // if so, store pivots in a tightly packed array of fixed length keys + _fixed_keylen = keys[0].size; + _fixed_keylen_aligned = _align4(_fixed_keylen); + _total_size = _fixed_keylen_aligned * _num_pivots; + XMALLOC_N_ALIGNED(64, _total_size, _fixed_keys); + for (int i = 0; i < _num_pivots; i++) { + invariant(keys[i].size == _fixed_keylen); + memcpy(_fixed_key(i), keys[i].data, _fixed_keylen); + } + } else { + // otherwise we'll just store the pivots in an array of dbts + XMALLOC_N_ALIGNED(64, _num_pivots, _dbt_keys); + for (int i = 0; i < _num_pivots; i++) { + size_t size = keys[i].size; + toku_memdup_dbt(&_dbt_keys[i], keys[i].data, size); + _total_size += size; + } + } + + sanity_check(); +} + +void ftnode_pivot_keys::_create_from_fixed_keys(const char *fixedkeys, size_t fixed_keylen, int n) { + create_empty(); + _num_pivots = n; + _fixed_keylen = fixed_keylen; + _fixed_keylen_aligned = _align4(fixed_keylen); + _total_size = _fixed_keylen_aligned * _num_pivots; + XMEMDUP_N(_fixed_keys, fixedkeys, _total_size); +} + +// effect: create pivot keys as a clone of an existing set of pivotkeys +void ftnode_pivot_keys::create_from_pivot_keys(const ftnode_pivot_keys &pivotkeys) { + if (pivotkeys._fixed_format()) { + _create_from_fixed_keys(pivotkeys._fixed_keys, pivotkeys._fixed_keylen, pivotkeys._num_pivots); + } else { + create_from_dbts(pivotkeys._dbt_keys, pivotkeys._num_pivots); + } + + sanity_check(); +} + +void ftnode_pivot_keys::destroy() { + if (_dbt_keys != nullptr) { + for (int i = 0; i < _num_pivots; i++) { + toku_destroy_dbt(&_dbt_keys[i]); + } + toku_free(_dbt_keys); + _dbt_keys = nullptr; + } + if (_fixed_keys != nullptr) { + toku_free(_fixed_keys); + _fixed_keys = nullptr; + } + _fixed_keylen = 0; + _fixed_keylen_aligned = 0; + _num_pivots = 0; + _total_size = 0; +} + +void ftnode_pivot_keys::_convert_to_fixed_format() { + invariant(!_fixed_format()); + + // convert to a tightly packed array of fixed length keys + _fixed_keylen = _dbt_keys[0].size; + _fixed_keylen_aligned = _align4(_fixed_keylen); + _total_size = _fixed_keylen_aligned * _num_pivots; + XMALLOC_N_ALIGNED(64, _total_size, _fixed_keys); + for (int i = 0; i < _num_pivots; i++) { + invariant(_dbt_keys[i].size == _fixed_keylen); + memcpy(_fixed_key(i), _dbt_keys[i].data, _fixed_keylen); + } + + // destroy the dbt array format + for (int i = 0; i < _num_pivots; i++) { + toku_destroy_dbt(&_dbt_keys[i]); + } + toku_free(_dbt_keys); + _dbt_keys = nullptr; + + invariant(_fixed_format()); + sanity_check(); +} + +void ftnode_pivot_keys::_convert_to_dbt_format() { + invariant(_fixed_format()); + + // convert to an aray of dbts + REALLOC_N_ALIGNED(64, _num_pivots, _dbt_keys); + for (int i = 0; i < _num_pivots; i++) { + toku_memdup_dbt(&_dbt_keys[i], _fixed_key(i), _fixed_keylen); + } + // pivots sizes are not aligned up dbt format + _total_size = _num_pivots * 
_fixed_keylen; + + // destroy the fixed key format + toku_free(_fixed_keys); + _fixed_keys = nullptr; + _fixed_keylen = 0; + _fixed_keylen_aligned = 0; + + invariant(!_fixed_format()); + sanity_check(); +} + +void ftnode_pivot_keys::deserialize_from_rbuf(struct rbuf *rb, int n) { + _num_pivots = n; + _total_size = 0; + _fixed_keys = nullptr; + _fixed_keylen = 0; + _dbt_keys = nullptr; + + XMALLOC_N_ALIGNED(64, _num_pivots, _dbt_keys); + bool keys_same_size = true; + for (int i = 0; i < _num_pivots; i++) { + const void *pivotkeyptr; + uint32_t size; + rbuf_bytes(rb, &pivotkeyptr, &size); + toku_memdup_dbt(&_dbt_keys[i], pivotkeyptr, size); + _total_size += size; + if (i > 0 && keys_same_size && _dbt_keys[i].size != _dbt_keys[i - 1].size) { + // not all keys are the same size, we'll stick to the dbt array format + keys_same_size = false; + } + } + + if (keys_same_size && _num_pivots > 0) { + _convert_to_fixed_format(); + } + + sanity_check(); +} + +DBT ftnode_pivot_keys::get_pivot(int i) const { + paranoid_invariant(i < _num_pivots); + if (_fixed_format()) { + paranoid_invariant(i * _fixed_keylen_aligned < _total_size); + DBT dbt; + toku_fill_dbt(&dbt, _fixed_key(i), _fixed_keylen); + return dbt; + } else { + return _dbt_keys[i]; + } +} + +DBT *ftnode_pivot_keys::fill_pivot(int i, DBT *dbt) const { + paranoid_invariant(i < _num_pivots); + if (_fixed_format()) { + toku_fill_dbt(dbt, _fixed_key(i), _fixed_keylen); + } else { + toku_copyref_dbt(dbt, _dbt_keys[i]); + } + return dbt; +} + +void ftnode_pivot_keys::_add_key_dbt(const DBT *key, int i) { + toku_clone_dbt(&_dbt_keys[i], *key); + _total_size += _dbt_keys[i].size; +} + +void ftnode_pivot_keys::_destroy_key_dbt(int i) { + invariant(_total_size >= _dbt_keys[i].size); + _total_size -= _dbt_keys[i].size; + toku_destroy_dbt(&_dbt_keys[i]); +} + +void ftnode_pivot_keys::_insert_at_dbt(const DBT *key, int i) { + // make space for a new pivot, slide existing keys to the right + REALLOC_N_ALIGNED(64, _num_pivots + 1, _dbt_keys); + memmove(&_dbt_keys[i + 1], &_dbt_keys[i], (_num_pivots - i) * sizeof(DBT)); + _add_key_dbt(key, i); +} + +void ftnode_pivot_keys::_insert_at_fixed(const DBT *key, int i) { + REALLOC_N_ALIGNED(64, (_num_pivots + 1) * _fixed_keylen_aligned, _fixed_keys); + // TODO: This is not going to be valgrind-safe, because we do not initialize the space + // between _fixed_keylen and _fixed_keylen_aligned (but we probably should) + memmove(_fixed_key(i + 1), _fixed_key(i), (_num_pivots - i) * _fixed_keylen_aligned); + memcpy(_fixed_key(i), key->data, _fixed_keylen); + _total_size += _fixed_keylen_aligned; +} + +void ftnode_pivot_keys::insert_at(const DBT *key, int i) { + invariant(i <= _num_pivots); // it's ok to insert at the end, so we check <= n + + // if the new key doesn't have the same size, we can't be in fixed format + if (_fixed_format() && key->size != _fixed_keylen) { + _convert_to_dbt_format(); + } + + if (_fixed_format()) { + _insert_at_fixed(key, i); + } else { + _insert_at_dbt(key, i); + } + _num_pivots++; + + invariant(total_size() > 0); +} + +void ftnode_pivot_keys::_append_dbt(const ftnode_pivot_keys &pivotkeys) { + REALLOC_N_ALIGNED(64, _num_pivots + pivotkeys._num_pivots, _dbt_keys); + bool other_fixed = pivotkeys._fixed_format(); + for (int i = 0; i < pivotkeys._num_pivots; i++) { + size_t size = other_fixed ? pivotkeys._fixed_keylen : + pivotkeys._dbt_keys[i].size; + toku_memdup_dbt(&_dbt_keys[_num_pivots + i], + other_fixed ? 
pivotkeys._fixed_key(i) : + pivotkeys._dbt_keys[i].data, + size); + _total_size += size; + } +} + +void ftnode_pivot_keys::_append_fixed(const ftnode_pivot_keys &pivotkeys) { + if (pivotkeys._fixed_format() && pivotkeys._fixed_keylen == _fixed_keylen) { + // other pivotkeys have the same fixed keylen + REALLOC_N_ALIGNED(64, (_num_pivots + pivotkeys._num_pivots) * _fixed_keylen_aligned, _fixed_keys); + memcpy(_fixed_key(_num_pivots), pivotkeys._fixed_keys, pivotkeys._total_size); + _total_size += pivotkeys._total_size; + } else { + // must convert to dbt format, other pivotkeys have different length'd keys + _convert_to_dbt_format(); + _append_dbt(pivotkeys); + } +} + +void ftnode_pivot_keys::append(const ftnode_pivot_keys &pivotkeys) { + if (_fixed_format()) { + _append_fixed(pivotkeys); + } else { + _append_dbt(pivotkeys); + } + _num_pivots += pivotkeys._num_pivots; + + sanity_check(); +} + +void ftnode_pivot_keys::_replace_at_dbt(const DBT *key, int i) { + _destroy_key_dbt(i); + _add_key_dbt(key, i); +} + +void ftnode_pivot_keys::_replace_at_fixed(const DBT *key, int i) { + if (key->size == _fixed_keylen) { + memcpy(_fixed_key(i), key->data, _fixed_keylen); + } else { + // must convert to dbt format, replacement key has different length + _convert_to_dbt_format(); + _replace_at_dbt(key, i); + } +} + +void ftnode_pivot_keys::replace_at(const DBT *key, int i) { + if (i < _num_pivots) { + if (_fixed_format()) { + _replace_at_fixed(key, i); + } else { + _replace_at_dbt(key, i); + } + } else { + invariant(i == _num_pivots); // appending to the end is ok + insert_at(key, i); + } + invariant(total_size() > 0); +} + +void ftnode_pivot_keys::_delete_at_fixed(int i) { + memmove(_fixed_key(i), _fixed_key(i + 1), (_num_pivots - 1 - i) * _fixed_keylen_aligned); + _total_size -= _fixed_keylen_aligned; +} + +void ftnode_pivot_keys::_delete_at_dbt(int i) { + // slide over existing keys, then shrink down to size + _destroy_key_dbt(i); + memmove(&_dbt_keys[i], &_dbt_keys[i + 1], (_num_pivots - 1 - i) * sizeof(DBT)); + REALLOC_N_ALIGNED(64, _num_pivots - 1, _dbt_keys); +} + +void ftnode_pivot_keys::delete_at(int i) { + invariant(i < _num_pivots); + + if (_fixed_format()) { + _delete_at_fixed(i); + } else { + _delete_at_dbt(i); + } + + _num_pivots--; +} + +void ftnode_pivot_keys::_split_at_fixed(int i, ftnode_pivot_keys *other) { + // recreate the other set of pivots from index >= i + other->_create_from_fixed_keys(_fixed_key(i), _fixed_keylen, _num_pivots - i); + + // shrink down to size + _total_size = i * _fixed_keylen_aligned; + REALLOC_N_ALIGNED(64, _total_size, _fixed_keys); +} + +void ftnode_pivot_keys::_split_at_dbt(int i, ftnode_pivot_keys *other) { + // recreate the other set of pivots from index >= i + other->create_from_dbts(&_dbt_keys[i], _num_pivots - i); + + // destroy everything greater, shrink down to size + for (int k = i; k < _num_pivots; k++) { + _destroy_key_dbt(k); + } + REALLOC_N_ALIGNED(64, i, _dbt_keys); +} + +void ftnode_pivot_keys::split_at(int i, ftnode_pivot_keys *other) { + if (i < _num_pivots) { + if (_fixed_format()) { + _split_at_fixed(i, other); + } else { + _split_at_dbt(i, other); + } + _num_pivots = i; + } + + sanity_check(); +} + +void ftnode_pivot_keys::serialize_to_wbuf(struct wbuf *wb) const { + bool fixed = _fixed_format(); + size_t written = 0; + for (int i = 0; i < _num_pivots; i++) { + size_t size = fixed ? _fixed_keylen : _dbt_keys[i].size; + invariant(size); + wbuf_nocrc_bytes(wb, fixed ? 
_fixed_key(i) : _dbt_keys[i].data, size); + written += size; + } + invariant(written == serialized_size()); +} + +int ftnode_pivot_keys::num_pivots() const { + // if we have fixed size keys, the number of pivots should be consistent + paranoid_invariant(_fixed_keys == nullptr || (_total_size == _fixed_keylen_aligned * _num_pivots)); + return _num_pivots; +} + +size_t ftnode_pivot_keys::total_size() const { + // if we have fixed size keys, the total size should be consistent + paranoid_invariant(_fixed_keys == nullptr || (_total_size == _fixed_keylen_aligned * _num_pivots)); + return _total_size; +} + +size_t ftnode_pivot_keys::serialized_size() const { + // we only return the size that will be used when serialized, so we calculate based + // on the fixed keylen and not the aligned keylen. + return _fixed_format() ? _num_pivots * _fixed_keylen : _total_size; +} + +void ftnode_pivot_keys::sanity_check() const { + if (_fixed_format()) { + invariant(_dbt_keys == nullptr); + invariant(_fixed_keylen_aligned == _align4(_fixed_keylen)); + invariant(_num_pivots * _fixed_keylen <= _total_size); + invariant(_num_pivots * _fixed_keylen_aligned == _total_size); + } else { + invariant(_num_pivots == 0 || _dbt_keys != nullptr); + size_t size = 0; + for (int i = 0; i < _num_pivots; i++) { + size += _dbt_keys[i].size; + } + invariant(size == _total_size); + } +} diff --git a/storage/tokudb/ft-index/ft/serialize/block_allocator.cc b/storage/tokudb/ft-index/ft/serialize/block_allocator.cc new file mode 100644 index 00000000000..6af0ae82b05 --- /dev/null +++ b/storage/tokudb/ft-index/ft/serialize/block_allocator.cc @@ -0,0 +1,513 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. 
+ +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2009-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
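The block allocator implementation starts below. As orientation, a minimal usage sketch of the public interface that ft/serialize/block_allocator.h (added later in this commit) exposes: create(), alloc_block(), block_size(), free_block(), destroy(). The sketch is hypothetical (block_allocator_usage_sketch is not part of the source) and assumes the TokuFT portability headers provide invariant().

    // Illustrative only -- not part of the commit.
    #include "ft/serialize/block_allocator.h"

    static void block_allocator_usage_sketch(void) {
        block_allocator ba;
        // Reserve 4096 bytes of header space at the start of the file and
        // align every block to 4096; the alignment must be a positive
        // multiple of 512 so direct I/O keeps working.
        ba.create(4096, 4096);

        uint64_t off_a, off_b;
        ba.alloc_block(1000, 0, &off_a);   // heat 0: no eviction hint
        ba.alloc_block(8192, 0, &off_b);

        invariant(ba.block_size(off_a) == 1000);            // sizes are stored as given
        invariant(off_a % 4096 == 0 && off_b % 4096 == 0);  // offsets honor the alignment

        ba.free_block(off_a);
        ba.destroy();
    }

The default strategy is first fit; set_strategy() can switch to best fit, padded fit, or heat zone, which is what choose_block_to_alloc_after() dispatches on in the implementation below.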
+#ident "$Id$" + +#include <algorithm> + +#include <string.h> + +#include "portability/memory.h" +#include "portability/toku_assert.h" +#include "portability/toku_stdint.h" +#include "portability/toku_stdlib.h" + +#include "ft/serialize/block_allocator.h" +#include "ft/serialize/block_allocator_strategy.h" + +#if TOKU_DEBUG_PARANOID +#define VALIDATE() validate() +#else +#define VALIDATE() +#endif + +static FILE *ba_trace_file = nullptr; + +void block_allocator::maybe_initialize_trace(void) { + const char *ba_trace_path = getenv("TOKU_BA_TRACE_PATH"); + if (ba_trace_path != nullptr) { + ba_trace_file = toku_os_fopen(ba_trace_path, "w"); + if (ba_trace_file == nullptr) { + fprintf(stderr, "tokuft: error: block allocator trace path found in environment (%s), " + "but it could not be opened for writing (errno %d)\n", + ba_trace_path, get_maybe_error_errno()); + } else { + fprintf(stderr, "tokuft: block allocator tracing enabled, path: %s\n", ba_trace_path); + } + } +} + +void block_allocator::maybe_close_trace() { + if (ba_trace_file != nullptr) { + int r = toku_os_fclose(ba_trace_file); + if (r != 0) { + fprintf(stderr, "tokuft: error: block allocator trace file did not close properly (r %d, errno %d)\n", + r, get_maybe_error_errno()); + } else { + fprintf(stderr, "tokuft: block allocator tracing finished, file closed successfully\n"); + } + } +} + +void block_allocator::_create_internal(uint64_t reserve_at_beginning, uint64_t alignment) { + // the alignment must be at least 512 and aligned with 512 to work with direct I/O + assert(alignment >= 512 && (alignment % 512) == 0); + + _reserve_at_beginning = reserve_at_beginning; + _alignment = alignment; + _n_blocks = 0; + _blocks_array_size = 1; + XMALLOC_N(_blocks_array_size, _blocks_array); + _n_bytes_in_use = reserve_at_beginning; + _strategy = BA_STRATEGY_FIRST_FIT; + + memset(&_trace_lock, 0, sizeof(toku_mutex_t)); + toku_mutex_init(&_trace_lock, nullptr); + + VALIDATE(); +} + +void block_allocator::create(uint64_t reserve_at_beginning, uint64_t alignment) { + _create_internal(reserve_at_beginning, alignment); + _trace_create(); +} + +void block_allocator::destroy() { + toku_free(_blocks_array); + _trace_destroy(); + toku_mutex_destroy(&_trace_lock); +} + +void block_allocator::set_strategy(enum allocation_strategy strategy) { + _strategy = strategy; +} + +void block_allocator::grow_blocks_array_by(uint64_t n_to_add) { + if (_n_blocks + n_to_add > _blocks_array_size) { + uint64_t new_size = _n_blocks + n_to_add; + uint64_t at_least = _blocks_array_size * 2; + if (at_least > new_size) { + new_size = at_least; + } + _blocks_array_size = new_size; + XREALLOC_N(_blocks_array_size, _blocks_array); + } +} + +void block_allocator::grow_blocks_array() { + grow_blocks_array_by(1); +} + +void block_allocator::create_from_blockpairs(uint64_t reserve_at_beginning, uint64_t alignment, + struct blockpair *pairs, uint64_t n_blocks) { + _create_internal(reserve_at_beginning, alignment); + + _n_blocks = n_blocks; + grow_blocks_array_by(_n_blocks); + memcpy(_blocks_array, pairs, _n_blocks * sizeof(struct blockpair)); + std::sort(_blocks_array, _blocks_array + _n_blocks); + for (uint64_t i = 0; i < _n_blocks; i++) { + // Allocator does not support size 0 blocks. See block_allocator_free_block. 
+ invariant(_blocks_array[i].size > 0); + invariant(_blocks_array[i].offset >= _reserve_at_beginning); + invariant(_blocks_array[i].offset % _alignment == 0); + + _n_bytes_in_use += _blocks_array[i].size; + } + + VALIDATE(); + + _trace_create_from_blockpairs(); +} + +// Effect: align a value by rounding up. +static inline uint64_t align(uint64_t value, uint64_t ba_alignment) { + return ((value + ba_alignment - 1) / ba_alignment) * ba_alignment; +} + +struct block_allocator::blockpair * +block_allocator::choose_block_to_alloc_after(size_t size, uint64_t heat) { + switch (_strategy) { + case BA_STRATEGY_FIRST_FIT: + return block_allocator_strategy::first_fit(_blocks_array, _n_blocks, size, _alignment); + case BA_STRATEGY_BEST_FIT: + return block_allocator_strategy::best_fit(_blocks_array, _n_blocks, size, _alignment); + case BA_STRATEGY_HEAT_ZONE: + return block_allocator_strategy::heat_zone(_blocks_array, _n_blocks, size, _alignment, heat); + case BA_STRATEGY_PADDED_FIT: + return block_allocator_strategy::padded_fit(_blocks_array, _n_blocks, size, _alignment); + default: + abort(); + } +} + +// Effect: Allocate a block. The resulting block must be aligned on the ba->alignment (which to make direct_io happy must be a positive multiple of 512). +void block_allocator::alloc_block(uint64_t size, uint64_t heat, uint64_t *offset) { + struct blockpair *bp; + + // Allocator does not support size 0 blocks. See block_allocator_free_block. + invariant(size > 0); + + grow_blocks_array(); + _n_bytes_in_use += size; + + uint64_t end_of_reserve = align(_reserve_at_beginning, _alignment); + + if (_n_blocks == 0) { + // First and only block + assert(_n_bytes_in_use == _reserve_at_beginning + size); // we know exactly how many are in use + _blocks_array[0].offset = align(_reserve_at_beginning, _alignment); + _blocks_array[0].size = size; + *offset = _blocks_array[0].offset; + goto done; + } else if (end_of_reserve + size <= _blocks_array[0].offset ) { + // Check to see if the space immediately after the reserve is big enough to hold the new block. + bp = &_blocks_array[0]; + memmove(bp + 1, bp, _n_blocks * sizeof(*bp)); + bp[0].offset = end_of_reserve; + bp[0].size = size; + *offset = end_of_reserve; + goto done; + } + + bp = choose_block_to_alloc_after(size, heat); + if (bp != nullptr) { + // our allocation strategy chose the space after `bp' to fit the new block + uint64_t answer_offset = align(bp->offset + bp->size, _alignment); + uint64_t blocknum = bp - _blocks_array; + invariant(&_blocks_array[blocknum] == bp); + invariant(blocknum < _n_blocks); + memmove(bp + 2, bp + 1, (_n_blocks - blocknum - 1) * sizeof(*bp)); + bp[1].offset = answer_offset; + bp[1].size = size; + *offset = answer_offset; + } else { + // It didn't fit anywhere, so fit it on the end. + assert(_n_blocks < _blocks_array_size); + bp = &_blocks_array[_n_blocks]; + uint64_t answer_offset = align(bp[-1].offset + bp[-1].size, _alignment); + bp->offset = answer_offset; + bp->size = size; + *offset = answer_offset; + } + +done: + _n_blocks++; + VALIDATE(); + + _trace_alloc(size, heat, *offset); +} + +// Find the index in the blocks array that has a particular offset. Requires that the block exist. +// Use binary search so it runs fast. +int64_t block_allocator::find_block(uint64_t offset) { + VALIDATE(); + if (_n_blocks == 1) { + assert(_blocks_array[0].offset == offset); + return 0; + } + + uint64_t lo = 0; + uint64_t hi = _n_blocks; + while (1) { + assert(lo < hi); // otherwise no such block exists. 
+ uint64_t mid = (lo + hi) / 2; + uint64_t thisoff = _blocks_array[mid].offset; + if (thisoff < offset) { + lo = mid + 1; + } else if (thisoff > offset) { + hi = mid; + } else { + return mid; + } + } +} + +// To support 0-sized blocks, we need to include size as an input to this function. +// All 0-sized blocks at the same offset can be considered identical, but +// a 0-sized block can share offset with a non-zero sized block. +// The non-zero sized block is not exchangable with a zero sized block (or vice versa), +// so inserting 0-sized blocks can cause corruption here. +void block_allocator::free_block(uint64_t offset) { + VALIDATE(); + int64_t bn = find_block(offset); + assert(bn >= 0); // we require that there is a block with that offset. + _n_bytes_in_use -= _blocks_array[bn].size; + memmove(&_blocks_array[bn], &_blocks_array[bn + 1], + (_n_blocks - bn - 1) * sizeof(struct blockpair)); + _n_blocks--; + VALIDATE(); + + _trace_free(offset); +} + +uint64_t block_allocator::block_size(uint64_t offset) { + int64_t bn = find_block(offset); + assert(bn >=0); // we require that there is a block with that offset. + return _blocks_array[bn].size; +} + +uint64_t block_allocator::allocated_limit() const { + if (_n_blocks == 0) { + return _reserve_at_beginning; + } else { + struct blockpair *last = &_blocks_array[_n_blocks - 1]; + return last->offset + last->size; + } +} + +// Effect: Consider the blocks in sorted order. The reserved block at the beginning is number 0. The next one is number 1 and so forth. +// Return the offset and size of the block with that number. +// Return 0 if there is a block that big, return nonzero if b is too big. +int block_allocator::get_nth_block_in_layout_order(uint64_t b, uint64_t *offset, uint64_t *size) { + if (b ==0 ) { + *offset = 0; + *size = _reserve_at_beginning; + return 0; + } else if (b > _n_blocks) { + return -1; + } else { + *offset =_blocks_array[b - 1].offset; + *size =_blocks_array[b - 1].size; + return 0; + } +} + +// Requires: report->file_size_bytes is filled in +// Requires: report->data_bytes is filled in +// Requires: report->checkpoint_bytes_additional is filled in +void block_allocator::get_unused_statistics(TOKU_DB_FRAGMENTATION report) { + assert(_n_bytes_in_use == report->data_bytes + report->checkpoint_bytes_additional); + + report->unused_bytes = 0; + report->unused_blocks = 0; + report->largest_unused_block = 0; + if (_n_blocks > 0) { + //Deal with space before block 0 and after reserve: + { + struct blockpair *bp = &_blocks_array[0]; + assert(bp->offset >= align(_reserve_at_beginning, _alignment)); + uint64_t free_space = bp->offset - align(_reserve_at_beginning, _alignment); + if (free_space > 0) { + report->unused_bytes += free_space; + report->unused_blocks++; + if (free_space > report->largest_unused_block) { + report->largest_unused_block = free_space; + } + } + } + + //Deal with space between blocks: + for (uint64_t blocknum = 0; blocknum +1 < _n_blocks; blocknum ++) { + // Consider the space after blocknum + struct blockpair *bp = &_blocks_array[blocknum]; + uint64_t this_offset = bp[0].offset; + uint64_t this_size = bp[0].size; + uint64_t end_of_this_block = align(this_offset+this_size, _alignment); + uint64_t next_offset = bp[1].offset; + uint64_t free_space = next_offset - end_of_this_block; + if (free_space > 0) { + report->unused_bytes += free_space; + report->unused_blocks++; + if (free_space > report->largest_unused_block) { + report->largest_unused_block = free_space; + } + } + } + + //Deal with space after last block + 
{ + struct blockpair *bp = &_blocks_array[_n_blocks-1]; + uint64_t this_offset = bp[0].offset; + uint64_t this_size = bp[0].size; + uint64_t end_of_this_block = align(this_offset+this_size, _alignment); + if (end_of_this_block < report->file_size_bytes) { + uint64_t free_space = report->file_size_bytes - end_of_this_block; + assert(free_space > 0); + report->unused_bytes += free_space; + report->unused_blocks++; + if (free_space > report->largest_unused_block) { + report->largest_unused_block = free_space; + } + } + } + } else { + // No blocks. Just the reserve. + uint64_t end_of_this_block = align(_reserve_at_beginning, _alignment); + if (end_of_this_block < report->file_size_bytes) { + uint64_t free_space = report->file_size_bytes - end_of_this_block; + assert(free_space > 0); + report->unused_bytes += free_space; + report->unused_blocks++; + if (free_space > report->largest_unused_block) { + report->largest_unused_block = free_space; + } + } + } +} + +void block_allocator::get_statistics(TOKU_DB_FRAGMENTATION report) { + report->data_bytes = _n_bytes_in_use; + report->data_blocks = _n_blocks; + report->file_size_bytes = 0; + report->checkpoint_bytes_additional = 0; + get_unused_statistics(report); +} + +void block_allocator::validate() const { + uint64_t n_bytes_in_use = _reserve_at_beginning; + for (uint64_t i = 0; i < _n_blocks; i++) { + n_bytes_in_use += _blocks_array[i].size; + if (i > 0) { + assert(_blocks_array[i].offset > _blocks_array[i - 1].offset); + assert(_blocks_array[i].offset >= _blocks_array[i - 1].offset + _blocks_array[i - 1].size ); + } + } + assert(n_bytes_in_use == _n_bytes_in_use); +} + +// Tracing + +void block_allocator::_trace_create(void) { + if (ba_trace_file != nullptr) { + toku_mutex_lock(&_trace_lock); + fprintf(ba_trace_file, "ba_trace_create %p %" PRIu64 " %" PRIu64 "\n", + this, _reserve_at_beginning, _alignment); + toku_mutex_unlock(&_trace_lock); + + fflush(ba_trace_file); + } +} + +void block_allocator::_trace_create_from_blockpairs(void) { + if (ba_trace_file != nullptr) { + toku_mutex_lock(&_trace_lock); + fprintf(ba_trace_file, "ba_trace_create_from_blockpairs %p %" PRIu64 " %" PRIu64 " ", + this, _reserve_at_beginning, _alignment); + for (uint64_t i = 0; i < _n_blocks; i++) { + fprintf(ba_trace_file, "[%" PRIu64 " %" PRIu64 "] ", + _blocks_array[i].offset, _blocks_array[i].size); + } + fprintf(ba_trace_file, "\n"); + toku_mutex_unlock(&_trace_lock); + + fflush(ba_trace_file); + } +} + +void block_allocator::_trace_destroy(void) { + if (ba_trace_file != nullptr) { + toku_mutex_lock(&_trace_lock); + fprintf(ba_trace_file, "ba_trace_destroy %p\n", this); + toku_mutex_unlock(&_trace_lock); + + fflush(ba_trace_file); + } +} + +void block_allocator::_trace_alloc(uint64_t size, uint64_t heat, uint64_t offset) { + if (ba_trace_file != nullptr) { + toku_mutex_lock(&_trace_lock); + fprintf(ba_trace_file, "ba_trace_alloc %p %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", + this, size, heat, offset); + toku_mutex_unlock(&_trace_lock); + + fflush(ba_trace_file); + } +} + +void block_allocator::_trace_free(uint64_t offset) { + if (ba_trace_file != nullptr) { + toku_mutex_lock(&_trace_lock); + fprintf(ba_trace_file, "ba_trace_free %p %" PRIu64 "\n", this, offset); + toku_mutex_unlock(&_trace_lock); + + fflush(ba_trace_file); + } +} diff --git a/storage/tokudb/ft-index/ft/serialize/block_allocator.h b/storage/tokudb/ft-index/ft/serialize/block_allocator.h new file mode 100644 index 00000000000..b50dadc9e56 --- /dev/null +++ 
b/storage/tokudb/ft-index/ft/serialize/block_allocator.h @@ -0,0 +1,267 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#pragma once + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." + +#include <db.h> + +#include "portability/toku_pthread.h" +#include "portability/toku_stdint.h" + +// Block allocator. +// +// A block allocator manages the allocation of variable-sized blocks. +// The translation of block numbers to addresses is handled elsewhere. +// The allocation of block numbers is handled elsewhere. +// +// When creating a block allocator we also specify a certain-sized +// block at the beginning that is preallocated (and cannot be allocated or freed) +// +// We can allocate blocks of a particular size at a particular location. +// We can allocate blocks of a particular size at a location chosen by the allocator. +// We can free blocks. +// We can determine the size of a block. + +class block_allocator { +public: + static const size_t BLOCK_ALLOCATOR_ALIGNMENT = 4096; + + // How much must be reserved at the beginning for the block? + // The actual header is 8+4+4+8+8_4+8+ the length of the db names + 1 pointer for each root. + // So 4096 should be enough. + static const size_t BLOCK_ALLOCATOR_HEADER_RESERVE = 4096; + + static_assert(BLOCK_ALLOCATOR_HEADER_RESERVE % BLOCK_ALLOCATOR_ALIGNMENT == 0, + "block allocator header must have proper alignment"); + + static const size_t BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE = BLOCK_ALLOCATOR_HEADER_RESERVE * 2; + + enum allocation_strategy { + BA_STRATEGY_FIRST_FIT = 1, + BA_STRATEGY_BEST_FIT, + BA_STRATEGY_PADDED_FIT, + BA_STRATEGY_HEAT_ZONE + }; + + struct blockpair { + uint64_t offset; + uint64_t size; + blockpair(uint64_t o, uint64_t s) : + offset(o), size(s) { + } + int operator<(const struct blockpair &rhs) const { + return offset < rhs.offset; + } + int operator<(const uint64_t &o) const { + return offset < o; + } + }; + + // Effect: Create a block allocator, in which the first RESERVE_AT_BEGINNING bytes are not put into a block. + // The default allocation strategy is first fit (BA_STRATEGY_FIRST_FIT) + // All blocks be start on a multiple of ALIGNMENT. + // Aborts if we run out of memory. + // Parameters + // reserve_at_beginning (IN) Size of reserved block at beginning. This size does not have to be aligned. + // alignment (IN) Block alignment. + void create(uint64_t reserve_at_beginning, uint64_t alignment); + + // Effect: Create a block allocator, in which the first RESERVE_AT_BEGINNING bytes are not put into a block. 
+ // The default allocation strategy is first fit (BA_STRATEGY_FIRST_FIT) + // The allocator is initialized to contain `n_blocks' of blockpairs, taken from `pairs' + // All blocks be start on a multiple of ALIGNMENT. + // Aborts if we run out of memory. + // Parameters + // pairs, unowned array of pairs to copy + // n_blocks, Size of pairs array + // reserve_at_beginning (IN) Size of reserved block at beginning. This size does not have to be aligned. + // alignment (IN) Block alignment. + void create_from_blockpairs(uint64_t reserve_at_beginning, uint64_t alignment, + struct blockpair *pairs, uint64_t n_blocks); + + // Effect: Destroy this block allocator + void destroy(); + + // Effect: Set the allocation strategy that the allocator should use + // Requires: No other threads are operating on this block allocator + void set_strategy(enum allocation_strategy strategy); + + // Effect: Allocate a block of the specified size at an address chosen by the allocator. + // Aborts if anything goes wrong. + // The block address will be a multiple of the alignment. + // Parameters: + // size (IN): The size of the block. (The size does not have to be aligned.) + // offset (OUT): The location of the block. + // heat (IN): A higher heat means we should be prepared to free this block soon (perhaps in the next checkpoint) + // Heat values are lexiographically ordered (like integers), but their specific values are arbitrary + void alloc_block(uint64_t size, uint64_t heat, uint64_t *offset); + + // Effect: Free the block at offset. + // Requires: There must be a block currently allocated at that offset. + // Parameters: + // offset (IN): The offset of the block. + void free_block(uint64_t offset); + + // Effect: Return the size of the block that starts at offset. + // Requires: There must be a block currently allocated at that offset. + // Parameters: + // offset (IN): The offset of the block. + uint64_t block_size(uint64_t offset); + + // Effect: Check to see if the block allocator is OK. This may take a long time. + // Usage Hints: Probably only use this for unit tests. + // TODO: Private? + void validate() const; + + // Effect: Return the unallocated block address of "infinite" size. + // That is, return the smallest address that is above all the allocated blocks. + uint64_t allocated_limit() const; + + // Effect: Consider the blocks in sorted order. The reserved block at the beginning is number 0. The next one is number 1 and so forth. + // Return the offset and size of the block with that number. + // Return 0 if there is a block that big, return nonzero if b is too big. + // Rationale: This is probably useful only for tests. + int get_nth_block_in_layout_order(uint64_t b, uint64_t *offset, uint64_t *size); + + // Effect: Fill in report to indicate how the file is used. + // Requires: + // report->file_size_bytes is filled in + // report->data_bytes is filled in + // report->checkpoint_bytes_additional is filled in + void get_unused_statistics(TOKU_DB_FRAGMENTATION report); + + // Effect: Fill in report->data_bytes with the number of bytes in use + // Fill in report->data_blocks with the number of blockpairs in use + // Fill in unused statistics using this->get_unused_statistics() + // Requires: + // report->file_size is ignored on return + // report->checkpoint_bytes_additional is ignored on return + void get_statistics(TOKU_DB_FRAGMENTATION report); + + // Block allocator tracing. + // - Enabled by setting TOKU_BA_TRACE_PATH to the file that the trace file + // should be written to. 
+ // - Trace may be replayed by ba_trace_replay tool in tools/ directory + // eg: "cat mytracefile | ba_trace_replay" + static void maybe_initialize_trace(); + static void maybe_close_trace(); + +private: + void _create_internal(uint64_t reserve_at_beginning, uint64_t alignment); + void grow_blocks_array_by(uint64_t n_to_add); + void grow_blocks_array(); + int64_t find_block(uint64_t offset); + struct blockpair *choose_block_to_alloc_after(size_t size, uint64_t heat); + + // Tracing + toku_mutex_t _trace_lock; + void _trace_create(void); + void _trace_create_from_blockpairs(void); + void _trace_destroy(void); + void _trace_alloc(uint64_t size, uint64_t heat, uint64_t offset); + void _trace_free(uint64_t offset); + + // How much to reserve at the beginning + uint64_t _reserve_at_beginning; + // Block alignment + uint64_t _alignment; + // How many blocks + uint64_t _n_blocks; + // How big is the blocks_array. Must be >= n_blocks. + uint64_t _blocks_array_size; + // These blocks are sorted by address. + struct blockpair *_blocks_array; + // Including the reserve_at_beginning + uint64_t _n_bytes_in_use; + // The allocation strategy are we using + enum allocation_strategy _strategy; +}; diff --git a/storage/tokudb/ft-index/ft/serialize/block_allocator_strategy.cc b/storage/tokudb/ft-index/ft/serialize/block_allocator_strategy.cc new file mode 100644 index 00000000000..f896a41aaba --- /dev/null +++ b/storage/tokudb/ft-index/ft/serialize/block_allocator_strategy.cc @@ -0,0 +1,274 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2014 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. 
+ +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#include <algorithm> + +#include <string.h> + +#include "portability/toku_assert.h" + +#include "ft/serialize/block_allocator_strategy.h" + +static uint64_t _align(uint64_t value, uint64_t ba_alignment) { + return ((value + ba_alignment - 1) / ba_alignment) * ba_alignment; +} + +static uint64_t _roundup_to_power_of_two(uint64_t value) { + uint64_t r = 4096; + while (r < value) { + r *= 2; + invariant(r > 0); + } + return r; +} + +// First fit block allocation +static struct block_allocator::blockpair * +_first_fit(struct block_allocator::blockpair *blocks_array, + uint64_t n_blocks, uint64_t size, uint64_t alignment, + uint64_t max_padding) { + if (n_blocks == 1) { + // won't enter loop, can't underflow the direction < 0 case + return nullptr; + } + + struct block_allocator::blockpair *bp = &blocks_array[0]; + for (uint64_t n_spaces_to_check = n_blocks - 1; n_spaces_to_check > 0; + n_spaces_to_check--, bp++) { + // Consider the space after bp + uint64_t padded_alignment = max_padding != 0 ? 
_align(max_padding, alignment) : alignment; + uint64_t possible_offset = _align(bp->offset + bp->size, padded_alignment); + if (possible_offset + size <= bp[1].offset) { // bp[1] is always valid since bp < &blocks_array[n_blocks-1] + invariant(bp - blocks_array < (int64_t) n_blocks); + return bp; + } + } + return nullptr; +} + +static struct block_allocator::blockpair * +_first_fit_bw(struct block_allocator::blockpair *blocks_array, + uint64_t n_blocks, uint64_t size, uint64_t alignment, + uint64_t max_padding, struct block_allocator::blockpair *blocks_array_limit) { + if (n_blocks == 1) { + // won't enter loop, can't underflow the direction < 0 case + return nullptr; + } + + struct block_allocator::blockpair *bp = &blocks_array[-1]; + for (uint64_t n_spaces_to_check = n_blocks - 1; n_spaces_to_check > 0; + n_spaces_to_check--, bp--) { + // Consider the space after bp + uint64_t padded_alignment = max_padding != 0 ? _align(max_padding, alignment) : alignment; + uint64_t possible_offset = _align(bp->offset + bp->size, padded_alignment); + if (&bp[1] < blocks_array_limit && possible_offset + size <= bp[1].offset) { + invariant(blocks_array - bp < (int64_t) n_blocks); + return bp; + } + } + return nullptr; +} + +struct block_allocator::blockpair * +block_allocator_strategy::first_fit(struct block_allocator::blockpair *blocks_array, + uint64_t n_blocks, uint64_t size, uint64_t alignment) { + return _first_fit(blocks_array, n_blocks, size, alignment, 0); +} + +// Best fit block allocation +struct block_allocator::blockpair * +block_allocator_strategy::best_fit(struct block_allocator::blockpair *blocks_array, + uint64_t n_blocks, uint64_t size, uint64_t alignment) { + struct block_allocator::blockpair *best_bp = nullptr; + uint64_t best_hole_size = 0; + for (uint64_t blocknum = 0; blocknum + 1 < n_blocks; blocknum++) { + // Consider the space after blocknum + struct block_allocator::blockpair *bp = &blocks_array[blocknum]; + uint64_t possible_offset = _align(bp->offset + bp->size, alignment); + uint64_t possible_end_offset = possible_offset + size; + if (possible_end_offset <= bp[1].offset) { + // It fits here. Is it the best fit? + uint64_t hole_size = bp[1].offset - possible_end_offset; + if (best_bp == nullptr || hole_size < best_hole_size) { + best_hole_size = hole_size; + best_bp = bp; + } + } + } + return best_bp; +} + +static uint64_t padded_fit_alignment = 4096; + +// TODO: These compiler specific directives should be abstracted in a portability header +// portability/toku_compiler.h? +__attribute__((__constructor__)) +static void determine_padded_fit_alignment_from_env(void) { + // TODO: Should be in portability as 'toku_os_getenv()?' + const char *s = getenv("TOKU_BA_PADDED_FIT_ALIGNMENT"); + if (s != nullptr && strlen(s) > 0) { + const int64_t alignment = strtoll(s, nullptr, 10); + if (alignment <= 0) { + fprintf(stderr, "tokuft: error: block allocator padded fit alignment found in environment (%s), " + "but it's out of range (should be an integer > 0). defaulting to %" PRIu64 "\n", + s, padded_fit_alignment); + } else { + padded_fit_alignment = _roundup_to_power_of_two(alignment); + fprintf(stderr, "tokuft: setting block allocator padded fit alignment to %" PRIu64 "\n", + padded_fit_alignment); + } + } +} + +// First fit into a block that is oversized by up to max_padding. +// The hope is that if we purposefully waste a bit of space at allocation +// time we'll be more likely to reuse this block later. 
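// Illustrative sketch (not TokuFT code): a simplified, self-contained restatement of the
// first-fit scan defined above, over a plain std::vector of (offset, size) pairs instead of
// block_allocator::blockpair. It shows the core idea: walk the blocks in address order,
// align the candidate offset just past each block, and take the first gap that can hold
// `size` bytes before the next block begins. It deliberately ignores the max_padding
// refinement that padded_fit layers on top. All names here are hypothetical.
#include <cstdint>
#include <utility>
#include <vector>

static uint64_t align_up_sketch(uint64_t value, uint64_t alignment) {
    return ((value + alignment - 1) / alignment) * alignment;
}

// Returns the index of the block after which a new block of `size` bytes fits,
// or -1 if no interior gap is large enough (the caller would then extend the file).
static int64_t first_fit_index_sketch(const std::vector<std::pair<uint64_t, uint64_t>> &blocks, // sorted by offset
                                      uint64_t size, uint64_t alignment) {
    for (size_t i = 0; i + 1 < blocks.size(); i++) {
        uint64_t candidate = align_up_sketch(blocks[i].first + blocks[i].second, alignment);
        if (candidate + size <= blocks[i + 1].first) {
            return (int64_t) i;   // the new block would be placed at `candidate`
        }
    }
    return -1;
}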
+struct block_allocator::blockpair * +block_allocator_strategy::padded_fit(struct block_allocator::blockpair *blocks_array, + uint64_t n_blocks, uint64_t size, uint64_t alignment) { + return _first_fit(blocks_array, n_blocks, size, alignment, padded_fit_alignment); +} + +static double hot_zone_threshold = 0.85; + +// TODO: These compiler specific directives should be abstracted in a portability header +// portability/toku_compiler.h? +__attribute__((__constructor__)) +static void determine_hot_zone_threshold_from_env(void) { + // TODO: Should be in portability as 'toku_os_getenv()?' + const char *s = getenv("TOKU_BA_HOT_ZONE_THRESHOLD"); + if (s != nullptr && strlen(s) > 0) { + const double hot_zone = strtod(s, nullptr); + if (hot_zone < 1 || hot_zone > 99) { + fprintf(stderr, "tokuft: error: block allocator hot zone threshold found in environment (%s), " + "but it's out of range (should be an integer 1 through 99). defaulting to 85\n", s); + hot_zone_threshold = 85 / 100; + } else { + fprintf(stderr, "tokuft: setting block allocator hot zone threshold to %s\n", s); + hot_zone_threshold = hot_zone / 100; + } + } +} + +struct block_allocator::blockpair * +block_allocator_strategy::heat_zone(struct block_allocator::blockpair *blocks_array, + uint64_t n_blocks, uint64_t size, uint64_t alignment, + uint64_t heat) { + if (heat > 0) { + struct block_allocator::blockpair *bp, *boundary_bp; + + // Hot allocation. Find the beginning of the hot zone. + boundary_bp = &blocks_array[n_blocks - 1]; + uint64_t highest_offset = _align(boundary_bp->offset + boundary_bp->size, alignment); + uint64_t hot_zone_offset = static_cast<uint64_t>(hot_zone_threshold * highest_offset); + + boundary_bp = std::lower_bound(blocks_array, blocks_array + n_blocks, hot_zone_offset); + uint64_t blocks_in_zone = (blocks_array + n_blocks) - boundary_bp; + uint64_t blocks_outside_zone = boundary_bp - blocks_array; + invariant(blocks_in_zone + blocks_outside_zone == n_blocks); + + if (blocks_in_zone > 0) { + // Find the first fit in the hot zone, going forward. + bp = _first_fit(boundary_bp, blocks_in_zone, size, alignment, 0); + if (bp != nullptr) { + return bp; + } + } + if (blocks_outside_zone > 0) { + // Find the first fit in the cold zone, going backwards. + bp = _first_fit_bw(boundary_bp, blocks_outside_zone, size, alignment, 0, &blocks_array[n_blocks]); + if (bp != nullptr) { + return bp; + } + } + } else { + // Cold allocations are simply first-fit from the beginning. + return _first_fit(blocks_array, n_blocks, size, alignment, 0); + } + return nullptr; +} diff --git a/storage/tokudb/ft-index/ft/ft_msg.cc b/storage/tokudb/ft-index/ft/serialize/block_allocator_strategy.h index f03ae2a417c..3b7c0bafe4e 100644 --- a/storage/tokudb/ft-index/ft/ft_msg.cc +++ b/storage/tokudb/ft-index/ft/serialize/block_allocator_strategy.h @@ -1,6 +1,6 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" + /* COPYING CONDITIONS NOTICE: @@ -29,8 +29,8 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2014 Tokutek, Inc. DISCLAIMER: @@ -86,48 +86,30 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." 
- - -#include <toku_portability.h> -#include "fttypes.h" -#include "xids.h" -#include "ft_msg.h" - +#pragma once -uint32_t -ft_msg_get_keylen(FT_MSG ft_msg) { - uint32_t rval = ft_msg->u.id.key->size; - return rval; -} +#include <db.h> -uint32_t -ft_msg_get_vallen(FT_MSG ft_msg) { - uint32_t rval = ft_msg->u.id.val->size; - return rval; -} +#include "ft/serialize/block_allocator.h" -XIDS -ft_msg_get_xids(FT_MSG ft_msg) { - XIDS rval = ft_msg->xids; - return rval; -} +// Block allocation strategy implementations -void * -ft_msg_get_key(FT_MSG ft_msg) { - void * rval = ft_msg->u.id.key->data; - return rval; -} +class block_allocator_strategy { +public: + static struct block_allocator::blockpair * + first_fit(struct block_allocator::blockpair *blocks_array, + uint64_t n_blocks, uint64_t size, uint64_t alignment); -void * -ft_msg_get_val(FT_MSG ft_msg) { - void * rval = ft_msg->u.id.val->data; - return rval; -} + static struct block_allocator::blockpair * + best_fit(struct block_allocator::blockpair *blocks_array, + uint64_t n_blocks, uint64_t size, uint64_t alignment); -enum ft_msg_type -ft_msg_get_type(FT_MSG ft_msg) { - enum ft_msg_type rval = ft_msg->type; - return rval; -} + static struct block_allocator::blockpair * + padded_fit(struct block_allocator::blockpair *blocks_array, + uint64_t n_blocks, uint64_t size, uint64_t alignment); + static struct block_allocator::blockpair * + heat_zone(struct block_allocator::blockpair *blocks_array, + uint64_t n_blocks, uint64_t size, uint64_t alignment, + uint64_t heat); +}; diff --git a/storage/tokudb/ft-index/ft/serialize/block_table.cc b/storage/tokudb/ft-index/ft/serialize/block_table.cc new file mode 100644 index 00000000000..561f03a8871 --- /dev/null +++ b/storage/tokudb/ft-index/ft/serialize/block_table.cc @@ -0,0 +1,1046 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
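// Illustrative sketch (not TokuFT code): how the BA_STRATEGY_* values from block_allocator.h
// plausibly map onto the block_allocator_strategy helpers declared in
// block_allocator_strategy.h above. The real dispatch lives in the block_allocator
// implementation, which is not part of this diff, so treat this wiring as an assumption
// rather than the actual code path.
#include "ft/serialize/block_allocator.h"
#include "ft/serialize/block_allocator_strategy.h"

static struct block_allocator::blockpair *
choose_blockpair_sketch(block_allocator::allocation_strategy strategy,
                        struct block_allocator::blockpair *blocks, uint64_t n_blocks,
                        uint64_t size, uint64_t alignment, uint64_t heat) {
    switch (strategy) {
    case block_allocator::BA_STRATEGY_FIRST_FIT:
        return block_allocator_strategy::first_fit(blocks, n_blocks, size, alignment);
    case block_allocator::BA_STRATEGY_BEST_FIT:
        return block_allocator_strategy::best_fit(blocks, n_blocks, size, alignment);
    case block_allocator::BA_STRATEGY_PADDED_FIT:
        return block_allocator_strategy::padded_fit(blocks, n_blocks, size, alignment);
    case block_allocator::BA_STRATEGY_HEAT_ZONE:
        return block_allocator_strategy::heat_zone(blocks, n_blocks, size, alignment, heat);
    }
    return nullptr;
}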
+ +#include "portability/memory.h" +#include "portability/toku_assert.h" +#include "portability/toku_portability.h" +#include "portability/toku_pthread.h" + +// ugly but pragmatic, need access to dirty bits while holding translation lock +// TODO: Refactor this (possibly with FT-301) +#include "ft/ft-internal.h" + +// TODO: reorganize this dependency (FT-303) +#include "ft/ft-ops.h" // for toku_maybe_truncate_file +#include "ft/serialize/block_table.h" +#include "ft/serialize/rbuf.h" +#include "ft/serialize/wbuf.h" +#include "ft/serialize/block_allocator.h" + +#include "util/nb_mutex.h" +#include "util/scoped_malloc.h" + +// indicates the end of a freelist +static const BLOCKNUM freelist_null = { -1 }; + +// value of block_translation_pair.size if blocknum is unused +static const DISKOFF size_is_free = (DISKOFF) -1; + +// value of block_translation_pair.u.diskoff if blocknum is used but does not yet have a diskblock +static const DISKOFF diskoff_unused = (DISKOFF) -2; + +void block_table::_mutex_lock() { + toku_mutex_lock(&_mutex); +} + +void block_table::_mutex_unlock() { + toku_mutex_unlock(&_mutex); +} + +// TODO: Move lock to FT +void toku_ft_lock(FT ft) { + block_table *bt = &ft->blocktable; + bt->_mutex_lock(); +} + +// TODO: Move lock to FT +void toku_ft_unlock(FT ft) { + block_table *bt = &ft->blocktable; + toku_mutex_assert_locked(&bt->_mutex); + bt->_mutex_unlock(); +} + +// There are two headers: the reserve must fit them both and be suitably aligned. +static_assert(block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE % + block_allocator::BLOCK_ALLOCATOR_ALIGNMENT == 0, + "Block allocator's header reserve must be suitibly aligned"); +static_assert(block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE * 2 == + block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, + "Block allocator's total header reserve must exactly fit two headers"); + +// does NOT initialize the block allocator: the caller is responsible +void block_table::_create_internal() { + memset(&_current, 0, sizeof(struct translation)); + memset(&_inprogress, 0, sizeof(struct translation)); + memset(&_checkpointed, 0, sizeof(struct translation)); + memset(&_mutex, 0, sizeof(_mutex)); + toku_mutex_init(&_mutex, nullptr); + nb_mutex_init(&_safe_file_size_lock); +} + +// Fill in the checkpointed translation from buffer, and copy checkpointed to current. +// The one read from disk is the last known checkpointed one, so we are keeping it in +// place and then setting current (which is never stored on disk) for current use. +// The translation_buffer has translation only, we create the rest of the block_table. 
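// Illustrative sketch (not TokuFT code): the static_asserts above pin down the on-disk
// prefix that the block allocator never hands out. With the constants from
// block_allocator.h this is two 4096-byte header slots, so every block the allocator
// returns starts at or after byte 8192 and on a 4096-byte boundary. A compile-time
// restatement under those assumptions:
#include "ft/serialize/block_allocator.h"

static_assert(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE == 2 * 4096,
              "sketch: two 4096-byte header slots are reserved at the start of the file");
// Conceptual layout (byte offsets):
//   [0    .. 4095]  header slot 0
//   [4096 .. 8191]  header slot 1
//   [8192 .. ... ]  translation table and data blocks, 4096-byte aligned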
+int block_table::create_from_buffer(int fd, + DISKOFF location_on_disk, //Location of translation_buffer + DISKOFF size_on_disk, + unsigned char *translation_buffer) { + // Does not initialize the block allocator + _create_internal(); + + // Deserialize the translation and copy it to current + int r = _translation_deserialize_from_buffer(&_checkpointed, + location_on_disk, size_on_disk, + translation_buffer); + if (r != 0) { + return r; + } + _copy_translation(&_current, &_checkpointed, TRANSLATION_CURRENT); + + // Determine the file size + int64_t file_size; + r = toku_os_get_file_size(fd, &file_size); + lazy_assert_zero(r); + invariant(file_size >= 0); + _safe_file_size = file_size; + + // Gather the non-empty translations and use them to create the block allocator + toku::scoped_malloc pairs_buf(_checkpointed.smallest_never_used_blocknum.b * + sizeof(struct block_allocator::blockpair)); + struct block_allocator::blockpair *CAST_FROM_VOIDP(pairs, pairs_buf.get()); + uint64_t n_pairs = 0; + for (int64_t i = 0; i < _checkpointed.smallest_never_used_blocknum.b; i++) { + struct block_translation_pair pair = _checkpointed.block_translation[i]; + if (pair.size > 0) { + invariant(pair.u.diskoff != diskoff_unused); + pairs[n_pairs++] = block_allocator::blockpair(pair.u.diskoff, pair.size); + } + } + + _bt_block_allocator.create_from_blockpairs(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, + block_allocator::BLOCK_ALLOCATOR_ALIGNMENT, + pairs, n_pairs); + + return 0; +} + +void block_table::create() { + // Does not initialize the block allocator + _create_internal(); + + _checkpointed.type = TRANSLATION_CHECKPOINTED; + _checkpointed.smallest_never_used_blocknum = make_blocknum(RESERVED_BLOCKNUMS); + _checkpointed.length_of_array = _checkpointed.smallest_never_used_blocknum.b; + _checkpointed.blocknum_freelist_head = freelist_null; + XMALLOC_N(_checkpointed.length_of_array, _checkpointed.block_translation); + for (int64_t i = 0; i < _checkpointed.length_of_array; i++) { + _checkpointed.block_translation[i].size = 0; + _checkpointed.block_translation[i].u.diskoff = diskoff_unused; + } + + // we just created a default checkpointed, now copy it to current. + _copy_translation(&_current, &_checkpointed, TRANSLATION_CURRENT); + + // Create an empty block allocator. + _bt_block_allocator.create(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, + block_allocator::BLOCK_ALLOCATOR_ALIGNMENT); +} + +// TODO: Refactor with FT-303 +static void ft_set_dirty(FT ft, bool for_checkpoint) { + invariant(ft->h->type == FT_CURRENT); + if (for_checkpoint) { + invariant(ft->checkpoint_header->type == FT_CHECKPOINT_INPROGRESS); + ft->checkpoint_header->dirty = 1; + } else { + ft->h->dirty = 1; + } +} + +void block_table::_maybe_truncate_file(int fd, uint64_t size_needed_before) { + toku_mutex_assert_locked(&_mutex); + uint64_t new_size_needed = _bt_block_allocator.allocated_limit(); + //Save a call to toku_os_get_file_size (kernel call) if unlikely to be useful. + if (new_size_needed < size_needed_before && new_size_needed < _safe_file_size) { + nb_mutex_lock(&_safe_file_size_lock, &_mutex); + + // Must hold _safe_file_size_lock to change _safe_file_size. + if (new_size_needed < _safe_file_size) { + int64_t safe_file_size_before = _safe_file_size; + // Not safe to use the 'to-be-truncated' portion until truncate is done. 
+ _safe_file_size = new_size_needed; + _mutex_unlock(); + + uint64_t size_after; + toku_maybe_truncate_file(fd, new_size_needed, safe_file_size_before, &size_after); + _mutex_lock(); + + _safe_file_size = size_after; + } + nb_mutex_unlock(&_safe_file_size_lock); + } +} + +void block_table::maybe_truncate_file_on_open(int fd) { + _mutex_lock(); + _maybe_truncate_file(fd, _safe_file_size); + _mutex_unlock(); +} + +void block_table::_copy_translation(struct translation *dst, struct translation *src, enum translation_type newtype) { + // We intend to malloc a fresh block, so the incoming translation should be empty + invariant_null(dst->block_translation); + + invariant(src->length_of_array >= src->smallest_never_used_blocknum.b); + invariant(newtype == TRANSLATION_DEBUG || + (src->type == TRANSLATION_CURRENT && newtype == TRANSLATION_INPROGRESS) || + (src->type == TRANSLATION_CHECKPOINTED && newtype == TRANSLATION_CURRENT)); + dst->type = newtype; + dst->smallest_never_used_blocknum = src->smallest_never_used_blocknum; + dst->blocknum_freelist_head = src->blocknum_freelist_head; + + // destination btt is of fixed size. Allocate + memcpy the exact length necessary. + dst->length_of_array = dst->smallest_never_used_blocknum.b; + XMALLOC_N(dst->length_of_array, dst->block_translation); + memcpy(dst->block_translation, src->block_translation, dst->length_of_array * sizeof(*dst->block_translation)); + + // New version of btt is not yet stored on disk. + dst->block_translation[RESERVED_BLOCKNUM_TRANSLATION].size = 0; + dst->block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff = diskoff_unused; +} + +int64_t block_table::get_blocks_in_use_unlocked() { + BLOCKNUM b; + struct translation *t = &_current; + int64_t num_blocks = 0; + { + //Reserved blocknums do not get upgraded; They are part of the header. + for (b.b = RESERVED_BLOCKNUMS; b.b < t->smallest_never_used_blocknum.b; b.b++) { + if (t->block_translation[b.b].size != size_is_free) { + num_blocks++; + } + } + } + return num_blocks; +} + +void block_table::_maybe_optimize_translation(struct translation *t) { + //Reduce 'smallest_never_used_blocknum.b' (completely free blocknums instead of just + //on a free list. Doing so requires us to regenerate the free list. + //This is O(n) work, so do it only if you're already doing that. + + BLOCKNUM b; + paranoid_invariant(t->smallest_never_used_blocknum.b >= RESERVED_BLOCKNUMS); + //Calculate how large the free suffix is. + int64_t freed; + { + for (b.b = t->smallest_never_used_blocknum.b; b.b > RESERVED_BLOCKNUMS; b.b--) { + if (t->block_translation[b.b-1].size != size_is_free) { + break; + } + } + freed = t->smallest_never_used_blocknum.b - b.b; + } + if (freed>0) { + t->smallest_never_used_blocknum.b = b.b; + if (t->length_of_array/4 > t->smallest_never_used_blocknum.b) { + //We're using more memory than necessary to represent this now. Reduce. + uint64_t new_length = t->smallest_never_used_blocknum.b * 2; + XREALLOC_N(new_length, t->block_translation); + t->length_of_array = new_length; + //No need to zero anything out. + } + + //Regenerate free list. 
+ t->blocknum_freelist_head.b = freelist_null.b; + for (b.b = RESERVED_BLOCKNUMS; b.b < t->smallest_never_used_blocknum.b; b.b++) { + if (t->block_translation[b.b].size == size_is_free) { + t->block_translation[b.b].u.next_free_blocknum = t->blocknum_freelist_head; + t->blocknum_freelist_head = b; + } + } + } +} + +// block table must be locked by caller of this function +void block_table::note_start_checkpoint_unlocked() { + toku_mutex_assert_locked(&_mutex); + + // We're going to do O(n) work to copy the translation, so we + // can afford to do O(n) work by optimizing the translation + _maybe_optimize_translation(&_current); + + // Copy current translation to inprogress translation. + _copy_translation(&_inprogress, &_current, TRANSLATION_INPROGRESS); + + _checkpoint_skipped = false; +} + +void block_table::note_skipped_checkpoint() { + //Purpose, alert block translation that the checkpoint was skipped, e.x. for a non-dirty header + _mutex_lock(); + paranoid_invariant_notnull(_inprogress.block_translation); + _checkpoint_skipped = true; + _mutex_unlock(); +} + +// Purpose: free any disk space used by previous checkpoint that isn't in use by either +// - current state +// - in-progress checkpoint +// capture inprogress as new checkpointed. +// For each entry in checkpointBTT +// if offset does not match offset in inprogress +// assert offset does not match offset in current +// free (offset,len) from checkpoint +// move inprogress to checkpoint (resetting type) +// inprogress = NULL +void block_table::note_end_checkpoint(int fd) { + // Free unused blocks + _mutex_lock(); + uint64_t allocated_limit_at_start = _bt_block_allocator.allocated_limit(); + paranoid_invariant_notnull(_inprogress.block_translation); + if (_checkpoint_skipped) { + toku_free(_inprogress.block_translation); + memset(&_inprogress, 0, sizeof(_inprogress)); + goto end; + } + + //Make certain inprogress was allocated space on disk + assert(_inprogress.block_translation[RESERVED_BLOCKNUM_TRANSLATION].size > 0); + assert(_inprogress.block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff > 0); + + { + struct translation *t = &_checkpointed; + for (int64_t i = 0; i < t->length_of_array; i++) { + struct block_translation_pair *pair = &t->block_translation[i]; + if (pair->size > 0 && !_translation_prevents_freeing(&_inprogress, make_blocknum(i), pair)) { + assert(!_translation_prevents_freeing(&_current, make_blocknum(i), pair)); + _bt_block_allocator.free_block(pair->u.diskoff); + } + } + toku_free(_checkpointed.block_translation); + _checkpointed = _inprogress; + _checkpointed.type = TRANSLATION_CHECKPOINTED; + memset(&_inprogress, 0, sizeof(_inprogress)); + _maybe_truncate_file(fd, allocated_limit_at_start); + } +end: + _mutex_unlock(); +} + +bool block_table::_is_valid_blocknum(struct translation *t, BLOCKNUM b) { + invariant(t->length_of_array >= t->smallest_never_used_blocknum.b); + return b.b >= 0 && b.b < t->smallest_never_used_blocknum.b; +} + +void block_table::_verify_valid_blocknum(struct translation *UU(t), BLOCKNUM UU(b)) { + invariant(_is_valid_blocknum(t, b)); +} + +bool block_table::_is_valid_freeable_blocknum(struct translation *t, BLOCKNUM b) { + invariant(t->length_of_array >= t->smallest_never_used_blocknum.b); + return b.b >= RESERVED_BLOCKNUMS && b.b < t->smallest_never_used_blocknum.b; +} + +// should be freeable +void block_table::_verify_valid_freeable_blocknum(struct translation *UU(t), BLOCKNUM UU(b)) { + invariant(_is_valid_freeable_blocknum(t, b)); +} + +// Also used only in ft-serialize-test. 
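// Illustrative sketch (not TokuFT code): the reclamation rule note_end_checkpoint()
// applies above, restated over plain vectors. A disk region owned by the *old*
// checkpointed translation may be freed only if the in-progress translation no longer
// maps that blocknum to the same offset (the code additionally asserts that the current
// translation does not either). Names and types are simplified stand-ins.
#include <cstdint>
#include <vector>

struct xlate_entry_sketch { int64_t size; int64_t diskoff; };   // size <= 0 means free/unused

static bool still_referenced_sketch(const std::vector<xlate_entry_sketch> &t,
                                    size_t blocknum, int64_t old_diskoff) {
    return blocknum < t.size() && t[blocknum].diskoff == old_diskoff;
}

// Collect the offsets that become garbage once the new checkpoint replaces the old one.
static std::vector<int64_t> reclaimable_offsets_sketch(const std::vector<xlate_entry_sketch> &old_checkpointed,
                                                       const std::vector<xlate_entry_sketch> &inprogress) {
    std::vector<int64_t> frees;
    for (size_t b = 0; b < old_checkpointed.size(); b++) {
        const xlate_entry_sketch &e = old_checkpointed[b];
        if (e.size > 0 && !still_referenced_sketch(inprogress, b, e.diskoff)) {
            frees.push_back(e.diskoff);   // safe to hand back to the block allocator
        }
    }
    return frees;
}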
+void block_table::block_free(uint64_t offset) { + _mutex_lock(); + _bt_block_allocator.free_block(offset); + _mutex_unlock(); +} + +int64_t block_table::_calculate_size_on_disk(struct translation *t) { + return 8 + // smallest_never_used_blocknum + 8 + // blocknum_freelist_head + t->smallest_never_used_blocknum.b * 16 + // Array + 4; // 4 for checksum +} + +// We cannot free the disk space allocated to this blocknum if it is still in use by the given translation table. +bool block_table::_translation_prevents_freeing(struct translation *t, BLOCKNUM b, struct block_translation_pair *old_pair) { + return t->block_translation && + b.b < t->smallest_never_used_blocknum.b && + old_pair->u.diskoff == t->block_translation[b.b].u.diskoff; +} + +void block_table::_realloc_on_disk_internal(BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, bool for_checkpoint, uint64_t heat) { + toku_mutex_assert_locked(&_mutex); + ft_set_dirty(ft, for_checkpoint); + + struct translation *t = &_current; + struct block_translation_pair old_pair = t->block_translation[b.b]; + //Free the old block if it is not still in use by the checkpoint in progress or the previous checkpoint + bool cannot_free = (bool) + ((!for_checkpoint && _translation_prevents_freeing(&_inprogress, b, &old_pair)) || + _translation_prevents_freeing(&_checkpointed, b, &old_pair)); + if (!cannot_free && old_pair.u.diskoff!=diskoff_unused) { + _bt_block_allocator.free_block(old_pair.u.diskoff); + } + + uint64_t allocator_offset = diskoff_unused; + t->block_translation[b.b].size = size; + if (size > 0) { + // Allocate a new block if the size is greater than 0, + // if the size is just 0, offset will be set to diskoff_unused + _bt_block_allocator.alloc_block(size, heat, &allocator_offset); + } + t->block_translation[b.b].u.diskoff = allocator_offset; + *offset = allocator_offset; + + //Update inprogress btt if appropriate (if called because Pending bit is set). + if (for_checkpoint) { + paranoid_invariant(b.b < _inprogress.length_of_array); + _inprogress.block_translation[b.b] = t->block_translation[b.b]; + } +} + +void block_table::_ensure_safe_write_unlocked(int fd, DISKOFF block_size, DISKOFF block_offset) { + // Requires: holding _mutex + uint64_t size_needed = block_size + block_offset; + if (size_needed > _safe_file_size) { + // Must hold _safe_file_size_lock to change _safe_file_size. + nb_mutex_lock(&_safe_file_size_lock, &_mutex); + if (size_needed > _safe_file_size) { + _mutex_unlock(); + + int64_t size_after; + toku_maybe_preallocate_in_file(fd, size_needed, _safe_file_size, &size_after); + + _mutex_lock(); + _safe_file_size = size_after; + } + nb_mutex_unlock(&_safe_file_size_lock); + } +} + +void block_table::realloc_on_disk(BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, int fd, bool for_checkpoint, uint64_t heat) { + _mutex_lock(); + struct translation *t = &_current; + _verify_valid_freeable_blocknum(t, b); + _realloc_on_disk_internal(b, size, offset, ft, for_checkpoint, heat); + + _ensure_safe_write_unlocked(fd, size, *offset); + _mutex_unlock(); +} + +bool block_table::_pair_is_unallocated(struct block_translation_pair *pair) { + return pair->size == 0 && pair->u.diskoff == diskoff_unused; +} + +// Effect: figure out where to put the inprogress btt on disk, allocate space for it there. +// The space must be 512-byte aligned (both the starting address and the size). +// As a result, the allcoated space may be a little bit bigger (up to the next 512-byte boundary) than the actual btt. 
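// Illustrative sketch (not TokuFT code): the serialized-translation size computed by
// _calculate_size_on_disk() above, and how such a size rounds up to the 512-byte
// granularity the comment mentions for O_DIRECT writes. For n translation slots the
// payload is 8 (smallest_never_used_blocknum) + 8 (blocknum_freelist_head) +
// 16*n (diskoff and size per slot) + 4 (x1764 checksum).
#include <cstdint>

static uint64_t translation_size_on_disk_sketch(uint64_t n_slots) {
    return 8 + 8 + n_slots * 16 + 4;
}

static uint64_t round_up_to_512_sketch(uint64_t size) {
    return ((size + 511) / 512) * 512;
}
// Example: 100 slots serialize to 1620 bytes, which is zero-padded to a 2048-byte write.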
+void block_table::_alloc_inprogress_translation_on_disk_unlocked() { + toku_mutex_assert_locked(&_mutex); + + struct translation *t = &_inprogress; + paranoid_invariant_notnull(t->block_translation); + BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_TRANSLATION); + //Each inprogress is allocated only once + paranoid_invariant(_pair_is_unallocated(&t->block_translation[b.b])); + + //Allocate a new block + int64_t size = _calculate_size_on_disk(t); + uint64_t offset; + _bt_block_allocator.alloc_block(size, 0, &offset); + t->block_translation[b.b].u.diskoff = offset; + t->block_translation[b.b].size = size; +} + +// Effect: Serializes the blocktable to a wbuf (which starts uninitialized) +// A clean shutdown runs checkpoint start so that current and inprogress are copies. +// The resulting wbuf buffer is guaranteed to be be 512-byte aligned and the total length is a multiple of 512 (so we pad with zeros at the end if needd) +// The address is guaranteed to be 512-byte aligned, but the size is not guaranteed. +// It *is* guaranteed that we can read up to the next 512-byte boundary, however +void block_table::serialize_translation_to_wbuf(int fd, struct wbuf *w, + int64_t *address, int64_t *size) { + _mutex_lock(); + struct translation *t = &_inprogress; + + BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_TRANSLATION); + _alloc_inprogress_translation_on_disk_unlocked(); // The allocated block must be 512-byte aligned to make O_DIRECT happy. + uint64_t size_translation = _calculate_size_on_disk(t); + uint64_t size_aligned = roundup_to_multiple(512, size_translation); + assert((int64_t)size_translation==t->block_translation[b.b].size); + { + //Init wbuf + if (0) + printf("%s:%d writing translation table of size_translation %" PRIu64 " at %" PRId64 "\n", __FILE__, __LINE__, size_translation, t->block_translation[b.b].u.diskoff); + char *XMALLOC_N_ALIGNED(512, size_aligned, buf); + for (uint64_t i=size_translation; i<size_aligned; i++) buf[i]=0; // fill in the end of the buffer with zeros. + wbuf_init(w, buf, size_aligned); + } + wbuf_BLOCKNUM(w, t->smallest_never_used_blocknum); + wbuf_BLOCKNUM(w, t->blocknum_freelist_head); + int64_t i; + for (i=0; i<t->smallest_never_used_blocknum.b; i++) { + if (0) + printf("%s:%d %" PRId64 ",%" PRId64 "\n", __FILE__, __LINE__, t->block_translation[i].u.diskoff, t->block_translation[i].size); + wbuf_DISKOFF(w, t->block_translation[i].u.diskoff); + wbuf_DISKOFF(w, t->block_translation[i].size); + } + uint32_t checksum = toku_x1764_finish(&w->checksum); + wbuf_int(w, checksum); + *address = t->block_translation[b.b].u.diskoff; + *size = size_translation; + assert((*address)%512 == 0); + + _ensure_safe_write_unlocked(fd, size_aligned, *address); + _mutex_unlock(); +} + +// Perhaps rename: purpose is get disk address of a block, given its blocknum (blockid?) +void block_table::_translate_blocknum_to_offset_size_unlocked(BLOCKNUM b, DISKOFF *offset, DISKOFF *size) { + struct translation *t = &_current; + _verify_valid_blocknum(t, b); + if (offset) { + *offset = t->block_translation[b.b].u.diskoff; + } + if (size) { + *size = t->block_translation[b.b].size; + } +} + +// Perhaps rename: purpose is get disk address of a block, given its blocknum (blockid?) 
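// Illustrative usage sketch (not TokuFT code): reading a node starts by asking the block
// table where blocknum b currently lives; the offset/size pair then drives the actual
// pread and deserialization, which happen elsewhere in the tree. This caller and its
// name are hypothetical.
#include "ft/serialize/block_table.h"

static void read_block_sketch(block_table *bt, int fd, BLOCKNUM b) {
    DISKOFF offset = 0, size = 0;
    bt->translate_blocknum_to_offset_size(b, &offset, &size);
    // ... pread(fd, buf, size, offset) and deserialize; error handling omitted ...
    (void) fd;
}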
+void block_table::translate_blocknum_to_offset_size(BLOCKNUM b, DISKOFF *offset, DISKOFF *size) { + _mutex_lock(); + _translate_blocknum_to_offset_size_unlocked(b, offset, size); + _mutex_unlock(); +} + +// Only called by toku_allocate_blocknum +// Effect: expand the array to maintain size invariant +// given that one more never-used blocknum will soon be used. +void block_table::_maybe_expand_translation(struct translation *t) { + if (t->length_of_array <= t->smallest_never_used_blocknum.b) { + //expansion is necessary + uint64_t new_length = t->smallest_never_used_blocknum.b * 2; + XREALLOC_N(new_length, t->block_translation); + uint64_t i; + for (i = t->length_of_array; i < new_length; i++) { + t->block_translation[i].u.next_free_blocknum = freelist_null; + t->block_translation[i].size = size_is_free; + } + t->length_of_array = new_length; + } +} + +void block_table::_allocate_blocknum_unlocked(BLOCKNUM *res, FT ft) { + toku_mutex_assert_locked(&_mutex); + BLOCKNUM result; + struct translation *t = &_current; + if (t->blocknum_freelist_head.b == freelist_null.b) { + // no previously used blocknums are available + // use a never used blocknum + _maybe_expand_translation(t); //Ensure a never used blocknums is available + result = t->smallest_never_used_blocknum; + t->smallest_never_used_blocknum.b++; + } else { // reuse a previously used blocknum + result = t->blocknum_freelist_head; + BLOCKNUM next = t->block_translation[result.b].u.next_free_blocknum; + t->blocknum_freelist_head = next; + } + //Verify the blocknum is free + paranoid_invariant(t->block_translation[result.b].size == size_is_free); + //blocknum is not free anymore + t->block_translation[result.b].u.diskoff = diskoff_unused; + t->block_translation[result.b].size = 0; + _verify_valid_freeable_blocknum(t, result); + *res = result; + ft_set_dirty(ft, false); +} + +void block_table::allocate_blocknum(BLOCKNUM *res, FT ft) { + _mutex_lock(); + _allocate_blocknum_unlocked(res, ft); + _mutex_unlock(); +} + +void block_table::_free_blocknum_in_translation(struct translation *t, BLOCKNUM b) { + _verify_valid_freeable_blocknum(t, b); + paranoid_invariant(t->block_translation[b.b].size != size_is_free); + + t->block_translation[b.b].size = size_is_free; + t->block_translation[b.b].u.next_free_blocknum = t->blocknum_freelist_head; + t->blocknum_freelist_head = b; +} + +// Effect: Free a blocknum. +// If the blocknum holds the only reference to a block on disk, free that block +void block_table::_free_blocknum_unlocked(BLOCKNUM *bp, FT ft, bool for_checkpoint) { + toku_mutex_assert_locked(&_mutex); + BLOCKNUM b = *bp; + bp->b = 0; //Remove caller's reference. + + struct block_translation_pair old_pair = _current.block_translation[b.b]; + + _free_blocknum_in_translation(&_current, b); + if (for_checkpoint) { + paranoid_invariant(ft->checkpoint_header->type == FT_CHECKPOINT_INPROGRESS); + _free_blocknum_in_translation(&_inprogress, b); + } + + //If the size is 0, no disk block has ever been assigned to this blocknum. 
+ if (old_pair.size > 0) { + //Free the old block if it is not still in use by the checkpoint in progress or the previous checkpoint + bool cannot_free = (bool) + (_translation_prevents_freeing(&_inprogress, b, &old_pair) || + _translation_prevents_freeing(&_checkpointed, b, &old_pair)); + if (!cannot_free) { + _bt_block_allocator.free_block(old_pair.u.diskoff); + } + } + else { + paranoid_invariant(old_pair.size==0); + paranoid_invariant(old_pair.u.diskoff == diskoff_unused); + } + ft_set_dirty(ft, for_checkpoint); +} + +void block_table::free_blocknum(BLOCKNUM *bp, FT ft, bool for_checkpoint) { + _mutex_lock(); + _free_blocknum_unlocked(bp, ft, for_checkpoint); + _mutex_unlock(); +} + +// Verify there are no free blocks. +void block_table::verify_no_free_blocknums() { + invariant(_current.blocknum_freelist_head.b == freelist_null.b); +} + +// Frees blocknums that have a size of 0 and unused diskoff +// Currently used for eliminating unused cached rollback log nodes +void block_table::free_unused_blocknums(BLOCKNUM root) { + _mutex_lock(); + int64_t smallest = _current.smallest_never_used_blocknum.b; + for (int64_t i=RESERVED_BLOCKNUMS; i < smallest; i++) { + if (i == root.b) { + continue; + } + BLOCKNUM b = make_blocknum(i); + if (_current.block_translation[b.b].size == 0) { + invariant(_current.block_translation[b.b].u.diskoff == diskoff_unused); + _free_blocknum_in_translation(&_current, b); + } + } + _mutex_unlock(); +} + +bool block_table::_no_data_blocks_except_root(BLOCKNUM root) { + bool ok = true; + _mutex_lock(); + int64_t smallest = _current.smallest_never_used_blocknum.b; + if (root.b < RESERVED_BLOCKNUMS) { + ok = false; + goto cleanup; + } + for (int64_t i = RESERVED_BLOCKNUMS; i < smallest; i++) { + if (i == root.b) { + continue; + } + BLOCKNUM b = make_blocknum(i); + if (_current.block_translation[b.b].size != size_is_free) { + ok = false; + goto cleanup; + } + } + cleanup: + _mutex_unlock(); + return ok; +} + +// Verify there are no data blocks except root. +// TODO(leif): This actually takes a lock, but I don't want to fix all the callers right now. +void block_table::verify_no_data_blocks_except_root(BLOCKNUM UU(root)) { + paranoid_invariant(_no_data_blocks_except_root(root)); +} + +bool block_table::_blocknum_allocated(BLOCKNUM b) { + _mutex_lock(); + struct translation *t = &_current; + _verify_valid_blocknum(t, b); + bool ok = t->block_translation[b.b].size != size_is_free; + _mutex_unlock(); + return ok; +} + +// Verify a blocknum is currently allocated. 
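// Illustrative sketch (not TokuFT code): the blocknum free-list discipline used by
// allocate_blocknum()/free_blocknum() above, restated over a plain vector. Freed slots
// are threaded into a singly linked list through the entries themselves; allocation pops
// the head of that list and only takes a never-used slot (growing the array) when the
// list is empty. Names are simplified stand-ins.
#include <cstdint>
#include <vector>

struct blocknum_slot_sketch { int64_t next_free; bool free; };   // next_free == -1 ends the list

struct blocknum_pool_sketch {
    std::vector<blocknum_slot_sketch> slots;
    int64_t freelist_head = -1;

    int64_t allocate() {
        if (freelist_head != -1) {                    // reuse a previously freed blocknum
            int64_t b = freelist_head;
            freelist_head = slots[b].next_free;
            slots[b].free = false;
            return b;
        }
        slots.push_back({-1, false});                 // otherwise take a never-used one
        return (int64_t) slots.size() - 1;
    }

    void release(int64_t b) {                         // push onto the free list
        slots[b].free = true;
        slots[b].next_free = freelist_head;
        freelist_head = b;
    }
};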
+void block_table::verify_blocknum_allocated(BLOCKNUM UU(b)) { + paranoid_invariant(_blocknum_allocated(b)); +} + +// Only used by toku_dump_translation table (debug info) +void block_table::_dump_translation_internal(FILE *f, struct translation *t) { + if (t->block_translation) { + BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_TRANSLATION); + fprintf(f, " length_of_array[%" PRId64 "]", t->length_of_array); + fprintf(f, " smallest_never_used_blocknum[%" PRId64 "]", t->smallest_never_used_blocknum.b); + fprintf(f, " blocknum_free_list_head[%" PRId64 "]", t->blocknum_freelist_head.b); + fprintf(f, " size_on_disk[%" PRId64 "]", t->block_translation[b.b].size); + fprintf(f, " location_on_disk[%" PRId64 "]\n", t->block_translation[b.b].u.diskoff); + int64_t i; + for (i=0; i<t->length_of_array; i++) { + fprintf(f, " %" PRId64 ": %" PRId64 " %" PRId64 "\n", i, t->block_translation[i].u.diskoff, t->block_translation[i].size); + } + fprintf(f, "\n"); + } else { + fprintf(f, " does not exist\n"); + } +} + +// Only used by toku_ft_dump which is only for debugging purposes +// "pretty" just means we use tabs so we can parse output easier later +void block_table::dump_translation_table_pretty(FILE *f) { + _mutex_lock(); + struct translation *t = &_checkpointed; + assert(t->block_translation != nullptr); + for (int64_t i = 0; i < t->length_of_array; ++i) { + fprintf(f, "%" PRId64 "\t%" PRId64 "\t%" PRId64 "\n", i, t->block_translation[i].u.diskoff, t->block_translation[i].size); + } + _mutex_unlock(); +} + +// Only used by toku_ft_dump which is only for debugging purposes +void block_table::dump_translation_table(FILE *f) { + _mutex_lock(); + fprintf(f, "Current block translation:"); + _dump_translation_internal(f, &_current); + fprintf(f, "Checkpoint in progress block translation:"); + _dump_translation_internal(f, &_inprogress); + fprintf(f, "Checkpointed block translation:"); + _dump_translation_internal(f, &_checkpointed); + _mutex_unlock(); +} + +// Only used by ftdump +void block_table::blocknum_dump_translation(BLOCKNUM b) { + _mutex_lock(); + + struct translation *t = &_current; + if (b.b < t->length_of_array) { + struct block_translation_pair *bx = &t->block_translation[b.b]; + printf("%" PRId64 ": %" PRId64 " %" PRId64 "\n", b.b, bx->u.diskoff, bx->size); + } + _mutex_unlock(); +} + +// Must not call this function when anything else is using the blocktable. +// No one may use the blocktable afterwards. 
+void block_table::destroy(void) { + // TODO: translation.destroy(); + toku_free(_current.block_translation); + toku_free(_inprogress.block_translation); + toku_free(_checkpointed.block_translation); + + _bt_block_allocator.destroy(); + toku_mutex_destroy(&_mutex); + nb_mutex_destroy(&_safe_file_size_lock); +} + +int block_table::_translation_deserialize_from_buffer(struct translation *t, + DISKOFF location_on_disk, + uint64_t size_on_disk, + // out: buffer with serialized translation + unsigned char *translation_buffer) { + int r = 0; + assert(location_on_disk != 0); + t->type = TRANSLATION_CHECKPOINTED; + + // check the checksum + uint32_t x1764 = toku_x1764_memory(translation_buffer, size_on_disk - 4); + uint64_t offset = size_on_disk - 4; + uint32_t stored_x1764 = toku_dtoh32(*(int*)(translation_buffer + offset)); + if (x1764 != stored_x1764) { + fprintf(stderr, "Translation table checksum failure: calc=0x%08x read=0x%08x\n", x1764, stored_x1764); + r = TOKUDB_BAD_CHECKSUM; + goto exit; + } + + struct rbuf rb; + rb.buf = translation_buffer; + rb.ndone = 0; + rb.size = size_on_disk-4;//4==checksum + + t->smallest_never_used_blocknum = rbuf_blocknum(&rb); + t->length_of_array = t->smallest_never_used_blocknum.b; + invariant(t->smallest_never_used_blocknum.b >= RESERVED_BLOCKNUMS); + t->blocknum_freelist_head = rbuf_blocknum(&rb); + XMALLOC_N(t->length_of_array, t->block_translation); + for (int64_t i = 0; i < t->length_of_array; i++) { + t->block_translation[i].u.diskoff = rbuf_DISKOFF(&rb); + t->block_translation[i].size = rbuf_DISKOFF(&rb); + } + invariant(_calculate_size_on_disk(t) == (int64_t) size_on_disk); + invariant(t->block_translation[RESERVED_BLOCKNUM_TRANSLATION].size == (int64_t) size_on_disk); + invariant(t->block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff == location_on_disk); + +exit: + return r; +} + +int block_table::iterate(enum translation_type type, + BLOCKTABLE_CALLBACK f, void *extra, bool data_only, bool used_only) { + struct translation *src; + + int r = 0; + switch (type) { + case TRANSLATION_CURRENT: + src = &_current; + break; + case TRANSLATION_INPROGRESS: + src = &_inprogress; + break; + case TRANSLATION_CHECKPOINTED: + src = &_checkpointed; + break; + default: + r = EINVAL; + } + + struct translation fakecurrent; + memset(&fakecurrent, 0, sizeof(struct translation)); + + struct translation *t = &fakecurrent; + if (r == 0) { + _mutex_lock(); + _copy_translation(t, src, TRANSLATION_DEBUG); + t->block_translation[RESERVED_BLOCKNUM_TRANSLATION] = + src->block_translation[RESERVED_BLOCKNUM_TRANSLATION]; + _mutex_unlock(); + int64_t i; + for (i=0; i<t->smallest_never_used_blocknum.b; i++) { + struct block_translation_pair pair = t->block_translation[i]; + if (data_only && i< RESERVED_BLOCKNUMS) continue; + if (used_only && pair.size <= 0) continue; + r = f(make_blocknum(i), pair.size, pair.u.diskoff, extra); + if (r!=0) break; + } + toku_free(t->block_translation); + } + return r; +} + +typedef struct { + int64_t used_space; + int64_t total_space; +} frag_extra; + +static int frag_helper(BLOCKNUM UU(b), int64_t size, int64_t address, void *extra) { + frag_extra *info = (frag_extra *) extra; + + if (size + address > info->total_space) + info->total_space = size + address; + info->used_space += size; + return 0; +} + +void block_table::internal_fragmentation(int64_t *total_sizep, int64_t *used_sizep) { + frag_extra info = { 0, 0 }; + int r = iterate(TRANSLATION_CHECKPOINTED, frag_helper, &info, false, true); + assert_zero(r); + + if (total_sizep) 
*total_sizep = info.total_space; + if (used_sizep) *used_sizep = info.used_space; +} + +void block_table::_realloc_descriptor_on_disk_unlocked(DISKOFF size, DISKOFF *offset, FT ft) { + toku_mutex_assert_locked(&_mutex); + BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_DESCRIPTOR); + _realloc_on_disk_internal(b, size, offset, ft, false, 0); +} + +void block_table::realloc_descriptor_on_disk(DISKOFF size, DISKOFF *offset, FT ft, int fd) { + _mutex_lock(); + _realloc_descriptor_on_disk_unlocked(size, offset, ft); + _ensure_safe_write_unlocked(fd, size, *offset); + _mutex_unlock(); +} + +void block_table::get_descriptor_offset_size(DISKOFF *offset, DISKOFF *size) { + _mutex_lock(); + BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_DESCRIPTOR); + _translate_blocknum_to_offset_size_unlocked(b, offset, size); + _mutex_unlock(); +} + +void block_table::get_fragmentation_unlocked(TOKU_DB_FRAGMENTATION report) { + // Requires: blocktable lock is held. + // Requires: report->file_size_bytes is already filled in. + + // Count the headers. + report->data_bytes = block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE; + report->data_blocks = 1; + report->checkpoint_bytes_additional = block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE; + report->checkpoint_blocks_additional = 1; + + struct translation *current = &_current; + for (int64_t i = 0; i < current->length_of_array; i++) { + struct block_translation_pair *pair = ¤t->block_translation[i]; + if (pair->size > 0) { + report->data_bytes += pair->size; + report->data_blocks++; + } + } + + struct translation *checkpointed = &_checkpointed; + for (int64_t i = 0; i < checkpointed->length_of_array; i++) { + struct block_translation_pair *pair = &checkpointed->block_translation[i]; + if (pair->size > 0 && !(i < current->length_of_array && + current->block_translation[i].size > 0 && + current->block_translation[i].u.diskoff == pair->u.diskoff)) { + report->checkpoint_bytes_additional += pair->size; + report->checkpoint_blocks_additional++; + } + } + + struct translation *inprogress = &_inprogress; + for (int64_t i = 0; i < inprogress->length_of_array; i++) { + struct block_translation_pair *pair = &inprogress->block_translation[i]; + if (pair->size > 0 && !(i < current->length_of_array && + current->block_translation[i].size > 0 && + current->block_translation[i].u.diskoff == pair->u.diskoff) && + !(i < checkpointed->length_of_array && + checkpointed->block_translation[i].size > 0 && + checkpointed->block_translation[i].u.diskoff == pair->u.diskoff)) { + report->checkpoint_bytes_additional += pair->size; + report->checkpoint_blocks_additional++; + } + } + + _bt_block_allocator.get_unused_statistics(report); +} + +void block_table::get_info64(struct ftinfo64 *s) { + _mutex_lock(); + + struct translation *current = &_current; + s->num_blocks_allocated = current->length_of_array; + s->num_blocks_in_use = 0; + s->size_allocated = 0; + s->size_in_use = 0; + + for (int64_t i = 0; i < current->length_of_array; ++i) { + struct block_translation_pair *block = ¤t->block_translation[i]; + if (block->size != size_is_free) { + ++s->num_blocks_in_use; + s->size_in_use += block->size; + if (block->u.diskoff != diskoff_unused) { + uint64_t limit = block->u.diskoff + block->size; + if (limit > s->size_allocated) { + s->size_allocated = limit; + } + } + } + } + + _mutex_unlock(); +} + +int block_table::iterate_translation_tables(uint64_t checkpoint_count, + int (*iter)(uint64_t checkpoint_count, + int64_t total_num_rows, + int64_t blocknum, + int64_t diskoff, + int64_t size, + void *extra), 
+ void *iter_extra) { + int error = 0; + _mutex_lock(); + + int64_t total_num_rows = _current.length_of_array + _checkpointed.length_of_array; + for (int64_t i = 0; error == 0 && i < _current.length_of_array; ++i) { + struct block_translation_pair *block = &_current.block_translation[i]; + error = iter(checkpoint_count, total_num_rows, i, block->u.diskoff, block->size, iter_extra); + } + for (int64_t i = 0; error == 0 && i < _checkpointed.length_of_array; ++i) { + struct block_translation_pair *block = &_checkpointed.block_translation[i]; + error = iter(checkpoint_count - 1, total_num_rows, i, block->u.diskoff, block->size, iter_extra); + } + + _mutex_unlock(); + return error; +} diff --git a/storage/tokudb/ft-index/ft/serialize/block_table.h b/storage/tokudb/ft-index/ft/serialize/block_table.h new file mode 100644 index 00000000000..534befaf426 --- /dev/null +++ b/storage/tokudb/ft-index/ft/serialize/block_table.h @@ -0,0 +1,338 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2014 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. 
+ + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#pragma once + +#ident "Copyright (c) 2007-2014 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." + +#include <db.h> + +#include "portability/toku_stdint.h" +#include "portability/toku_pthread.h" + +#include "ft/serialize/block_allocator.h" +#include "util/nb_mutex.h" + +struct ft; + +typedef struct blocknum_s { int64_t b; } BLOCKNUM; + +// Offset in a disk. -1 is the 'null' pointer. +typedef int64_t DISKOFF; + +// Unmovable reserved first, then reallocable. +// We reserve one blocknum for the translation table itself. +enum { + RESERVED_BLOCKNUM_NULL = 0, + RESERVED_BLOCKNUM_TRANSLATION = 1, + RESERVED_BLOCKNUM_DESCRIPTOR = 2, + RESERVED_BLOCKNUMS +}; + +typedef int (*BLOCKTABLE_CALLBACK)(BLOCKNUM b, int64_t size, int64_t address, void *extra); + +static inline BLOCKNUM make_blocknum(int64_t b) { + BLOCKNUM result = { .b = b }; + return result; +} +static const BLOCKNUM ROLLBACK_NONE = { .b = 0 }; + +/** + * There are three copies of the translation table (btt) in the block table: + * + * checkpointed Is initialized by deserializing from disk, + * and is the only version ever read from disk. + * When read from disk it is copied to current. + * It is immutable. It can be replaced by an inprogress btt. + * + * inprogress Is only filled by copying from current, + * and is the only version ever serialized to disk. + * (It is serialized to disk on checkpoint and clean shutdown.) + * At end of checkpoint it replaces 'checkpointed'. + * During a checkpoint, any 'pending' dirty writes will update + * inprogress. + * + * current Is initialized by copying from checkpointed, + * is the only version ever modified while the database is in use, + * and is the only version ever copied to inprogress. + * It is never stored on disk. 
+ */ +class block_table { +public: + enum translation_type { + TRANSLATION_NONE = 0, + TRANSLATION_CURRENT, + TRANSLATION_INPROGRESS, + TRANSLATION_CHECKPOINTED, + TRANSLATION_DEBUG + }; + + void create(); + + int create_from_buffer(int fd, DISKOFF location_on_disk, DISKOFF size_on_disk, unsigned char *translation_buffer); + + void destroy(); + + // Checkpointing + void note_start_checkpoint_unlocked(); + void note_end_checkpoint(int fd); + void note_skipped_checkpoint(); + void maybe_truncate_file_on_open(int fd); + + // Blocknums + void allocate_blocknum(BLOCKNUM *res, struct ft *ft); + void realloc_on_disk(BLOCKNUM b, DISKOFF size, DISKOFF *offset, struct ft *ft, int fd, bool for_checkpoint, uint64_t heat); + void free_blocknum(BLOCKNUM *b, struct ft *ft, bool for_checkpoint); + void translate_blocknum_to_offset_size(BLOCKNUM b, DISKOFF *offset, DISKOFF *size); + void free_unused_blocknums(BLOCKNUM root); + void realloc_descriptor_on_disk(DISKOFF size, DISKOFF *offset, struct ft *ft, int fd); + void get_descriptor_offset_size(DISKOFF *offset, DISKOFF *size); + + // External verfication + void verify_blocknum_allocated(BLOCKNUM b); + void verify_no_data_blocks_except_root(BLOCKNUM root); + void verify_no_free_blocknums(); + + // Serialization + void serialize_translation_to_wbuf(int fd, struct wbuf *w, int64_t *address, int64_t *size); + + // DEBUG ONLY (ftdump included), tests included + void blocknum_dump_translation(BLOCKNUM b); + void dump_translation_table_pretty(FILE *f); + void dump_translation_table(FILE *f); + void block_free(uint64_t offset); + + int iterate(enum translation_type type, BLOCKTABLE_CALLBACK f, void *extra, bool data_only, bool used_only); + void internal_fragmentation(int64_t *total_sizep, int64_t *used_sizep); + + // Requires: blocktable lock is held. + // Requires: report->file_size_bytes is already filled in. + void get_fragmentation_unlocked(TOKU_DB_FRAGMENTATION report); + + int64_t get_blocks_in_use_unlocked(); + + void get_info64(struct ftinfo64 *); + + int iterate_translation_tables(uint64_t, int (*)(uint64_t, int64_t, int64_t, int64_t, int64_t, void *), void *); + +private: + struct block_translation_pair { + // If in the freelist, use next_free_blocknum, otherwise diskoff. + union { + DISKOFF diskoff; + BLOCKNUM next_free_blocknum; + } u; + + // Set to 0xFFFFFFFFFFFFFFFF for free + DISKOFF size; + }; + + // This is the BTT (block translation table) + // When the translation (btt) is stored on disk: + // In Header: + // size_on_disk + // location_on_disk + // In block translation table (in order): + // smallest_never_used_blocknum + // blocknum_freelist_head + // array + // a checksum + struct translation { + enum translation_type type; + + // Number of elements in array (block_translation). 
always >= smallest_never_used_blocknum + int64_t length_of_array; + BLOCKNUM smallest_never_used_blocknum; + + // Next (previously used) unused blocknum (free list) + BLOCKNUM blocknum_freelist_head; + struct block_translation_pair *block_translation; + + // size_on_disk is stored in block_translation[RESERVED_BLOCKNUM_TRANSLATION].size + // location_on is stored in block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff + }; + + void _create_internal(); + int _translation_deserialize_from_buffer(struct translation *t, // destination into which to deserialize + DISKOFF location_on_disk, // location of translation_buffer + uint64_t size_on_disk, + unsigned char * translation_buffer); // buffer with serialized translation + + void _copy_translation(struct translation *dst, struct translation *src, enum translation_type newtype); + void _maybe_optimize_translation(struct translation *t); + void _maybe_expand_translation(struct translation *t); + bool _translation_prevents_freeing(struct translation *t, BLOCKNUM b, struct block_translation_pair *old_pair); + void _free_blocknum_in_translation(struct translation *t, BLOCKNUM b); + int64_t _calculate_size_on_disk(struct translation *t); + bool _pair_is_unallocated(struct block_translation_pair *pair); + void _alloc_inprogress_translation_on_disk_unlocked(); + void _dump_translation_internal(FILE *f, struct translation *t); + + // Blocknum management + void _allocate_blocknum_unlocked(BLOCKNUM *res, struct ft *ft); + void _free_blocknum_unlocked(BLOCKNUM *bp, struct ft *ft, bool for_checkpoint); + void _realloc_descriptor_on_disk_unlocked(DISKOFF size, DISKOFF *offset, struct ft *ft); + void _realloc_on_disk_internal(BLOCKNUM b, DISKOFF size, DISKOFF *offset, struct ft *ft, bool for_checkpoint, uint64_t heat); + void _translate_blocknum_to_offset_size_unlocked(BLOCKNUM b, DISKOFF *offset, DISKOFF *size); + + // File management + void _maybe_truncate_file(int fd, uint64_t size_needed_before); + void _ensure_safe_write_unlocked(int fd, DISKOFF block_size, DISKOFF block_offset); + + // Verification + bool _is_valid_blocknum(struct translation *t, BLOCKNUM b); + void _verify_valid_blocknum(struct translation *t, BLOCKNUM b); + bool _is_valid_freeable_blocknum(struct translation *t, BLOCKNUM b); + void _verify_valid_freeable_blocknum(struct translation *t, BLOCKNUM b); + bool _no_data_blocks_except_root(BLOCKNUM root); + bool _blocknum_allocated(BLOCKNUM b); + + // Locking + // + // TODO: Move the lock to the FT + void _mutex_lock(); + void _mutex_unlock(); + + // The current translation is the one used by client threads. + // It is not represented on disk. + struct translation _current; + + // The translation used by the checkpoint currently in progress. + // If the checkpoint thread allocates a block, it must also update the current translation. + struct translation _inprogress; + + // The translation for the data that shall remain inviolate on disk until the next checkpoint finishes, + // after which any blocks used only in this translation can be freed. + struct translation _checkpointed; + + // The in-memory data structure for block allocation. + // There is no on-disk data structure for block allocation. + // Note: This is *allocation* not *translation* - the block allocator is unaware of which + // blocks are used for which translation, but simply allocates and deallocates blocks. 
+ block_allocator _bt_block_allocator; + toku_mutex_t _mutex; + struct nb_mutex _safe_file_size_lock; + bool _checkpoint_skipped; + uint64_t _safe_file_size; + + // Because the lock is in a weird place right now + friend void toku_ft_lock(struct ft *ft); + friend void toku_ft_unlock(struct ft *ft); +}; + +// For serialize / deserialize + +#include "ft/serialize/wbuf.h" + +static inline void wbuf_BLOCKNUM (struct wbuf *w, BLOCKNUM b) { + wbuf_ulonglong(w, b.b); +} + +static inline void wbuf_nocrc_BLOCKNUM (struct wbuf *w, BLOCKNUM b) { + wbuf_nocrc_ulonglong(w, b.b); +} + +static inline void wbuf_DISKOFF(struct wbuf *wb, DISKOFF off) { + wbuf_ulonglong(wb, (uint64_t) off); +} + +#include "ft/serialize/rbuf.h" + +static inline DISKOFF rbuf_DISKOFF(struct rbuf *rb) { + return rbuf_ulonglong(rb); +} + +static inline BLOCKNUM rbuf_blocknum(struct rbuf *rb) { + BLOCKNUM result = make_blocknum(rbuf_longlong(rb)); + return result; +} + +static inline void rbuf_ma_BLOCKNUM(struct rbuf *rb, memarena *UU(ma), BLOCKNUM *blocknum) { + *blocknum = rbuf_blocknum(rb); +} diff --git a/storage/tokudb/ft-index/ft/compress.cc b/storage/tokudb/ft-index/ft/serialize/compress.cc index 2b0187e0b4f..e905220026b 100644 --- a/storage/tokudb/ft-index/ft/compress.cc +++ b/storage/tokudb/ft-index/ft/serialize/compress.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -165,11 +165,12 @@ void toku_compress (enum toku_compression_method a, assert(1 <= *destLen); *destLen = 1; } else { - qlz_state_compress *XCALLOC(qsc); + toku::scoped_calloc qsc_buf(sizeof(qlz_state_compress)); + qlz_state_compress *qsc = reinterpret_cast<qlz_state_compress *>(qsc_buf.get()); size_t actual_destlen = qlz_compress(source, (char*)(dest+1), sourceLen, qsc); - assert(actual_destlen +1 <= *destLen); - *destLen = actual_destlen+1; // add one for the rfc1950-style header byte. - toku_free(qsc); + assert(actual_destlen + 1 <= *destLen); + // add one for the rfc1950-style header byte. + *destLen = actual_destlen + 1; } // Fill in that first byte dest[0] = TOKU_QUICKLZ_METHOD + (QLZ_COMPRESSION_LEVEL << 4); diff --git a/storage/tokudb/ft-index/ft/compress.h b/storage/tokudb/ft-index/ft/serialize/compress.h index bc25b55be8b..8b3bb2185b6 100644 --- a/storage/tokudb/ft-index/ft/compress.h +++ b/storage/tokudb/ft-index/ft/serialize/compress.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,13 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef TOKU_COMPRESS_H -#define TOKU_COMPRESS_H - - #include <zlib.h> #include <db.h> @@ -131,5 +129,3 @@ void toku_decompress (Bytef *dest, uLongf destLen, // This function can decompress data compressed with either zlib or quicklz compression methods (calling toku_compress(), which puts an appropriate header on so we know which it is.) 
// Requires: destLen is equal to the actual decompressed size of the data. // Requires: The source must have been properly compressed. - -#endif diff --git a/storage/tokudb/ft-index/ft/ft-node-deserialize.cc b/storage/tokudb/ft-index/ft/serialize/ft-node-deserialize.cc index f309a32b44a..4e55c222eb7 100644 --- a/storage/tokudb/ft-index/ft/ft-node-deserialize.cc +++ b/storage/tokudb/ft-index/ft/serialize/ft-node-deserialize.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,8 +89,9 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <ft-internal.h> -#include <db.h> +#include "ft/node.h" +#include "ft/ft-internal.h" +#include "ft/serialize/ft_node-serialize.h" /* * ft-node-deserialize.c - @@ -111,7 +112,7 @@ void initialize_ftnode(FTNODE node, BLOCKNUM blocknum) { node->fullhash = 0xDEADBEEF; // <CER> Is this 'spoof' ok? - node->thisnodename = blocknum; + node->blocknum = blocknum; node->dirty = 0; node->bp = NULL; // <CER> Can we use this initialization as a correctness assert in @@ -132,7 +133,7 @@ int read_and_check_magic(struct rbuf *rb) { int r = 0; - bytevec magic; + const void *magic; rbuf_literal_bytes(rb, &magic, 8); if (memcmp(magic, "tokuleaf", 8)!=0 && memcmp(magic, "tokunode", 8)!=0) { diff --git a/storage/tokudb/ft-index/ft/ft-serialize.cc b/storage/tokudb/ft-index/ft/serialize/ft-serialize.cc index 4a4817e7f6c..4e447592255 100644 --- a/storage/tokudb/ft-index/ft/ft-serialize.cc +++ b/storage/tokudb/ft-index/ft/serialize/ft-serialize.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,13 +89,17 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "compress.h" -#include "ft.h" -#include "ft-internal.h" +#include "ft/ft.h" +#include "ft/ft-internal.h" +#include "ft/msg.h" +#include "ft/serialize/block_allocator.h" +#include "ft/serialize/block_table.h" +#include "ft/serialize/compress.h" +#include "ft/serialize/ft-serialize.h" // not version-sensitive because we only serialize a descriptor using the current layout_version uint32_t -toku_serialize_descriptor_size(const DESCRIPTOR desc) { +toku_serialize_descriptor_size(DESCRIPTOR desc) { //Checksum NOT included in this. Checksum only exists in header's version. 
uint32_t size = 4; // four bytes for size of descriptor size += desc->dbt.size; @@ -103,7 +107,7 @@ toku_serialize_descriptor_size(const DESCRIPTOR desc) { } static uint32_t -deserialize_descriptor_size(const DESCRIPTOR desc, int layout_version) { +deserialize_descriptor_size(DESCRIPTOR desc, int layout_version) { //Checksum NOT included in this. Checksum only exists in header's version. uint32_t size = 4; // four bytes for size of descriptor if (layout_version == FT_LAYOUT_VERSION_13) @@ -112,8 +116,7 @@ deserialize_descriptor_size(const DESCRIPTOR desc, int layout_version) { return size; } -void -toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const DESCRIPTOR desc) { +void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, DESCRIPTOR desc) { wbuf_bytes(wb, desc->dbt.data, desc->dbt.size); } @@ -121,7 +124,7 @@ toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const DESCRIPTOR des //descriptor. //Descriptors are NOT written during the header checkpoint process. void -toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISKOFF offset) { +toku_serialize_descriptor_contents_to_fd(int fd, DESCRIPTOR desc, DISKOFF offset) { // make the checksum int64_t size = toku_serialize_descriptor_size(desc)+4; //4 for checksum int64_t size_aligned = roundup_to_multiple(512, size); @@ -146,32 +149,24 @@ toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISKOFF static void deserialize_descriptor_from_rbuf(struct rbuf *rb, DESCRIPTOR desc, int layout_version) { if (layout_version <= FT_LAYOUT_VERSION_13) { - // in older versions of TokuDB the Descriptor had a 4 byte + // in older versions of tokuft, the descriptor had a 4 byte // version, which we skip over (void) rbuf_int(rb); } uint32_t size; - bytevec data; + const void *data; rbuf_bytes(rb, &data, &size); - bytevec data_copy = data; - if (size > 0) { - data_copy = toku_memdup(data, size); //Cannot keep the reference from rbuf. Must copy. - lazy_assert(data_copy); - } else { - lazy_assert(size==0); - data_copy = NULL; - } - toku_fill_dbt(&desc->dbt, data_copy, size); + toku_memdup_dbt(&desc->dbt, data, size); } static int -deserialize_descriptor_from(int fd, BLOCK_TABLE bt, DESCRIPTOR desc, int layout_version) { +deserialize_descriptor_from(int fd, block_table *bt, DESCRIPTOR desc, int layout_version) { int r = 0; DISKOFF offset; DISKOFF size; - unsigned char *dbuf = NULL; - toku_get_descriptor_offset_size(bt, &offset, &size); + unsigned char *dbuf = nullptr; + bt->get_descriptor_offset_size(&offset, &size); memset(desc, 0, sizeof(*desc)); if (size > 0) { lazy_assert(size>=4); //4 for checksum @@ -195,12 +190,10 @@ deserialize_descriptor_from(int fd, BLOCK_TABLE bt, DESCRIPTOR desc, int layout_ goto exit; } } - { - struct rbuf rb = {.buf = dbuf, .size = (unsigned int) size, .ndone = 0}; - //Not temporary; must have a toku_memdup'd copy. - deserialize_descriptor_from_rbuf(&rb, desc, layout_version); - } - lazy_assert(deserialize_descriptor_size(desc, layout_version)+4 == size); + + struct rbuf rb = { .buf = dbuf, .size = (unsigned int) size, .ndone = 0 }; + deserialize_descriptor_from_rbuf(&rb, desc, layout_version); + lazy_assert(deserialize_descriptor_size(desc, layout_version) + 4 == size); toku_free(dbuf); } } @@ -222,7 +215,7 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version) //Verification of initial elements. 
//Check magic number - bytevec magic; + const void *magic; rbuf_literal_bytes(rb, &magic, 8); lazy_assert(memcmp(magic,"tokudata",8)==0); @@ -244,7 +237,7 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version) size = rbuf_network_int(rb); lazy_assert(size == rb->size); - bytevec tmp_byte_order_check; + const void *tmp_byte_order_check; lazy_assert((sizeof tmp_byte_order_check) >= 8); rbuf_literal_bytes(rb, &tmp_byte_order_check, 8); //Must not translate byte order int64_t byte_order_stored; @@ -254,13 +247,13 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version) uint64_t checkpoint_count; checkpoint_count = rbuf_ulonglong(rb); LSN checkpoint_lsn; - checkpoint_lsn = rbuf_lsn(rb); + checkpoint_lsn = rbuf_LSN(rb); unsigned nodesize; nodesize = rbuf_int(rb); DISKOFF translation_address_on_disk; - translation_address_on_disk = rbuf_diskoff(rb); + translation_address_on_disk = rbuf_DISKOFF(rb); DISKOFF translation_size_on_disk; - translation_size_on_disk = rbuf_diskoff(rb); + translation_size_on_disk = rbuf_DISKOFF(rb); lazy_assert(translation_address_on_disk > 0); lazy_assert(translation_size_on_disk > 0); @@ -281,11 +274,10 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version) assert(readsz <= (ssize_t)size_to_read); } // Create table and read in data. - r = toku_blocktable_create_from_buffer(fd, - &ft->blocktable, - translation_address_on_disk, - translation_size_on_disk, - tbuf); + r = ft->blocktable.create_from_buffer(fd, + translation_address_on_disk, + translation_size_on_disk, + tbuf); toku_free(tbuf); if (r != 0) { goto exit; @@ -353,7 +345,7 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version) time_of_last_optimize_begin = rbuf_ulonglong(rb); time_of_last_optimize_end = rbuf_ulonglong(rb); count_of_optimize_in_progress = rbuf_int(rb); - msn_at_start_of_last_completed_optimize = rbuf_msn(rb); + msn_at_start_of_last_completed_optimize = rbuf_MSN(rb); } enum toku_compression_method compression_method; @@ -362,7 +354,7 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version) if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_19) { unsigned char method = rbuf_char(rb); compression_method = (enum toku_compression_method) method; - highest_unused_msn_for_upgrade = rbuf_msn(rb); + highest_unused_msn_for_upgrade = rbuf_MSN(rb); } else { // we hard coded zlib until 5.2, then quicklz in 5.2 if (ft->layout_version_read_from_disk < FT_LAYOUT_VERSION_18) { @@ -375,7 +367,7 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version) MSN max_msn_in_ft; max_msn_in_ft = ZERO_MSN; // We'll upgrade it from the root node later if necessary if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_21) { - max_msn_in_ft = rbuf_msn(rb); + max_msn_in_ft = rbuf_MSN(rb); } (void) rbuf_int(rb); //Read in checksum and ignore (already verified). 
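[Editorial illustration, not part of this patch: a sketch of reading the fixed header prefix with the rbuf helpers this change renames (rbuf_lsn/rbuf_diskoff/rbuf_msn become rbuf_LSN/rbuf_DISKOFF/rbuf_MSN). The field order follows the reads visible in deserialize_ft_versioned above; the function name and includes are indicative only, and the version/build fields read between the magic and the size are elided.]

    #include "ft/serialize/block_table.h"   // DISKOFF, rbuf_DISKOFF
    #include "ft/serialize/rbuf.h"

    static void example_read_ft_header_prefix(struct rbuf *rb) {
        const void *magic;
        rbuf_literal_bytes(rb, &magic, 8);              // must be "tokudata"
        // ... layout version / build information is read here (elided) ...
        uint32_t size = rbuf_network_int(rb);           // total serialized header size
        const void *byte_order_check;
        rbuf_literal_bytes(rb, &byte_order_check, 8);   // stored host byte order, never translated
        uint64_t checkpoint_count = rbuf_ulonglong(rb);
        LSN checkpoint_lsn = rbuf_LSN(rb);
        uint32_t nodesize = rbuf_int(rb);
        DISKOFF translation_address_on_disk = rbuf_DISKOFF(rb);  // where the block translation table lives
        DISKOFF translation_size_on_disk = rbuf_DISKOFF(rb);
        (void) magic; (void) size; (void) byte_order_check; (void) checkpoint_count;
        (void) checkpoint_lsn; (void) nodesize;
        (void) translation_address_on_disk; (void) translation_size_on_disk;
    }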
@@ -433,13 +425,14 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version) } invariant((uint32_t) ft->layout_version_read_from_disk == version); - r = deserialize_descriptor_from(fd, ft->blocktable, &ft->descriptor, version); + r = deserialize_descriptor_from(fd, &ft->blocktable, &ft->descriptor, version); if (r != 0) { goto exit; } - // copy descriptor to cmp_descriptor for #4541 - ft->cmp_descriptor.dbt.size = ft->descriptor.dbt.size; - ft->cmp_descriptor.dbt.data = toku_xmemdup(ft->descriptor.dbt.data, ft->descriptor.dbt.size); + + // initialize for svn #4541 + toku_clone_dbt(&ft->cmp_descriptor.dbt, ft->descriptor.dbt); + // Version 13 descriptors had an extra 4 bytes that we don't read // anymore. Since the header is going to think it's the current // version if it gets written out, we need to write the descriptor in @@ -462,6 +455,7 @@ serialize_ft_min_size (uint32_t version) { size_t size = 0; switch(version) { + case FT_LAYOUT_VERSION_27: case FT_LAYOUT_VERSION_26: case FT_LAYOUT_VERSION_25: case FT_LAYOUT_VERSION_24: @@ -518,7 +512,7 @@ serialize_ft_min_size (uint32_t version) { abort(); } - lazy_assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE); + lazy_assert(size <= block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE); return size; } @@ -560,7 +554,7 @@ int deserialize_ft_from_fd_into_rbuf(int fd, rbuf_init(rb, prefix, prefix_size); //Check magic number - bytevec magic; + const void *magic; rbuf_literal_bytes(rb, &magic, 8); if (memcmp(magic,"tokudata",8)!=0) { if ((*(uint64_t*)magic) == 0) { @@ -595,7 +589,7 @@ int deserialize_ft_from_fd_into_rbuf(int fd, //If too big, it is corrupt. We would probably notice during checksum //but may have to do a multi-gigabyte malloc+read to find out. //If its too small reading rbuf would crash, so verify. 
- if (size > BLOCK_ALLOCATOR_HEADER_RESERVE || size < min_header_size) { + if (size > block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE || size < min_header_size) { r = TOKUDB_DICTIONARY_NO_HEADER; goto exit; } @@ -634,7 +628,7 @@ int deserialize_ft_from_fd_into_rbuf(int fd, } //Verify byte order - bytevec tmp_byte_order_check; + const void *tmp_byte_order_check; lazy_assert((sizeof toku_byte_order_host) == 8); rbuf_literal_bytes(rb, &tmp_byte_order_check, 8); //Must not translate byte order int64_t byte_order_stored; @@ -646,7 +640,7 @@ int deserialize_ft_from_fd_into_rbuf(int fd, //Load checkpoint count *checkpoint_count = rbuf_ulonglong(rb); - *checkpoint_lsn = rbuf_lsn(rb); + *checkpoint_lsn = rbuf_LSN(rb); //Restart at beginning during regular deserialization rb->ndone = 0; @@ -668,11 +662,11 @@ toku_deserialize_ft_from(int fd, { struct rbuf rb_0; struct rbuf rb_1; - uint64_t checkpoint_count_0; - uint64_t checkpoint_count_1; + uint64_t checkpoint_count_0 = 0; + uint64_t checkpoint_count_1 = 0; LSN checkpoint_lsn_0; LSN checkpoint_lsn_1; - uint32_t version_0, version_1, version = 0; + uint32_t version_0 = 0, version_1 = 0, version = 0; bool h0_acceptable = false; bool h1_acceptable = false; struct rbuf *rb = NULL; @@ -684,7 +678,7 @@ toku_deserialize_ft_from(int fd, h0_acceptable = true; } - toku_off_t header_1_off = BLOCK_ALLOCATOR_HEADER_RESERVE; + toku_off_t header_1_off = block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE; r1 = deserialize_ft_from_fd_into_rbuf(fd, header_1_off, &rb_1, &checkpoint_count_1, &checkpoint_lsn_1, &version_1); if (r1 == 0 && checkpoint_lsn_1.lsn <= max_acceptable_lsn.lsn) { h1_acceptable = true; @@ -763,7 +757,7 @@ exit: size_t toku_serialize_ft_size (FT_HEADER h) { size_t size = serialize_ft_min_size(h->layout_version); //There is no dynamic data. - lazy_assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE); + lazy_assert(size <= block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE); return size; } @@ -809,23 +803,25 @@ void toku_serialize_ft_to_wbuf ( lazy_assert(wbuf->ndone == wbuf->size); } -void toku_serialize_ft_to (int fd, FT_HEADER h, BLOCK_TABLE blocktable, CACHEFILE cf) { +void toku_serialize_ft_to(int fd, FT_HEADER h, block_table *bt, CACHEFILE cf) { lazy_assert(h->type==FT_CHECKPOINT_INPROGRESS); struct wbuf w_translation; int64_t size_translation; int64_t address_translation; - //Must serialize translation first, to get address,size for header. - toku_serialize_translation_to_wbuf(blocktable, fd, &w_translation, - &address_translation, - &size_translation); - assert(size_translation == w_translation.ndone); // the bytes written are the size - assert(w_translation.size % 512 == 0); // the number of bytes available in the buffer is 0 mod 512, and those last bytes are all initialized. + // Must serialize translation first, to get address,size for header. + bt->serialize_translation_to_wbuf(fd, &w_translation, + &address_translation, + &size_translation); + assert(size_translation == w_translation.ndone); + + // the number of bytes available in the buffer is 0 mod 512, and those last bytes are all initialized. 
+ assert(w_translation.size % 512 == 0); struct wbuf w_main; size_t size_main = toku_serialize_ft_size(h); size_t size_main_aligned = roundup_to_multiple(512, size_main); - assert(size_main_aligned<BLOCK_ALLOCATOR_HEADER_RESERVE); + assert(size_main_aligned<block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE); char *XMALLOC_N_ALIGNED(512, size_main_aligned, mainbuf); for (size_t i=size_main; i<size_main_aligned; i++) mainbuf[i]=0; // initialize the end of the buffer with zeros wbuf_init(&w_main, mainbuf, size_main); @@ -853,7 +849,7 @@ void toku_serialize_ft_to (int fd, FT_HEADER h, BLOCK_TABLE blocktable, CACHEFIL //Alternate writing header to two locations: // Beginning (0) or BLOCK_ALLOCATOR_HEADER_RESERVE toku_off_t main_offset; - main_offset = (h->checkpoint_count & 0x1) ? 0 : BLOCK_ALLOCATOR_HEADER_RESERVE; + main_offset = (h->checkpoint_count & 0x1) ? 0 : block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE; toku_os_full_pwrite(fd, w_main.buf, size_main_aligned, main_offset); toku_free(w_main.buf); toku_free(w_translation.buf); diff --git a/storage/tokudb/ft-index/ft/tokuconst.h b/storage/tokudb/ft-index/ft/serialize/ft-serialize.h index 73ac3a6a693..dc8bb68ae40 100644 --- a/storage/tokudb/ft-index/ft/tokuconst.h +++ b/storage/tokudb/ft-index/ft/serialize/ft-serialize.h @@ -1,9 +1,6 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKUCONST_H -#define TOKUCONST_H -#ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -32,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,20 +86,27 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." +#pragma once -/* The number of transaction ids stored in the xids structure is - * represented by an 8-bit value. The value 255 is reserved. - * The constant MAX_NESTED_TRANSACTIONS is one less because - * one slot in the packed leaf entry is used for the implicit - * root transaction (id 0). 
- */ +#include "ft/ft.h" +#include "ft/serialize/block_table.h" +size_t toku_serialize_ft_size(struct ft_header *h); +void toku_serialize_ft_to(int fd, struct ft_header *h, block_table *bt, CACHEFILE cf); +void toku_serialize_ft_to_wbuf(struct wbuf *wbuf, struct ft_header *h, DISKOFF translation_location_on_disk, DISKOFF translation_size_on_disk); +void toku_serialize_descriptor_contents_to_fd(int fd, DESCRIPTOR desc, DISKOFF offset); +void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, DESCRIPTOR desc); -enum {MAX_NESTED_TRANSACTIONS = 253}; -enum {MAX_TRANSACTION_RECORDS = MAX_NESTED_TRANSACTIONS + 1}; +int toku_deserialize_ft_from(int fd, LSN max_acceptable_lsn, FT *ft); +// TODO rename +int deserialize_ft_from_fd_into_rbuf(int fd, + toku_off_t offset_of_header, + struct rbuf *rb, + uint64_t *checkpoint_count, + LSN *checkpoint_lsn, + uint32_t *version_p); -#endif - +// used by verify +// TODO rename +int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ft, uint32_t version); diff --git a/storage/tokudb/ft-index/ft/ft_layout_version.h b/storage/tokudb/ft-index/ft/serialize/ft_layout_version.h index e9c6a68328b..cf16d472355 100644 --- a/storage/tokudb/ft-index/ft/ft_layout_version.h +++ b/storage/tokudb/ft-index/ft/serialize/ft_layout_version.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef FT_LAYOUT_VERSION_H -#define FT_LAYOUT_VERSION_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,10 +87,12 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -//Must be defined before other recursive headers could include logger.h +//Must be defined before other recursive headers could include logger/recover.h enum ft_layout_version_e { FT_LAYOUT_VERSION_5 = 5, FT_LAYOUT_VERSION_6 = 6, // Diff from 5 to 6: Add leafentry_estimate @@ -120,6 +120,7 @@ enum ft_layout_version_e { FT_LAYOUT_VERSION_24 = 24, // Riddler: change logentries that log transactions to store TXNID_PAIRs instead of TXNIDs FT_LAYOUT_VERSION_25 = 25, // SecretSquirrel: ROLLBACK_LOG_NODES (on disk and in memory) now just use blocknum (instead of blocknum + hash) to point to other log nodes. same for xstillopen log entry FT_LAYOUT_VERSION_26 = 26, // Hojo: basements store key/vals separately on disk for fixed klpair length BNs + FT_LAYOUT_VERSION_27 = 27, // serialize message trees with nonleaf buffers to avoid key, msn sort on deserialize FT_NEXT_VERSION, // the version after the current version FT_LAYOUT_VERSION = FT_NEXT_VERSION-1, // A hack so I don't have to change this line. 
FT_LAYOUT_MIN_SUPPORTED_VERSION = FT_LAYOUT_VERSION_13, // Minimum version supported @@ -129,5 +130,3 @@ enum ft_layout_version_e { FT_FIRST_LAYOUT_VERSION_WITH_END_TO_END_CHECKSUM = FT_LAYOUT_VERSION_14, FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES = FT_LAYOUT_VERSION_15, }; - -#endif diff --git a/storage/tokudb/ft-index/ft/ft_node-serialize.cc b/storage/tokudb/ft-index/ft/serialize/ft_node-serialize.cc index fcb38f11834..8e6e27b34b3 100644 --- a/storage/tokudb/ft-index/ft/ft_node-serialize.cc +++ b/storage/tokudb/ft-index/ft/serialize/ft_node-serialize.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,19 +89,27 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "ft-internal.h" -#include "log-internal.h" -#include <compress.h> -#include <portability/toku_atomic.h> -#include <util/sort.h> -#include <util/threadpool.h> -#include "ft.h" -#include <util/status.h> -#include <util/scoped_malloc.h> +#include "portability/toku_atomic.h" + +#include "ft/cachetable/cachetable.h" +#include "ft/ft.h" +#include "ft/ft-internal.h" +#include "ft/node.h" +#include "ft/logger/log-internal.h" +#include "ft/txn/rollback.h" +#include "ft/serialize/block_allocator.h" +#include "ft/serialize/block_table.h" +#include "ft/serialize/compress.h" +#include "ft/serialize/ft_node-serialize.h" +#include "ft/serialize/sub_block.h" +#include "util/sort.h" +#include "util/threadpool.h" +#include "util/status.h" +#include "util/scoped_malloc.h" static FT_UPGRADE_STATUS_S ft_upgrade_status; -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ft_upgrade_status, k, c, t, "ft upgrade: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ft_upgrade_status, k, c, t, "ft upgrade: " l, inc) static void status_init(void) @@ -126,6 +134,7 @@ toku_ft_upgrade_get_status(FT_UPGRADE_STATUS s) { static int num_cores = 0; // cache the number of cores for the parallelization static struct toku_thread_pool *ft_pool = NULL; +bool toku_serialize_in_parallel; int get_num_cores(void) { return num_cores; @@ -135,18 +144,24 @@ struct toku_thread_pool *get_ft_pool(void) { return ft_pool; } -void -toku_ft_serialize_layer_init(void) { +void toku_serialize_set_parallel(bool in_parallel) { + toku_serialize_in_parallel = in_parallel; +} + +void toku_ft_serialize_layer_init(void) { num_cores = toku_os_get_number_active_processors(); - int r = toku_thread_pool_create(&ft_pool, num_cores); lazy_assert_zero(r); + int r = toku_thread_pool_create(&ft_pool, num_cores); + lazy_assert_zero(r); + block_allocator::maybe_initialize_trace(); + toku_serialize_in_parallel = false; } -void -toku_ft_serialize_layer_destroy(void) { +void toku_ft_serialize_layer_destroy(void) { toku_thread_pool_destroy(&ft_pool); + block_allocator::maybe_close_trace(); } -enum {FILE_CHANGE_INCREMENT = (16<<20)}; +enum { FILE_CHANGE_INCREMENT = (16 << 20) }; static inline uint64_t alignup64(uint64_t a, uint64_t b) { @@ -193,7 +208,7 @@ toku_maybe_preallocate_in_file (int fd, int64_t size, int64_t expected_size, int // Effect: make the file bigger by 
either doubling it or growing by 16MiB whichever is less, until it is at least size // Return 0 on success, otherwise an error number. { - int64_t file_size; + int64_t file_size = 0; //TODO(yoni): Allow variable stripe_width (perhaps from ft) for larger raids const uint64_t stripe_width = 4096; { @@ -240,9 +255,6 @@ enum { 4), // build_id }; -#include "sub_block.h" -#include "sub_block_map.h" - // uncompressed header offsets enum { uncompressed_magic_offset = 0, @@ -291,8 +303,13 @@ serialize_ftnode_partition_size (FTNODE node, int i) paranoid_invariant(node->bp[i].state == PT_AVAIL); result++; // Byte that states what the partition is if (node->height > 0) { - result += 4; // size of bytes in buffer table - result += toku_bnc_nbytesinbuf(BNC(node, i)); + NONLEAF_CHILDINFO bnc = BNC(node, i); + // number of messages (4 bytes) plus size of the buffer + result += (4 + toku_bnc_nbytesinbuf(bnc)); + // number of offsets (4 bytes) plus an array of 4 byte offsets, for each message tree + result += (4 + (4 * bnc->fresh_message_tree.size())); + result += (4 + (4 * bnc->stale_message_tree.size())); + result += (4 + (4 * bnc->broadcast_list.size())); } else { result += 4 + bn_data::HEADER_LENGTH; // n_entries in buffer table + basement header @@ -303,26 +320,57 @@ serialize_ftnode_partition_size (FTNODE node, int i) } #define FTNODE_PARTITION_DMT_LEAVES 0xaa -#define FTNODE_PARTITION_FIFO_MSG 0xbb +#define FTNODE_PARTITION_MSG_BUFFER 0xbb -static void -serialize_nonleaf_childinfo(NONLEAF_CHILDINFO bnc, struct wbuf *wb) -{ - unsigned char ch = FTNODE_PARTITION_FIFO_MSG; +UU() static int +assert_fresh(const int32_t &offset, const uint32_t UU(idx), message_buffer *const msg_buffer) { + bool is_fresh = msg_buffer->get_freshness(offset); + assert(is_fresh); + return 0; +} + +UU() static int +assert_stale(const int32_t &offset, const uint32_t UU(idx), message_buffer *const msg_buffer) { + bool is_fresh = msg_buffer->get_freshness(offset); + assert(!is_fresh); + return 0; +} + +static void bnc_verify_message_trees(NONLEAF_CHILDINFO UU(bnc)) { +#ifdef TOKU_DEBUG_PARANOID + bnc->fresh_message_tree.iterate<message_buffer, assert_fresh>(&bnc->msg_buffer); + bnc->stale_message_tree.iterate<message_buffer, assert_stale>(&bnc->msg_buffer); +#endif +} + +static int +wbuf_write_offset(const int32_t &offset, const uint32_t UU(idx), struct wbuf *const wb) { + wbuf_nocrc_int(wb, offset); + return 0; +} + +static void serialize_child_buffer(NONLEAF_CHILDINFO bnc, struct wbuf *wb) { + unsigned char ch = FTNODE_PARTITION_MSG_BUFFER; wbuf_nocrc_char(wb, ch); - // serialize the FIFO, first the number of entries, then the elements - wbuf_nocrc_int(wb, toku_bnc_n_entries(bnc)); - FIFO_ITERATE( - bnc->buffer, key, keylen, data, datalen, type, msn, xids, is_fresh, - { - paranoid_invariant((int) type >= 0 && (int) type < 256); - wbuf_nocrc_char(wb, (unsigned char) type); - wbuf_nocrc_char(wb, (unsigned char) is_fresh); - wbuf_MSN(wb, msn); - wbuf_nocrc_xids(wb, xids); - wbuf_nocrc_bytes(wb, key, keylen); - wbuf_nocrc_bytes(wb, data, datalen); - }); + + // serialize the message buffer + bnc->msg_buffer.serialize_to_wbuf(wb); + + // serialize the message trees (num entries, offsets array): + // first, verify their contents are consistent with the message buffer + bnc_verify_message_trees(bnc); + + // fresh + wbuf_nocrc_int(wb, bnc->fresh_message_tree.size()); + bnc->fresh_message_tree.iterate<struct wbuf, wbuf_write_offset>(wb); + + // stale + wbuf_nocrc_int(wb, bnc->stale_message_tree.size()); + 
bnc->stale_message_tree.iterate<struct wbuf, wbuf_write_offset>(wb); + + // broadcast + wbuf_nocrc_int(wb, bnc->broadcast_list.size()); + bnc->broadcast_list.iterate<struct wbuf, wbuf_write_offset>(wb); } // @@ -332,13 +380,11 @@ serialize_nonleaf_childinfo(NONLEAF_CHILDINFO bnc, struct wbuf *wb) // static void serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) { - if (sb->uncompressed_ptr == NULL) { - assert(sb->uncompressed_size == 0); - sb->uncompressed_size = serialize_ftnode_partition_size(node,i); - sb->uncompressed_ptr = toku_xmalloc(sb->uncompressed_size); - } else { - assert(sb->uncompressed_size > 0); - } + // Caller should have allocated memory. + invariant_notnull(sb->uncompressed_ptr); + invariant(sb->uncompressed_size > 0); + paranoid_invariant(sb->uncompressed_size == serialize_ftnode_partition_size(node, i)); + // // Now put the data into sb->uncompressed_ptr // @@ -346,7 +392,7 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) { wbuf_init(&wb, sb->uncompressed_ptr, sb->uncompressed_size); if (node->height > 0) { // TODO: (Zardosht) possibly exit early if there are no messages - serialize_nonleaf_childinfo(BNC(node, i), &wb); + serialize_child_buffer(BNC(node, i), &wb); } else { unsigned char ch = FTNODE_PARTITION_DMT_LEAVES; @@ -369,13 +415,13 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) { // static void compress_ftnode_sub_block(struct sub_block *sb, enum toku_compression_method method) { - assert(sb->compressed_ptr == NULL); - set_compressed_size_bound(sb, method); - // add 8 extra bytes, 4 for compressed size, 4 for decompressed size - sb->compressed_ptr = toku_xmalloc(sb->compressed_size_bound + 8); + invariant(sb->compressed_ptr != nullptr); + invariant(sb->compressed_size_bound > 0); + paranoid_invariant(sb->compressed_size_bound == toku_compress_bound(method, sb->uncompressed_size)); + // // This probably seems a bit complicated. Here is what is going on. - // In TokuDB 5.0, sub_blocks were compressed and the compressed data + // In TokuFT 5.0, sub_blocks were compressed and the compressed data // was checksummed. The checksum did NOT include the size of the compressed data // and the size of the uncompressed data. The fields of sub_block only reference the // compressed data, and it is the responsibility of the user of the sub_block @@ -408,7 +454,7 @@ compress_ftnode_sub_block(struct sub_block *sb, enum toku_compression_method met // two integers at the beginning, the size and uncompressed size, and then the compressed // data. sb->xsum contains the checksum of this entire thing. // - // In TokuDB 5.0, sb->compressed_ptr only contained the compressed data, sb->xsum + // In TokuFT 5.0, sb->compressed_ptr only contained the compressed data, sb->xsum // checksummed only the compressed data, and the checksumming of the sizes were not // done here. 
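[Editorial illustration, not part of this patch: a sketch of the compressed sub_block layout that the comment above describes, with a hypothetical reader; byte-order conversion and the checksum computation are omitted.]

    #include <stdint.h>
    #include <string.h>

    // sb->compressed_ptr holds: [4-byte compressed size][4-byte uncompressed size][compressed payload],
    // and sb->xsum checksums that entire region (sizes included), unlike the 5.0-era format
    // where only the compressed payload was checksummed.
    static void example_parse_compressed_sub_block(const char *compressed_ptr,
                                                   uint32_t *compressed_size,
                                                   uint32_t *uncompressed_size,
                                                   const char **payload) {
        memcpy(compressed_size, compressed_ptr, 4);         // first 4 bytes: size of the compressed payload
        memcpy(uncompressed_size, compressed_ptr + 4, 4);   // next 4 bytes: size after decompression
        *payload = compressed_ptr + 8;                      // compressed data starts after the two size fields
    }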
// @@ -429,7 +475,7 @@ serialize_ftnode_info_size(FTNODE node) retval += 4; // flags retval += 4; // height; retval += 8; // oldest_referenced_xid_known - retval += node->totalchildkeylens; // total length of pivots + retval += node->pivotkeys.serialized_size(); retval += (node->n_children-1)*4; // encode length of each pivot if (node->height > 0) { retval += node->n_children*8; // child blocknum's @@ -438,13 +484,12 @@ serialize_ftnode_info_size(FTNODE node) return retval; } -static void serialize_ftnode_info(FTNODE node, - SUB_BLOCK sb // output - ) { - assert(sb->uncompressed_size == 0); - assert(sb->uncompressed_ptr == NULL); - sb->uncompressed_size = serialize_ftnode_info_size(node); - sb->uncompressed_ptr = toku_xmalloc(sb->uncompressed_size); +static void serialize_ftnode_info(FTNODE node, SUB_BLOCK sb) { + // Memory must have been allocated by our caller. + invariant(sb->uncompressed_size > 0); + invariant_notnull(sb->uncompressed_ptr); + paranoid_invariant(sb->uncompressed_size == serialize_ftnode_info_size(node)); + struct wbuf wb; wbuf_init(&wb, sb->uncompressed_ptr, sb->uncompressed_size); @@ -453,11 +498,8 @@ static void serialize_ftnode_info(FTNODE node, wbuf_nocrc_uint(&wb, node->flags); wbuf_nocrc_int (&wb, node->height); wbuf_TXNID(&wb, node->oldest_referenced_xid_known); + node->pivotkeys.serialize_to_wbuf(&wb); - // pivot information - for (int i = 0; i < node->n_children-1; i++) { - wbuf_nocrc_bytes(&wb, node->childkeys[i].data, node->childkeys[i].size); - } // child blocks, only for internal nodes if (node->height > 0) { for (int i = 0; i < node->n_children; i++) { @@ -479,7 +521,7 @@ toku_serialize_ftnode_size (FTNODE node) { // As of now, this seems to be called if and only if the entire node is supposed // to be in memory, so we will assert it. // - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); result += serialize_node_header_size(node); result += serialize_ftnode_info_size(node); for (int i = 0; i < node->n_children; i++) { @@ -488,208 +530,6 @@ toku_serialize_ftnode_size (FTNODE node) { return result; } -struct array_info { - uint32_t offset; - LEAFENTRY* le_array; - uint32_t* key_sizes_array; - const void** key_ptr_array; -}; - -static int -array_item(const void* key, const uint32_t keylen, const LEAFENTRY &le, const uint32_t idx, struct array_info *const ai) { - ai->le_array[idx+ai->offset] = le; - ai->key_sizes_array[idx+ai->offset] = keylen; - ai->key_ptr_array[idx+ai->offset] = key; - return 0; -} - -// There must still be at least one child -// Requires that all messages in buffers above have been applied. -// Because all messages above have been applied, setting msn of all new basements -// to max msn of existing basements is correct. (There cannot be any messages in -// buffers above that still need to be applied.) -void -rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize) -{ - assert(node->height == 0); - assert(node->dirty); - - uint32_t num_orig_basements = node->n_children; - // Count number of leaf entries in this leaf (num_le). - uint32_t num_le = 0; - for (uint32_t i = 0; i < num_orig_basements; i++) { - num_le += BLB_DATA(node, i)->num_klpairs(); - } - - uint32_t num_alloc = num_le ? num_le : 1; // simplify logic below by always having at least one entry per array - - // Create an array of OMTVALUE's that store all the pointers to all the data. - // Each element in leafpointers is a pointer to a leaf. 
- toku::scoped_malloc leafpointers_buf(sizeof(LEAFENTRY) * num_alloc); - LEAFENTRY *leafpointers = reinterpret_cast<LEAFENTRY *>(leafpointers_buf.get()); - leafpointers[0] = NULL; - - toku::scoped_malloc key_pointers_buf(sizeof(void *) * num_alloc); - const void **key_pointers = reinterpret_cast<const void **>(key_pointers_buf.get()); - key_pointers[0] = NULL; - - toku::scoped_malloc key_sizes_buf(sizeof(uint32_t) * num_alloc); - uint32_t *key_sizes = reinterpret_cast<uint32_t *>(key_sizes_buf.get()); - - // Capture pointers to old mempools' buffers (so they can be destroyed) - toku::scoped_malloc old_bns_buf(sizeof(BASEMENTNODE) * num_orig_basements); - BASEMENTNODE *old_bns = reinterpret_cast<BASEMENTNODE *>(old_bns_buf.get()); - old_bns[0] = NULL; - - uint32_t curr_le = 0; - for (uint32_t i = 0; i < num_orig_basements; i++) { - bn_data* bd = BLB_DATA(node, i); - struct array_info ai {.offset = curr_le, .le_array = leafpointers, .key_sizes_array = key_sizes, .key_ptr_array = key_pointers }; - bd->iterate<array_info, array_item>(&ai); - curr_le += bd->num_klpairs(); - } - - // Create an array that will store indexes of new pivots. - // Each element in new_pivots is the index of a pivot key. - // (Allocating num_le of them is overkill, but num_le is an upper bound.) - toku::scoped_malloc new_pivots_buf(sizeof(uint32_t) * num_alloc); - uint32_t *new_pivots = reinterpret_cast<uint32_t *>(new_pivots_buf.get()); - new_pivots[0] = 0; - - // Each element in le_sizes is the size of the leafentry pointed to by leafpointers. - toku::scoped_malloc le_sizes_buf(sizeof(size_t) * num_alloc); - size_t *le_sizes = reinterpret_cast<size_t *>(le_sizes_buf.get()); - le_sizes[0] = 0; - - // Create an array that will store the size of each basement. - // This is the sum of the leaf sizes of all the leaves in that basement. - // We don't know how many basements there will be, so we use num_le as the upper bound. - - // Sum of all le sizes in a single basement - toku::scoped_calloc bn_le_sizes_buf(sizeof(size_t) * num_alloc); - size_t *bn_le_sizes = reinterpret_cast<size_t *>(bn_le_sizes_buf.get()); - - // Sum of all key sizes in a single basement - toku::scoped_calloc bn_key_sizes_buf(sizeof(size_t) * num_alloc); - size_t *bn_key_sizes = reinterpret_cast<size_t *>(bn_key_sizes_buf.get()); - - // TODO 4050: All these arrays should be combined into a single array of some bn_info struct (pivot, msize, num_les). - // Each entry is the number of leafentries in this basement. (Again, num_le is overkill upper baound.) - toku::scoped_malloc num_les_this_bn_buf(sizeof(uint32_t) * num_alloc); - uint32_t *num_les_this_bn = reinterpret_cast<uint32_t *>(num_les_this_bn_buf.get()); - num_les_this_bn[0] = 0; - - // Figure out the new pivots. - // We need the index of each pivot, and for each basement we need - // the number of leaves and the sum of the sizes of the leaves (memory requirement for basement). 
- uint32_t curr_pivot = 0; - uint32_t num_le_in_curr_bn = 0; - uint32_t bn_size_so_far = 0; - for (uint32_t i = 0; i < num_le; i++) { - uint32_t curr_le_size = leafentry_disksize((LEAFENTRY) leafpointers[i]); - le_sizes[i] = curr_le_size; - if ((bn_size_so_far + curr_le_size + sizeof(uint32_t) + key_sizes[i] > basementnodesize) && (num_le_in_curr_bn != 0)) { - // cap off the current basement node to end with the element before i - new_pivots[curr_pivot] = i-1; - curr_pivot++; - num_le_in_curr_bn = 0; - bn_size_so_far = 0; - } - num_le_in_curr_bn++; - num_les_this_bn[curr_pivot] = num_le_in_curr_bn; - bn_le_sizes[curr_pivot] += curr_le_size; - bn_key_sizes[curr_pivot] += sizeof(uint32_t) + key_sizes[i]; // uint32_t le_offset - bn_size_so_far += curr_le_size + sizeof(uint32_t) + key_sizes[i]; - } - // curr_pivot is now the total number of pivot keys in the leaf node - int num_pivots = curr_pivot; - int num_children = num_pivots + 1; - - // now we need to fill in the new basement nodes and pivots - - // TODO: (Zardosht) this is an ugly thing right now - // Need to figure out how to properly deal with seqinsert. - // I am not happy with how this is being - // handled with basement nodes - uint32_t tmp_seqinsert = BLB_SEQINSERT(node, num_orig_basements - 1); - - // choose the max msn applied to any basement as the max msn applied to all new basements - MSN max_msn = ZERO_MSN; - for (uint32_t i = 0; i < num_orig_basements; i++) { - MSN curr_msn = BLB_MAX_MSN_APPLIED(node,i); - max_msn = (curr_msn.msn > max_msn.msn) ? curr_msn : max_msn; - } - // remove the basement node in the node, we've saved a copy - for (uint32_t i = 0; i < num_orig_basements; i++) { - // save a reference to the old basement nodes - // we will need them to ensure that the memory - // stays intact - old_bns[i] = toku_detach_bn(node, i); - } - // Now destroy the old basements, but do not destroy leaves - toku_destroy_ftnode_internals(node); - - // now reallocate pieces and start filling them in - invariant(num_children > 0); - node->totalchildkeylens = 0; - - XCALLOC_N(num_pivots, node->childkeys); // allocate pointers to pivot structs - node->n_children = num_children; - XCALLOC_N(num_children, node->bp); // allocate pointers to basements (bp) - for (int i = 0; i < num_children; i++) { - set_BLB(node, i, toku_create_empty_bn()); // allocate empty basements and set bp pointers - } - - // now we start to fill in the data - - // first the pivots - for (int i = 0; i < num_pivots; i++) { - uint32_t keylen = key_sizes[new_pivots[i]]; - const void *key = key_pointers[new_pivots[i]]; - toku_memdup_dbt(&node->childkeys[i], key, keylen); - node->totalchildkeylens += keylen; - } - - uint32_t baseindex_this_bn = 0; - // now the basement nodes - for (int i = 0; i < num_children; i++) { - // put back seqinsert - BLB_SEQINSERT(node, i) = tmp_seqinsert; - - // create start (inclusive) and end (exclusive) boundaries for data of basement node - uint32_t curr_start = (i==0) ? 0 : new_pivots[i-1]+1; // index of first leaf in basement - uint32_t curr_end = (i==num_pivots) ? 
num_le : new_pivots[i]+1; // index of first leaf in next basement - uint32_t num_in_bn = curr_end - curr_start; // number of leaves in this basement - - // create indexes for new basement - invariant(baseindex_this_bn == curr_start); - uint32_t num_les_to_copy = num_les_this_bn[i]; - invariant(num_les_to_copy == num_in_bn); - - bn_data* bd = BLB_DATA(node, i); - bd->set_contents_as_clone_of_sorted_array( - num_les_to_copy, - &key_pointers[baseindex_this_bn], - &key_sizes[baseindex_this_bn], - &leafpointers[baseindex_this_bn], - &le_sizes[baseindex_this_bn], - bn_key_sizes[i], // Total key sizes - bn_le_sizes[i] // total le sizes - ); - - BP_STATE(node,i) = PT_AVAIL; - BP_TOUCH_CLOCK(node,i); - BLB_MAX_MSN_APPLIED(node,i) = max_msn; - baseindex_this_bn += num_les_to_copy; // set to index of next bn - } - node->max_msn_applied_to_node_on_disk = max_msn; - - // destroy buffers of old mempools - for (uint32_t i = 0; i < num_orig_basements; i++) { - destroy_basement_node(old_bns[i]); - } -} // end of rebalance_ftnode_leaf() - struct serialize_times { tokutime_t serialize_time; tokutime_t compress_time; @@ -854,34 +694,50 @@ int toku_serialize_ftnode_to_memory(FTNODE node, // The resulting buffer is guaranteed to be 512-byte aligned and the total length is a multiple of 512 (so we pad with zeros at the end if needed). // 512-byte padding is for O_DIRECT to work. { - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); if (do_rebalancing && node->height == 0) { - rebalance_ftnode_leaf(node, basementnodesize); + toku_ftnode_leaf_rebalance(node, basementnodesize); } const int npartitions = node->n_children; // Each partition represents a compressed sub block // For internal nodes, a sub block is a message buffer // For leaf nodes, a sub block is a basement node - toku::scoped_malloc sb_buf(sizeof(struct sub_block) * npartitions); + toku::scoped_calloc sb_buf(sizeof(struct sub_block) * npartitions); struct sub_block *sb = reinterpret_cast<struct sub_block *>(sb_buf.get()); XREALLOC_N(npartitions, *ndd); - struct sub_block sb_node_info; - for (int i = 0; i < npartitions; i++) { - sub_block_init(&sb[i]);; - } - sub_block_init(&sb_node_info); // // First, let's serialize and compress the individual sub blocks // - struct serialize_times st; - memset(&st, 0, sizeof(st)); + + // determine how large our serialization and compression buffers need to be. 
+ size_t serialize_buf_size = 0, compression_buf_size = 0; + for (int i = 0; i < node->n_children; i++) { + sb[i].uncompressed_size = serialize_ftnode_partition_size(node, i); + sb[i].compressed_size_bound = toku_compress_bound(compression_method, sb[i].uncompressed_size); + serialize_buf_size += sb[i].uncompressed_size; + compression_buf_size += sb[i].compressed_size_bound + 8; // add 8 extra bytes, 4 for compressed size, 4 for decompressed size + } + + // give each sub block a base pointer to enough buffer space for serialization and compression + toku::scoped_malloc serialize_buf(serialize_buf_size); + toku::scoped_malloc compression_buf(compression_buf_size); + for (size_t i = 0, uncompressed_offset = 0, compressed_offset = 0; i < (size_t) node->n_children; i++) { + sb[i].uncompressed_ptr = reinterpret_cast<char *>(serialize_buf.get()) + uncompressed_offset; + sb[i].compressed_ptr = reinterpret_cast<char *>(compression_buf.get()) + compressed_offset; + uncompressed_offset += sb[i].uncompressed_size; + compressed_offset += sb[i].compressed_size_bound + 8; // add 8 extra bytes, 4 for compressed size, 4 for decompressed size + invariant(uncompressed_offset <= serialize_buf_size); + invariant(compressed_offset <= compression_buf_size); + } + + // do the actual serialization now that we have buffer space + struct serialize_times st = { 0, 0 }; if (in_parallel) { serialize_and_compress_in_parallel(node, npartitions, compression_method, sb, &st); - } - else { + } else { serialize_and_compress_serially(node, npartitions, compression_method, sb, &st); } @@ -889,16 +745,31 @@ int toku_serialize_ftnode_to_memory(FTNODE node, // Now lets create a sub-block that has the common node information, // This does NOT include the header // + + // determine how large our serialization and copmression buffers need to be + struct sub_block sb_node_info; + sub_block_init(&sb_node_info); + size_t sb_node_info_uncompressed_size = serialize_ftnode_info_size(node); + size_t sb_node_info_compressed_size_bound = toku_compress_bound(compression_method, sb_node_info_uncompressed_size); + toku::scoped_malloc sb_node_info_uncompressed_buf(sb_node_info_uncompressed_size); + toku::scoped_malloc sb_node_info_compressed_buf(sb_node_info_compressed_size_bound + 8); // add 8 extra bytes, 4 for compressed size, 4 for decompressed size + sb_node_info.uncompressed_size = sb_node_info_uncompressed_size; + sb_node_info.uncompressed_ptr = sb_node_info_uncompressed_buf.get(); + sb_node_info.compressed_size_bound = sb_node_info_compressed_size_bound; + sb_node_info.compressed_ptr = sb_node_info_compressed_buf.get(); + + // do the actual serialization now that we have buffer space serialize_and_compress_sb_node_info(node, &sb_node_info, compression_method, &st); + // + // At this point, we have compressed each of our pieces into individual sub_blocks, + // we can put the header and all the subblocks into a single buffer and return it. + // + // update the serialize times, ignore the header for simplicity. we captured all // of the partitions' serialize times so that's probably good enough. toku_ft_status_update_serialize_times(node, st.serialize_time, st.compress_time); - // now we have compressed each of our pieces into individual sub_blocks, - // we can put the header and all the subblocks into a single buffer - // and return it. 
- // The total size of the node is: // size of header + disk size of the n+1 sub_block's created above uint32_t total_node_size = (serialize_node_header_size(node) // uncompressed header @@ -916,11 +787,10 @@ int toku_serialize_ftnode_to_memory(FTNODE node, total_uncompressed_size += sb[i].uncompressed_size + 4; } + // now create the final serialized node uint32_t total_buffer_size = roundup_to_multiple(512, total_node_size); // make the buffer be 512 bytes. - char *XMALLOC_N_ALIGNED(512, total_buffer_size, data); char *curr_ptr = data; - // now create the final serialized node // write the header struct wbuf wb; @@ -944,33 +814,20 @@ int toku_serialize_ftnode_to_memory(FTNODE node, curr_ptr += sizeof(sb[i].xsum); } // Zero the rest of the buffer - for (uint32_t i=total_node_size; i<total_buffer_size; i++) { - data[i]=0; - } + memset(data + total_node_size, 0, total_buffer_size - total_node_size); assert(curr_ptr - data == total_node_size); *bytes_to_write = data; *n_bytes_to_write = total_buffer_size; *n_uncompressed_bytes = total_uncompressed_size; - // - // now that node has been serialized, go through sub_block's and free - // memory - // - toku_free(sb_node_info.compressed_ptr); - toku_free(sb_node_info.uncompressed_ptr); - for (int i = 0; i < npartitions; i++) { - toku_free(sb[i].compressed_ptr); - toku_free(sb[i].uncompressed_ptr); - } - - assert(0 == (*n_bytes_to_write)%512); - assert(0 == ((unsigned long long)(*bytes_to_write))%512); + invariant(*n_bytes_to_write % 512 == 0); + invariant(reinterpret_cast<unsigned long long>(*bytes_to_write) % 512 == 0); return 0; } int -toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT h, bool for_checkpoint) { +toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT ft, bool for_checkpoint) { size_t n_to_write; size_t n_uncompressed_bytes; @@ -992,10 +849,10 @@ toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DA int r = toku_serialize_ftnode_to_memory( node, ndd, - h->h->basementnodesize, - h->h->compression_method, + ft->h->basementnodesize, + ft->h->compression_method, do_rebalancing, - false, // in_parallel + toku_serialize_in_parallel, // in_parallel &n_to_write, &n_uncompressed_bytes, &compressed_buf @@ -1008,8 +865,12 @@ toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DA invariant(blocknum.b>=0); DISKOFF offset; - toku_blocknum_realloc_on_disk(h->blocktable, blocknum, n_to_write, &offset, - h, fd, for_checkpoint); //dirties h + // Dirties the ft + ft->blocktable.realloc_on_disk(blocknum, n_to_write, &offset, + ft, fd, for_checkpoint, + // Allocations for nodes high in the tree are considered 'hot', + // as they are likely to move again in the next checkpoint. 
+ node->height); tokutime_t t0 = toku_time_now(); toku_os_full_pwrite(fd, compressed_buf, n_to_write, offset); @@ -1024,72 +885,121 @@ toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DA } static void -deserialize_child_buffer(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf, - DESCRIPTOR desc, ft_compare_func cmp) { - int r; - int n_in_this_buffer = rbuf_int(rbuf); - int32_t *fresh_offsets = NULL, *stale_offsets = NULL; - int32_t *broadcast_offsets = NULL; - int nfresh = 0, nstale = 0; - int nbroadcast_offsets = 0; - if (cmp) { - XMALLOC_N(n_in_this_buffer, stale_offsets); - XMALLOC_N(n_in_this_buffer, fresh_offsets); - XMALLOC_N(n_in_this_buffer, broadcast_offsets); - } - toku_fifo_resize(bnc->buffer, rbuf->size + 64); - for (int i = 0; i < n_in_this_buffer; i++) { - bytevec key; ITEMLEN keylen; - bytevec val; ITEMLEN vallen; - // this is weird but it's necessary to pass icc and gcc together - unsigned char ctype = rbuf_char(rbuf); - enum ft_msg_type type = (enum ft_msg_type) ctype; - bool is_fresh = rbuf_char(rbuf); - MSN msn = rbuf_msn(rbuf); - XIDS xids; - xids_create_from_buffer(rbuf, &xids); - rbuf_bytes(rbuf, &key, &keylen); /* Returns a pointer into the rbuf. */ - rbuf_bytes(rbuf, &val, &vallen); - int32_t *dest; - if (cmp) { - if (ft_msg_type_applies_once(type)) { - if (is_fresh) { - dest = &fresh_offsets[nfresh]; - nfresh++; - } else { - dest = &stale_offsets[nstale]; - nstale++; - } - } else if (ft_msg_type_applies_all(type) || ft_msg_type_does_nothing(type)) { - dest = &broadcast_offsets[nbroadcast_offsets]; - nbroadcast_offsets++; - } else { - abort(); - } - } else { - dest = NULL; - } - r = toku_fifo_enq(bnc->buffer, key, keylen, val, vallen, type, msn, xids, is_fresh, dest); /* Copies the data into the fifo */ - lazy_assert_zero(r); - xids_destroy(&xids); - } - invariant(rbuf->ndone == rbuf->size); - - if (cmp) { - struct toku_fifo_entry_key_msn_cmp_extra extra = { .desc = desc, .cmp = cmp, .fifo = bnc->buffer }; - r = toku::sort<int32_t, const struct toku_fifo_entry_key_msn_cmp_extra, toku_fifo_entry_key_msn_cmp>::mergesort_r(fresh_offsets, nfresh, extra); - assert_zero(r); - bnc->fresh_message_tree.destroy(); - bnc->fresh_message_tree.create_steal_sorted_array(&fresh_offsets, nfresh, n_in_this_buffer); - r = toku::sort<int32_t, const struct toku_fifo_entry_key_msn_cmp_extra, toku_fifo_entry_key_msn_cmp>::mergesort_r(stale_offsets, nstale, extra); - assert_zero(r); +sort_and_steal_offset_arrays(NONLEAF_CHILDINFO bnc, + const toku::comparator &cmp, + int32_t **fresh_offsets, int32_t nfresh, + int32_t **stale_offsets, int32_t nstale, + int32_t **broadcast_offsets, int32_t nbroadcast) { + // We always have fresh / broadcast offsets (even if they are empty) + // but we may not have stale offsets, in the case of v13 upgrade. 
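// (Ownership note for the calls below: create_steal_sorted_array() hands the
//  malloc'd offset arrays over to the message trees instead of copying them,
//  which is why the offsets arrive here as int32_t**; each offset refers to a
//  message inside bnc->msg_buffer, and nfresh + nstale + nbroadcast is passed
//  to each create call as the capacity of the stolen array.)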
+ invariant(fresh_offsets != nullptr); + invariant(broadcast_offsets != nullptr); + invariant(cmp.valid()); + + typedef toku::sort<int32_t, const struct toku_msg_buffer_key_msn_cmp_extra, toku_msg_buffer_key_msn_cmp> msn_sort; + + const int32_t n_in_this_buffer = nfresh + nstale + nbroadcast; + struct toku_msg_buffer_key_msn_cmp_extra extra(cmp, &bnc->msg_buffer); + msn_sort::mergesort_r(*fresh_offsets, nfresh, extra); + bnc->fresh_message_tree.destroy(); + bnc->fresh_message_tree.create_steal_sorted_array(fresh_offsets, nfresh, n_in_this_buffer); + if (stale_offsets) { + msn_sort::mergesort_r(*stale_offsets, nstale, extra); bnc->stale_message_tree.destroy(); - bnc->stale_message_tree.create_steal_sorted_array(&stale_offsets, nstale, n_in_this_buffer); - bnc->broadcast_list.destroy(); - bnc->broadcast_list.create_steal_sorted_array(&broadcast_offsets, nbroadcast_offsets, n_in_this_buffer); + bnc->stale_message_tree.create_steal_sorted_array(stale_offsets, nstale, n_in_this_buffer); + } + bnc->broadcast_list.destroy(); + bnc->broadcast_list.create_steal_sorted_array(broadcast_offsets, nbroadcast, n_in_this_buffer); +} + +static MSN +deserialize_child_buffer_v13(FT ft, NONLEAF_CHILDINFO bnc, struct rbuf *rb) { + // We skip 'stale' offsets for upgraded nodes. + int32_t nfresh = 0, nbroadcast = 0; + int32_t *fresh_offsets = nullptr, *broadcast_offsets = nullptr; + + // Only sort buffers if we have a valid comparison function. In certain scenarios, + // like deserialie_ft_versioned() or tokuftdump, we'll need to deserialize ftnodes + // for simple inspection and don't actually require that the message buffers are + // properly sorted. This is very ugly, but correct. + const bool sort = ft->cmp.valid(); + + MSN highest_msn_in_this_buffer = + bnc->msg_buffer.deserialize_from_rbuf_v13(rb, &ft->h->highest_unused_msn_for_upgrade, + sort ? &fresh_offsets : nullptr, &nfresh, + sort ? &broadcast_offsets : nullptr, &nbroadcast); + + if (sort) { + sort_and_steal_offset_arrays(bnc, ft->cmp, + &fresh_offsets, nfresh, + nullptr, 0, // no stale offsets + &broadcast_offsets, nbroadcast); + } + + return highest_msn_in_this_buffer; +} + +static void +deserialize_child_buffer_v26(NONLEAF_CHILDINFO bnc, struct rbuf *rb, const toku::comparator &cmp) { + int32_t nfresh = 0, nstale = 0, nbroadcast = 0; + int32_t *fresh_offsets, *stale_offsets, *broadcast_offsets; + + // Only sort buffers if we have a valid comparison function. In certain scenarios, + // like deserialie_ft_versioned() or tokuftdump, we'll need to deserialize ftnodes + // for simple inspection and don't actually require that the message buffers are + // properly sorted. This is very ugly, but correct. + const bool sort = cmp.valid(); + + // read in the message buffer + bnc->msg_buffer.deserialize_from_rbuf(rb, + sort ? &fresh_offsets : nullptr, &nfresh, + sort ? &stale_offsets : nullptr, &nstale, + sort ? 
&broadcast_offsets : nullptr, &nbroadcast); + + if (sort) { + sort_and_steal_offset_arrays(bnc, cmp, + &fresh_offsets, nfresh, + &stale_offsets, nstale, + &broadcast_offsets, nbroadcast); } } +static void +deserialize_child_buffer(NONLEAF_CHILDINFO bnc, struct rbuf *rb) { + // read in the message buffer + bnc->msg_buffer.deserialize_from_rbuf(rb, + nullptr, nullptr, // fresh_offsets, nfresh, + nullptr, nullptr, // stale_offsets, nstale, + nullptr, nullptr); // broadcast_offsets, nbroadcast + + // read in each message tree (fresh, stale, broadcast) + int32_t nfresh = rbuf_int(rb); + int32_t *XMALLOC_N(nfresh, fresh_offsets); + for (int i = 0; i < nfresh; i++) { + fresh_offsets[i] = rbuf_int(rb); + } + + int32_t nstale = rbuf_int(rb); + int32_t *XMALLOC_N(nstale, stale_offsets); + for (int i = 0; i < nstale; i++) { + stale_offsets[i] = rbuf_int(rb); + } + + int32_t nbroadcast = rbuf_int(rb); + int32_t *XMALLOC_N(nbroadcast, broadcast_offsets); + for (int i = 0; i < nbroadcast; i++) { + broadcast_offsets[i] = rbuf_int(rb); + } + + // build OMTs out of each offset array + bnc->fresh_message_tree.destroy(); + bnc->fresh_message_tree.create_steal_sorted_array(&fresh_offsets, nfresh, nfresh); + bnc->stale_message_tree.destroy(); + bnc->stale_message_tree.create_steal_sorted_array(&stale_offsets, nstale, nstale); + bnc->broadcast_list.destroy(); + bnc->broadcast_list.create_steal_sorted_array(&broadcast_offsets, nbroadcast, nbroadcast); +} + // dump a buffer to stderr // no locking around this for now void @@ -1153,7 +1063,7 @@ BASEMENTNODE toku_create_empty_bn_no_buffer(void) { NONLEAF_CHILDINFO toku_create_empty_nl(void) { NONLEAF_CHILDINFO XMALLOC(cn); - int r = toku_fifo_create(&cn->buffer); assert_zero(r); + cn->msg_buffer.create(); cn->fresh_message_tree.create_no_array(); cn->stale_message_tree.create_no_array(); cn->broadcast_list.create_no_array(); @@ -1161,13 +1071,16 @@ NONLEAF_CHILDINFO toku_create_empty_nl(void) { return cn; } -// does NOT create OMTs, just the FIFO +// must clone the OMTs, since we serialize them along with the message buffer NONLEAF_CHILDINFO toku_clone_nl(NONLEAF_CHILDINFO orig_childinfo) { NONLEAF_CHILDINFO XMALLOC(cn); - toku_fifo_clone(orig_childinfo->buffer, &cn->buffer); + cn->msg_buffer.clone(&orig_childinfo->msg_buffer); cn->fresh_message_tree.create_no_array(); + cn->fresh_message_tree.clone(orig_childinfo->fresh_message_tree); cn->stale_message_tree.create_no_array(); + cn->stale_message_tree.clone(orig_childinfo->stale_message_tree); cn->broadcast_list.create_no_array(); + cn->broadcast_list.clone(orig_childinfo->broadcast_list); memset(cn->flow, 0, sizeof cn->flow); return cn; } @@ -1180,7 +1093,7 @@ void destroy_basement_node (BASEMENTNODE bn) void destroy_nonleaf_childinfo (NONLEAF_CHILDINFO nl) { - toku_fifo_free(&nl->buffer); + nl->msg_buffer.destroy(); nl->fresh_message_tree.destroy(); nl->stale_message_tree.destroy(); nl->broadcast_list.destroy(); @@ -1190,13 +1103,13 @@ void destroy_nonleaf_childinfo (NONLEAF_CHILDINFO nl) void read_block_from_fd_into_rbuf( int fd, BLOCKNUM blocknum, - FT h, + FT ft, struct rbuf *rb ) { // get the file offset and block size for the block DISKOFF offset, size; - toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size); + ft->blocktable.translate_blocknum_to_offset_size(blocknum, &offset, &size); DISKOFF size_aligned = roundup_to_multiple(512, size); uint8_t *XMALLOC_N_ALIGNED(512, size_aligned, raw_block); rbuf_init(rb, raw_block, size); @@ -1212,11 +1125,12 @@ static const int 
read_header_heuristic_max = 32*1024; #define MIN(a,b) (((a)>(b)) ? (b) : (a)) #endif -static void read_ftnode_header_from_fd_into_rbuf_if_small_enough (int fd, BLOCKNUM blocknum, FT ft, struct rbuf *rb, struct ftnode_fetch_extra *bfe) // Effect: If the header part of the node is small enough, then read it into the rbuf. The rbuf will be allocated to be big enough in any case. -{ +static void read_ftnode_header_from_fd_into_rbuf_if_small_enough(int fd, BLOCKNUM blocknum, + FT ft, struct rbuf *rb, + ftnode_fetch_extra *bfe) { DISKOFF offset, size; - toku_translate_blocknum_to_offset_size(ft->blocktable, blocknum, &offset, &size); + ft->blocktable.translate_blocknum_to_offset_size(blocknum, &offset, &size); DISKOFF read_size = roundup_to_multiple(512, MIN(read_header_heuristic_max, size)); uint8_t *XMALLOC_N_ALIGNED(512, roundup_to_multiple(512, size), raw_block); rbuf_init(rb, raw_block, read_size); @@ -1244,7 +1158,7 @@ read_compressed_sub_block(struct rbuf *rb, struct sub_block *sb) int r = 0; sb->compressed_size = rbuf_int(rb); sb->uncompressed_size = rbuf_int(rb); - bytevec* cp = (bytevec*)&sb->compressed_ptr; + const void **cp = (const void **) &sb->compressed_ptr; rbuf_literal_bytes(rb, cp, sb->compressed_size); sb->xsum = rbuf_int(rb); // let's check the checksum @@ -1325,7 +1239,7 @@ deserialize_ftnode_info( struct rbuf rb; rbuf_init(&rb, (unsigned char *) sb->uncompressed_ptr, data_size); - node->max_msn_applied_to_node_on_disk = rbuf_msn(&rb); + node->max_msn_applied_to_node_on_disk = rbuf_MSN(&rb); (void)rbuf_int(&rb); node->flags = rbuf_int(&rb); node->height = rbuf_int(&rb); @@ -1343,20 +1257,10 @@ deserialize_ftnode_info( // n_children is now in the header, nd the allocatio of the node->bp is in deserialize_ftnode_from_rbuf. // now the pivots - node->totalchildkeylens = 0; if (node->n_children > 1) { - XMALLOC_N(node->n_children - 1, node->childkeys); - for (int i=0; i < node->n_children-1; i++) { - bytevec childkeyptr; - unsigned int cklen; - rbuf_bytes(&rb, &childkeyptr, &cklen); - toku_memdup_dbt(&node->childkeys[i], childkeyptr, cklen); - node->totalchildkeylens += cklen; - } - } - else { - node->childkeys = NULL; - node->totalchildkeylens = 0; + node->pivotkeys.deserialize_from_rbuf(&rb, node->n_children - 1); + } else { + node->pivotkeys.create_empty(); } // if this is an internal node, unpack the block nums, and fill in necessary fields @@ -1391,7 +1295,7 @@ setup_available_ftnode_partition(FTNODE node, int i) { // Assign the child_to_read member of the bfe from the given ftnode // that has been brought into memory. 
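// (In the body below, a subset fetch that carries a search consults
//  toku_ft_search_which_child() to pick the one basement node the query needs;
//  the other path shown only fixes child_to_read for a leaf node whose wanted
//  leftmost/rightmost child range collapses to a single basement node.)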
static void -update_bfe_using_ftnode(FTNODE node, struct ftnode_fetch_extra *bfe) +update_bfe_using_ftnode(FTNODE node, ftnode_fetch_extra *bfe) { if (bfe->type == ftnode_fetch_subset && bfe->search != NULL) { // we do not take into account prefetching yet @@ -1400,8 +1304,7 @@ update_bfe_using_ftnode(FTNODE node, struct ftnode_fetch_extra *bfe) // we find out what basement node the query cares about // and check if it is available bfe->child_to_read = toku_ft_search_which_child( - &bfe->h->cmp_descriptor, - bfe->h->compare_fun, + bfe->ft->cmp, node, bfe->search ); @@ -1411,10 +1314,9 @@ update_bfe_using_ftnode(FTNODE node, struct ftnode_fetch_extra *bfe) // we can possibly require is a single basement node // we find out what basement node the query cares about // and check if it is available - paranoid_invariant(bfe->h->compare_fun); if (node->height == 0) { - int left_child = toku_bfe_leftmost_child_wanted(bfe, node); - int right_child = toku_bfe_rightmost_child_wanted(bfe, node); + int left_child = bfe->leftmost_child_wanted(node); + int right_child = bfe->rightmost_child_wanted(node); if (left_child == right_child) { bfe->child_to_read = left_child; } @@ -1426,14 +1328,14 @@ update_bfe_using_ftnode(FTNODE node, struct ftnode_fetch_extra *bfe) // initialize all of the given ftnode's partitions. static void setup_partitions_using_bfe(FTNODE node, - struct ftnode_fetch_extra *bfe, + ftnode_fetch_extra *bfe, bool data_in_memory) { // Leftmost and Rightmost Child bounds. int lc, rc; if (bfe->type == ftnode_fetch_subset || bfe->type == ftnode_fetch_prefetch) { - lc = toku_bfe_leftmost_child_wanted(bfe, node); - rc = toku_bfe_rightmost_child_wanted(bfe, node); + lc = bfe->leftmost_child_wanted(node); + rc = bfe->rightmost_child_wanted(node); } else { lc = -1; rc = -1; @@ -1442,11 +1344,11 @@ setup_partitions_using_bfe(FTNODE node, // // setup memory needed for the node // - //printf("node height %d, blocknum %" PRId64 ", type %d lc %d rc %d\n", node->height, node->thisnodename.b, bfe->type, lc, rc); + //printf("node height %d, blocknum %" PRId64 ", type %d lc %d rc %d\n", node->height, node->blocknum.b, bfe->type, lc, rc); for (int i = 0; i < node->n_children; i++) { BP_INIT_UNTOUCHED_CLOCK(node,i); if (data_in_memory) { - BP_STATE(node, i) = ((toku_bfe_wants_child_available(bfe, i) || (lc <= i && i <= rc)) + BP_STATE(node, i) = ((bfe->wants_child_available(i) || (lc <= i && i <= rc)) ? PT_AVAIL : PT_COMPRESSED); } else { BP_STATE(node, i) = PT_ON_DISK; @@ -1470,7 +1372,7 @@ setup_partitions_using_bfe(FTNODE node, } } -static void setup_ftnode_partitions(FTNODE node, struct ftnode_fetch_extra* bfe, bool data_in_memory) +static void setup_ftnode_partitions(FTNODE node, ftnode_fetch_extra *bfe, bool data_in_memory) // Effect: Used when reading a ftnode into main memory, this sets up the partitions. // We set bfe->child_to_read as well as the BP_STATE and the data pointers (e.g., with set_BSB or set_BNULL or other set_ operations). // Arguments: Node: the node to set up. 
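// For orientation, a minimal sketch of the partition-state rule that
// setup_partitions_using_bfe() applies above. The helper function and its
// signature are illustrative and not part of the source; the PT_* states,
// wants_child_available() and the lc/rc bounds are taken from the hunk above.

    // Decide the initial state of partition i when a node is brought into memory.
    // lc/rc bound the child range wanted by a subset/prefetch fetch, -1 otherwise.
    static inline int initial_partition_state(ftnode_fetch_extra *bfe, int i,
                                              int lc, int rc, bool data_in_memory) {
        if (!data_in_memory) {
            return PT_ON_DISK;        // nothing was read for this partition
        }
        if (bfe->wants_child_available(i) || (lc <= i && i <= rc)) {
            return PT_AVAIL;          // decompress and deserialize it right away
        }
        return PT_COMPRESSED;         // keep only the compressed sub block
    }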
@@ -1493,8 +1395,7 @@ deserialize_ftnode_partition( struct sub_block *sb, FTNODE node, int childnum, // which partition to deserialize - DESCRIPTOR desc, - ft_compare_func cmp + const toku::comparator &cmp ) { int r = 0; @@ -1512,8 +1413,14 @@ deserialize_ftnode_partition( ch = rbuf_char(&rb); if (node->height > 0) { - assert(ch == FTNODE_PARTITION_FIFO_MSG); - deserialize_child_buffer(BNC(node, childnum), &rb, desc, cmp); + assert(ch == FTNODE_PARTITION_MSG_BUFFER); + NONLEAF_CHILDINFO bnc = BNC(node, childnum); + if (node->layout_version_read_from_disk <= FT_LAYOUT_VERSION_26) { + // Layout version <= 26 did not serialize sorted message trees to disk. + deserialize_child_buffer_v26(bnc, &rb, cmp); + } else { + deserialize_child_buffer(bnc, &rb); + } BP_WORKDONE(node, childnum) = 0; } else { @@ -1533,7 +1440,7 @@ exit: static int decompress_and_deserialize_worker(struct rbuf curr_rbuf, struct sub_block curr_sb, FTNODE node, int child, - DESCRIPTOR desc, ft_compare_func cmp, tokutime_t *decompress_time) + const toku::comparator &cmp, tokutime_t *decompress_time) { int r = 0; tokutime_t t0 = toku_time_now(); @@ -1541,7 +1448,7 @@ decompress_and_deserialize_worker(struct rbuf curr_rbuf, struct sub_block curr_s tokutime_t t1 = toku_time_now(); if (r == 0) { // at this point, sb->uncompressed_ptr stores the serialized node partition - r = deserialize_ftnode_partition(&curr_sb, node, child, desc, cmp); + r = deserialize_ftnode_partition(&curr_sb, node, child, cmp); } *decompress_time = t1 - t0; @@ -1572,7 +1479,7 @@ static FTNODE alloc_ftnode_for_deserialize(uint32_t fullhash, BLOCKNUM blocknum) // Effect: Allocate an FTNODE and fill in the values that are not read from FTNODE XMALLOC(node); node->fullhash = fullhash; - node->thisnodename = blocknum; + node->blocknum = blocknum; node->dirty = 0; node->bp = nullptr; node->oldest_referenced_xid_known = TXNID_NONE; @@ -1584,7 +1491,7 @@ deserialize_ftnode_header_from_rbuf_if_small_enough (FTNODE *ftnode, FTNODE_DISK_DATA* ndd, BLOCKNUM blocknum, uint32_t fullhash, - struct ftnode_fetch_extra *bfe, + ftnode_fetch_extra *bfe, struct rbuf *rb, int fd) // If we have enough information in the rbuf to construct a header, then do so. 
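// Big picture for the function whose header appears above (and the read helper
// a few hunks earlier): only the first min(32KB, block size) bytes are read,
// rounded up to a 512-byte multiple for O_DIRECT. If the node header and the
// node-info sub block fit inside that prefix, the node is materialized with its
// partitions left compressed or on disk; otherwise this path reports failure
// and the caller falls back to reading and deserializing the whole block (the
// fallback itself is outside the hunks shown here).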
@@ -1608,7 +1515,7 @@ deserialize_ftnode_header_from_rbuf_if_small_enough (FTNODE *ftnode, goto cleanup; } - bytevec magic; + const void *magic; rbuf_literal_bytes(rb, &magic, 8); if (memcmp(magic, "tokuleaf", 8)!=0 && memcmp(magic, "tokunode", 8)!=0) { @@ -1676,8 +1583,8 @@ deserialize_ftnode_header_from_rbuf_if_small_enough (FTNODE *ftnode, } // Finish reading compressed the sub_block - bytevec* cp; - cp = (bytevec*)&sb_node_info.compressed_ptr; + const void **cp; + cp = (const void **) &sb_node_info.compressed_ptr; rbuf_literal_bytes(rb, cp, sb_node_info.compressed_size); sb_node_info.xsum = rbuf_int(rb); // let's check the checksum @@ -1689,8 +1596,9 @@ deserialize_ftnode_header_from_rbuf_if_small_enough (FTNODE *ftnode, } // Now decompress the subblock - sb_node_info.uncompressed_ptr = toku_xmalloc(sb_node_info.uncompressed_size); { + toku::scoped_malloc sb_node_info_buf(sb_node_info.uncompressed_size); + sb_node_info.uncompressed_ptr = sb_node_info_buf.get(); tokutime_t decompress_t0 = toku_time_now(); toku_decompress( (Bytef *) sb_node_info.uncompressed_ptr, @@ -1700,25 +1608,21 @@ deserialize_ftnode_header_from_rbuf_if_small_enough (FTNODE *ftnode, ); tokutime_t decompress_t1 = toku_time_now(); decompress_time = decompress_t1 - decompress_t0; - } - // at this point sb->uncompressed_ptr stores the serialized node info. - r = deserialize_ftnode_info(&sb_node_info, node); - if (r != 0) { - goto cleanup; + // at this point sb->uncompressed_ptr stores the serialized node info. + r = deserialize_ftnode_info(&sb_node_info, node); + if (r != 0) { + goto cleanup; + } } - toku_free(sb_node_info.uncompressed_ptr); - sb_node_info.uncompressed_ptr = NULL; - // Now we have the ftnode_info. We have a bunch more stuff in the // rbuf, so we might be able to store the compressed data for some // objects. // We can proceed to deserialize the individual subblocks. - paranoid_invariant(is_valid_ftnode_fetch_type(bfe->type)); // setup the memory of the partitions - // for partitions being decompressed, create either FIFO or basement node + // for partitions being decompressed, create either message buffer or basement node // for partitions staying compressed, create sub_block setup_ftnode_partitions(node, bfe, false); @@ -1738,7 +1642,7 @@ deserialize_ftnode_header_from_rbuf_if_small_enough (FTNODE *ftnode, // handle clock for (int i = 0; i < node->n_children; i++) { - if (toku_bfe_wants_child_available(bfe, i)) { + if (bfe->wants_child_available(i)) { paranoid_invariant(BP_STATE(node,i) == PT_AVAIL); BP_TOUCH_CLOCK(node,i); } @@ -1771,13 +1675,12 @@ cleanup: static int deserialize_and_upgrade_internal_node(FTNODE node, struct rbuf *rb, - struct ftnode_fetch_extra* bfe, + ftnode_fetch_extra *bfe, STAT64INFO info) { - int r = 0; int version = node->layout_version_read_from_disk; - if(version == FT_LAST_LAYOUT_VERSION_WITH_FINGERPRINT) { + if (version == FT_LAST_LAYOUT_VERSION_WITH_FINGERPRINT) { (void) rbuf_int(rb); // 10. fingerprint } @@ -1801,18 +1704,8 @@ deserialize_and_upgrade_internal_node(FTNODE node, } } - node->childkeys = NULL; - node->totalchildkeylens = 0; - // I. Allocate keys based on number of children. - XMALLOC_N(node->n_children - 1, node->childkeys); - // II. Copy keys from buffer to allocated keys in ftnode. - for (int i = 0; i < node->n_children - 1; ++i) { - bytevec childkeyptr; - unsigned int cklen; - rbuf_bytes(rb, &childkeyptr, &cklen); // 17. 
child key pointers - toku_memdup_dbt(&node->childkeys[i], childkeyptr, cklen); - node->totalchildkeylens += cklen; - } + // Pivot keys + node->pivotkeys.deserialize_from_rbuf(rb, node->n_children - 1); // Create space for the child node buffers (a.k.a. partitions). XMALLOC_N(node->n_children, node->bp); @@ -1824,12 +1717,15 @@ deserialize_and_upgrade_internal_node(FTNODE node, } // Read in the child buffer maps. - struct sub_block_map child_buffer_map[node->n_children]; for (int i = 0; i < node->n_children; ++i) { - // The following fields are read in the - // sub_block_map_deserialize() call: - // 19. index 20. offset 21. size - sub_block_map_deserialize(&child_buffer_map[i], rb); + // The following fields were previously used by the `sub_block_map' + // They include: + // - 4 byte index + (void) rbuf_int(rb); + // - 4 byte offset + (void) rbuf_int(rb); + // - 4 byte size + (void) rbuf_int(rb); } // We need to setup this node's partitions, but we can't call the @@ -1841,8 +1737,8 @@ deserialize_and_upgrade_internal_node(FTNODE node, // sure we properly intitialize our partitions before filling them // in from our soon-to-be-upgraded node. update_bfe_using_ftnode(node, bfe); - struct ftnode_fetch_extra temp_bfe; - temp_bfe.type = ftnode_fetch_all; + ftnode_fetch_extra temp_bfe; + temp_bfe.create_for_full_read(nullptr); setup_partitions_using_bfe(node, &temp_bfe, true); // Cache the highest MSN generated for the message buffers. This @@ -1864,87 +1760,13 @@ deserialize_and_upgrade_internal_node(FTNODE node, // Deserialize de-compressed buffers. for (int i = 0; i < node->n_children; ++i) { NONLEAF_CHILDINFO bnc = BNC(node, i); - int n_in_this_buffer = rbuf_int(rb); // 22. node count - - int32_t *fresh_offsets = NULL; - int32_t *broadcast_offsets = NULL; - int nfresh = 0; - int nbroadcast_offsets = 0; - - if (bfe->h->compare_fun) { - XMALLOC_N(n_in_this_buffer, fresh_offsets); - // We skip 'stale' offsets for upgraded nodes. - XMALLOC_N(n_in_this_buffer, broadcast_offsets); - } - - // Atomically decrement the header's MSN count by the number - // of messages in the buffer. - MSN lowest; - uint64_t amount = n_in_this_buffer; - lowest.msn = toku_sync_sub_and_fetch(&bfe->h->h->highest_unused_msn_for_upgrade.msn, amount); + MSN highest_msn_in_this_buffer = deserialize_child_buffer_v13(bfe->ft, bnc, rb); if (highest_msn.msn == 0) { - highest_msn.msn = lowest.msn + n_in_this_buffer; - } - - // Create the FIFO entires from the deserialized buffer. - for (int j = 0; j < n_in_this_buffer; ++j) { - bytevec key; ITEMLEN keylen; - bytevec val; ITEMLEN vallen; - unsigned char ctype = rbuf_char(rb); // 23. message type - enum ft_msg_type type = (enum ft_msg_type) ctype; - XIDS xids; - xids_create_from_buffer(rb, &xids); // 24. XID - rbuf_bytes(rb, &key, &keylen); // 25. key - rbuf_bytes(rb, &val, &vallen); // 26. value - - // <CER> can we factor this out? - int32_t *dest; - if (bfe->h->compare_fun) { - if (ft_msg_type_applies_once(type)) { - dest = &fresh_offsets[nfresh]; - nfresh++; - } else if (ft_msg_type_applies_all(type) || ft_msg_type_does_nothing(type)) { - dest = &broadcast_offsets[nbroadcast_offsets]; - nbroadcast_offsets++; - } else { - abort(); - } - } else { - dest = NULL; - } - - // Increment our MSN, the last message should have the - // newest/highest MSN. See above for a full explanation. 
- lowest.msn++; - r = toku_fifo_enq(bnc->buffer, - key, - keylen, - val, - vallen, - type, - lowest, - xids, - true, - dest); - lazy_assert_zero(r); - xids_destroy(&xids); - } - - if (bfe->h->compare_fun) { - struct toku_fifo_entry_key_msn_cmp_extra extra = { .desc = &bfe->h->cmp_descriptor, - .cmp = bfe->h->compare_fun, - .fifo = bnc->buffer }; - typedef toku::sort<int32_t, const struct toku_fifo_entry_key_msn_cmp_extra, toku_fifo_entry_key_msn_cmp> key_msn_sort; - r = key_msn_sort::mergesort_r(fresh_offsets, nfresh, extra); - assert_zero(r); - bnc->fresh_message_tree.destroy(); - bnc->fresh_message_tree.create_steal_sorted_array(&fresh_offsets, nfresh, n_in_this_buffer); - bnc->broadcast_list.destroy(); - bnc->broadcast_list.create_steal_sorted_array(&broadcast_offsets, nbroadcast_offsets, n_in_this_buffer); + highest_msn.msn = highest_msn_in_this_buffer.msn; } } - // Assign the highest msn from our upgrade message FIFO queues. + // Assign the highest msn from our upgrade message buffers node->max_msn_applied_to_node_on_disk = highest_msn; // Since we assigned MSNs to this node's messages, we need to dirty it. node->dirty = 1; @@ -1962,13 +1784,13 @@ deserialize_and_upgrade_internal_node(FTNODE node, actual_xsum); fprintf(stderr, "Checksum failure while reading node in file %s.\n", - toku_cachefile_fname_in_env(bfe->h->cf)); + toku_cachefile_fname_in_env(bfe->ft->cf)); fflush(stderr); return toku_db_badformat(); } } - return r; + return 0; } // This function takes a deserialized version 13 or 14 buffer and @@ -1976,7 +1798,7 @@ deserialize_and_upgrade_internal_node(FTNODE node, static int deserialize_and_upgrade_leaf_node(FTNODE node, struct rbuf *rb, - struct ftnode_fetch_extra* bfe, + ftnode_fetch_extra *bfe, STAT64INFO info) { int r = 0; @@ -2012,23 +1834,26 @@ deserialize_and_upgrade_leaf_node(FTNODE node, // basement node. node->n_children = 1; XMALLOC_N(node->n_children, node->bp); - // This is a malloc(0), but we need to do it in order to get a pointer - // we can free() later. - XMALLOC_N(node->n_children - 1, node->childkeys); - node->totalchildkeylens = 0; + node->pivotkeys.create_empty(); // Create one basement node to contain all the leaf entries by // setting up the single partition and updating the bfe. update_bfe_using_ftnode(node, bfe); - struct ftnode_fetch_extra temp_bfe; - fill_bfe_for_full_read(&temp_bfe, bfe->h); + ftnode_fetch_extra temp_bfe; + temp_bfe.create_for_full_read(bfe->ft); setup_partitions_using_bfe(node, &temp_bfe, true); // 11. Deserialize the partition maps, though they are not used in the // newer versions of ftnodes. - struct sub_block_map part_map[npartitions]; - for (int i = 0; i < npartitions; ++i) { - sub_block_map_deserialize(&part_map[i], rb); + for (int i = 0; i < node->n_children; ++i) { + // The following fields were previously used by the `sub_block_map' + // They include: + // - 4 byte index + (void) rbuf_int(rb); + // - 4 byte offset + (void) rbuf_int(rb); + // - 4 byte size + (void) rbuf_int(rb); } // Copy all of the leaf entries into the single basement node. @@ -2086,7 +1911,7 @@ deserialize_and_upgrade_leaf_node(FTNODE node, // Whatever this is must be less than the MSNs of every message above // it, so it's ok to take it here. 
- bn->max_msn_applied = bfe->h->h->highest_unused_msn_for_upgrade; + bn->max_msn_applied = bfe->ft->h->highest_unused_msn_for_upgrade; bn->stale_ancestor_messages_applied = false; node->max_msn_applied_to_node_on_disk = bn->max_msn_applied; @@ -2102,7 +1927,7 @@ deserialize_and_upgrade_leaf_node(FTNODE node, actual_xsum); fprintf(stderr, "Checksum failure while reading node in file %s.\n", - toku_cachefile_fname_in_env(bfe->h->cf)); + toku_cachefile_fname_in_env(bfe->ft->cf)); fflush(stderr); return toku_db_badformat(); } @@ -2120,7 +1945,7 @@ deserialize_and_upgrade_leaf_node(FTNODE node, static int read_and_decompress_block_from_fd_into_rbuf(int fd, BLOCKNUM blocknum, DISKOFF offset, DISKOFF size, - FT h, + FT ft, struct rbuf *rb, /* out */ int *layout_version_p); @@ -2132,7 +1957,7 @@ static int deserialize_and_upgrade_ftnode(FTNODE node, FTNODE_DISK_DATA* ndd, BLOCKNUM blocknum, - struct ftnode_fetch_extra* bfe, + ftnode_fetch_extra *bfe, STAT64INFO info, int fd) { @@ -2143,16 +1968,14 @@ deserialize_and_upgrade_ftnode(FTNODE node, // we read the different sub-sections. // get the file offset and block size for the block DISKOFF offset, size; - toku_translate_blocknum_to_offset_size(bfe->h->blocktable, - blocknum, - &offset, - &size); + bfe->ft->blocktable.translate_blocknum_to_offset_size(blocknum, &offset, &size); + struct rbuf rb; r = read_and_decompress_block_from_fd_into_rbuf(fd, blocknum, offset, size, - bfe->h, + bfe->ft, &rb, &version); if (r != 0) { @@ -2162,7 +1985,7 @@ deserialize_and_upgrade_ftnode(FTNODE node, // Re-read the magic field from the previous call, since we are // restarting with a fresh rbuf. { - bytevec magic; + const void *magic; rbuf_literal_bytes(&rb, &magic, 8); // 1. magic } @@ -2224,7 +2047,7 @@ deserialize_ftnode_from_rbuf( FTNODE_DISK_DATA* ndd, BLOCKNUM blocknum, uint32_t fullhash, - struct ftnode_fetch_extra* bfe, + ftnode_fetch_extra *bfe, STAT64INFO info, struct rbuf *rb, int fd @@ -2244,7 +2067,7 @@ deserialize_ftnode_from_rbuf( // now start reading from rbuf // first thing we do is read the header information - bytevec magic; + const void *magic; rbuf_literal_bytes(rb, &magic, 8); if (memcmp(magic, "tokuleaf", 8)!=0 && memcmp(magic, "tokunode", 8)!=0) { @@ -2321,10 +2144,9 @@ deserialize_ftnode_from_rbuf( // now that the node info has been deserialized, we can proceed to deserialize // the individual sub blocks - paranoid_invariant(is_valid_ftnode_fetch_type(bfe->type)); // setup the memory of the partitions - // for partitions being decompressed, create either FIFO or basement node + // for partitions being decompressed, create either message buffer or basement node // for partitions staying compressed, create sub_block setup_ftnode_partitions(node, bfe, true); @@ -2365,7 +2187,7 @@ deserialize_ftnode_from_rbuf( // case where we read and decompress the partition tokutime_t partition_decompress_time; r = decompress_and_deserialize_worker(curr_rbuf, curr_sb, node, i, - &bfe->h->cmp_descriptor, bfe->h->compare_fun, &partition_decompress_time); + bfe->ft->cmp, &partition_decompress_time); decompress_time += partition_decompress_time; if (r != 0) { goto cleanup; @@ -2408,7 +2230,7 @@ cleanup: } int -toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, struct ftnode_fetch_extra* bfe) { +toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, ftnode_fetch_extra *bfe) { int r = 0; assert(BP_STATE(node,childnum) == PT_ON_DISK); assert(node->bp[childnum].ptr.tag == BCT_NULL); @@ -2424,16 
+2246,13 @@ toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, i // // get the file offset and block size for the block DISKOFF node_offset, total_node_disk_size; - toku_translate_blocknum_to_offset_size( - bfe->h->blocktable, - node->thisnodename, - &node_offset, - &total_node_disk_size - ); + bfe->ft->blocktable.translate_blocknum_to_offset_size(node->blocknum, &node_offset, &total_node_disk_size); uint32_t curr_offset = BP_START(ndd, childnum); - uint32_t curr_size = BP_SIZE (ndd, childnum); - struct rbuf rb = {.buf = NULL, .size = 0, .ndone = 0}; + uint32_t curr_size = BP_SIZE (ndd, childnum); + + struct rbuf rb; + rbuf_init(&rb, nullptr, 0); uint32_t pad_at_beginning = (node_offset+curr_offset)%512; uint32_t padded_size = roundup_to_multiple(512, pad_at_beginning + curr_size); @@ -2471,7 +2290,7 @@ toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, i // deserialize tokutime_t t2 = toku_time_now(); - r = deserialize_ftnode_partition(&curr_sb, node, childnum, &bfe->h->cmp_descriptor, bfe->h->compare_fun); + r = deserialize_ftnode_partition(&curr_sb, node, childnum, bfe->ft->cmp); tokutime_t t3 = toku_time_now(); @@ -2491,7 +2310,7 @@ toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, i // Take a ftnode partition that is in the compressed state, and make it avail int -toku_deserialize_bp_from_compressed(FTNODE node, int childnum, struct ftnode_fetch_extra *bfe) { +toku_deserialize_bp_from_compressed(FTNODE node, int childnum, ftnode_fetch_extra *bfe) { int r = 0; assert(BP_STATE(node, childnum) == PT_COMPRESSED); SUB_BLOCK curr_sb = BSB(node, childnum); @@ -2515,7 +2334,7 @@ toku_deserialize_bp_from_compressed(FTNODE node, int childnum, struct ftnode_fet tokutime_t t1 = toku_time_now(); - r = deserialize_ftnode_partition(curr_sb, node, childnum, &bfe->h->cmp_descriptor, bfe->h->compare_fun); + r = deserialize_ftnode_partition(curr_sb, node, childnum, bfe->ft->cmp); tokutime_t t2 = toku_time_now(); @@ -2536,13 +2355,13 @@ deserialize_ftnode_from_fd(int fd, uint32_t fullhash, FTNODE *ftnode, FTNODE_DISK_DATA *ndd, - struct ftnode_fetch_extra *bfe, + ftnode_fetch_extra *bfe, STAT64INFO info) { struct rbuf rb = RBUF_INITIALIZER; tokutime_t t0 = toku_time_now(); - read_block_from_fd_into_rbuf(fd, blocknum, bfe->h, &rb); + read_block_from_fd_into_rbuf(fd, blocknum, bfe->ft, &rb); tokutime_t t1 = toku_time_now(); // Decompress and deserialize the ftnode. Time statistics @@ -2565,7 +2384,7 @@ toku_deserialize_ftnode_from (int fd, uint32_t fullhash, FTNODE *ftnode, FTNODE_DISK_DATA* ndd, - struct ftnode_fetch_extra* bfe + ftnode_fetch_extra *bfe ) // Effect: Read a node in. If possible, read just the header. 
{ @@ -2575,7 +2394,7 @@ toku_deserialize_ftnode_from (int fd, // each function below takes the appropriate io/decompression/deserialize statistics if (!bfe->read_all_partitions) { - read_ftnode_header_from_fd_into_rbuf_if_small_enough(fd, blocknum, bfe->h, &rb, bfe); + read_ftnode_header_from_fd_into_rbuf_if_small_enough(fd, blocknum, bfe->ft, &rb, bfe); r = deserialize_ftnode_header_from_rbuf_if_small_enough(ftnode, ndd, blocknum, fullhash, bfe, &rb, fd); } else { // force us to do it the old way @@ -2628,7 +2447,7 @@ serialize_rollback_log_node_to_buf(ROLLBACK_LOG_NODE log, char *buf, size_t calc wbuf_nocrc_BLOCKNUM(&wb, log->previous); wbuf_nocrc_ulonglong(&wb, log->rollentry_resident_bytecount); //Write down memarena size needed to restore - wbuf_nocrc_ulonglong(&wb, toku_memarena_total_size_in_use(log->rollentry_arena)); + wbuf_nocrc_ulonglong(&wb, log->rollentry_arena.total_size_in_use()); { //Store rollback logs @@ -2724,7 +2543,7 @@ toku_serialize_rollback_log_to_memory_uncompressed(ROLLBACK_LOG_NODE log, SERIAL int toku_serialize_rollback_log_to (int fd, ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized_log, bool is_serialized, - FT h, bool for_checkpoint) { + FT ft, bool for_checkpoint) { size_t n_to_write; char *compressed_buf; struct serialized_rollback_log_node serialized_local; @@ -2736,20 +2555,26 @@ toku_serialize_rollback_log_to (int fd, ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBA serialized_log = &serialized_local; toku_serialize_rollback_log_to_memory_uncompressed(log, serialized_log); } + BLOCKNUM blocknum = serialized_log->blocknum; + invariant(blocknum.b >= 0); - //Compress and malloc buffer to write + // Compress and malloc buffer to write serialize_uncompressed_block_to_memory(serialized_log->data, - serialized_log->n_sub_blocks, serialized_log->sub_block, - h->h->compression_method, &n_to_write, &compressed_buf); + serialized_log->n_sub_blocks, + serialized_log->sub_block, + ft->h->compression_method, + &n_to_write, &compressed_buf); - { - lazy_assert(blocknum.b>=0); - DISKOFF offset; - toku_blocknum_realloc_on_disk(h->blocktable, blocknum, n_to_write, &offset, - h, fd, for_checkpoint); //dirties h - toku_os_full_pwrite(fd, compressed_buf, n_to_write, offset); - } + // Dirties the ft + DISKOFF offset; + ft->blocktable.realloc_on_disk(blocknum, n_to_write, &offset, + ft, fd, for_checkpoint, + // We consider rollback log flushing the hottest possible allocation, + // since rollback logs are short-lived compared to FT nodes. 
+ INT_MAX); + + toku_os_full_pwrite(fd, compressed_buf, n_to_write, offset); toku_free(compressed_buf); if (!is_serialized) { toku_static_serialized_rollback_log_destroy(&serialized_local); @@ -2768,13 +2593,13 @@ deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, ROLLBACK_LOG_NODE *log_p, return r; } - //printf("Deserializing %lld datasize=%d\n", off, datasize); - bytevec magic; + const void *magic; rbuf_literal_bytes(rb, &magic, 8); lazy_assert(!memcmp(magic, "tokuroll", 8)); result->layout_version = rbuf_int(rb); - lazy_assert(result->layout_version == FT_LAYOUT_VERSION); + lazy_assert((FT_LAYOUT_VERSION_25 <= result->layout_version && result->layout_version <= FT_LAYOUT_VERSION_27) || + (result->layout_version == FT_LAYOUT_VERSION)); result->layout_version_original = rbuf_int(rb); result->layout_version_read_from_disk = result->layout_version; result->build_id = rbuf_int(rb); @@ -2792,8 +2617,8 @@ deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, ROLLBACK_LOG_NODE *log_p, result->rollentry_resident_bytecount = rbuf_ulonglong(rb); size_t arena_initial_size = rbuf_ulonglong(rb); - result->rollentry_arena = toku_memarena_create_presized(arena_initial_size); - if (0) { died1: toku_memarena_destroy(&result->rollentry_arena); goto died0; } + result->rollentry_arena.create(arena_initial_size); + if (0) { died1: result->rollentry_arena.destroy(); goto died0; } //Load rollback entries lazy_assert(rb->size > 4); @@ -2802,10 +2627,10 @@ deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, ROLLBACK_LOG_NODE *log_p, while (rb->ndone < rb->size) { struct roll_entry *item; uint32_t rollback_fsize = rbuf_int(rb); //Already read 4. Rest is 4 smaller - bytevec item_vec; + const void *item_vec; rbuf_literal_bytes(rb, &item_vec, rollback_fsize-4); unsigned char* item_buf = (unsigned char*)item_vec; - r = toku_parse_rollback(item_buf, rollback_fsize-4, &item, result->rollentry_arena); + r = toku_parse_rollback(item_buf, rollback_fsize-4, &item, &result->rollentry_arena); if (r!=0) { r = toku_db_badformat(); goto died1; @@ -2834,7 +2659,7 @@ deserialize_rollback_log_from_rbuf_versioned (uint32_t version, BLOCKNUM blocknu struct rbuf *rb) { int r = 0; ROLLBACK_LOG_NODE rollback_log_node = NULL; - invariant(version==FT_LAYOUT_VERSION); //Rollback log nodes do not survive version changes. + invariant((FT_LAYOUT_VERSION_25 <= version && version <= FT_LAYOUT_VERSION_27) || version == FT_LAYOUT_VERSION); r = deserialize_rollback_log_from_rbuf(blocknum, &rollback_log_node, rb); if (r==0) { *log = rollback_log_node; @@ -2931,18 +2756,15 @@ exit: return r; } -static int -decompress_from_raw_block_into_rbuf_versioned(uint32_t version, uint8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum) { +static int decompress_from_raw_block_into_rbuf_versioned(uint32_t version, uint8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum) { // This function exists solely to accomodate future changes in compression. 
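// (As rewritten here, the accepted on-disk layout versions are 13, 14, 25
//  through 27, and the current FT_LAYOUT_VERSION; anything else still aborts.
//  This mirrors the relaxed version checks added for rollback log nodes a few
//  hunks earlier.)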
int r = 0; - switch (version) { - case FT_LAYOUT_VERSION_13: - case FT_LAYOUT_VERSION_14: - case FT_LAYOUT_VERSION: - r = decompress_from_raw_block_into_rbuf(raw_block, raw_block_size, rb, blocknum); - break; - default: - abort(); + if ((version == FT_LAYOUT_VERSION_13 || version == FT_LAYOUT_VERSION_14) || + (FT_LAYOUT_VERSION_25 <= version && version <= FT_LAYOUT_VERSION_27) || + version == FT_LAYOUT_VERSION) { + r = decompress_from_raw_block_into_rbuf(raw_block, raw_block_size, rb, blocknum); + } else { + abort(); } return r; } @@ -2950,7 +2772,7 @@ decompress_from_raw_block_into_rbuf_versioned(uint32_t version, uint8_t *raw_blo static int read_and_decompress_block_from_fd_into_rbuf(int fd, BLOCKNUM blocknum, DISKOFF offset, DISKOFF size, - FT h, + FT ft, struct rbuf *rb, /* out */ int *layout_version_p) { int r = 0; @@ -2989,7 +2811,7 @@ read_and_decompress_block_from_fd_into_rbuf(int fd, BLOCKNUM blocknum, if (r == TOKUDB_BAD_CHECKSUM) { fprintf(stderr, "Checksum failure while reading raw block in file %s.\n", - toku_cachefile_fname_in_env(h->cf)); + toku_cachefile_fname_in_env(ft->cf)); abort(); } else { r = toku_db_badformat(); @@ -3009,16 +2831,19 @@ cleanup: return r; } -// Read rollback log node from file into struct. Perform version upgrade if necessary. -int -toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT h) { +// Read rollback log node from file into struct. +// Perform version upgrade if necessary. +int toku_deserialize_rollback_log_from(int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT ft) { int layout_version = 0; int r; - struct rbuf rb = {.buf = NULL, .size = 0, .ndone = 0}; + + struct rbuf rb; + rbuf_init(&rb, nullptr, 0); // get the file offset and block size for the block DISKOFF offset, size; - toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size); + ft->blocktable.translate_blocknum_to_offset_size(blocknum, &offset, &size); + // if the size is 0, then the blocknum is unused if (size == 0) { // blocknum is unused, just create an empty one and get out @@ -3030,7 +2855,7 @@ toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE goto cleanup; } - r = read_and_decompress_block_from_fd_into_rbuf(fd, blocknum, offset, size, h, &rb, &layout_version); + r = read_and_decompress_block_from_fd_into_rbuf(fd, blocknum, offset, size, ft, &rb, &layout_version); if (r!=0) goto cleanup; { @@ -3044,24 +2869,26 @@ toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE r = deserialize_rollback_log_from_rbuf_versioned(layout_version, blocknum, logp, &rb); cleanup: - if (rb.buf) toku_free(rb.buf); + if (rb.buf) { + toku_free(rb.buf); + } return r; } int -toku_upgrade_subtree_estimates_to_stat64info(int fd, FT h) +toku_upgrade_subtree_estimates_to_stat64info(int fd, FT ft) { int r = 0; // 15 was the last version with subtree estimates - invariant(h->layout_version_read_from_disk <= FT_LAYOUT_VERSION_15); + invariant(ft->layout_version_read_from_disk <= FT_LAYOUT_VERSION_15); FTNODE unused_node = NULL; FTNODE_DISK_DATA unused_ndd = NULL; - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, h); - r = deserialize_ftnode_from_fd(fd, h->h->root_blocknum, 0, &unused_node, &unused_ndd, - &bfe, &h->h->on_disk_stats); - h->in_memory_stats = h->h->on_disk_stats; + ftnode_fetch_extra bfe; + bfe.create_for_min_read(ft); + r = deserialize_ftnode_from_fd(fd, ft->h->root_blocknum, 0, &unused_node, &unused_ndd, + &bfe, &ft->h->on_disk_stats); + ft->in_memory_stats = 
ft->h->on_disk_stats; if (unused_node) { toku_ftnode_free(&unused_node); @@ -3073,22 +2900,22 @@ toku_upgrade_subtree_estimates_to_stat64info(int fd, FT h) } int -toku_upgrade_msn_from_root_to_header(int fd, FT h) +toku_upgrade_msn_from_root_to_header(int fd, FT ft) { int r; // 21 was the first version with max_msn_in_ft in the header - invariant(h->layout_version_read_from_disk <= FT_LAYOUT_VERSION_20); + invariant(ft->layout_version_read_from_disk <= FT_LAYOUT_VERSION_20); FTNODE node; FTNODE_DISK_DATA ndd; - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, h); - r = deserialize_ftnode_from_fd(fd, h->h->root_blocknum, 0, &node, &ndd, &bfe, nullptr); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(ft); + r = deserialize_ftnode_from_fd(fd, ft->h->root_blocknum, 0, &node, &ndd, &bfe, nullptr); if (r != 0) { goto exit; } - h->h->max_msn_in_ft = node->max_msn_applied_to_node_on_disk; + ft->h->max_msn_in_ft = node->max_msn_applied_to_node_on_disk; toku_ftnode_free(&node); toku_free(ndd); exit: diff --git a/storage/tokudb/ft-index/ft/serialize/ft_node-serialize.h b/storage/tokudb/ft-index/ft/serialize/ft_node-serialize.h new file mode 100644 index 00000000000..319e270dd58 --- /dev/null +++ b/storage/tokudb/ft-index/ft/serialize/ft_node-serialize.h @@ -0,0 +1,142 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. 
+ + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#pragma once + +#include "ft/ft.h" +#include "ft/node.h" +#include "ft/serialize/sub_block.h" +#include "ft/serialize/rbuf.h" +#include "ft/serialize/wbuf.h" +#include "ft/serialize/block_table.h" + +unsigned int toku_serialize_ftnode_size(FTNODE node); +int toku_serialize_ftnode_to_memory(FTNODE node, FTNODE_DISK_DATA *ndd, + unsigned int basementnodesize, + enum toku_compression_method compression_method, + bool do_rebalancing, bool in_parallel, + size_t *n_bytes_to_write, size_t *n_uncompressed_bytes, + char **bytes_to_write); +int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA *ndd, bool do_rebalancing, FT ft, bool for_checkpoint); +int toku_serialize_rollback_log_to(int fd, ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized_log, bool is_serialized, + FT ft, bool for_checkpoint); +void toku_serialize_rollback_log_to_memory_uncompressed(ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized); + +int toku_deserialize_rollback_log_from(int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT ft); +int toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, ftnode_fetch_extra *bfe); +int toku_deserialize_bp_from_compressed(FTNODE node, int childnum, ftnode_fetch_extra *bfe); +int toku_deserialize_ftnode_from(int fd, BLOCKNUM off, uint32_t fullhash, FTNODE *node, FTNODE_DISK_DATA *ndd, ftnode_fetch_extra *bfe); + +void toku_serialize_set_parallel(bool); + +// used by nonleaf node partial eviction +void toku_create_compressed_partition_from_available(FTNODE node, int childnum, + enum toku_compression_method compression_method, SUB_BLOCK sb); + +// <CER> For verifying old, non-upgraded nodes (versions 13 and 14). 
+int decompress_from_raw_block_into_rbuf(uint8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum); + +// used by verify +int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ft, uint32_t version); +void read_block_from_fd_into_rbuf(int fd, BLOCKNUM blocknum, FT ft, struct rbuf *rb); +int read_compressed_sub_block(struct rbuf *rb, struct sub_block *sb); +int verify_ftnode_sub_block(struct sub_block *sb); +void just_decompress_sub_block(struct sub_block *sb); + +// used by ft-node-deserialize.cc +void initialize_ftnode(FTNODE node, BLOCKNUM blocknum); +int read_and_check_magic(struct rbuf *rb); +int read_and_check_version(FTNODE node, struct rbuf *rb); +void read_node_info(FTNODE node, struct rbuf *rb, int version); +void allocate_and_read_partition_offsets(FTNODE node, struct rbuf *rb, FTNODE_DISK_DATA *ndd); +int check_node_info_checksum(struct rbuf *rb); +void read_legacy_node_info(FTNODE node, struct rbuf *rb, int version); +int check_legacy_end_checksum(struct rbuf *rb); + +// exported so the loader can dump bad blocks +void dump_bad_block(unsigned char *vp, uint64_t size); diff --git a/storage/tokudb/ft-index/ft/quicklz.cc b/storage/tokudb/ft-index/ft/serialize/quicklz.cc index c7cd82c80d5..81f768ababf 100644 --- a/storage/tokudb/ft-index/ft/quicklz.cc +++ b/storage/tokudb/ft-index/ft/serialize/quicklz.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/quicklz.h b/storage/tokudb/ft-index/ft/serialize/quicklz.h index 2f2db8cd739..362a246994f 100644 --- a/storage/tokudb/ft-index/ft/quicklz.h +++ b/storage/tokudb/ft-index/ft/serialize/quicklz.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,10 +86,10 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef QLZ_HEADER -#define QLZ_HEADER // Fast data compression library // Copyright (C) 2006-2011 Lasse Mikkel Reinhold @@ -228,6 +228,3 @@ int qlz_get_setting(int setting); #if defined (__cplusplus) } #endif - -#endif - diff --git a/storage/tokudb/ft-index/ft/rbuf.h b/storage/tokudb/ft-index/ft/serialize/rbuf.h index a21123bfb73..c72ea6b79db 100644 --- a/storage/tokudb/ft-index/ft/rbuf.h +++ b/storage/tokudb/ft-index/ft/serialize/rbuf.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef RBUF_H -#define RBUF_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,16 +87,18 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." 
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <toku_portability.h> -#include "toku_assert.h" -#include "fttypes.h" -#include "memory.h" -#include <toku_htonl.h> +#include <string.h> -#include <util/memarena.h> +#include "portability/memory.h" +#include "portability/toku_assert.h" +#include "portability/toku_htonl.h" +#include "portability/toku_portability.h" +#include "util/memarena.h" struct rbuf { unsigned char *buf; @@ -122,11 +122,11 @@ static inline unsigned char rbuf_char (struct rbuf *r) { return r->buf[r->ndone++]; } -static inline void rbuf_ma_uint8_t (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), uint8_t *num) { +static inline void rbuf_ma_uint8_t (struct rbuf *r, memarena *ma __attribute__((__unused__)), uint8_t *num) { *num = rbuf_char(r); } -static inline void rbuf_ma_bool (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), bool *b) { +static inline void rbuf_ma_bool (struct rbuf *r, memarena *ma __attribute__((__unused__)), bool *b) { uint8_t n = rbuf_char(r); *b = (n!=0); } @@ -158,14 +158,14 @@ static unsigned int rbuf_int (struct rbuf *r) { #endif } -static inline void rbuf_literal_bytes (struct rbuf *r, bytevec *bytes, unsigned int n_bytes) { +static inline void rbuf_literal_bytes (struct rbuf *r, const void **bytes, unsigned int n_bytes) { *bytes = &r->buf[r->ndone]; r->ndone+=n_bytes; assert(r->ndone<=r->size); } /* Return a pointer into the middle of the buffer. */ -static inline void rbuf_bytes (struct rbuf *r, bytevec *bytes, unsigned int *n_bytes) +static inline void rbuf_bytes (struct rbuf *r, const void **bytes, unsigned int *n_bytes) { *n_bytes = rbuf_int(r); rbuf_literal_bytes(r, bytes, *n_bytes); @@ -181,82 +181,14 @@ static inline signed long long rbuf_longlong (struct rbuf *r) { return (signed long long)rbuf_ulonglong(r); } -static inline DISKOFF rbuf_diskoff (struct rbuf *r) { - return rbuf_ulonglong(r); -} - -static inline LSN rbuf_lsn (struct rbuf *r) { - LSN lsn = {rbuf_ulonglong(r)}; - return lsn; -} - -static inline MSN rbuf_msn (struct rbuf *r) { - MSN msn = {rbuf_ulonglong(r)}; - return msn; -} - -static inline BLOCKNUM rbuf_blocknum (struct rbuf *r) { - BLOCKNUM result = make_blocknum(rbuf_longlong(r)); - return result; -} -static inline void rbuf_ma_BLOCKNUM (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), BLOCKNUM *blocknum) { - *blocknum = rbuf_blocknum(r); -} - -static inline void rbuf_ma_uint32_t (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), uint32_t *num) { +static inline void rbuf_ma_uint32_t (struct rbuf *r, memarena *ma __attribute__((__unused__)), uint32_t *num) { *num = rbuf_int(r); } -static inline void rbuf_ma_uint64_t (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), uint64_t *num) { +static inline void rbuf_ma_uint64_t (struct rbuf *r, memarena *ma __attribute__((__unused__)), uint64_t *num) { *num = rbuf_ulonglong(r); } - -static inline void rbuf_TXNID (struct rbuf *r, TXNID *txnid) { - *txnid = rbuf_ulonglong(r); -} - -static inline void rbuf_TXNID_PAIR (struct rbuf *r, TXNID_PAIR *txnid) { - txnid->parent_id64 = rbuf_ulonglong(r); - txnid->child_id64 = rbuf_ulonglong(r); -} - -static inline void rbuf_ma_TXNID (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), TXNID *txnid) { - rbuf_TXNID(r, txnid); -} - 
-static inline void rbuf_ma_TXNID_PAIR (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), TXNID_PAIR *txnid) { - rbuf_TXNID_PAIR(r, txnid); -} - -static inline void rbuf_FILENUM (struct rbuf *r, FILENUM *filenum) { - filenum->fileid = rbuf_int(r); -} -static inline void rbuf_ma_FILENUM (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), FILENUM *filenum) { - rbuf_FILENUM(r, filenum); -} - -// 2954 -// Don't try to use the same space, malloc it -static inline void rbuf_FILENUMS(struct rbuf *r, FILENUMS *filenums) { - filenums->num = rbuf_int(r); - filenums->filenums = (FILENUM *) toku_malloc( filenums->num * sizeof(FILENUM) ); - assert(filenums->filenums != NULL); - for (uint32_t i=0; i < filenums->num; i++) { - rbuf_FILENUM(r, &(filenums->filenums[i])); - } -} - -// 2954 -static inline void rbuf_ma_FILENUMS (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), FILENUMS *filenums) { - rbuf_ma_uint32_t(r, ma, &(filenums->num)); - filenums->filenums = (FILENUM *) toku_memarena_malloc(ma, filenums->num * sizeof(FILENUM) ); - assert(filenums->filenums != NULL); - for (uint32_t i=0; i < filenums->num; i++) { - rbuf_ma_FILENUM(r, ma, &(filenums->filenums[i])); - } -} - // Don't try to use the same space, malloc it static inline void rbuf_BYTESTRING (struct rbuf *r, BYTESTRING *bs) { bs->len = rbuf_int(r); @@ -267,14 +199,12 @@ static inline void rbuf_BYTESTRING (struct rbuf *r, BYTESTRING *bs) { r->ndone = newndone; } -static inline void rbuf_ma_BYTESTRING (struct rbuf *r, MEMARENA ma, BYTESTRING *bs) { +static inline void rbuf_ma_BYTESTRING (struct rbuf *r, memarena *ma, BYTESTRING *bs) { bs->len = rbuf_int(r); uint32_t newndone = r->ndone + bs->len; assert(newndone <= r->size); - bs->data = (char *) toku_memarena_memdup(ma, &r->buf[r->ndone], (size_t)bs->len); + bs->data = (char *) ma->malloc_from_arena(bs->len); assert(bs->data); + memcpy(bs->data, &r->buf[r->ndone], bs->len); r->ndone = newndone; } - - -#endif diff --git a/storage/tokudb/ft-index/ft/sub_block.cc b/storage/tokudb/ft-index/ft/serialize/sub_block.cc index 5d8799fb2db..1346c76b103 100644 --- a/storage/tokudb/ft-index/ft/sub_block.cc +++ b/storage/tokudb/ft-index/ft/serialize/sub_block.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,21 +89,21 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#include "compress.h" -#include "sub_block.h" -#include "quicklz.h" - -#include <memory.h> -#include <toku_assert.h> -#include <toku_portability.h> -#include <util/threadpool.h> -#include <util/x1764.h> - +#include <errno.h> #include <stdio.h> #include <string.h> -#include <errno.h> #include <zlib.h> +#include "portability/memory.h" +#include "portability/toku_assert.h" +#include "portability/toku_portability.h" + +#include "ft/serialize/compress.h" +#include "ft/serialize/sub_block.h" +#include "ft/serialize/quicklz.h" +#include "util/threadpool.h" +#include "util/x1764.h" + SUB_BLOCK sub_block_creat(void) { SUB_BLOCK XMALLOC(sb); sub_block_init(sb); diff --git a/storage/tokudb/ft-index/ft/sub_block.h b/storage/tokudb/ft-index/ft/serialize/sub_block.h index 23fad83c966..1a371c2dcd3 100644 --- a/storage/tokudb/ft-index/ft/sub_block.h +++ b/storage/tokudb/ft-index/ft/serialize/sub_block.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_SUB_BLOCK_H -#define TOKU_SUB_BLOCK_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,18 +87,19 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "compress.h" -#include "fttypes.h" - +#include "ft/serialize/compress.h" +// TODO: Clean this abstraciton up static const int max_sub_blocks = 8; -static const int target_sub_block_size = 512*1024; +static const int target_sub_block_size = 512 * 1024; static const int max_basement_nodes = 32; -static const int max_basement_node_uncompressed_size = 256*1024; -static const int max_basement_node_compressed_size = 64*1024; +static const int max_basement_node_uncompressed_size = 256 * 1024; +static const int max_basement_node_compressed_size = 64 * 1024; struct sub_block { void *uncompressed_ptr; @@ -112,6 +111,7 @@ struct sub_block { uint32_t xsum; // sub block checksum }; +typedef struct sub_block *SUB_BLOCK; struct stored_sub_block { uint32_t uncompressed_size; @@ -212,6 +212,3 @@ int decompress_all_sub_blocks(int n_sub_blocks, struct sub_block sub_block[], unsigned char *compressed_data, unsigned char *uncompressed_data, int num_cores, struct toku_thread_pool *pool); extern int verbose_decompress_sub_block; - - -#endif diff --git a/storage/tokudb/ft-index/ft/wbuf.h b/storage/tokudb/ft-index/ft/serialize/wbuf.h index 93cfe0c7185..8c71fb16b20 100644 --- a/storage/tokudb/ft-index/ft/wbuf.h +++ b/storage/tokudb/ft-index/ft/serialize/wbuf.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef WBUF_H -#define WBUF_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,18 +86,18 @@ PATENT RIGHTS GRANT: under this License. 
*/ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #include <memory.h> #include <string.h> -#include <portability/toku_htonl.h> -#include <util/x1764.h> - -#include "fttypes.h" +#include "portability/toku_htonl.h" -#define CRC_INCR +#include "util/bytestring.h" +#include "util/x1764.h" /* When serializing a value, write it into a buffer. */ /* This code requires that the buffer be big enough to hold whatever you put into it. */ @@ -113,13 +111,13 @@ struct wbuf { struct x1764 checksum; // The checksum state }; -static inline void wbuf_nocrc_init (struct wbuf *w, void *buf, DISKOFF size) { +static inline void wbuf_nocrc_init (struct wbuf *w, void *buf, unsigned int size) { w->buf = (unsigned char *) buf; w->size = size; w->ndone = 0; } -static inline void wbuf_init (struct wbuf *w, void *buf, DISKOFF size) { +static inline void wbuf_init (struct wbuf *w, void *buf, unsigned int size) { wbuf_nocrc_init(w, buf, size); toku_x1764_init(&w->checksum); } @@ -194,7 +192,7 @@ static inline uint8_t* wbuf_nocrc_reserve_literal_bytes(struct wbuf *w, uint32_t return dest; } -static inline void wbuf_nocrc_literal_bytes(struct wbuf *w, bytevec bytes_bv, uint32_t nbytes) { +static inline void wbuf_nocrc_literal_bytes(struct wbuf *w, const void *bytes_bv, uint32_t nbytes) { const unsigned char *bytes = (const unsigned char *) bytes_bv; #if 0 { int i; for (i=0; i<nbytes; i++) wbuf_nocrc_char(w, bytes[i]); } @@ -205,17 +203,17 @@ static inline void wbuf_nocrc_literal_bytes(struct wbuf *w, bytevec bytes_bv, ui #endif } -static inline void wbuf_literal_bytes(struct wbuf *w, bytevec bytes_bv, uint32_t nbytes) { +static inline void wbuf_literal_bytes(struct wbuf *w, const void *bytes_bv, uint32_t nbytes) { wbuf_nocrc_literal_bytes(w, bytes_bv, nbytes); toku_x1764_add(&w->checksum, &w->buf[w->ndone-nbytes], nbytes); } -static void wbuf_nocrc_bytes (struct wbuf *w, bytevec bytes_bv, uint32_t nbytes) { +static void wbuf_nocrc_bytes (struct wbuf *w, const void *bytes_bv, uint32_t nbytes) { wbuf_nocrc_uint(w, nbytes); wbuf_nocrc_literal_bytes(w, bytes_bv, nbytes); } -static void wbuf_bytes (struct wbuf *w, bytevec bytes_bv, uint32_t nbytes) { +static void wbuf_bytes (struct wbuf *w, const void *bytes_bv, uint32_t nbytes) { wbuf_uint(w, nbytes); wbuf_literal_bytes(w, bytes_bv, nbytes); } @@ -262,76 +260,3 @@ static inline void wbuf_nocrc_uint32_t (struct wbuf *w, uint32_t v) { static inline void wbuf_uint32_t (struct wbuf *w, uint32_t v) { wbuf_uint(w, v); } - -static inline void wbuf_DISKOFF (struct wbuf *w, DISKOFF off) { - wbuf_ulonglong(w, (uint64_t)off); -} - -static inline void wbuf_BLOCKNUM (struct wbuf *w, BLOCKNUM b) { - wbuf_ulonglong(w, b.b); -} -static inline void wbuf_nocrc_BLOCKNUM (struct wbuf *w, BLOCKNUM b) { - wbuf_nocrc_ulonglong(w, b.b); -} - -static inline void wbuf_nocrc_TXNID (struct wbuf *w, TXNID tid) { - wbuf_nocrc_ulonglong(w, tid); -} - -static inline void wbuf_nocrc_TXNID_PAIR (struct wbuf *w, TXNID_PAIR tid) { - wbuf_nocrc_ulonglong(w, tid.parent_id64); - wbuf_nocrc_ulonglong(w, tid.child_id64); -} - - -static inline void wbuf_TXNID (struct wbuf *w, TXNID tid) { - wbuf_ulonglong(w, tid); -} - -static inline void wbuf_nocrc_XIDP (struct wbuf *w, 
XIDP xid) { - wbuf_nocrc_uint32_t(w, xid->formatID); - wbuf_nocrc_uint8_t(w, xid->gtrid_length); - wbuf_nocrc_uint8_t(w, xid->bqual_length); - wbuf_nocrc_literal_bytes(w, xid->data, xid->gtrid_length+xid->bqual_length); -} - -static inline void wbuf_nocrc_LSN (struct wbuf *w, LSN lsn) { - wbuf_nocrc_ulonglong(w, lsn.lsn); -} - -static inline void wbuf_LSN (struct wbuf *w, LSN lsn) { - wbuf_ulonglong(w, lsn.lsn); -} - -static inline void wbuf_MSN (struct wbuf *w, MSN msn) { - wbuf_ulonglong(w, msn.msn); -} - -static inline void wbuf_nocrc_FILENUM (struct wbuf *w, FILENUM fileid) { - wbuf_nocrc_uint(w, fileid.fileid); -} - -static inline void wbuf_FILENUM (struct wbuf *w, FILENUM fileid) { - wbuf_uint(w, fileid.fileid); -} - -// 2954 -static inline void wbuf_nocrc_FILENUMS (struct wbuf *w, FILENUMS v) { - wbuf_nocrc_uint(w, v.num); - uint32_t i; - for (i = 0; i < v.num; i++) { - wbuf_nocrc_FILENUM(w, v.filenums[i]); - } -} - -// 2954 -static inline void wbuf_FILENUMS (struct wbuf *w, FILENUMS v) { - wbuf_uint(w, v.num); - uint32_t i; - for (i = 0; i < v.num; i++) { - wbuf_FILENUM(w, v.filenums[i]); - } -} - - -#endif diff --git a/storage/tokudb/ft-index/ft/workset.h b/storage/tokudb/ft-index/ft/serialize/workset.h index 27dd9778006..4efa042b9c3 100644 --- a/storage/tokudb/ft-index/ft/workset.h +++ b/storage/tokudb/ft-index/ft/serialize/workset.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef _TOKU_WORKSET_H -#define _TOKU_WORKSET_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,6 +87,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -187,5 +187,3 @@ workset_join(struct workset *ws) { } workset_unlock(ws); } - -#endif diff --git a/storage/tokudb/ft-index/ft/sub_block_map.h b/storage/tokudb/ft-index/ft/sub_block_map.h deleted file mode 100644 index 3c1d71078d8..00000000000 --- a/storage/tokudb/ft-index/ft/sub_block_map.h +++ /dev/null @@ -1,127 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef _TOKU_SUB_BLOCK_MAP_H -#define _TOKU_SUB_BLOCK_MAP_H - -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). 
- - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
- -// Map objects to a sequence of sub block -struct sub_block_map { - uint32_t idx; - uint32_t offset; - uint32_t size; -}; - -enum { - stored_sub_block_map_size = sizeof (struct sub_block_map), // size of a sub-block map on disk -}; - -static inline void -sub_block_map_init(struct sub_block_map *sbmap, uint32_t idx, uint32_t offset, uint32_t size) { - sbmap->idx = idx; - sbmap->offset = offset; - sbmap->size = size; -} - -static inline void -sub_block_map_serialize(struct sub_block_map *sbmap, struct wbuf *wbuf) { - wbuf_nocrc_int(wbuf, sbmap->idx); - wbuf_nocrc_int(wbuf, sbmap->offset); - wbuf_nocrc_int(wbuf, sbmap->size); -} - -static inline void -sub_block_map_deserialize(struct sub_block_map *sbmap, struct rbuf *rbuf) { - sbmap->idx = rbuf_int(rbuf); - sbmap->offset = rbuf_int(rbuf); - sbmap->size = rbuf_int(rbuf); -} - -#endif diff --git a/storage/tokudb/ft-index/ft/tests/CMakeLists.txt b/storage/tokudb/ft-index/ft/tests/CMakeLists.txt index 209155d692d..a363b70c5dd 100644 --- a/storage/tokudb/ft-index/ft/tests/CMakeLists.txt +++ b/storage/tokudb/ft-index/ft/tests/CMakeLists.txt @@ -101,6 +101,17 @@ if(BUILD_TESTING OR BUILD_FT_TESTS) set_property(TEST ft/upgrade_test_simple APPEND PROPERTY ENVIRONMENT "TOKUDB_DATA=${TOKUDB_DATA}") + # should be a file GLOB and a loop + declare_custom_tests(test-upgrade-recovery-logs) + add_ft_test_aux(test-upgrade-recovery-logs-24-clean test-upgrade-recovery-logs ${TOKUDB_DATA}/upgrade-recovery-logs-24-clean) + add_ft_test_aux(test-upgrade-recovery-logs-24-dirty test-upgrade-recovery-logs ${TOKUDB_DATA}/upgrade-recovery-logs-24-dirty) + add_ft_test_aux(test-upgrade-recovery-logs-25-clean test-upgrade-recovery-logs ${TOKUDB_DATA}/upgrade-recovery-logs-25-clean) + add_ft_test_aux(test-upgrade-recovery-logs-25-dirty test-upgrade-recovery-logs ${TOKUDB_DATA}/upgrade-recovery-logs-25-dirty) + add_ft_test_aux(test-upgrade-recovery-logs-26-clean test-upgrade-recovery-logs ${TOKUDB_DATA}/upgrade-recovery-logs-26-clean) + add_ft_test_aux(test-upgrade-recovery-logs-26-dirty test-upgrade-recovery-logs ${TOKUDB_DATA}/upgrade-recovery-logs-26-dirty) + add_ft_test_aux(test-upgrade-recovery-logs-27-clean test-upgrade-recovery-logs ${TOKUDB_DATA}/upgrade-recovery-logs-27-clean) + add_ft_test_aux(test-upgrade-recovery-logs-27-dirty test-upgrade-recovery-logs ${TOKUDB_DATA}/upgrade-recovery-logs-27-dirty) + ## give some tests, that time out normally, 1 hour to complete set(long_tests ft/ftloader-test-extractor-3a diff --git a/storage/tokudb/ft-index/ft/tests/benchmark-test.cc b/storage/tokudb/ft-index/ft/tests/benchmark-test.cc index 0f7a0d4f84b..0acb97daa87 100644 --- a/storage/tokudb/ft-index/ft/tests/benchmark-test.cc +++ b/storage/tokudb/ft-index/ft/tests/benchmark-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -119,8 +119,8 @@ static FT_HANDLE t; static void setup (void) { int r; unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); - r = toku_open_ft_handle(fname, 1, &t, nodesize, basementnodesize, compression_method, ct, NULL_TXN, toku_builtin_compare_fun); assert(r==0); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); + r = toku_open_ft_handle(fname, 1, &t, nodesize, basementnodesize, compression_method, ct, nullptr, toku_builtin_compare_fun); assert(r==0); } static void toku_shutdown (void) { diff --git a/storage/tokudb/ft-index/ft/tests/block_allocator_strategy_test.cc b/storage/tokudb/ft-index/ft/tests/block_allocator_strategy_test.cc new file mode 100644 index 00000000000..6879002a025 --- /dev/null +++ b/storage/tokudb/ft-index/ft/tests/block_allocator_strategy_test.cc @@ -0,0 +1,176 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. 
+ + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#include "ft/tests/test.h" + +#include "ft/serialize/block_allocator_strategy.h" + +static const uint64_t alignment = 4096; + +static void test_first_vs_best_fit(void) { + struct block_allocator::blockpair pairs[] = { + block_allocator::blockpair(1 * alignment, 6 * alignment), + // hole between 7x align -> 8x align + block_allocator::blockpair(8 * alignment, 4 * alignment), + // hole between 12x align -> 16x align + block_allocator::blockpair(16 * alignment, 1 * alignment), + block_allocator::blockpair(17 * alignment, 2 * alignment), + // hole between 19 align -> 21x align + block_allocator::blockpair(21 * alignment, 2 * alignment), + }; + const uint64_t n_blocks = sizeof(pairs) / sizeof(pairs[0]); + + block_allocator::blockpair *bp; + + // first fit + bp = block_allocator_strategy::first_fit(pairs, n_blocks, 100, alignment); + assert(bp == &pairs[0]); + bp = block_allocator_strategy::first_fit(pairs, n_blocks, 4096, alignment); + assert(bp == &pairs[0]); + bp = block_allocator_strategy::first_fit(pairs, n_blocks, 3 * 4096, alignment); + assert(bp == &pairs[1]); + bp = block_allocator_strategy::first_fit(pairs, n_blocks, 5 * 4096, alignment); + assert(bp == nullptr); + + // best fit + bp = block_allocator_strategy::best_fit(pairs, n_blocks, 100, alignment); + assert(bp == &pairs[0]); + bp = block_allocator_strategy::best_fit(pairs, n_blocks, 4100, alignment); + assert(bp == &pairs[3]); + bp = block_allocator_strategy::best_fit(pairs, n_blocks, 3 * 4096, alignment); + assert(bp == &pairs[1]); + bp = block_allocator_strategy::best_fit(pairs, n_blocks, 5 * 4096, alignment); + assert(bp == nullptr); +} + +static void test_padded_fit(void) { + struct block_allocator::blockpair pairs[] = { + block_allocator::blockpair(1 * alignment, 1 * alignment), + // 4096 byte hole after bp[0] + block_allocator::blockpair(3 * alignment, 1 * alignment), + // 8192 byte hole after bp[1] + block_allocator::blockpair(6 * alignment, 1 * alignment), + // 16384 byte hole after bp[2] + block_allocator::blockpair(11 * alignment, 1 * alignment), + // 32768 byte hole after bp[3] + block_allocator::blockpair(17 * alignment, 1 * alignment), + // 116kb hole after bp[4] + block_allocator::blockpair(113 * alignment, 1 * alignment), + // 256kb hole after bp[5] + block_allocator::blockpair(371 * alignment, 1 * alignment), + }; + const uint64_t n_blocks = sizeof(pairs) / sizeof(pairs[0]); + + 
block_allocator::blockpair *bp; + + // padding for a 100 byte allocation will be < than standard alignment, + // so it should fit in the first 4096 byte hole. + bp = block_allocator_strategy::padded_fit(pairs, n_blocks, 4000, alignment); + assert(bp == &pairs[0]); + + // Even padded, a 12kb alloc will fit in a 16kb hole + bp = block_allocator_strategy::padded_fit(pairs, n_blocks, 3 * alignment, alignment); + assert(bp == &pairs[2]); + + // would normally fit in the 116kb hole but the padding will bring it over + bp = block_allocator_strategy::padded_fit(pairs, n_blocks, 116 * alignment, alignment); + assert(bp == &pairs[5]); + + bp = block_allocator_strategy::padded_fit(pairs, n_blocks, 127 * alignment, alignment); + assert(bp == &pairs[5]); +} + +int test_main(int argc, const char *argv[]) { + (void) argc; + (void) argv; + + test_first_vs_best_fit(); + test_padded_fit(); + + return 0; +} diff --git a/storage/tokudb/ft-index/ft/tests/block_allocator_test.cc b/storage/tokudb/ft-index/ft/tests/block_allocator_test.cc index ef6f1fcdc97..bbd170ebaab 100644 --- a/storage/tokudb/ft-index/ft/tests/block_allocator_test.cc +++ b/storage/tokudb/ft-index/ft/tests/block_allocator_test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,75 +88,48 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." - #include "test.h" -static void ba_alloc_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t offset) { - block_allocator_validate(ba); - block_allocator_alloc_block_at(ba, size*512, offset*512); - block_allocator_validate(ba); -} - -static void ba_alloc (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t *answer) { - block_allocator_validate(ba); +static void ba_alloc(block_allocator *ba, uint64_t size, uint64_t *answer) { + ba->validate(); uint64_t actual_answer; - block_allocator_alloc_block(ba, 512*size, &actual_answer); - block_allocator_validate(ba); + const uint64_t heat = random() % 2; + ba->alloc_block(512 * size, heat, &actual_answer); + ba->validate(); + assert(actual_answer%512==0); *answer = actual_answer/512; } -static void ba_free (BLOCK_ALLOCATOR ba, uint64_t offset) { - block_allocator_validate(ba); - block_allocator_free_block(ba, offset*512); - block_allocator_validate(ba); +static void ba_free(block_allocator *ba, uint64_t offset) { + ba->validate(); + ba->free_block(offset * 512); + ba->validate(); } -static void -ba_check_l (BLOCK_ALLOCATOR ba, uint64_t blocknum_in_layout_order, uint64_t expected_offset, uint64_t expected_size) -{ +static void ba_check_l(block_allocator *ba, uint64_t blocknum_in_layout_order, + uint64_t expected_offset, uint64_t expected_size) { uint64_t actual_offset, actual_size; - int r = block_allocator_get_nth_block_in_layout_order(ba, blocknum_in_layout_order, &actual_offset, &actual_size); + int r = ba->get_nth_block_in_layout_order(blocknum_in_layout_order, &actual_offset, &actual_size); assert(r==0); assert(expected_offset*512 == actual_offset); assert(expected_size *512 == actual_size); } -static void -ba_check_none (BLOCK_ALLOCATOR ba, uint64_t blocknum_in_layout_order) -{ +static void ba_check_none(block_allocator *ba, uint64_t blocknum_in_layout_order) { uint64_t actual_offset, actual_size; - int r = block_allocator_get_nth_block_in_layout_order(ba, blocknum_in_layout_order, &actual_offset, &actual_size); + int r = 
ba->get_nth_block_in_layout_order(blocknum_in_layout_order, &actual_offset, &actual_size); assert(r==-1); } // Simple block allocator test -static void -test_ba0 (void) { - BLOCK_ALLOCATOR ba; - uint64_t b0, b1; - create_block_allocator(&ba, 100*512, 1*512); - assert(block_allocator_allocated_limit(ba)==100*512); - ba_alloc_at(ba, 50, 100); - assert(block_allocator_allocated_limit(ba)==150*512); - ba_alloc_at(ba, 25, 150); - ba_alloc (ba, 10, &b0); - ba_check_l (ba, 0, 0, 100); - ba_check_l (ba, 1, 100, 50); - ba_check_l (ba, 2, 150, 25); - ba_check_l (ba, 3, b0, 10); - ba_check_none (ba, 4); - assert(b0==175); - ba_free(ba, 150); - ba_alloc_at(ba, 10, 150); - ba_alloc(ba, 10, &b0); - assert(b0==160); - ba_alloc(ba, 10, &b0); - ba_alloc(ba, 113, &b1); - assert(113*512==block_allocator_block_size(ba, b1 *512)); - assert(10 *512==block_allocator_block_size(ba, b0 *512)); - assert(50 *512==block_allocator_block_size(ba, 100*512)); +static void test_ba0(block_allocator::allocation_strategy strategy) { + block_allocator allocator; + block_allocator *ba = &allocator; + ba->create(100*512, 1*512); + ba->set_strategy(strategy); + assert(ba->allocated_limit()==100*512); uint64_t b2, b3, b4, b5, b6, b7; ba_alloc(ba, 100, &b2); @@ -183,27 +156,28 @@ test_ba0 (void) { ba_free(ba, b4); ba_alloc(ba, 100, &b4); - destroy_block_allocator(&ba); - assert(ba==0); + ba->destroy(); } // Manually to get coverage of all the code in the block allocator. static void -test_ba1 (int n_initial) { - BLOCK_ALLOCATOR ba; - create_block_allocator(&ba, 0*512, 1*512); - int i; +test_ba1(block_allocator::allocation_strategy strategy, int n_initial) { + block_allocator allocator; + block_allocator *ba = &allocator; + ba->create(0*512, 1*512); + ba->set_strategy(strategy); + int n_blocks=0; uint64_t blocks[1000]; - for (i=0; i<1000; i++) { - if (i<n_initial || random()%2 == 0) { - if (n_blocks<1000) { + for (int i = 0; i < 1000; i++) { + if (i < n_initial || random() % 2 == 0) { + if (n_blocks < 1000) { ba_alloc(ba, 1, &blocks[n_blocks]); //printf("A[%d]=%ld\n", n_blocks, blocks[n_blocks]); n_blocks++; } } else { - if (n_blocks>0) { + if (n_blocks > 0) { int blocknum = random()%n_blocks; //printf("F[%d]%ld\n", blocknum, blocks[blocknum]); ba_free(ba, blocks[blocknum]); @@ -213,19 +187,21 @@ test_ba1 (int n_initial) { } } - destroy_block_allocator(&ba); - assert(ba==0); + ba->destroy(); } // Check to see if it is first fit or best fit. 
static void test_ba2 (void) { - BLOCK_ALLOCATOR ba; + block_allocator allocator; + block_allocator *ba = &allocator; uint64_t b[6]; enum { BSIZE = 1024 }; - create_block_allocator(&ba, 100*512, BSIZE*512); - assert(block_allocator_allocated_limit(ba)==100*512); + ba->create(100*512, BSIZE*512); + ba->set_strategy(block_allocator::BA_STRATEGY_FIRST_FIT); + assert(ba->allocated_limit()==100*512); + ba_check_l (ba, 0, 0, 100); ba_check_none (ba, 1); @@ -234,16 +210,16 @@ test_ba2 (void) ba_check_l (ba, 1, BSIZE, 100); ba_check_none (ba, 2); - ba_alloc (ba, BSIZE+100, &b[1]); + ba_alloc (ba, BSIZE + 100, &b[1]); ba_check_l (ba, 0, 0, 100); ba_check_l (ba, 1, BSIZE, 100); - ba_check_l (ba, 2, 2*BSIZE, BSIZE+100); + ba_check_l (ba, 2, 2*BSIZE, BSIZE + 100); ba_check_none (ba, 3); ba_alloc (ba, 100, &b[2]); ba_check_l (ba, 0, 0, 100); ba_check_l (ba, 1, BSIZE, 100); - ba_check_l (ba, 2, 2*BSIZE, BSIZE+100); + ba_check_l (ba, 2, 2*BSIZE, BSIZE + 100); ba_check_l (ba, 3, 4*BSIZE, 100); ba_check_none (ba, 4); @@ -252,7 +228,7 @@ test_ba2 (void) ba_alloc (ba, 100, &b[5]); ba_check_l (ba, 0, 0, 100); ba_check_l (ba, 1, BSIZE, 100); - ba_check_l (ba, 2, 2*BSIZE, BSIZE+100); + ba_check_l (ba, 2, 2*BSIZE, BSIZE + 100); ba_check_l (ba, 3, 4*BSIZE, 100); ba_check_l (ba, 4, 5*BSIZE, 100); ba_check_l (ba, 5, 6*BSIZE, 100); @@ -262,7 +238,7 @@ test_ba2 (void) ba_free (ba, 4*BSIZE); ba_check_l (ba, 0, 0, 100); ba_check_l (ba, 1, BSIZE, 100); - ba_check_l (ba, 2, 2*BSIZE, BSIZE+100); + ba_check_l (ba, 2, 2*BSIZE, BSIZE + 100); ba_check_l (ba, 3, 5*BSIZE, 100); ba_check_l (ba, 4, 6*BSIZE, 100); ba_check_l (ba, 5, 7*BSIZE, 100); @@ -273,7 +249,7 @@ test_ba2 (void) assert(b2==4*BSIZE); ba_check_l (ba, 0, 0, 100); ba_check_l (ba, 1, BSIZE, 100); - ba_check_l (ba, 2, 2*BSIZE, BSIZE+100); + ba_check_l (ba, 2, 2*BSIZE, BSIZE + 100); ba_check_l (ba, 3, 4*BSIZE, 100); ba_check_l (ba, 4, 5*BSIZE, 100); ba_check_l (ba, 5, 6*BSIZE, 100); @@ -283,7 +259,7 @@ test_ba2 (void) ba_free (ba, BSIZE); ba_free (ba, 5*BSIZE); ba_check_l (ba, 0, 0, 100); - ba_check_l (ba, 1, 2*BSIZE, BSIZE+100); + ba_check_l (ba, 1, 2*BSIZE, BSIZE + 100); ba_check_l (ba, 2, 4*BSIZE, 100); ba_check_l (ba, 3, 6*BSIZE, 100); ba_check_l (ba, 4, 7*BSIZE, 100); @@ -301,7 +277,7 @@ test_ba2 (void) assert(b5==5*BSIZE); ba_check_l (ba, 0, 0, 100); ba_check_l (ba, 1, BSIZE, 100); - ba_check_l (ba, 2, 2*BSIZE, BSIZE+100); + ba_check_l (ba, 2, 2*BSIZE, BSIZE + 100); ba_check_l (ba, 3, 4*BSIZE, 100); ba_check_l (ba, 4, 5*BSIZE, 100); ba_check_l (ba, 5, 6*BSIZE, 100); @@ -318,7 +294,7 @@ test_ba2 (void) assert(b8==10*BSIZE); ba_check_l (ba, 0, 0, 100); ba_check_l (ba, 1, BSIZE, 100); - ba_check_l (ba, 2, 2*BSIZE, BSIZE+100); + ba_check_l (ba, 2, 2*BSIZE, BSIZE + 100); ba_check_l (ba, 3, 4*BSIZE, 100); ba_check_l (ba, 4, 5*BSIZE, 100); ba_check_l (ba, 5, 6*BSIZE, 100); @@ -344,15 +320,23 @@ test_ba2 (void) ba_alloc(ba, 100, &b11); assert(b11==5*BSIZE); - destroy_block_allocator(&ba); + ba->destroy(); } int test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) { - test_ba0(); - test_ba1(0); - test_ba1(10); - test_ba1(20); + enum block_allocator::allocation_strategy strategies[] = { + block_allocator::BA_STRATEGY_FIRST_FIT, + block_allocator::BA_STRATEGY_BEST_FIT, + block_allocator::BA_STRATEGY_PADDED_FIT, + block_allocator::BA_STRATEGY_HEAT_ZONE, + }; + for (size_t i = 0; i < sizeof(strategies) / sizeof(strategies[0]); i++) { + test_ba0(strategies[i]); + test_ba1(strategies[i], 0); + test_ba1(strategies[i], 10); + 
test_ba1(strategies[i], 20); + } test_ba2(); return 0; } diff --git a/storage/tokudb/ft-index/ft/tests/bnc-insert-benchmark.cc b/storage/tokudb/ft-index/ft/tests/bnc-insert-benchmark.cc index 253a216e675..bd9f28c858f 100644 --- a/storage/tokudb/ft-index/ft/tests/bnc-insert-benchmark.cc +++ b/storage/tokudb/ft-index/ft/tests/bnc-insert-benchmark.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -127,9 +127,9 @@ run_test(unsigned long eltsize, unsigned long nodesize, unsigned long repeat) *p = (rand() & 0xff); } } - XIDS xids_0 = xids_get_root_xids(); + XIDS xids_0 = toku_xids_get_root_xids(); XIDS xids_123; - int r = xids_create_child(xids_0, &xids_123, (TXNID)123); + int r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123); CKERR(r); NONLEAF_CHILDINFO bnc; @@ -137,6 +137,9 @@ run_test(unsigned long eltsize, unsigned long nodesize, unsigned long repeat) struct timeval t[2]; gettimeofday(&t[0], NULL); + toku::comparator cmp; + cmp.create(long_key_cmp, nullptr); + for (unsigned int i = 0; i < repeat; ++i) { bnc = toku_create_empty_nl(); for (; toku_bnc_nbytesinbuf(bnc) <= nodesize; ++cur) { @@ -144,7 +147,7 @@ run_test(unsigned long eltsize, unsigned long nodesize, unsigned long repeat) &keys[cur % 1024], sizeof keys[cur % 1024], vals[cur % 1024], eltsize - (sizeof keys[cur % 1024]), FT_NONE, next_dummymsn(), xids_123, true, - NULL, long_key_cmp); assert_zero(r); + cmp); assert_zero(r); } nbytesinserted += toku_bnc_nbytesinbuf(bnc); destroy_nonleaf_childinfo(bnc); @@ -157,6 +160,8 @@ run_test(unsigned long eltsize, unsigned long nodesize, unsigned long repeat) long long unsigned eltrate = (long) (cur / dt); printf("%0.03lf MB/sec\n", mbrate); printf("%llu elts/sec\n", eltrate); + + cmp.destroy(); } int diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-4357.cc b/storage/tokudb/ft-index/ft/tests/cachetable-4357.cc index de75f6813d2..0704914cc5b 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-4357.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-4357.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -115,7 +115,7 @@ cachetable_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-4365.cc b/storage/tokudb/ft-index/ft/tests/cachetable-4365.cc index ecaeea2d631..1c5a55bf120 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-4365.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-4365.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -136,7 +136,7 @@ cachetable_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-5097.cc b/storage/tokudb/ft-index/ft/tests/cachetable-5097.cc index 7c958dd3049..5cef1f3c6d5 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-5097.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-5097.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -169,7 +169,7 @@ cachetable_test (void) { check_flush = false; dirty_flush_called = false; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); evictor_test_helpers::disable_ev_thread(&ct->ev); // disable eviction thread toku_os_recursive_delete(TOKU_TEST_FILENAME); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-5978-2.cc b/storage/tokudb/ft-index/ft/tests/cachetable-5978-2.cc index be7c4fb2363..427bc2c4a42 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-5978-2.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-5978-2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -210,7 +210,7 @@ cachetable_test (void) { int r; toku_pair_list_set_lock_size(2); // set two bucket mutexes CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-5978.cc b/storage/tokudb/ft-index/ft/tests/cachetable-5978.cc index c72d67909e1..11613e5a204 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-5978.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-5978.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -227,7 +227,7 @@ cachetable_test (void) { int r; toku_pair_list_set_lock_size(2); // set two bucket mutexes CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-all-write.cc b/storage/tokudb/ft-index/ft/tests/cachetable-all-write.cc index 3af800e7edb..b0ebd9ed5e9 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-all-write.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-all-write.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -116,7 +116,7 @@ cachetable_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-pending.cc b/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-pending.cc index 615a544a7f7..53570ec1f0a 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-pending.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-pending.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -93,7 +93,7 @@ PATENT RIGHTS GRANT: #include <stdio.h> #include <unistd.h> #include "cachetable-test.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" #include <portability/toku_atomic.h> static int N; // how many items in the table @@ -187,7 +187,7 @@ static void checkpoint_pending(void) { if (verbose) { printf("%s:%d n=%d\n", __FUNCTION__, __LINE__, N); fflush(stdout); } const int test_limit = N; int r; - toku_cachetable_create(&ct, test_limit*sizeof(int), ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit*sizeof(int), ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; r = unlink(fname1); if (r!=0) CKERR2(get_error_errno(), ENOENT); r = toku_cachetable_openf(&cf, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-pinned-nodes.cc b/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-pinned-nodes.cc index cf0d4e28afd..bacf48d01b1 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-pinned-nodes.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-pinned-nodes.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -152,7 +152,7 @@ cachetable_test (void) { const int test_limit = 20; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-prefetched-nodes.cc b/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-prefetched-nodes.cc index fded78d5ba0..510b2fb458c 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-prefetched-nodes.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-prefetched-nodes.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -154,7 +154,7 @@ cachetable_test (void) { const int test_limit = 20; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-test.cc index e86e7de4bb0..f1ea464d952 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -93,7 +93,7 @@ PATENT RIGHTS GRANT: #include <unistd.h> -#include "checkpoint.h" +#include "cachetable/checkpoint.h" static const int item_size = 1; @@ -145,7 +145,7 @@ static void cachetable_checkpoint_test(int n, enum cachetable_dirty dirty) { const int test_limit = n; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-checkpointer-class.cc b/storage/tokudb/ft-index/ft/tests/cachetable-checkpointer-class.cc index c2adc202fb5..6b138cd0bca 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-checkpointer-class.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-checkpointer-class.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,7 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include "test.h" -#include "cachetable-internal.h" +#include "cachetable/cachetable-internal.h" #include "cachetable-test.h" // @@ -112,6 +112,14 @@ struct checkpointer_test { uint32_t k); }; +static void init_cachefile(CACHEFILE cf, int which_cf, bool for_checkpoint) { + memset(cf, 0, sizeof(*cf)); + create_dummy_functions(cf); + cf->fileid = { 0, (unsigned) which_cf }; + cf->filenum = { (unsigned) which_cf }; + cf->for_checkpoint = for_checkpoint; +} + //------------------------------------------------------------------------------ // test_begin_checkpoint() - // @@ -135,33 +143,28 @@ void checkpointer_test::test_begin_checkpoint() { // 2. Call checkpoint with ONE cachefile. //cachefile cf; struct cachefile cf; - cf.next = NULL; - cf.for_checkpoint = false; - m_cp.m_cf_list->m_active_head = &cf; - create_dummy_functions(&cf); + init_cachefile(&cf, 0, false); + m_cp.m_cf_list->add_cf_unlocked(&cf); m_cp.begin_checkpoint(); assert(m_cp.m_checkpoint_num_files == 1); assert(cf.for_checkpoint == true); + m_cp.m_cf_list->remove_cf(&cf); // 3. Call checkpoint with MANY cachefiles. 
const uint32_t count = 3; struct cachefile cfs[count]; - m_cp.m_cf_list->m_active_head = &cfs[0]; for (uint32_t i = 0; i < count; ++i) { - cfs[i].for_checkpoint = false; + init_cachefile(&cfs[i], i, false); create_dummy_functions(&cfs[i]); - if (i == count - 1) { - cfs[i].next = NULL; - } else { - cfs[i].next = &cfs[i + 1]; - } + m_cp.m_cf_list->add_cf_unlocked(&cfs[i]); } m_cp.begin_checkpoint(); assert(m_cp.m_checkpoint_num_files == count); for (uint32_t i = 0; i < count; ++i) { assert(cfs[i].for_checkpoint == true); + cfl.remove_cf(&cfs[i]); } ctbl.list.destroy(); m_cp.destroy(); @@ -195,10 +198,8 @@ void checkpointer_test::test_pending_bits() { // struct cachefile cf; cf.cachetable = &ctbl; - memset(&cf, 0, sizeof(cf)); - cf.next = NULL; - cf.for_checkpoint = true; - m_cp.m_cf_list->m_active_head = &cf; + init_cachefile(&cf, 0, true); + m_cp.m_cf_list->add_cf_unlocked(&cf); create_dummy_functions(&cf); CACHEKEY k; @@ -258,6 +259,7 @@ void checkpointer_test::test_pending_bits() { ctbl.list.destroy(); m_cp.destroy(); + cfl.remove_cf(&cf); cfl.destroy(); } @@ -337,14 +339,11 @@ void checkpointer_test::test_end_checkpoint() { cfl.init(); struct cachefile cf; - memset(&cf, 0, sizeof(cf)); - cf.next = NULL; - cf.for_checkpoint = true; - create_dummy_functions(&cf); + init_cachefile(&cf, 0, true); ZERO_STRUCT(m_cp); m_cp.init(&ctbl.list, NULL, &ctbl.ev, &cfl); - m_cp.m_cf_list->m_active_head = &cf; + m_cp.m_cf_list->add_cf_unlocked(&cf); // 2. Add data before running checkpoint. const uint32_t count = 6; @@ -394,6 +393,7 @@ void checkpointer_test::test_end_checkpoint() { assert(pp); m_cp.m_list->evict_completely(pp); } + cfl.remove_cf(&cf); m_cp.destroy(); ctbl.list.destroy(); cfl.destroy(); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-checkpoint.cc b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-checkpoint.cc index 7e40d3c861f..0b726f67306 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-checkpoint.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -147,7 +147,7 @@ cachetable_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-checkpoint2.cc b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-checkpoint2.cc index 4c9eacd004c..b360d21c177 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-checkpoint2.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-checkpoint2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -147,7 +147,7 @@ cachetable_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc index 1318f342f2b..1b7f4825e17 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -143,7 +143,7 @@ run_test (void) { int r; CACHETABLE ct; toku_mutex_init(&attr_mutex, NULL); - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc index 3f771b58075..8c7de0ae914 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -99,7 +99,7 @@ cachetable_test (void) { const int test_limit = 1000; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); toku_set_cleaner_period(ct, 1); const char *fname1 = TOKU_TEST_FILENAME; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-everything-pinned.cc b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-everything-pinned.cc index 0a809339b8e..8e5a3ea40fa 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-everything-pinned.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-everything-pinned.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -111,7 +111,7 @@ run_test (void) { const int test_limit = 1000; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); toku_set_cleaner_period(ct, 1); const char *fname1 = TOKU_TEST_FILENAME; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc index 33a603baec9..06107b7cefd 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -111,7 +111,7 @@ run_test (void) { const int test_limit = 1000; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); toku_set_cleaner_period(ct, 1); const char *fname1 = TOKU_TEST_FILENAME; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-same-fullhash.cc b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-same-fullhash.cc index 485224302b0..de1cb8b612a 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-same-fullhash.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-same-fullhash.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -119,7 +119,7 @@ run_test (void) { const int test_limit = 1000; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); my_cleaner_callback_called = false; const char *fname1 = TOKU_TEST_FILENAME; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-simple.cc b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-simple.cc index 5d4fed42e50..a50495774f3 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-simple.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-simple.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -119,7 +119,7 @@ run_test (void) { const int test_limit = 1000; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); toku_set_cleaner_period(ct, 1); my_cleaner_callback_called = false; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-clock-all-pinned.cc b/storage/tokudb/ft-index/ft/tests/cachetable-clock-all-pinned.cc index af08020e4aa..9eac1304fe2 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-clock-all-pinned.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-clock-all-pinned.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -96,7 +96,7 @@ cachetable_test (void) { int test_limit = 6; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction.cc b/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction.cc index f024a79e51d..ac18ce8ac32 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -143,7 +143,7 @@ cachetable_test (void) { num_entries = 0; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction2.cc b/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction2.cc index 23926241b97..13b941ab054 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction2.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -186,7 +186,7 @@ cachetable_test (void) { const int test_limit = 16; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction3.cc b/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction3.cc index 735bde724d0..9f148af1d43 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction3.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -202,7 +202,7 @@ cachetable_test (void) { const int test_limit = 20; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); evictor_test_helpers::set_hysteresis_limits(&ct->ev, test_limit, 100*test_limit); evictor_test_helpers::disable_ev_thread(&ct->ev); const char *fname1 = TOKU_TEST_FILENAME; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction4.cc b/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction4.cc index 9dc1f1a5218..e89319c90a9 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction4.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction4.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -181,7 +181,7 @@ cachetable_test (void) { num_entries = 0; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-clone-checkpoint.cc b/storage/tokudb/ft-index/ft/tests/cachetable-clone-checkpoint.cc index f7904ffd73d..1fc36e06927 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-clone-checkpoint.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-clone-checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -145,7 +145,7 @@ cachetable_test (void) { const int test_limit = 200; int r; ct = NULL; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc b/storage/tokudb/ft-index/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc index 4c5e1133555..d22478b8e38 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -139,7 +139,7 @@ cachetable_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-clone-partial-fetch.cc b/storage/tokudb/ft-index/ft/tests/cachetable-clone-partial-fetch.cc index fed76332a45..92859cfac68 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-clone-partial-fetch.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-clone-partial-fetch.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -144,7 +144,7 @@ cachetable_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-clone-pin-nonblocking.cc b/storage/tokudb/ft-index/ft/tests/cachetable-clone-pin-nonblocking.cc index a56dc034202..d7cdbcc0854 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-clone-pin-nonblocking.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-clone-pin-nonblocking.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -126,7 +126,7 @@ cachetable_test (enum cachetable_dirty dirty, bool cloneable) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-clone-unpin-remove.cc b/storage/tokudb/ft-index/ft/tests/cachetable-clone-unpin-remove.cc index 1aeff2ee28e..f6cf0ec34c7 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-clone-unpin-remove.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-clone-unpin-remove.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -137,7 +137,7 @@ cachetable_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-count-pinned-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-count-pinned-test.cc index d4437278054..4dba635d22f 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-count-pinned-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-count-pinned-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -97,7 +97,7 @@ cachetable_count_pinned_test (int n) { const int test_limit = 2*n; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-debug-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-debug-test.cc index dde4a0c69b1..fda1d0ae563 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-debug-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-debug-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -96,7 +96,7 @@ cachetable_debug_test (int n) { const int test_limit = n; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-eviction-close-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-eviction-close-test.cc index 18a65729501..da2ff48f2eb 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-eviction-close-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-eviction-close-test.cc @@ -31,7 +31,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -155,7 +155,7 @@ static void cachetable_eviction_full_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-eviction-close-test2.cc b/storage/tokudb/ft-index/ft/tests/cachetable-eviction-close-test2.cc index c8004ca1cb1..d6ba0f3b136 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-eviction-close-test2.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-eviction-close-test2.cc @@ -31,7 +31,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -168,7 +168,7 @@ static void cachetable_eviction_full_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-eviction-getandpin-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-eviction-getandpin-test.cc index a1887fe6c94..51540db5739 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-eviction-getandpin-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-eviction-getandpin-test.cc @@ -31,7 +31,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -123,7 +123,7 @@ static void cachetable_predef_fetch_maybegetandpin_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); evictor_test_helpers::disable_ev_thread(&ct->ev); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-eviction-getandpin-test2.cc b/storage/tokudb/ft-index/ft/tests/cachetable-eviction-getandpin-test2.cc index d65048f797a..45c10bcc552 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-eviction-getandpin-test2.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-eviction-getandpin-test2.cc @@ -31,7 +31,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -130,7 +130,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); evictor_test_helpers::disable_ev_thread(&ct->ev); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-evictor-class.cc b/storage/tokudb/ft-index/ft/tests/cachetable-evictor-class.cc index d0dff7d9570..12e463d61d8 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-evictor-class.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-evictor-class.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,7 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include "test.h" -#include "cachetable-internal.h" +#include "cachetable/cachetable-internal.h" class evictor_unit_test { public: diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-fd-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-fd-test.cc index 16b757bebdf..8ff6ee94fbe 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-fd-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-fd-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
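Note: besides the TokuDB-to-TokuFT rename in the copyright headers, several tests update their includes to the reorganized source tree, where cachetable, serialize and loader headers now live in subdirectories. The four path changes that occur in this and later hunks of the diff:

    #include "cachetable/cachetable-internal.h"   // was "cachetable-internal.h"
    #include "cachetable/checkpoint.h"            // was "checkpoint.h"
    #include "serialize/compress.h"               // was "compress.h"
    #include "loader/dbufio.h"                    // was "dbufio.h"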
DISCLAIMER: @@ -98,7 +98,7 @@ cachetable_fd_test (void) { const int test_limit = 1; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); toku_os_recursive_delete(TOKU_TEST_FILENAME); r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert_zero(r); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-fetch-inducing-evictor.cc b/storage/tokudb/ft-index/ft/tests/cachetable-fetch-inducing-evictor.cc index ac3191b1a33..089c34498b9 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-fetch-inducing-evictor.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-fetch-inducing-evictor.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -113,7 +113,7 @@ cachetable_test (enum pin_evictor_test_type test_type, bool nonblocking) { const int test_limit = 7; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); evictor_test_helpers::set_hysteresis_limits(&ct->ev, test_limit, test_limit); evictor_test_helpers::disable_ev_thread(&ct->ev); const char *fname1 = TOKU_TEST_FILENAME; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-flush-during-cleaner.cc b/storage/tokudb/ft-index/ft/tests/cachetable-flush-during-cleaner.cc index d4c8c85cfba..237671fe28f 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-flush-during-cleaner.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-flush-during-cleaner.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -117,7 +117,7 @@ cachetable_test (void) { const int test_limit = 400; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); toku_set_cleaner_period(ct, 1); const char *fname1 = TOKU_TEST_FILENAME; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-flush-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-flush-test.cc index c4c2da0577a..2297364891b 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-flush-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-flush-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -97,7 +97,7 @@ test_cachetable_def_flush (int n) { const int test_limit = 2*n; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); toku_os_recursive_delete(TOKU_TEST_FILENAME); r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert_zero(r); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-getandpin-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-getandpin-test.cc index 6165de34eb0..b3e4dfa1d9a 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-getandpin-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-getandpin-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -134,7 +134,7 @@ cachetable_getandpin_test (int n) { const int test_limit = 1024*1024; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc b/storage/tokudb/ft-index/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc index f44414cb667..b6f2a189e26 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -114,7 +114,7 @@ run_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-partial-fetch.cc b/storage/tokudb/ft-index/ft/tests/cachetable-partial-fetch.cc index 27f5800d06f..043b35ab503 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-partial-fetch.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-partial-fetch.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -166,7 +166,7 @@ cachetable_test (void) { int r; CACHETABLE ct; bool doing_prefetch = false; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; @@ -215,7 +215,7 @@ cachetable_test (void) { // close and reopen cachefile so we can do some simple prefetch tests toku_cachefile_close(&f1, false, ZERO_LSN); toku_cachetable_close(&ct); - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); // // verify that a prefetch of the node will succeed diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-pin-checkpoint.cc b/storage/tokudb/ft-index/ft/tests/cachetable-pin-checkpoint.cc index e5022afee88..6916e974c3b 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-pin-checkpoint.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-pin-checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -413,7 +413,7 @@ cachetable_test (void) { int r; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc b/storage/tokudb/ft-index/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc index ba4bebab323..ca9db5e6521 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -100,7 +100,7 @@ run_test (void) { const int test_limit = 20; int r; ct = NULL; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); f1 = NULL; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-checkpoint-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-checkpoint-test.cc index 2122f61afa8..65465339f0e 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-checkpoint-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-checkpoint-test.cc @@ -32,7 +32,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,7 @@ PATENT RIGHTS GRANT: #include <unistd.h> #include "cachetable-test.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" const int item_size = 1; @@ -153,7 +153,7 @@ static void cachetable_prefetch_checkpoint_test(int n, enum cachetable_dirty dir CACHETABLE ct; CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); wc.flush_callback = flush; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-close-leak-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-close-leak-test.cc index 3153c6f3a3c..e817c8aa65e 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-close-leak-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-close-leak-test.cc @@ -31,7 +31,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -139,7 +139,7 @@ static void cachetable_prefetch_close_leak_test (void) { const int test_limit = 1; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-close-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-close-test.cc index d013db1ab73..e8d08c86aa1 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-close-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-close-test.cc @@ -31,7 +31,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -141,7 +141,7 @@ static void cachetable_prefetch_full_test (bool partial_fetch) { expect_pf = false; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-flowcontrol-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-flowcontrol-test.cc index 6159e8eb67f..8736b6a4065 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-flowcontrol-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-flowcontrol-test.cc @@ -32,7 +32,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -93,7 +93,7 @@ PATENT RIGHTS GRANT: #include "test.h" -#include "cachetable-internal.h" +#include "cachetable/cachetable-internal.h" static int flush_calls = 0; static int flush_evict_calls = 0; @@ -152,7 +152,7 @@ fetch (CACHEFILE f __attribute__((__unused__)), static void cachetable_prefetch_flowcontrol_test (int cachetable_size_limit) { int r; CACHETABLE ct; - toku_cachetable_create(&ct, cachetable_size_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, cachetable_size_limit, ZERO_LSN, nullptr); evictor_test_helpers::set_hysteresis_limits(&ct->ev, cachetable_size_limit, cachetable_size_limit); evictor_test_helpers::disable_ev_thread(&ct->ev); const char *fname1 = TOKU_TEST_FILENAME; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-getandpin-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-getandpin-test.cc index 9aba0fdbafa..4ba6dff51a9 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-getandpin-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-getandpin-test.cc @@ -31,7 +31,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -162,7 +162,7 @@ static void cachetable_prefetch_maybegetandpin_test (bool do_partial_fetch) { const int test_limit = 2; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-maybegetandpin-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-maybegetandpin-test.cc index 14c12bbb817..0540ab5429b 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-maybegetandpin-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch-maybegetandpin-test.cc @@ -31,7 +31,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -119,7 +119,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) { const int test_limit = 1; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch2-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch2-test.cc index 6c81ce49188..f7d348eaa3b 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-prefetch2-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-prefetch2-test.cc @@ -32,7 +32,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -122,7 +122,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) { const int test_limit = 1; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-put-checkpoint.cc b/storage/tokudb/ft-index/ft/tests/cachetable-put-checkpoint.cc index 8691e2b93b0..0b316aa528f 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-put-checkpoint.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-put-checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -545,7 +545,7 @@ cachetable_test (void) { int r; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-put-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-put-test.cc index 0280681903e..07765bd666e 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-put-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-put-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -96,7 +96,7 @@ cachetable_put_test (int n) { const int test_limit = 2*n; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-rwlock-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-rwlock-test.cc index 87014dc406e..7e5fb4a00bb 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-rwlock-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-rwlock-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-clone.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-clone.cc index fe96b440248..3944182084c 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-clone.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-clone.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -150,7 +150,7 @@ test_clean (enum cachetable_dirty dirty, bool cloneable) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-clone2.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-clone2.cc index 7dcd2a2bb7c..177905e6721 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-clone2.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-clone2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -137,7 +137,7 @@ test_clean (enum cachetable_dirty dirty, bool cloneable) { const int test_limit = 200; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-close.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-close.cc index 03c66162aab..7a7518b78f5 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-close.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-close.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -145,7 +145,7 @@ simple_test(bool unlink_on_close) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; @@ -214,7 +214,7 @@ static void test_pair_stays_in_cache(enum cachetable_dirty dirty) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; @@ -245,7 +245,7 @@ static void test_multiple_cachefiles(bool use_same_hash) { const int test_limit = 1000; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); char fname1[strlen(TOKU_TEST_FILENAME) + sizeof("_1")]; strcpy(fname1, TOKU_TEST_FILENAME); @@ -333,7 +333,7 @@ static void test_evictor(void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); char fname1[strlen(TOKU_TEST_FILENAME) + sizeof("_1")]; strcpy(fname1, TOKU_TEST_FILENAME); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-maybe-get-pin.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-maybe-get-pin.cc index 08c14191be6..891b70fd7d1 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-maybe-get-pin.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-maybe-get-pin.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -100,7 +100,7 @@ cachetable_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-cheap.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-cheap.cc index f5608b7572c..70e7a936a26 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-cheap.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-cheap.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -125,7 +125,7 @@ run_test (pair_lock_type lock_type) { struct unlockers unlockers = {true, unlock_dummy, NULL, NULL}; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-dep-nodes.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-dep-nodes.cc index d8ced02318b..8a87f006f6b 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-dep-nodes.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-dep-nodes.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. 
Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -158,7 +158,7 @@ cachetable_test (bool write_first, bool write_second, bool start_checkpoint) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-nonblocking-cheap.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-nonblocking-cheap.cc index cec5aff8266..e1050b2da7f 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-nonblocking-cheap.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-nonblocking-cheap.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -164,7 +164,7 @@ run_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-nonblocking.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-nonblocking.cc index a96f7649226..33319b7a368 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-nonblocking.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-nonblocking.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -147,7 +147,7 @@ run_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin.cc index e40890ccc04..b90b01bfd6c 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-pin.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -139,7 +139,7 @@ run_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-put-dep-nodes.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-put-dep-nodes.cc index 1a5074a172f..eaeee0bb4db 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-put-dep-nodes.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-put-dep-nodes.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -173,7 +173,7 @@ cachetable_test (bool write_first, bool write_second, bool start_checkpoint) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-read-pin-nonblocking.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-read-pin-nonblocking.cc index 6a3d7c34f4a..aeb6437f670 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-read-pin-nonblocking.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-read-pin-nonblocking.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -152,7 +152,7 @@ run_test (void) { int r; void *ret; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-read-pin.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-read-pin.cc index 5bbc7455755..5f0b6eff445 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-read-pin.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-read-pin.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -158,7 +158,7 @@ run_test (void) { int r; void *ret; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc index b94123ad9a6..45d66073930 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -114,7 +114,7 @@ cachetable_test (void) { const int test_limit = 120; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-simple-verify.cc b/storage/tokudb/ft-index/ft/tests/cachetable-simple-verify.cc index 99364660bd1..f38eb2214b6 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-simple-verify.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-simple-verify.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. 
Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,7 @@ cachetable_test (void) { const int test_limit = 12; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-test.cc index e498df10a5c..a040943007a 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -118,7 +118,7 @@ static inline void test_mutex_unlock(void) { static void test_cachetable_create(void) { CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); toku_cachetable_close(&ct); } @@ -172,7 +172,7 @@ static void test_nested_pin (void) { void *vv,*vv2; const char *fname = TOKU_TEST_FILENAME; if (verbose) printf("creating cachetable\n"); - toku_cachetable_create(&t, 1, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&t, 1, ZERO_LSN, nullptr); toku_os_recursive_delete(fname); r = toku_cachetable_openf(&f, t, fname, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r==0); @@ -257,7 +257,7 @@ static void test_multi_filehandles (void) { unlink(fname1); unlink(fname2); - toku_cachetable_create(&t, 4, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&t, 4, ZERO_LSN, nullptr); r = toku_cachetable_openf(&f1, t, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r==0); r = link(fname1, fname2); assert(r==0); r = toku_cachetable_openf(&f2, t, fname2, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r==0); @@ -325,7 +325,7 @@ static void test_dirty(void) { int dirty; long long pinned; long entry_size; int r; - toku_cachetable_create(&t, 4, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&t, 4, ZERO_LSN, nullptr); const char *fname = TOKU_TEST_FILENAME; toku_os_recursive_delete(fname); @@ -455,7 +455,7 @@ static void test_size_resize(void) { int n = 3; long size = 1; - toku_cachetable_create(&t, n*size, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&t, n*size, ZERO_LSN, nullptr); const char *fname = TOKU_TEST_FILENAME; unlink(fname); @@ -509,7 +509,7 @@ static void test_size_flush(void) { const int n = 8; long long size = 1*1024*1024; - toku_cachetable_create(&t, n*size, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&t, n*size, ZERO_LSN, nullptr); const char *fname = TOKU_TEST_FILENAME; unlink(fname); diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-test.h b/storage/tokudb/ft-index/ft/tests/cachetable-test.h index 6d143237c11..6d27a9b71bb 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-test.h +++ b/storage/tokudb/ft-index/ft/tests/cachetable-test.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,10 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#pragma once +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." 
-#include "cachetable-internal.h" +#include "cachetable/cachetable-internal.h" // // Dummy callbacks for checkpointing diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-unpin-and-remove-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-unpin-and-remove-test.cc index 8e199e153d6..9063cdc2bcc 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-unpin-and-remove-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-unpin-and-remove-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -116,7 +116,7 @@ cachetable_unpin_and_remove_test (int n) { int i; CACHETABLE ct; - toku_cachetable_create(&ct, table_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, table_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; @@ -172,7 +172,7 @@ cachetable_put_evict_remove_test (int n) { int i; CACHETABLE ct; - toku_cachetable_create(&ct, table_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, table_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-unpin-remove-and-checkpoint.cc b/storage/tokudb/ft-index/ft/tests/cachetable-unpin-remove-and-checkpoint.cc index e121f2165d9..406df310de5 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-unpin-remove-and-checkpoint.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-unpin-remove-and-checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -114,7 +114,7 @@ run_test (void) { const int test_limit = 12; int r; ct = NULL; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-unpin-test.cc b/storage/tokudb/ft-index/ft/tests/cachetable-unpin-test.cc index 4d0fe46f5f4..1d8c2b03abc 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-unpin-test.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-unpin-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
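Note: the cachetable-test.h hunk above also adds a #pragma once guard ahead of the existing #ident line. A minimal sketch of the header's new preamble, taken from that hunk (the rest of the header is unchanged apart from the include path):

    #pragma once
    #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
    #include "cachetable/cachetable-internal.h"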
DISCLAIMER: @@ -97,7 +97,7 @@ cachetable_unpin_test (int n) { const int test_limit = 2*n; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; @@ -145,7 +145,7 @@ unpin_and_evictor_test(enum unpin_evictor_test_type test_type) { int r; CACHETABLE ct; int test_limit = 4; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/cachetable-writer-thread-limit.cc b/storage/tokudb/ft-index/ft/tests/cachetable-writer-thread-limit.cc index fe7a26e4b3a..92f5a1906f6 100644 --- a/storage/tokudb/ft-index/ft/tests/cachetable-writer-thread-limit.cc +++ b/storage/tokudb/ft-index/ft/tests/cachetable-writer-thread-limit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -125,7 +125,7 @@ cachetable_test (void) { test_limit = 6; int r; CACHETABLE ct; - toku_cachetable_create(&ct, test_limit, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, test_limit, ZERO_LSN, nullptr); const char *fname1 = TOKU_TEST_FILENAME; unlink(fname1); CACHEFILE f1; diff --git a/storage/tokudb/ft-index/ft/tests/comparator-test.cc b/storage/tokudb/ft-index/ft/tests/comparator-test.cc index ad09ad0c3ab..0ac3bd569cc 100644 --- a/storage/tokudb/ft-index/ft/tests/comparator-test.cc +++ b/storage/tokudb/ft-index/ft/tests/comparator-test.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -112,14 +112,31 @@ static void test_desc(void) { // create with d1, make sure it gets used cmp.create(magic_compare, &d1); expected_desc = &d1; - c = cmp.compare(&dbt_a, &dbt_b); + c = cmp(&dbt_a, &dbt_b); invariant(c == MAGIC); // set desc to d2, make sure it gets used - cmp.set_descriptor(&d2); + toku::comparator cmp2; + cmp2.create(magic_compare, &d2); + cmp.inherit(cmp2); expected_desc = &d2; - c = cmp.compare(&dbt_a, &dbt_b); + c = cmp(&dbt_a, &dbt_b); invariant(c == MAGIC); + cmp2.destroy(); + + // go back to using d1, but using the create_from API + toku::comparator cmp3, cmp4; + cmp3.create(magic_compare, &d1); // cmp3 has d1 + cmp4.create_from(cmp3); // cmp4 should get d1 from cmp3 + expected_desc = &d1; + c = cmp3(&dbt_a, &dbt_b); + invariant(c == MAGIC); + c = cmp4(&dbt_a, &dbt_b); + invariant(c == MAGIC); + cmp3.destroy(); + cmp4.destroy(); + + cmp.destroy(); } static int dont_compare_me_bro(DB *db, const DBT *a, const DBT *b) { @@ -137,20 +154,22 @@ static void test_infinity(void) { // should never be called and thus the dbt never actually read. 
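Note: the comparator-test.cc hunks around this point move from the old cmp.compare()/cmp.set_descriptor() interface to using toku::comparator as a function object, with descriptors adopted through inherit() and create_from() and with explicit destroy() calls. A condensed sketch of the new usage, limited to calls visible in these hunks (magic_compare, d1, d2, MAGIC and the DBTs are the test's own fixtures):

    toku::comparator cmp, cmp2, cmp3;
    cmp.create(magic_compare, &d1);    // comparator bound to descriptor d1
    int c = cmp(&dbt_a, &dbt_b);       // call operator replaces cmp.compare(...)
    invariant(c == MAGIC);

    cmp2.create(magic_compare, &d2);
    cmp.inherit(cmp2);                 // cmp now uses d2, taken from cmp2
    c = cmp(&dbt_a, &dbt_b);

    cmp3.create_from(cmp2);            // cmp3 gets cmp2's function and descriptor
    c = cmp3(&dbt_a, &dbt_b);

    cmp3.destroy();
    cmp2.destroy();
    cmp.destroy();                     // comparators are destroyed explicitly now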
DBT arbitrary_dbt; - c = cmp.compare(&arbitrary_dbt, toku_dbt_positive_infinity()); + c = cmp(&arbitrary_dbt, toku_dbt_positive_infinity()); invariant(c < 0); - c = cmp.compare(toku_dbt_negative_infinity(), &arbitrary_dbt); + c = cmp(toku_dbt_negative_infinity(), &arbitrary_dbt); invariant(c < 0); - c = cmp.compare(toku_dbt_positive_infinity(), &arbitrary_dbt); + c = cmp(toku_dbt_positive_infinity(), &arbitrary_dbt); invariant(c > 0); - c = cmp.compare(&arbitrary_dbt, toku_dbt_negative_infinity()); + c = cmp(&arbitrary_dbt, toku_dbt_negative_infinity()); invariant(c > 0); - c = cmp.compare(toku_dbt_negative_infinity(), toku_dbt_negative_infinity()); + c = cmp(toku_dbt_negative_infinity(), toku_dbt_negative_infinity()); invariant(c == 0); - c = cmp.compare(toku_dbt_positive_infinity(), toku_dbt_positive_infinity()); + c = cmp(toku_dbt_positive_infinity(), toku_dbt_positive_infinity()); invariant(c == 0); + + cmp.destroy(); } int main(void) { diff --git a/storage/tokudb/ft-index/ft/tests/compress-test.cc b/storage/tokudb/ft-index/ft/tests/compress-test.cc index 55b70132029..7f7a97274c8 100644 --- a/storage/tokudb/ft-index/ft/tests/compress-test.cc +++ b/storage/tokudb/ft-index/ft/tests/compress-test.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: #ident "$Id$" #include "test.h" -#include "compress.h" +#include "serialize/compress.h" static void test_compress_buf_method (unsigned char *buf, int i, enum toku_compression_method m) { int bound = toku_compress_bound(m, i); diff --git a/storage/tokudb/ft-index/ft/tests/dbufio-test-destroy.cc b/storage/tokudb/ft-index/ft/tests/dbufio-test-destroy.cc index 8110f9554ad..c9984879a86 100644 --- a/storage/tokudb/ft-index/ft/tests/dbufio-test-destroy.cc +++ b/storage/tokudb/ft-index/ft/tests/dbufio-test-destroy.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,7 +88,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "dbufio.h" +#include "loader/dbufio.h" #include <stdio.h> #include <fcntl.h> #include <toku_assert.h> diff --git a/storage/tokudb/ft-index/ft/tests/dbufio-test.cc b/storage/tokudb/ft-index/ft/tests/dbufio-test.cc index cffc081921b..6f562d8ac85 100644 --- a/storage/tokudb/ft-index/ft/tests/dbufio-test.cc +++ b/storage/tokudb/ft-index/ft/tests/dbufio-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,7 +88,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 
11/760379 and to the patents and/or patent applications resulting from it." -#include "dbufio.h" +#include "loader/dbufio.h" #include <stdio.h> #include <fcntl.h> #include <toku_assert.h> diff --git a/storage/tokudb/ft-index/ft/tests/dmt-test.cc b/storage/tokudb/ft-index/ft/tests/dmt-test.cc index e4f1e53751e..adc759a3c4b 100644 --- a/storage/tokudb/ft-index/ft/tests/dmt-test.cc +++ b/storage/tokudb/ft-index/ft/tests/dmt-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/dmt-test2.cc b/storage/tokudb/ft-index/ft/tests/dmt-test2.cc index 8943aae7324..707ad9a5a7e 100644 --- a/storage/tokudb/ft-index/ft/tests/dmt-test2.cc +++ b/storage/tokudb/ft-index/ft/tests/dmt-test2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/fifo-test.cc b/storage/tokudb/ft-index/ft/tests/fifo-test.cc index 0a2047ab920..30815160684 100644 --- a/storage/tokudb/ft-index/ft/tests/fifo-test.cc +++ b/storage/tokudb/ft-index/ft/tests/fifo-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,90 +94,95 @@ PATENT RIGHTS GRANT: #include "test.h" static void -test_fifo_create (void) { - int r; - FIFO f; +test_create (void) { + message_buffer msg_buffer; + msg_buffer.create(); + msg_buffer.destroy(); +} - f = 0; - r = toku_fifo_create(&f); - assert(r == 0); assert(f != 0); +static char *buildkey(size_t len) { + char *XMALLOC_N(len, k); + memset(k, 0, len); + return k; +} - toku_fifo_free(&f); - assert(f == 0); +static char *buildval(size_t len) { + char *XMALLOC_N(len, v); + memset(v, ~len, len); + return v; } static void -test_fifo_enq (int n) { - int r; - FIFO f; +test_enqueue(int n) { MSN startmsn = ZERO_MSN; - f = 0; - r = toku_fifo_create(&f); - assert(r == 0); assert(f != 0); - - char *thekey = 0; int thekeylen; - char *theval = 0; int thevallen; - - // this was a function but icc cant handle it -#define buildkey(len) { \ - thekeylen = len+1; \ - XREALLOC_N(thekeylen, thekey); \ - memset(thekey, len, thekeylen); \ - } - -#define buildval(len) { \ - thevallen = len+2; \ - XREALLOC_N(thevallen, theval); \ - memset(theval, ~len, thevallen); \ - } + message_buffer msg_buffer; + msg_buffer.create(); for (int i=0; i<n; i++) { - buildkey(i); - buildval(i); + int thekeylen = i + 1; + int thevallen = i + 2; + char *thekey = buildkey(thekeylen); + char *theval = buildval(thevallen); XIDS xids; - if (i==0) - xids = xids_get_root_xids(); - else { - r = xids_create_child(xids_get_root_xids(), &xids, (TXNID)i); - assert(r==0); + if (i == 0) { + xids = toku_xids_get_root_xids(); + } else { + int r = toku_xids_create_child(toku_xids_get_root_xids(), &xids, (TXNID)i); + assert_zero(r); } MSN msn = next_dummymsn(); if (startmsn.msn == ZERO_MSN.msn) startmsn = msn; enum ft_msg_type type = (enum ft_msg_type) i; - r = toku_fifo_enq(f, thekey, thekeylen, theval, thevallen, type, msn, xids, true, NULL); assert(r == 0); - xids_destroy(&xids); + DBT k, v; + ft_msg msg(toku_fill_dbt(&k, thekey, thekeylen), toku_fill_dbt(&v, theval, thevallen), type, msn, xids); 
+ msg_buffer.enqueue(msg, true, nullptr); + toku_xids_destroy(&xids); + toku_free(thekey); + toku_free(theval); } - int i = 0; - FIFO_ITERATE(f, key, keylen, val, vallen, type, msn, xids, UU(is_fresh), { - if (verbose) printf("checkit %d %d %" PRIu64 "\n", i, type, msn.msn); - assert(msn.msn == startmsn.msn + i); - buildkey(i); - buildval(i); - assert((int) keylen == thekeylen); assert(memcmp(key, thekey, keylen) == 0); - assert((int) vallen == thevallen); assert(memcmp(val, theval, vallen) == 0); - assert(i % 256 == (int)type); - assert((TXNID)i==xids_get_innermost_xid(xids)); - i += 1; - }); - assert(i == n); - - if (thekey) toku_free(thekey); - if (theval) toku_free(theval); - - toku_fifo_free(&f); - assert(f == 0); + struct checkit_fn { + MSN startmsn; + int verbose; + int i; + checkit_fn(MSN smsn, bool v) + : startmsn(smsn), verbose(v), i(0) { + } + int operator()(const ft_msg &msg, bool UU(is_fresh)) { + int thekeylen = i + 1; + int thevallen = i + 2; + char *thekey = buildkey(thekeylen); + char *theval = buildval(thevallen); + + MSN msn = msg.msn(); + enum ft_msg_type type = msg.type(); + if (verbose) printf("checkit %d %d %" PRIu64 "\n", i, type, msn.msn); + assert(msn.msn == startmsn.msn + i); + assert((int) msg.kdbt()->size == thekeylen); assert(memcmp(msg.kdbt()->data, thekey, msg.kdbt()->size) == 0); + assert((int) msg.vdbt()->size == thevallen); assert(memcmp(msg.vdbt()->data, theval, msg.vdbt()->size) == 0); + assert(i % 256 == (int)type); + assert((TXNID)i == toku_xids_get_innermost_xid(msg.xids())); + i += 1; + toku_free(thekey); + toku_free(theval); + return 0; + } + } checkit(startmsn, verbose); + msg_buffer.iterate(checkit); + assert(checkit.i == n); + + msg_buffer.destroy(); } int test_main(int argc, const char *argv[]) { default_parse_args(argc, argv); initialize_dummymsn(); - test_fifo_create(); - test_fifo_enq(4); - test_fifo_enq(512); + test_create(); + test_enqueue(4); + test_enqueue(512); return 0; } diff --git a/storage/tokudb/ft-index/ft/tests/ft-bfe-query.cc b/storage/tokudb/ft-index/ft/tests/ft-bfe-query.cc index 8759732a76e..d91ae001884 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-bfe-query.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-bfe-query.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -103,7 +103,6 @@ int64_key_cmp (DB *db UU(), const DBT *a, const DBT *b) { static void test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) { int r; - ft_h->compare_fun = int64_key_cmp; FT_CURSOR XMALLOC(cursor); FTNODE dn = NULL; PAIR_ATTR attr; @@ -115,12 +114,12 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) { cursor->right_is_pos_infty = true; cursor->disable_prefetching = false; - struct ftnode_fetch_extra bfe; + ftnode_fetch_extra bfe; // quick test to see that we have the right behavior when we set // disable_prefetching to true cursor->disable_prefetching = true; - fill_bfe_for_prefetch(&bfe, ft_h, cursor); + bfe.create_for_prefetch( ft_h, cursor); FTNODE_DISK_DATA ndd = NULL; r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); assert(r==0); @@ -132,14 +131,14 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) { assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,1) == PT_ON_DISK); assert(BP_STATE(dn,2) == PT_ON_DISK); - destroy_bfe_for_prefetch(&bfe); + bfe.destroy(); toku_ftnode_free(&dn); toku_free(ndd); // now enable prefetching again cursor->disable_prefetching = false; - fill_bfe_for_prefetch(&bfe, ft_h, cursor); + bfe.create_for_prefetch( ft_h, cursor); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); assert(r==0); assert(dn->n_children == 3); @@ -154,14 +153,14 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) { assert(BP_STATE(dn,0) == PT_AVAIL); assert(BP_STATE(dn,1) == PT_AVAIL); assert(BP_STATE(dn,2) == PT_AVAIL); - destroy_bfe_for_prefetch(&bfe); + bfe.destroy(); toku_ftnode_free(&dn); toku_free(ndd); uint64_t left_key = 150; toku_fill_dbt(&cursor->range_lock_left_key, &left_key, sizeof(uint64_t)); cursor->left_is_neg_infty = false; - fill_bfe_for_prefetch(&bfe, ft_h, cursor); + bfe.create_for_prefetch( ft_h, cursor); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); assert(r==0); assert(dn->n_children == 3); @@ -176,14 +175,14 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) { assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,1) == PT_AVAIL); assert(BP_STATE(dn,2) == PT_AVAIL); - destroy_bfe_for_prefetch(&bfe); + bfe.destroy(); toku_ftnode_free(&dn); toku_free(ndd); uint64_t right_key = 151; toku_fill_dbt(&cursor->range_lock_right_key, &right_key, sizeof(uint64_t)); cursor->right_is_pos_infty = false; - fill_bfe_for_prefetch(&bfe, ft_h, cursor); + bfe.create_for_prefetch( ft_h, cursor); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); assert(r==0); assert(dn->n_children == 3); @@ -198,13 +197,13 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) { assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,1) == PT_AVAIL); assert(BP_STATE(dn,2) == PT_ON_DISK); - destroy_bfe_for_prefetch(&bfe); + bfe.destroy(); toku_ftnode_free(&dn); toku_free(ndd); left_key = 100000; right_key = 100000; - fill_bfe_for_prefetch(&bfe, ft_h, cursor); + bfe.create_for_prefetch( ft_h, cursor); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); assert(r==0); assert(dn->n_children == 3); @@ -219,13 +218,13 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) { assert(BP_STATE(dn,0) == PT_ON_DISK); assert(BP_STATE(dn,1) == PT_ON_DISK); assert(BP_STATE(dn,2) == PT_AVAIL); - destroy_bfe_for_prefetch(&bfe); + bfe.destroy(); toku_free(ndd); toku_ftnode_free(&dn); left_key = 100; right_key = 
100; - fill_bfe_for_prefetch(&bfe, ft_h, cursor); + bfe.create_for_prefetch( ft_h, cursor); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe); assert(r==0); assert(dn->n_children == 3); @@ -240,7 +239,7 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) { assert(BP_STATE(dn,0) == PT_AVAIL); assert(BP_STATE(dn,1) == PT_ON_DISK); assert(BP_STATE(dn,2) == PT_ON_DISK); - destroy_bfe_for_prefetch(&bfe); + bfe.destroy(); toku_ftnode_free(&dn); toku_free(ndd); @@ -250,7 +249,6 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) { static void test_subset_read(int fd, FT_HANDLE UU(ft), FT ft_h) { int r; - ft_h->compare_fun = int64_key_cmp; FT_CURSOR XMALLOC(cursor); FTNODE dn = NULL; FTNODE_DISK_DATA ndd = NULL; @@ -262,15 +260,14 @@ test_subset_read(int fd, FT_HANDLE UU(ft), FT ft_h) { cursor->left_is_neg_infty = true; cursor->right_is_pos_infty = true; - struct ftnode_fetch_extra bfe; - uint64_t left_key = 150; uint64_t right_key = 151; DBT left, right; toku_fill_dbt(&left, &left_key, sizeof(left_key)); toku_fill_dbt(&right, &right_key, sizeof(right_key)); - fill_bfe_for_subset_read( - &bfe, + + ftnode_fetch_extra bfe; + bfe.create_for_subset_read( ft_h, NULL, &left, @@ -372,7 +369,7 @@ test_prefetching(void) { // source_ft.fd=fd; sn.max_msn_applied_to_node_on_disk.msn = 0; sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 1; @@ -384,10 +381,10 @@ test_prefetching(void) { uint64_t key2 = 200; MALLOC_N(sn.n_children, sn.bp); - MALLOC_N(sn.n_children-1, sn.childkeys); - toku_memdup_dbt(&sn.childkeys[0], &key1, sizeof(key1)); - toku_memdup_dbt(&sn.childkeys[1], &key2, sizeof(key2)); - sn.totalchildkeylens = sizeof(key1) + sizeof(key2); + DBT pivotkeys[2]; + toku_fill_dbt(&pivotkeys[0], &key1, sizeof(key1)); + toku_fill_dbt(&pivotkeys[1], &key2, sizeof(key2)); + sn.pivotkeys.create_from_dbts(pivotkeys, 2); BP_BLOCKNUM(&sn, 0).b = 30; BP_BLOCKNUM(&sn, 1).b = 35; BP_BLOCKNUM(&sn, 2).b = 40; @@ -398,19 +395,19 @@ test_prefetching(void) { set_BNC(&sn, 1, toku_create_empty_nl()); set_BNC(&sn, 2, toku_create_empty_nl()); //Create XIDS - XIDS xids_0 = xids_get_root_xids(); + XIDS xids_0 = toku_xids_get_root_xids(); XIDS xids_123; XIDS xids_234; - r = xids_create_child(xids_0, &xids_123, (TXNID)123); + r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123); CKERR(r); - r = xids_create_child(xids_123, &xids_234, (TXNID)234); + r = toku_xids_create_child(xids_123, &xids_234, (TXNID)234); CKERR(r); // data in the buffers does not matter in this test //Cleanup: - xids_destroy(&xids_0); - xids_destroy(&xids_123); - xids_destroy(&xids_234); + toku_xids_destroy(&xids_0); + toku_xids_destroy(&xids_123); + toku_xids_destroy(&xids_234); FT_HANDLE XMALLOC(ft); FT XCALLOC(ft_h); @@ -422,24 +419,25 @@ test_prefetching(void) { 128*1024, TOKU_DEFAULT_COMPRESSION_METHOD, 16); + ft_h->cmp.create(int64_key_cmp, nullptr); ft->ft = ft_h; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + 
ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == (DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } FTNODE_DISK_DATA ndd = NULL; @@ -449,16 +447,11 @@ test_prefetching(void) { test_prefetch_read(fd, ft, ft_h); test_subset_read(fd, ft, ft_h); - toku_free(sn.childkeys[0].data); - toku_free(sn.childkeys[1].data); - destroy_nonleaf_childinfo(BNC(&sn, 0)); - destroy_nonleaf_childinfo(BNC(&sn, 1)); - destroy_nonleaf_childinfo(BNC(&sn, 2)); - toku_free(sn.bp); - toku_free(sn.childkeys); + toku_destroy_ftnode_internals(&sn); - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); + ft_h->cmp.destroy(); toku_free(ft_h->h); toku_free(ft_h); toku_free(ft); diff --git a/storage/tokudb/ft-index/ft/tests/ft-clock-test.cc b/storage/tokudb/ft-index/ft/tests/ft-clock-test.cc index b637b9d3986..50bb6d67ca5 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-clock-test.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-clock-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -90,7 +90,7 @@ PATENT RIGHTS GRANT: #include "test.h" - +#include "ft/cursor.h" enum ftnode_verify_type { read_all=1, @@ -146,9 +146,8 @@ le_malloc(bn_data* bn, uint32_t idx, const char *key, const char *val) static void test1(int fd, FT ft_h, FTNODE *dn) { int r; - struct ftnode_fetch_extra bfe_all; - ft_h->compare_fun = string_key_cmp; - fill_bfe_for_full_read(&bfe_all, ft_h); + ftnode_fetch_extra bfe_all; + bfe_all.create_for_full_read(ft_h); FTNODE_DISK_DATA ndd = NULL; r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_all); bool is_leaf = ((*dn)->height == 0); @@ -218,19 +217,17 @@ static int search_cmp(const struct ft_search& UU(so), const DBT* UU(key)) { static void test2(int fd, FT ft_h, FTNODE *dn) { - struct ftnode_fetch_extra bfe_subset; DBT left, right; DB dummy_db; memset(&dummy_db, 0, sizeof(dummy_db)); memset(&left, 0, sizeof(left)); memset(&right, 0, sizeof(right)); - ft_search_t search_t; + ft_search search; - ft_h->compare_fun = string_key_cmp; - fill_bfe_for_subset_read( - &bfe_subset, + ftnode_fetch_extra bfe_subset; + bfe_subset.create_for_subset_read( ft_h, - ft_search_init(&search_t, search_cmp, FT_SEARCH_LEFT, nullptr, nullptr, nullptr), + ft_search_init(&search, search_cmp, FT_SEARCH_LEFT, nullptr, nullptr, nullptr), &left, &right, true, @@ -238,6 +235,7 @@ test2(int fd, FT ft_h, FTNODE *dn) { false, false ); + FTNODE_DISK_DATA ndd = NULL; int r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_subset); assert(r==0); @@ -272,18 +270,15 @@ test2(int fd, FT ft_h, FTNODE *dn) { static void test3_leaf(int fd, FT ft_h, FTNODE *dn) { - struct ftnode_fetch_extra bfe_min; DBT left, right; DB dummy_db; memset(&dummy_db, 0, sizeof(dummy_db)); memset(&left, 0, sizeof(left)); memset(&right, 0, sizeof(right)); - ft_h->compare_fun = 
string_key_cmp; - fill_bfe_for_min_read( - &bfe_min, - ft_h - ); + ftnode_fetch_extra bfe_min; + bfe_min.create_for_min_read(ft_h); + FTNODE_DISK_DATA ndd = NULL; int r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_min); assert(r==0); @@ -309,20 +304,17 @@ test_serialize_nonleaf(void) { // source_ft.fd=fd; sn.max_msn_applied_to_node_on_disk.msn = 0; - char *hello_string; sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 1; sn.n_children = 2; sn.dirty = 1; sn.oldest_referenced_xid_known = TXNID_NONE; - hello_string = toku_strdup("hello"); MALLOC_N(2, sn.bp); - MALLOC_N(1, sn.childkeys); - toku_fill_dbt(&sn.childkeys[0], hello_string, 6); - sn.totalchildkeylens = 6; + DBT pivotkey; + sn.pivotkeys.create_from_dbts(toku_fill_dbt(&pivotkey, "hello", 6), 1); BP_BLOCKNUM(&sn, 0).b = 30; BP_BLOCKNUM(&sn, 1).b = 35; BP_STATE(&sn,0) = PT_AVAIL; @@ -330,21 +322,26 @@ test_serialize_nonleaf(void) { set_BNC(&sn, 0, toku_create_empty_nl()); set_BNC(&sn, 1, toku_create_empty_nl()); //Create XIDS - XIDS xids_0 = xids_get_root_xids(); + XIDS xids_0 = toku_xids_get_root_xids(); XIDS xids_123; XIDS xids_234; - r = xids_create_child(xids_0, &xids_123, (TXNID)123); + r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123); CKERR(r); - r = xids_create_child(xids_123, &xids_234, (TXNID)234); + r = toku_xids_create_child(xids_123, &xids_234, (TXNID)234); CKERR(r); - toku_bnc_insert_msg(BNC(&sn, 0), "a", 2, "aval", 5, FT_NONE, next_dummymsn(), xids_0, true, NULL, string_key_cmp); - toku_bnc_insert_msg(BNC(&sn, 0), "b", 2, "bval", 5, FT_NONE, next_dummymsn(), xids_123, false, NULL, string_key_cmp); - toku_bnc_insert_msg(BNC(&sn, 1), "x", 2, "xval", 5, FT_NONE, next_dummymsn(), xids_234, true, NULL, string_key_cmp); + toku::comparator cmp; + cmp.create(string_key_cmp, nullptr); + + toku_bnc_insert_msg(BNC(&sn, 0), "a", 2, "aval", 5, FT_NONE, next_dummymsn(), xids_0, true, cmp); + toku_bnc_insert_msg(BNC(&sn, 0), "b", 2, "bval", 5, FT_NONE, next_dummymsn(), xids_123, false, cmp); + toku_bnc_insert_msg(BNC(&sn, 1), "x", 2, "xval", 5, FT_NONE, next_dummymsn(), xids_234, true, cmp); + //Cleanup: - xids_destroy(&xids_0); - xids_destroy(&xids_123); - xids_destroy(&xids_234); + toku_xids_destroy(&xids_0); + toku_xids_destroy(&xids_123); + toku_xids_destroy(&xids_234); + cmp.destroy(); FT_HANDLE XMALLOC(ft); FT XCALLOC(ft_h); @@ -356,25 +353,26 @@ test_serialize_nonleaf(void) { 128*1024, TOKU_DEFAULT_COMPRESSION_METHOD, 16); + ft_h->cmp.create(string_key_cmp, nullptr); ft->ft = ft_h; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == 
(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } FTNODE_DISK_DATA ndd = NULL; @@ -384,16 +382,13 @@ test_serialize_nonleaf(void) { test1(fd, ft_h, &dn); test2(fd, ft_h, &dn); - toku_free(hello_string); - destroy_nonleaf_childinfo(BNC(&sn, 0)); - destroy_nonleaf_childinfo(BNC(&sn, 1)); - toku_free(sn.bp); - toku_free(sn.childkeys); + toku_destroy_ftnode_internals(&sn); toku_free(ndd); - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); toku_free(ft_h->h); + ft_h->cmp.destroy(); toku_free(ft_h); toku_free(ft); @@ -411,7 +406,7 @@ test_serialize_leaf(void) { sn.max_msn_applied_to_node_on_disk.msn = 0; sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 0; @@ -419,9 +414,8 @@ test_serialize_leaf(void) { sn.dirty = 1; sn.oldest_referenced_xid_known = TXNID_NONE; MALLOC_N(sn.n_children, sn.bp); - MALLOC_N(1, sn.childkeys); - toku_memdup_dbt(&sn.childkeys[0], "b", 2); - sn.totalchildkeylens = 2; + DBT pivotkey; + sn.pivotkeys.create_from_dbts(toku_fill_dbt(&pivotkey, "b", 2), 1); BP_STATE(&sn,0) = PT_AVAIL; BP_STATE(&sn,1) = PT_AVAIL; set_BLB(&sn, 0, toku_create_empty_bn()); @@ -442,23 +436,23 @@ test_serialize_leaf(void) { 16); ft->ft = ft_h; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == (DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } FTNODE_DISK_DATA ndd = NULL; @@ -468,17 +462,10 @@ test_serialize_leaf(void) { test1(fd, ft_h, &dn); test3_leaf(fd, ft_h,&dn); - for (int i = 0; i < sn.n_children-1; ++i) { - toku_free(sn.childkeys[i].data); - } - for (int i = 0; i < sn.n_children; i++) { - destroy_basement_node(BLB(&sn, i)); - } - toku_free(sn.bp); - toku_free(sn.childkeys); + toku_destroy_ftnode_internals(&sn); - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); toku_free(ft_h->h); toku_free(ft_h); toku_free(ft); diff --git a/storage/tokudb/ft-index/ft/tests/ft-serialize-benchmark.cc b/storage/tokudb/ft-index/ft/tests/ft-serialize-benchmark.cc index 285ee64e8a3..82b96742ceb 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-serialize-benchmark.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-serialize-benchmark.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing 
Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -144,7 +144,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de sn->max_msn_applied_to_node_on_disk.msn = 0; sn->flags = 0x11223344; - sn->thisnodename.b = 20; + sn->blocknum.b = 20; sn->layout_version = FT_LAYOUT_VERSION; sn->layout_version_original = FT_LAYOUT_VERSION; sn->height = 0; @@ -152,8 +152,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de sn->dirty = 1; sn->oldest_referenced_xid_known = TXNID_NONE; MALLOC_N(sn->n_children, sn->bp); - MALLOC_N(sn->n_children-1, sn->childkeys); - sn->totalchildkeylens = 0; + sn->pivotkeys.create_empty(); for (int i = 0; i < sn->n_children; ++i) { BP_STATE(sn,i) = PT_AVAIL; set_BLB(sn, i, toku_create_empty_bn()); @@ -181,8 +180,8 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de ); } if (ck < 7) { - toku_memdup_dbt(&sn->childkeys[ck], &k, sizeof k); - sn->totalchildkeylens += sizeof k; + DBT pivotkey; + sn->pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), ck); } } @@ -196,26 +195,26 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de 128*1024, TOKU_DEFAULT_COMPRESSION_METHOD, 16); + ft_h->cmp.create(long_key_cmp, nullptr); ft->ft = ft_h; - ft_h->compare_fun = long_key_cmp; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == (DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } @@ -248,9 +247,9 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de total_start.tv_sec = total_start.tv_usec = 0; total_end.tv_sec = total_end.tv_usec = 0; - struct ftnode_fetch_extra bfe; + ftnode_fetch_extra bfe; for (int i = 0; i < deser_runs; i++) { - fill_bfe_for_full_read(&bfe, ft_h); + bfe.create_for_full_read(ft_h); gettimeofday(&t[0], NULL); FTNODE_DISK_DATA ndd2 = NULL; r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd2, &bfe); @@ -278,8 +277,9 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de toku_ftnode_free(&sn); - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); + ft_h->cmp.destroy(); toku_free(ft_h->h); toku_free(ft_h); toku_free(ft); @@ -299,7 +299,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int // source_ft.fd=fd; sn.max_msn_applied_to_node_on_disk.msn = 0; sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 
20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 1; @@ -307,18 +307,19 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int sn.dirty = 1; sn.oldest_referenced_xid_known = TXNID_NONE; MALLOC_N(sn.n_children, sn.bp); - MALLOC_N(sn.n_children-1, sn.childkeys); - sn.totalchildkeylens = 0; + sn.pivotkeys.create_empty(); for (int i = 0; i < sn.n_children; ++i) { BP_BLOCKNUM(&sn, i).b = 30 + (i*5); BP_STATE(&sn,i) = PT_AVAIL; set_BNC(&sn, i, toku_create_empty_nl()); } //Create XIDS - XIDS xids_0 = xids_get_root_xids(); + XIDS xids_0 = toku_xids_get_root_xids(); XIDS xids_123; - r = xids_create_child(xids_0, &xids_123, (TXNID)123); + r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123); CKERR(r); + toku::comparator cmp; + cmp.create(long_key_cmp, nullptr); int nperchild = nelts / 8; for (int ck = 0; ck < sn.n_children; ++ck) { long k; @@ -334,17 +335,18 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int } memset(&buf[c], 0, valsize - c); - toku_bnc_insert_msg(bnc, &k, sizeof k, buf, valsize, FT_NONE, next_dummymsn(), xids_123, true, NULL, long_key_cmp); + toku_bnc_insert_msg(bnc, &k, sizeof k, buf, valsize, FT_NONE, next_dummymsn(), xids_123, true, cmp); } if (ck < 7) { - toku_memdup_dbt(&sn.childkeys[ck], &k, sizeof k); - sn.totalchildkeylens += sizeof k; + DBT pivotkey; + sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), ck); } } //Cleanup: - xids_destroy(&xids_0); - xids_destroy(&xids_123); + toku_xids_destroy(&xids_0); + toku_xids_destroy(&xids_123); + cmp.destroy(); FT_HANDLE XMALLOC(ft); FT XCALLOC(ft_h); @@ -356,26 +358,26 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int 128*1024, TOKU_DEFAULT_COMPRESSION_METHOD, 16); + ft_h->cmp.create(long_key_cmp, nullptr); ft->ft = ft_h; - ft_h->compare_fun = long_key_cmp; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == (DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } @@ -390,8 +392,8 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int dt *= 1000; printf("serialize nonleaf(ms): %0.05lf (IGNORED RUNS=%d)\n", dt, ser_runs); - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft_h); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft_h); gettimeofday(&t[0], NULL); FTNODE_DISK_DATA ndd2 = NULL; r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd2, &bfe); @@ -408,19 +410,12 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int ); toku_ftnode_free(&dn); + toku_destroy_ftnode_internals(&sn); - for (int i = 0; i < 
sn.n_children-1; ++i) { - toku_free(sn.childkeys[i].data); - } - for (int i = 0; i < sn.n_children; ++i) { - destroy_nonleaf_childinfo(BNC(&sn, i)); - } - toku_free(sn.bp); - toku_free(sn.childkeys); - - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); toku_free(ft_h->h); + ft_h->cmp.destroy(); toku_free(ft_h); toku_free(ft); toku_free(ndd); diff --git a/storage/tokudb/ft-index/ft/tests/ft-serialize-sub-block-test.cc b/storage/tokudb/ft-index/ft/tests/ft-serialize-sub-block-test.cc index 47865bfcce7..f0be59a811c 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-serialize-sub-block-test.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-serialize-sub-block-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -112,7 +112,7 @@ static void test_sub_block(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); error = toku_open_ft_handle(fname, true, &ft, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun); assert(error == 0); diff --git a/storage/tokudb/ft-index/ft/tests/ft-serialize-test.cc b/storage/tokudb/ft-index/ft/tests/ft-serialize-test.cc index 25a6a0227bc..266cf50f8ce 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-serialize-test.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-serialize-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -164,16 +164,15 @@ string_key_cmp(DB *UU(e), const DBT *a, const DBT *b) static void setup_dn(enum ftnode_verify_type bft, int fd, FT ft_h, FTNODE *dn, FTNODE_DISK_DATA* ndd) { int r; - ft_h->compare_fun = string_key_cmp; if (bft == read_all) { - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, ft_h); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft_h); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, ndd, &bfe); assert(r==0); } else if (bft == read_compressed || bft == read_none) { - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, ft_h); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(ft_h); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, ndd, &bfe); assert(r==0); // assert all bp's are compressed or on disk. 
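The setup_dn hunks just above repeat one conversion seen throughout these tests: the old struct ftnode_fetch_extra plus the fill_bfe_for_*() / destroy_bfe_for_prefetch() helpers become a class whose create_for_*() and destroy() methods are called on the object itself. The following is an illustrative sketch of that pattern only, not part of the patch; read_node_full is a made-up name, and it assumes the ft-index internal test headers ("test.h") that these files already include.

    // Sketch of the 10.1-style fetch-extra usage, mirroring setup_dn() above.
    static void read_node_full(int fd, FT ft_h, FTNODE *dn, FTNODE_DISK_DATA *ndd) {
        // 10.0 spelled this: struct ftnode_fetch_extra bfe; fill_bfe_for_full_read(&bfe, ft_h);
        ftnode_fetch_extra bfe;
        bfe.create_for_full_read(ft_h);
        int r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0 /*pass zero for hash*/,
                                             dn, ndd, &bfe);
        assert(r == 0);
        // The prefetch variant in ft-bfe-query.cc pairs bfe.create_for_prefetch(ft_h, cursor)
        // with bfe.destroy(), replacing fill_bfe_for_prefetch()/destroy_bfe_for_prefetch().
    }
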
@@ -200,7 +199,7 @@ setup_dn(enum ftnode_verify_type bft, int fd, FT ft_h, FTNODE *dn, FTNODE_DISK_D // that it is available // then run partial eviction to get it compressed PAIR_ATTR attr; - fill_bfe_for_full_read(&bfe, ft_h); + bfe.create_for_full_read(ft_h); assert(toku_ftnode_pf_req_callback(*dn, &bfe)); r = toku_ftnode_pf_callback(*dn, *ndd, &bfe, fd, &attr); assert(r==0); @@ -222,7 +221,7 @@ setup_dn(enum ftnode_verify_type bft, int fd, FT ft_h, FTNODE *dn, FTNODE_DISK_D } } // now decompress them - fill_bfe_for_full_read(&bfe, ft_h); + bfe.create_for_full_read(ft_h); assert(toku_ftnode_pf_req_callback(*dn, &bfe)); PAIR_ATTR attr; r = toku_ftnode_pf_callback(*dn, *ndd, &bfe, fd, &attr); @@ -271,7 +270,7 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) { sn.max_msn_applied_to_node_on_disk = PRESERIALIZE_MSN_ON_DISK; sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 0; @@ -279,9 +278,8 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) { sn.dirty = 1; sn.oldest_referenced_xid_known = TXNID_NONE; MALLOC_N(sn.n_children, sn.bp); - MALLOC_N(1, sn.childkeys); - toku_memdup_dbt(&sn.childkeys[0], "b", 2); - sn.totalchildkeylens = 2; + DBT pivotkey; + sn.pivotkeys.create_from_dbts(toku_fill_dbt(&pivotkey, "b", 2), 1); BP_STATE(&sn,0) = PT_AVAIL; BP_STATE(&sn,1) = PT_AVAIL; set_BLB(&sn, 0, toku_create_empty_bn()); @@ -303,24 +301,24 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) { TOKU_DEFAULT_COMPRESSION_METHOD, 16); ft->ft = ft_h; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == (DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } FTNODE_DISK_DATA src_ndd = NULL; @@ -330,7 +328,7 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) { setup_dn(bft, fd, ft_h, &dn, &dest_ndd); - assert(dn->thisnodename.b==20); + assert(dn->blocknum.b==20); assert(dn->layout_version ==FT_LAYOUT_VERSION); assert(dn->layout_version_original ==FT_LAYOUT_VERSION); @@ -346,7 +344,6 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) { elts[1].init("b", "bval"); elts[2].init("x", "xval"); const uint32_t npartitions = dn->n_children; - assert(dn->totalchildkeylens==(2*(npartitions-1))); uint32_t last_i = 0; for (uint32_t bn = 0; bn < npartitions; ++bn) { assert(BLB_MAX_MSN_APPLIED(dn, bn).msn == POSTSERIALIZE_MSN_ON_DISK.msn); @@ -363,7 +360,7 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) { assert(leafentry_memsize(curr_le) == leafentry_memsize(elts[last_i].le)); 
assert(memcmp(curr_le, elts[last_i].le, leafentry_memsize(curr_le)) == 0); if (bn < npartitions-1) { - assert(strcmp((char*)dn->childkeys[bn].data, elts[last_i].keyp) <= 0); + assert(strcmp((char*)dn->pivotkeys.get_pivot(bn).data, elts[last_i].keyp) <= 0); } // TODO for later, get a key comparison here as well last_i++; @@ -372,19 +369,12 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) { } assert(last_i == 3); } - toku_ftnode_free(&dn); - for (int i = 0; i < sn.n_children-1; ++i) { - toku_free(sn.childkeys[i].data); - } - for (int i = 0; i < sn.n_children; i++) { - destroy_basement_node(BLB(&sn, i)); - } - toku_free(sn.bp); - toku_free(sn.childkeys); + toku_ftnode_free(&dn); + toku_destroy_ftnode_internals(&sn); - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); toku_free(ft_h->h); toku_free(ft_h); toku_free(ft); @@ -405,7 +395,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone sn.max_msn_applied_to_node_on_disk.msn = 0; sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 0; @@ -414,8 +404,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone sn.oldest_referenced_xid_known = TXNID_NONE; MALLOC_N(sn.n_children, sn.bp); - MALLOC_N(sn.n_children-1, sn.childkeys); - sn.totalchildkeylens = (sn.n_children-1)*sizeof(int); + sn.pivotkeys.create_empty(); for (int i = 0; i < sn.n_children; ++i) { BP_STATE(&sn,i) = PT_AVAIL; set_BLB(&sn, i, toku_create_empty_bn()); @@ -430,7 +419,8 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone uint32_t keylen; void* curr_key; BLB_DATA(&sn, i)->fetch_key_and_len(0, &keylen, &curr_key); - toku_memdup_dbt(&sn.childkeys[i], curr_key, keylen); + DBT pivotkey; + sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, curr_key, keylen), i); } } @@ -445,23 +435,23 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone TOKU_DEFAULT_COMPRESSION_METHOD, 16); ft->ft = ft_h; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == (DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } FTNODE_DISK_DATA src_ndd = NULL; @@ -471,7 +461,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone setup_dn(bft, fd, ft_h, &dn, &dest_ndd); - assert(dn->thisnodename.b==20); + assert(dn->blocknum.b==20); assert(dn->layout_version ==FT_LAYOUT_VERSION); 
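The leaf-serialize hunks around here all make the same substitution: the per-node childkeys DBT array and its totalchildkeylens bookkeeping are replaced by a pivotkeys object, built either from a DBT array or incrementally, and read back through get_pivot(). A minimal sketch of that pattern, using only calls visible in these hunks (build_pivots is an illustrative name, not part of the patch, and the ft internal headers are assumed):

    // Build pivots under the new API; the 10.0 code toku_memdup_dbt()'d into
    // sn->childkeys[] and summed sn->totalchildkeylens by hand.
    static void build_pivots(FTNODE sn) {
        uint64_t key1 = 100, key2 = 200;
        DBT pivotkeys[2];
        toku_fill_dbt(&pivotkeys[0], &key1, sizeof(key1));
        toku_fill_dbt(&pivotkeys[1], &key2, sizeof(key2));
        sn->pivotkeys.create_from_dbts(pivotkeys, 2);   // bulk form used by most tests
        // Incremental form used by the large-pivot test:
        //   sn->pivotkeys.create_empty();
        //   sn->pivotkeys.insert_at(toku_fill_dbt(&pivotkey, curr_key, keylen), i);
        // Read side: dn->pivotkeys.get_pivot(bn).data / .size replaces dn->childkeys[bn].
    }
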
assert(dn->layout_version_original ==FT_LAYOUT_VERSION); @@ -489,7 +479,6 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone } } const uint32_t npartitions = dn->n_children; - assert(dn->totalchildkeylens==(keylens*(npartitions-1))); uint32_t last_i = 0; for (uint32_t bn = 0; bn < npartitions; ++bn) { assert(dest_ndd[bn].start > 0); @@ -506,7 +495,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone assert(leafentry_memsize(curr_le) == leafentry_memsize(les[last_i].le)); assert(memcmp(curr_le, les[last_i].le, leafentry_memsize(curr_le)) == 0); if (bn < npartitions-1) { - assert(strcmp((char*)dn->childkeys[bn].data, les[last_i].keyp) <= 0); + assert(strcmp((char*)dn->pivotkeys.get_pivot(bn).data, les[last_i].keyp) <= 0); } // TODO for later, get a key comparison here as well last_i++; @@ -517,17 +506,10 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone } toku_ftnode_free(&dn); - for (int i = 0; i < sn.n_children-1; ++i) { - toku_free(sn.childkeys[i].data); - } - toku_free(sn.childkeys); - for (int i = 0; i < sn.n_children; i++) { - destroy_basement_node(BLB(&sn, i)); - } - toku_free(sn.bp); + toku_destroy_ftnode_internals(&sn); - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); toku_free(ft_h->h); toku_free(ft_h); toku_free(ft); @@ -546,7 +528,7 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) { sn.max_msn_applied_to_node_on_disk.msn = 0; sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 0; @@ -555,8 +537,7 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) { sn.oldest_referenced_xid_known = TXNID_NONE; XMALLOC_N(sn.n_children, sn.bp); - XMALLOC_N(sn.n_children-1, sn.childkeys); - sn.totalchildkeylens = (sn.n_children-1)*sizeof(int); + sn.pivotkeys.create_empty(); for (int i = 0; i < sn.n_children; ++i) { BP_STATE(&sn,i) = PT_AVAIL; set_BLB(&sn, i, toku_create_empty_bn()); @@ -580,23 +561,23 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) { 16); ft->ft = ft_h; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == (DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } @@ -606,7 +587,7 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) { setup_dn(bft, fd, ft_h, &dn, &dest_ndd); - assert(dn->thisnodename.b==20); + 
assert(dn->blocknum.b==20); assert(dn->layout_version ==FT_LAYOUT_VERSION); assert(dn->layout_version_original ==FT_LAYOUT_VERSION); @@ -621,7 +602,6 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) { } } const uint32_t npartitions = dn->n_children; - assert(dn->totalchildkeylens==(sizeof(int)*(npartitions-1))); uint32_t last_i = 0; for (uint32_t bn = 0; bn < npartitions; ++bn) { assert(dest_ndd[bn].start > 0); @@ -638,7 +618,7 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) { assert(leafentry_memsize(curr_le) == leafentry_memsize(les[last_i].le)); assert(memcmp(curr_le, les[last_i].le, leafentry_memsize(curr_le)) == 0); if (bn < npartitions-1) { - uint32_t *CAST_FROM_VOIDP(pivot, dn->childkeys[bn].data); + uint32_t *CAST_FROM_VOIDP(pivot, dn->pivotkeys.get_pivot(bn).data); void* tmp = les[last_i].keyp; uint32_t *CAST_FROM_VOIDP(item, tmp); assert(*pivot >= *item); @@ -654,17 +634,10 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) { } toku_ftnode_free(&dn); - for (int i = 0; i < sn.n_children-1; ++i) { - toku_free(sn.childkeys[i].data); - } - for (int i = 0; i < sn.n_children; i++) { - destroy_basement_node(BLB(&sn, i)); - } - toku_free(sn.bp); - toku_free(sn.childkeys); + toku_destroy_ftnode_internals(&sn); - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); toku_free(ft_h->h); toku_free(ft_h); toku_free(ft); @@ -687,7 +660,7 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone) sn.max_msn_applied_to_node_on_disk.msn = 0; sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 0; @@ -696,8 +669,7 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone) sn.oldest_referenced_xid_known = TXNID_NONE; MALLOC_N(sn.n_children, sn.bp); - MALLOC_N(sn.n_children-1, sn.childkeys); - sn.totalchildkeylens = (sn.n_children-1)*8; + sn.pivotkeys.create_empty(); for (int i = 0; i < sn.n_children; ++i) { BP_STATE(&sn,i) = PT_AVAIL; set_BLB(&sn, i, toku_create_empty_bn()); @@ -724,23 +696,23 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone) 16); ft->ft = ft_h; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == (DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } @@ -750,7 +722,7 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone) 
setup_dn(bft, fd, ft_h, &dn, &dest_ndd); - assert(dn->thisnodename.b==20); + assert(dn->blocknum.b==20); assert(dn->layout_version ==FT_LAYOUT_VERSION); assert(dn->layout_version_original ==FT_LAYOUT_VERSION); @@ -771,7 +743,6 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone) } const uint32_t npartitions = dn->n_children; assert(npartitions == nrows); - assert(dn->totalchildkeylens==(key_size*(npartitions-1))); uint32_t last_i = 0; for (uint32_t bn = 0; bn < npartitions; ++bn) { assert(dest_ndd[bn].start > 0); @@ -788,7 +759,7 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone) assert(leafentry_memsize(curr_le) == leafentry_memsize(les[last_i].le)); assert(memcmp(curr_le, les[last_i].le, leafentry_memsize(curr_le)) == 0); if (bn < npartitions-1) { - assert(strcmp((char*)dn->childkeys[bn].data, (char*)(les[last_i].keyp)) <= 0); + assert(strcmp((char*)dn->pivotkeys.get_pivot(bn).data, (char*)(les[last_i].keyp)) <= 0); } // TODO for later, get a key comparison here as well last_i++; @@ -800,17 +771,10 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone) } toku_ftnode_free(&dn); - for (int i = 0; i < sn.n_children-1; ++i) { - toku_free(sn.childkeys[i].data); - } - for (int i = 0; i < sn.n_children; i++) { - destroy_basement_node(BLB(&sn, i)); - } - toku_free(sn.bp); - toku_free(sn.childkeys); + toku_destroy_ftnode_internals(&sn); - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); toku_free(ft_h->h); toku_free(ft_h); toku_free(ft); @@ -831,7 +795,7 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool sn.max_msn_applied_to_node_on_disk.msn = 0; sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 0; @@ -839,14 +803,14 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool sn.dirty = 1; sn.oldest_referenced_xid_known = TXNID_NONE; MALLOC_N(sn.n_children, sn.bp); - MALLOC_N(sn.n_children-1, sn.childkeys); - toku_memdup_dbt(&sn.childkeys[0], "A", 2); - toku_memdup_dbt(&sn.childkeys[1], "a", 2); - toku_memdup_dbt(&sn.childkeys[2], "a", 2); - toku_memdup_dbt(&sn.childkeys[3], "b", 2); - toku_memdup_dbt(&sn.childkeys[4], "b", 2); - toku_memdup_dbt(&sn.childkeys[5], "x", 2); - sn.totalchildkeylens = (sn.n_children-1)*2; + DBT pivotkeys[6]; + toku_fill_dbt(&pivotkeys[0], "A", 2); + toku_fill_dbt(&pivotkeys[1], "a", 2); + toku_fill_dbt(&pivotkeys[2], "a", 2); + toku_fill_dbt(&pivotkeys[3], "b", 2); + toku_fill_dbt(&pivotkeys[4], "b", 2); + toku_fill_dbt(&pivotkeys[5], "x", 2); + sn.pivotkeys.create_from_dbts(pivotkeys, 6); for (int i = 0; i < sn.n_children; ++i) { BP_STATE(&sn,i) = PT_AVAIL; set_BLB(&sn, i, toku_create_empty_bn()); @@ -868,23 +832,23 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool 16); ft->ft = ft_h; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, 
b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == (DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } FTNODE_DISK_DATA src_ndd = NULL; @@ -893,7 +857,7 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool setup_dn(bft, fd, ft_h, &dn, &dest_ndd); - assert(dn->thisnodename.b==20); + assert(dn->blocknum.b==20); assert(dn->layout_version ==FT_LAYOUT_VERSION); assert(dn->layout_version_original ==FT_LAYOUT_VERSION); @@ -909,7 +873,6 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool elts[1].init("b", "bval"); elts[2].init("x", "xval"); const uint32_t npartitions = dn->n_children; - assert(dn->totalchildkeylens==(2*(npartitions-1))); uint32_t last_i = 0; for (uint32_t bn = 0; bn < npartitions; ++bn) { assert(dest_ndd[bn].start > 0); @@ -925,7 +888,7 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool assert(leafentry_memsize(curr_le) == leafentry_memsize(elts[last_i].le)); assert(memcmp(curr_le, elts[last_i].le, leafentry_memsize(curr_le)) == 0); if (bn < npartitions-1) { - assert(strcmp((char*)dn->childkeys[bn].data, (char*)(elts[last_i].keyp)) <= 0); + assert(strcmp((char*)dn->pivotkeys.get_pivot(bn).data, (char*)(elts[last_i].keyp)) <= 0); } // TODO for later, get a key comparison here as well last_i++; @@ -934,19 +897,12 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool } assert(last_i == 3); } - toku_ftnode_free(&dn); - for (int i = 0; i < sn.n_children-1; ++i) { - toku_free(sn.childkeys[i].data); - } - for (int i = 0; i < sn.n_children; i++) { - destroy_basement_node(BLB(&sn, i)); - } - toku_free(sn.bp); - toku_free(sn.childkeys); + toku_ftnode_free(&dn); + toku_destroy_ftnode_internals(&sn); - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); toku_free(ft_h->h); toku_free(ft_h); toku_free(ft); @@ -966,7 +922,7 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b sn.max_msn_applied_to_node_on_disk.msn = 0; sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 0; @@ -974,11 +930,11 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b sn.dirty = 1; sn.oldest_referenced_xid_known = TXNID_NONE; MALLOC_N(sn.n_children, sn.bp); - MALLOC_N(sn.n_children-1, sn.childkeys); - toku_memdup_dbt(&sn.childkeys[0], "A", 2); - toku_memdup_dbt(&sn.childkeys[1], "A", 2); - toku_memdup_dbt(&sn.childkeys[2], "A", 2); - sn.totalchildkeylens = (sn.n_children-1)*2; + DBT pivotkeys[3]; + toku_fill_dbt(&pivotkeys[0], "A", 2); + toku_fill_dbt(&pivotkeys[1], "A", 2); + toku_fill_dbt(&pivotkeys[2], "A", 2); + sn.pivotkeys.create_from_dbts(pivotkeys, 3); for (int i = 0; i < sn.n_children; ++i) { BP_STATE(&sn,i) = PT_AVAIL; set_BLB(&sn, i, toku_create_empty_bn()); @@ -996,23 +952,23 @@ 
test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b 16); ft->ft = ft_h; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == (DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } @@ -1022,7 +978,7 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b setup_dn(bft, fd, ft_h, &dn, &dest_ndd); - assert(dn->thisnodename.b==20); + assert(dn->blocknum.b==20); assert(dn->layout_version ==FT_LAYOUT_VERSION); assert(dn->layout_version_original ==FT_LAYOUT_VERSION); @@ -1031,7 +987,6 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b assert(dn->n_children == 1); { const uint32_t npartitions = dn->n_children; - assert(dn->totalchildkeylens==(2*(npartitions-1))); for (uint32_t i = 0; i < npartitions; ++i) { assert(dest_ndd[i].start > 0); assert(dest_ndd[i].size > 0); @@ -1041,19 +996,12 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b assert(BLB_DATA(dn, i)->num_klpairs() == 0); } } + toku_ftnode_free(&dn); + toku_destroy_ftnode_internals(&sn); - for (int i = 0; i < sn.n_children-1; ++i) { - toku_free(sn.childkeys[i].data); - } - for (int i = 0; i < sn.n_children; i++) { - destroy_basement_node(BLB(&sn, i)); - } - toku_free(sn.bp); - toku_free(sn.childkeys); - - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); toku_free(ft_h->h); toku_free(ft_h); toku_free(ft); @@ -1076,7 +1024,7 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) { // source_ft.fd=fd; sn.max_msn_applied_to_node_on_disk.msn = 0; sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 1; @@ -1084,9 +1032,8 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) { sn.dirty = 1; sn.oldest_referenced_xid_known = TXNID_NONE; MALLOC_N(2, sn.bp); - MALLOC_N(1, sn.childkeys); - toku_memdup_dbt(&sn.childkeys[0], "hello", 6); - sn.totalchildkeylens = 6; + DBT pivotkey; + sn.pivotkeys.create_from_dbts(toku_fill_dbt(&pivotkey, "hello", 6), 1); BP_BLOCKNUM(&sn, 0).b = 30; BP_BLOCKNUM(&sn, 1).b = 35; BP_STATE(&sn,0) = PT_AVAIL; @@ -1094,21 +1041,26 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) { set_BNC(&sn, 0, toku_create_empty_nl()); set_BNC(&sn, 1, toku_create_empty_nl()); //Create XIDS - XIDS xids_0 = xids_get_root_xids(); + XIDS xids_0 = toku_xids_get_root_xids(); XIDS xids_123; XIDS xids_234; - r = 
xids_create_child(xids_0, &xids_123, (TXNID)123); + r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123); CKERR(r); - r = xids_create_child(xids_123, &xids_234, (TXNID)234); + r = toku_xids_create_child(xids_123, &xids_234, (TXNID)234); CKERR(r); - toku_bnc_insert_msg(BNC(&sn, 0), "a", 2, "aval", 5, FT_NONE, next_dummymsn(), xids_0, true, NULL, string_key_cmp); - toku_bnc_insert_msg(BNC(&sn, 0), "b", 2, "bval", 5, FT_NONE, next_dummymsn(), xids_123, false, NULL, string_key_cmp); - toku_bnc_insert_msg(BNC(&sn, 1), "x", 2, "xval", 5, FT_NONE, next_dummymsn(), xids_234, true, NULL, string_key_cmp); + toku::comparator cmp; + cmp.create(string_key_cmp, nullptr); + + toku_bnc_insert_msg(BNC(&sn, 0), "a", 2, "aval", 5, FT_NONE, next_dummymsn(), xids_0, true, cmp); + toku_bnc_insert_msg(BNC(&sn, 0), "b", 2, "bval", 5, FT_NONE, next_dummymsn(), xids_123, false, cmp); + toku_bnc_insert_msg(BNC(&sn, 1), "x", 2, "xval", 5, FT_NONE, next_dummymsn(), xids_234, true, cmp); + //Cleanup: - xids_destroy(&xids_0); - xids_destroy(&xids_123); - xids_destroy(&xids_234); + toku_xids_destroy(&xids_0); + toku_xids_destroy(&xids_123); + toku_xids_destroy(&xids_234); + cmp.destroy(); FT_HANDLE XMALLOC(ft); FT XCALLOC(ft_h); @@ -1120,25 +1072,26 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) { 128*1024, TOKU_DEFAULT_COMPRESSION_METHOD, 16); + ft_h->cmp.create(string_key_cmp, nullptr); ft->ft = ft_h; - toku_blocktable_create_new(&ft_h->blocktable); + ft_h->blocktable.create(); { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } //Want to use block #20 BLOCKNUM b = make_blocknum(0); while (b.b < 20) { - toku_allocate_blocknum(ft_h->blocktable, &b, ft_h); + ft_h->blocktable.allocate_blocknum(&b, ft_h); } assert(b.b == 20); { DISKOFF offset; DISKOFF size; - toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false); - assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.realloc_on_disk(b, 100, &offset, ft_h, fd, false, 0); + assert(offset==(DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size); - assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.translate_blocknum_to_offset_size(b, &offset, &size); + assert(offset == (DISKOFF)block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(size == 100); } FTNODE_DISK_DATA src_ndd = NULL; @@ -1147,37 +1100,32 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) { setup_dn(bft, fd, ft_h, &dn, &dest_ndd); - assert(dn->thisnodename.b==20); + assert(dn->blocknum.b==20); assert(dn->layout_version ==FT_LAYOUT_VERSION); assert(dn->layout_version_original ==FT_LAYOUT_VERSION); assert(dn->layout_version_read_from_disk ==FT_LAYOUT_VERSION); assert(dn->height == 1); assert(dn->n_children==2); - assert(strcmp((char*)dn->childkeys[0].data, "hello")==0); - assert(dn->childkeys[0].size==6); - assert(dn->totalchildkeylens==6); + assert(strcmp((char*)dn->pivotkeys.get_pivot(0).data, "hello")==0); + assert(dn->pivotkeys.get_pivot(0).size==6); assert(BP_BLOCKNUM(dn,0).b==30); assert(BP_BLOCKNUM(dn,1).b==35); - FIFO src_fifo_1 = BNC(&sn, 0)->buffer; - FIFO src_fifo_2 = BNC(&sn, 1)->buffer; - FIFO dest_fifo_1 = BNC(dn, 0)->buffer; - FIFO dest_fifo_2 = BNC(dn, 1)->buffer; + message_buffer *src_msg_buffer1 = &BNC(&sn, 0)->msg_buffer; + message_buffer *src_msg_buffer2 = &BNC(&sn, 1)->msg_buffer; + message_buffer *dest_msg_buffer1 = &BNC(dn, 0)->msg_buffer; + message_buffer *dest_msg_buffer2 = 
&BNC(dn, 1)->msg_buffer; - assert(toku_are_fifos_same(src_fifo_1, dest_fifo_1)); - assert(toku_are_fifos_same(src_fifo_2, dest_fifo_2)); + assert(src_msg_buffer1->equals(dest_msg_buffer1)); + assert(src_msg_buffer2->equals(dest_msg_buffer2)); toku_ftnode_free(&dn); + toku_destroy_ftnode_internals(&sn); - toku_free(sn.childkeys[0].data); - destroy_nonleaf_childinfo(BNC(&sn, 0)); - destroy_nonleaf_childinfo(BNC(&sn, 1)); - toku_free(sn.bp); - toku_free(sn.childkeys); - - toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); - toku_blocktable_destroy(&ft_h->blocktable); + ft_h->blocktable.block_free(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); + ft_h->blocktable.destroy(); + ft_h->cmp.destroy(); toku_free(ft_h->h); toku_free(ft_h); toku_free(ft); diff --git a/storage/tokudb/ft-index/ft/tests/ft-test-cursor-2.cc b/storage/tokudb/ft-index/ft/tests/ft-test-cursor-2.cc index 8421e5eb39f..6e38884d571 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-test-cursor-2.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-test-cursor-2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -96,7 +96,7 @@ static const char *fname = TOKU_TEST_FILENAME; static TOKUTXN const null_txn = 0; static int -save_data (ITEMLEN UU(keylen), bytevec UU(key), ITEMLEN vallen, bytevec val, void *v, bool lock_only) { +save_data (uint32_t UU(keylen), const void *UU(key), uint32_t vallen, const void *val, void *v, bool lock_only) { if (lock_only) return 0; assert(key!=NULL); void **CAST_FROM_VOIDP(vp, v); @@ -106,7 +106,7 @@ save_data (ITEMLEN UU(keylen), bytevec UU(key), ITEMLEN vallen, bytevec val, voi // Verify that different cursors return different data items when a DBT is initialized to all zeros (no flags) -// Note: The BRT test used to implement DBTs with per-cursor allocated space, but there isn't any such thing any more +// Note: The ft test used to implement DBTs with per-cursor allocated space, but there isn't any such thing any more // so this test is a little bit obsolete. static void test_multiple_ft_cursor_dbts(int n) { if (verbose) printf("test_multiple_ft_cursors:%d\n", n); @@ -118,7 +118,7 @@ static void test_multiple_ft_cursor_dbts(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); diff --git a/storage/tokudb/ft-index/ft/tests/ft-test-cursor.cc b/storage/tokudb/ft-index/ft/tests/ft-test-cursor.cc index 4132ae968ed..3e7aa6875e0 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-test-cursor.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-test-cursor.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -94,7 +94,6 @@ PATENT RIGHTS GRANT: static const char *fname = TOKU_TEST_FILENAME; static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static int test_cursor_debug = 0; @@ -170,7 +169,7 @@ static void test_ft_cursor_first(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -208,7 +207,7 @@ static void test_ft_cursor_last(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -247,7 +246,7 @@ static void test_ft_cursor_first_last(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -289,7 +288,7 @@ static void test_ft_cursor_rfirst(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -353,7 +352,7 @@ static void test_ft_cursor_walk(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -415,7 +414,7 @@ static void test_ft_cursor_rwalk(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -443,7 +442,7 @@ static void test_ft_cursor_rwalk(int n) { } static int -ascending_key_string_checkf (ITEMLEN keylen, bytevec key, ITEMLEN UU(vallen), bytevec UU(val), void *v, bool lock_only) +ascending_key_string_checkf (uint32_t keylen, const void *key, uint32_t UU(vallen), const void *UU(val), void *v, bool lock_only) // the keys are strings. Verify that they keylen matches the key, that the keys are ascending. Use (char**)v to hold a // malloc'd previous string. 
{ @@ -496,7 +495,7 @@ static void test_ft_cursor_rand(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -546,7 +545,7 @@ static void test_ft_cursor_split(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -617,7 +616,7 @@ static void test_multiple_ft_cursors(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -663,7 +662,7 @@ static void test_multiple_ft_cursor_walk(int n) { int nodesize = 1<<12; int h = log16(n); int cachesize = 2 * h * ncursors * nodesize; - toku_cachetable_create(&ct, cachesize, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, cachesize, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -736,7 +735,7 @@ static void test_ft_cursor_set(int n, int cursor_op) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -804,7 +803,7 @@ static void test_ft_cursor_set_range(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); @@ -864,7 +863,7 @@ static void test_ft_cursor_delete(int n) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(error == 0); diff --git a/storage/tokudb/ft-index/ft/tests/ft-test-header.cc b/storage/tokudb/ft-index/ft/tests/ft-test-header.cc index cf3a9838860..18da9502b48 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-test-header.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-test-header.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -104,7 +104,7 @@ static void test_header (void) { const char *fname = TOKU_TEST_FILENAME; // First create dictionary - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -122,7 +122,7 @@ static void test_header (void) { toku_cachetable_close(&ct); // Now read dictionary back into memory and examine some header fields - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 0, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); diff --git a/storage/tokudb/ft-index/ft/tests/ft-test.cc b/storage/tokudb/ft-index/ft/tests/ft-test.cc index 7bd77595954..e9981c92572 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-test.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,7 +92,6 @@ PATENT RIGHTS GRANT: #include "test.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static const char *fname = TOKU_TEST_FILENAME; @@ -101,7 +100,7 @@ static void test_dump_empty_db (void) { CACHETABLE ct; int r; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -124,7 +123,7 @@ static void test_multiple_files_of_size (int size) { toku_os_recursive_delete(TOKU_TEST_FILENAME); r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(n0, 1, &t0, size, size / 4, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_open_ft_handle(n1, 1, &t1, size, size / 4, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); for (i=0; i<10000; i++) { @@ -148,7 +147,7 @@ static void test_multiple_files_of_size (int size) { /* Now see if the data is all there. 
*/ - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(n0, 0, &t0, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); if (verbose) printf("%s:%d r=%d\n", __FILE__, __LINE__,r); assert(r==0); @@ -184,7 +183,7 @@ static void test_multiple_ft_handles_one_db_one_file (void) { if (verbose) printf("test_multiple_ft_handles_one_db_one_file:"); unlink(fname); - toku_cachetable_create(&ct, 32, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 32, ZERO_LSN, nullptr); for (i=0; i<MANYN; i++) { r = toku_open_ft_handle(fname, (i==0), &trees[i], 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -223,7 +222,7 @@ static void test_read_what_was_written (void) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); r = toku_close_ft_handle_nolsn(ft, 0); assert(r==0); toku_cachetable_close(&ct); @@ -231,7 +230,7 @@ static void test_read_what_was_written (void) { /* Now see if we can read an empty tree in. */ - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 0, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); /* See if we can put something in it. */ @@ -246,7 +245,7 @@ static void test_read_what_was_written (void) { /* Now see if we can read it in and get the value. */ - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 0, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); ft_lookup_and_check_nodup(ft, "hello", "there"); @@ -308,7 +307,7 @@ static void test_read_what_was_written (void) { - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 0, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); ft_lookup_and_check_nodup(ft, "hello", "there"); @@ -341,7 +340,7 @@ static void test_cursor_last_empty(void) { unlink(fname); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); @@ -375,7 +374,7 @@ static void test_cursor_next (void) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items(); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, toku_get_n_items_malloced()); 
toku_print_malloced_items(); @@ -451,7 +450,7 @@ static void test_wrongendian_compare (int wrong_p, unsigned int N) { assert(wrong_compare_fun(NULL, toku_fill_dbt(&at, b, 4), toku_fill_dbt(&bt, a, 4))<0); } - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); //printf("%s:%d WRONG=%d\n", __FILE__, __LINE__, wrong_p); if (0) { // ???? Why is this commented out? @@ -548,7 +547,7 @@ static void test_large_kv(int bsize, int ksize, int vsize) { if (verbose) printf("test_large_kv: %d %d %d\n", bsize, ksize, vsize); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, bsize, bsize / 4, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -592,7 +591,7 @@ static void test_ft_delete_empty(void) { int r; CACHETABLE ct; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, 4096, 1024, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -618,7 +617,7 @@ static void test_ft_delete_present(int n) { CACHETABLE ct; int i; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, 4096, 1024, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -681,7 +680,7 @@ static void test_ft_delete_not_present(int n) { CACHETABLE ct; int i; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, 4096, 1024, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -725,7 +724,7 @@ static void test_ft_delete_cursor_first(int n) { CACHETABLE ct; int i; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, 4096, 1024, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -820,7 +819,7 @@ static void test_insert_delete_lookup(int n) { CACHETABLE ct; int i; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, 4096, 1024, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -907,7 +906,7 @@ static void test_new_ft_cursor_first(int n) { CACHETABLE ct; int i; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); toku_ft_handle_create(&t); toku_ft_handle_set_nodesize(t, 4096); @@ -959,7 +958,7 @@ static void test_new_ft_cursor_last(int n) { CACHETABLE ct; int i; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); toku_ft_handle_create(&t); toku_ft_handle_set_nodesize(t, 4096); @@ -1012,7 +1011,7 @@ static void test_new_ft_cursor_next(int n) { CACHETABLE ct; int i; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); toku_ft_handle_create(&t); toku_ft_handle_set_nodesize(t, 4096); @@ -1055,7 +1054,7 @@ static void test_new_ft_cursor_prev(int n) { CACHETABLE ct; int i; - toku_cachetable_create(&ct, 0, 
ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); toku_ft_handle_create(&t); toku_ft_handle_set_nodesize(t, 4096); @@ -1098,7 +1097,7 @@ static void test_new_ft_cursor_current(int n) { CACHETABLE ct; int i; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); toku_ft_handle_create(&t); toku_ft_handle_set_nodesize(t, 4096); @@ -1180,7 +1179,7 @@ static void test_new_ft_cursor_set_range(int n) { FT_HANDLE ft=0; FT_CURSOR cursor=0; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); toku_ft_handle_create(&ft); toku_ft_handle_set_nodesize(ft, 4096); @@ -1241,7 +1240,7 @@ static void test_new_ft_cursor_set(int n, int cursor_op, DB *db) { unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); diff --git a/storage/tokudb/ft-index/ft/tests/ft-test0.cc b/storage/tokudb/ft-index/ft/tests/ft-test0.cc index 01b79fc0ff4..719bfd35588 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-test0.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-test0.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,7 +92,6 @@ PATENT RIGHTS GRANT: #include "test.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static void test0 (void) { FT_HANDLE t; @@ -101,7 +100,7 @@ static void test0 (void) { const char *fname = TOKU_TEST_FILENAME; if (verbose) printf("%s:%d test0\n", __FILE__, __LINE__); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); if (verbose) printf("%s:%d test0\n", __FILE__, __LINE__); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); diff --git a/storage/tokudb/ft-index/ft/tests/ft-test1.cc b/storage/tokudb/ft-index/ft/tests/ft-test1.cc index 9fd22431fa8..5ab4b779403 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-test1.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-test1.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,7 +92,6 @@ PATENT RIGHTS GRANT: #include "test.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static void test1 (void) { FT_HANDLE t; @@ -101,7 +100,7 @@ static void test1 (void) { const char *fname = TOKU_TEST_FILENAME; DBT k,v; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); diff --git a/storage/tokudb/ft-index/ft/tests/ft-test2.cc b/storage/tokudb/ft-index/ft/tests/ft-test2.cc index 981b2933ff4..ccd94a74ade 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-test2.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-test2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. 
Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,7 +92,6 @@ PATENT RIGHTS GRANT: #include "test.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static void test2 (int limit) { FT_HANDLE t; @@ -102,7 +101,7 @@ static void test2 (int limit) { const char *fname = TOKU_TEST_FILENAME; if (verbose) printf("%s:%d checking\n", __FILE__, __LINE__); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); if (verbose) printf("%s:%d did setup\n", __FILE__, __LINE__); diff --git a/storage/tokudb/ft-index/ft/tests/ft-test3.cc b/storage/tokudb/ft-index/ft/tests/ft-test3.cc index 3049114a74b..4b57eb8e323 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-test3.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-test3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -97,7 +97,6 @@ static const char *fname = TOKU_TEST_FILENAME; static const enum toku_compression_method compression_method = TOKU_DEFAULT_COMPRESSION_METHOD; static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static void test3 (int nodesize, int basementnodesize, int count) { FT_HANDLE t; @@ -106,7 +105,7 @@ static void test3 (int nodesize, int basementnodesize, int count) { int i; CACHETABLE ct; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); gettimeofday(&t0, 0); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun); diff --git a/storage/tokudb/ft-index/ft/tests/ft-test4.cc b/storage/tokudb/ft-index/ft/tests/ft-test4.cc index 4752d85ad74..945d05f0a17 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-test4.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-test4.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,6 @@ PATENT RIGHTS GRANT: static const char *fname = TOKU_TEST_FILENAME; static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static void test4 (int nodesize, int count) { FT_HANDLE t; @@ -106,7 +105,7 @@ static void test4 (int nodesize, int count) { gettimeofday(&t0, 0); unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &t, nodesize, nodesize / 8, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); for (i=0; i<count; i++) { char key[100],val[100]; diff --git a/storage/tokudb/ft-index/ft/tests/ft-test5.cc b/storage/tokudb/ft-index/ft/tests/ft-test5.cc index dddcd54af66..8144754b12d 100644 --- a/storage/tokudb/ft-index/ft/tests/ft-test5.cc +++ b/storage/tokudb/ft-index/ft/tests/ft-test5.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -92,7 +92,6 @@ PATENT RIGHTS GRANT: #include "test.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static void test5 (void) { int r; @@ -106,7 +105,7 @@ static void test5 (void) { MALLOC_N(limit,values); for (i=0; i<limit; i++) values[i]=-1; unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &t, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); for (i=0; i<limit/2; i++) { char key[100],val[100]; @@ -128,7 +127,7 @@ static void test5 (void) { if (i%1000==0 && verbose) { printf("r"); fflush(stdout); } snprintf(key, 100, "key%d", rk); snprintf(valexpected, 100, "val%d", values[rk]); - struct check_pair pair = {(ITEMLEN) (1+strlen(key)), key, (ITEMLEN) (1+strlen(valexpected)), valexpected, 0}; + struct check_pair pair = {(uint32_t) (1+strlen(key)), key, (uint32_t) (1+strlen(valexpected)), valexpected, 0}; r = toku_ft_lookup(t, toku_fill_dbt(&k, key, 1+strlen(key)), lookup_checkf, &pair); assert(r==0); assert(pair.call_count==1); diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-error-injector.h b/storage/tokudb/ft-index/ft/tests/ftloader-error-injector.h index 656e8a3dfe1..d0d4cc87c88 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-error-injector.h +++ b/storage/tokudb/ft-index/ft/tests/ftloader-error-injector.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef FTLOADER_ERROR_INJECTOR_H -#define FTLOADER_ERROR_INJECTOR_H - #include <portability/toku_atomic.h> static toku_mutex_t event_mutex = TOKU_MUTEX_INITIALIZER; @@ -224,5 +223,3 @@ static void *my_realloc(void *p, size_t n) { } return realloc(p, n); } - -#endif diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-bad-generate.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-bad-generate.cc index 1ecae89da78..3cc574b759a 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-bad-generate.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-bad-generate.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -94,8 +94,8 @@ PATENT RIGHTS GRANT: #define DONT_DEPRECATE_MALLOC #define DONT_DEPRECATE_WRITES #include "test.h" -#include "ftloader.h" -#include "ftloader-internal.h" +#include "loader/loader.h" +#include "loader/loader-internal.h" #include "ftloader-error-injector.h" #include "memory.h" #include <portability/toku_path.h> @@ -170,7 +170,7 @@ static void test_extractor(int nrows, int nrowsets, bool expect_fail) { } FTLOADER loader; - r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false); + r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false, true); assert(r == 0); struct rowset *rowset[nrowsets]; @@ -183,7 +183,7 @@ static void test_extractor(int nrows, int nrowsets, bool expect_fail) { // feed rowsets to the extractor for (int i = 0; i < nrowsets; i++) { - r = queue_enq(loader->primary_rowset_queue, rowset[i], 1, NULL); + r = toku_queue_enq(loader->primary_rowset_queue, rowset[i], 1, NULL); assert(r == 0); } diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor-errors.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor-errors.cc index 4dcd7fb2f8c..4418e475bac 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor-errors.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor-errors.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,8 +95,8 @@ PATENT RIGHTS GRANT: #define DONT_DEPRECATE_MALLOC #define DONT_DEPRECATE_WRITES #include "test.h" -#include "ftloader.h" -#include "ftloader-internal.h" +#include "loader/loader.h" +#include "loader/loader-internal.h" #include "ftloader-error-injector.h" #include "memory.h" #include <portability/toku_path.h> @@ -180,7 +180,7 @@ static void test_extractor(int nrows, int nrowsets, bool expect_fail, const char sprintf(temp, "%s/%s", testdir, "tempXXXXXX"); FTLOADER loader; - r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false); + r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, nullptr, true, 0, false, true); assert(r == 0); struct rowset *rowset[nrowsets]; @@ -201,7 +201,7 @@ static void test_extractor(int nrows, int nrowsets, bool expect_fail, const char // feed rowsets to the extractor for (int i = 0; i < nrowsets; i++) { - r = queue_enq(loader->primary_rowset_queue, rowset[i], 1, NULL); + r = toku_queue_enq(loader->primary_rowset_queue, rowset[i], 1, NULL); assert(r == 0); } diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor.cc index 0a8ce157269..b20dd2fb2c3 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-extractor.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -95,8 +95,8 @@ PATENT RIGHTS GRANT: #define DONT_DEPRECATE_MALLOC #define DONT_DEPRECATE_WRITES #include "test.h" -#include "ftloader.h" -#include "ftloader-internal.h" +#include "loader/loader.h" +#include "loader/loader-internal.h" #include "memory.h" #include <portability/toku_path.h> @@ -402,7 +402,7 @@ static void test_extractor(int nrows, int nrowsets, const char *testdir) { sprintf(temp, "%s/%s", testdir, "tempXXXXXX"); FTLOADER loader; - r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, temp, ZERO_LSN, nullptr, true, 0, false); + r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, fts, dbs, fnames, compares, temp, ZERO_LSN, nullptr, true, 0, false, true); assert(r == 0); struct rowset *rowset[nrowsets]; @@ -415,7 +415,7 @@ static void test_extractor(int nrows, int nrowsets, const char *testdir) { // feed rowsets to the extractor for (int i = 0; i < nrowsets; i++) { - r = queue_enq(loader->primary_rowset_queue, rowset[i], 1, NULL); + r = toku_queue_enq(loader->primary_rowset_queue, rowset[i], 1, NULL); assert(r == 0); } r = toku_ft_loader_finish_extractor(loader); diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-merge-files-dbufio.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-merge-files-dbufio.cc index 82583595470..d450bd00923 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-merge-files-dbufio.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-merge-files-dbufio.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,7 @@ PATENT RIGHTS GRANT: #define DONT_DEPRECATE_MALLOC #include "test.h" -#include "ftloader-internal.h" +#include "loader/loader-internal.h" #include <portability/toku_path.h> static int event_count, event_count_trigger; @@ -346,7 +346,7 @@ static void *consumer_thread (void *ctv) { struct consumer_thunk *cthunk = (struct consumer_thunk *)ctv; while (1) { void *item; - int r = queue_deq(cthunk->q, &item, NULL, NULL); + int r = toku_queue_deq(cthunk->q, &item, NULL, NULL); if (r==EOF) return NULL; assert(r==0); struct rowset *rowset = (struct rowset *)item; @@ -412,7 +412,7 @@ static void test (const char *directory, bool is_error) { bt_compare_functions, "tempxxxxxx", *lsnp, - nullptr, true, 0, false); + nullptr, true, 0, false, true); assert(r==0); } @@ -423,7 +423,7 @@ static void test (const char *directory, bool is_error) { ft_loader_set_fractal_workers_count_from_c(bl); QUEUE q; - { int r = queue_create(&q, 1000); assert(r==0); } + { int r = toku_queue_create(&q, 1000); assert(r==0); } DBUFIO_FILESET bfs; const int MERGE_BUF_SIZE = 100000; // bigger than 64K so that we will trigger malloc issues. 
{ int r = create_dbufio_fileset(&bfs, N_SOURCES, fds, MERGE_BUF_SIZE, false); assert(r==0); } @@ -474,7 +474,7 @@ static void test (const char *directory, bool is_error) { panic_dbufio_fileset(bfs, r); } { - int r = queue_eof(q); + int r = toku_queue_eof(q); assert(r==0); } @@ -500,13 +500,8 @@ static void test (const char *directory, bool is_error) { assert(cthunk.n_read == N_RECORDS); } } - //printf("%s:%d Destroying\n", __FILE__, __LINE__); { - int r = queue_destroy(bl->primary_rowset_queue); - assert(r==0); - } - { - int r = queue_destroy(q); + int r = toku_queue_destroy(q); assert(r==0); } toku_ft_loader_internal_destroy(bl, false); diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-open.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-open.cc index f2919f04d3d..5c4b689a824 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-open.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-open.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,8 +94,8 @@ PATENT RIGHTS GRANT: #define DONT_DEPRECATE_MALLOC #include "test.h" -#include "ftloader.h" -#include "ftloader-internal.h" +#include "loader/loader.h" +#include "loader/loader-internal.h" #include "memory.h" #include <portability/toku_path.h> @@ -143,7 +143,7 @@ static void test_loader_open(int ndbs) { for (i = 0; ; i++) { set_my_malloc_trigger(i+1); - r = toku_ft_loader_open(&loader, NULL, NULL, NULL, ndbs, fts, dbs, fnames, compares, "", ZERO_LSN, nullptr, true, 0, false); + r = toku_ft_loader_open(&loader, NULL, NULL, NULL, ndbs, fts, dbs, fnames, compares, "", ZERO_LSN, nullptr, true, 0, false, true); if (r == 0) break; } diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-vm.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-vm.cc index d9a0566144d..9dd7ffec027 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-vm.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-vm.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -90,7 +90,7 @@ PATENT RIGHTS GRANT: #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #include "test.h" -#include "cachetable.h" +#include "cachetable/cachetable.h" #include <inttypes.h> /* Test for #2755. The ft_loader is using too much VM. */ diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-writer-errors.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-writer-errors.cc index 7767cee00e0..d2669aee72a 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-writer-errors.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-writer-errors.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -95,7 +95,7 @@ PATENT RIGHTS GRANT: #define DONT_DEPRECATE_MALLOC #include "test.h" -#include "ftloader-internal.h" +#include "loader/loader-internal.h" #include "ftloader-error-injector.h" #include <portability/toku_path.h> @@ -159,20 +159,20 @@ static int write_dbfile (char *tf_template, int n, char *output_name, bool expec ft_loader_fi_close_all(&bl.file_infos); QUEUE q; - r = queue_create(&q, 0xFFFFFFFF); // infinite queue. + r = toku_queue_create(&q, 0xFFFFFFFF); // infinite queue. assert(r==0); r = merge_files(&fs, &bl, 0, dest_db, compare_ints, 0, q); CKERR(r); assert(fs.n_temp_files==0); QUEUE q2; - r = queue_create(&q2, 0xFFFFFFFF); // infinite queue. + r = toku_queue_create(&q2, 0xFFFFFFFF); // infinite queue. assert(r==0); size_t num_found = 0; size_t found_size_est = 0; while (1) { void *v; - r = queue_deq(q, &v, NULL, NULL); + r = toku_queue_deq(q, &v, NULL, NULL); if (r==EOF) break; struct rowset *rs = (struct rowset *)v; if (verbose) printf("v=%p\n", v); @@ -187,16 +187,16 @@ static int write_dbfile (char *tf_template, int n, char *output_name, bool expec num_found += rs->n_rows; - r = queue_enq(q2, v, 0, NULL); + r = toku_queue_enq(q2, v, 0, NULL); assert(r==0); } assert((int)num_found == n); if (!expect_error) assert(found_size_est == size_est); - r = queue_eof(q2); + r = toku_queue_eof(q2); assert(r==0); - r = queue_destroy(q); + r = toku_queue_destroy(q); assert(r==0); DESCRIPTOR_S desc; @@ -225,7 +225,7 @@ static int write_dbfile (char *tf_template, int n, char *output_name, bool expec ft_loader_destroy_poll_callback(&bl.poll_callback); ft_loader_lock_destroy(&bl); - r = queue_destroy(q2); + r = toku_queue_destroy(q2); assert(r==0); destroy_merge_fileset(&fs); diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test-writer.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test-writer.cc index bf0641a3939..e57b800adad 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test-writer.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test-writer.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -93,7 +93,7 @@ PATENT RIGHTS GRANT: #include "test.h" -#include "ftloader-internal.h" +#include "loader/loader-internal.h" #include <inttypes.h> #include <portability/toku_path.h> @@ -129,7 +129,7 @@ static void verify_dbfile(int n, const char *name) { int r; CACHETABLE ct; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); TOKUTXN const null_txn = NULL; FT_HANDLE t = NULL; @@ -215,20 +215,20 @@ static void test_write_dbfile (char *tf_template, int n, char *output_name, TXNI ft_loader_fi_close_all(&bl.file_infos); QUEUE q; - r = queue_create(&q, 0xFFFFFFFF); // infinite queue. + r = toku_queue_create(&q, 0xFFFFFFFF); // infinite queue. assert(r==0); r = merge_files(&fs, &bl, 0, dest_db, compare_ints, 0, q); CKERR(r); assert(fs.n_temp_files==0); QUEUE q2; - r = queue_create(&q2, 0xFFFFFFFF); // infinite queue. + r = toku_queue_create(&q2, 0xFFFFFFFF); // infinite queue. 
assert(r==0); size_t num_found = 0; size_t found_size_est = 0; while (1) { void *v; - r = queue_deq(q, &v, NULL, NULL); + r = toku_queue_deq(q, &v, NULL, NULL); if (r==EOF) break; struct rowset *rs = (struct rowset *)v; if (verbose) printf("v=%p\n", v); @@ -243,16 +243,16 @@ static void test_write_dbfile (char *tf_template, int n, char *output_name, TXNI num_found += rs->n_rows; - r = queue_enq(q2, v, 0, NULL); + r = toku_queue_enq(q2, v, 0, NULL); assert(r==0); } assert((int)num_found == n); assert(found_size_est == size_est); - r = queue_eof(q2); + r = toku_queue_eof(q2); assert(r==0); - r = queue_destroy(q); + r = toku_queue_destroy(q); assert(r==0); DESCRIPTOR_S desc; @@ -265,7 +265,7 @@ static void test_write_dbfile (char *tf_template, int n, char *output_name, TXNI r = toku_loader_write_ft_from_q_in_C(&bl, &desc, fd, 1000, q2, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16); assert(r==0); - r = queue_destroy(q2); + r = toku_queue_destroy(q2); assert_zero(r); destroy_merge_fileset(&fs); diff --git a/storage/tokudb/ft-index/ft/tests/ftloader-test.cc b/storage/tokudb/ft-index/ft/tests/ftloader-test.cc index 343262720a8..46271eeb451 100644 --- a/storage/tokudb/ft-index/ft/tests/ftloader-test.cc +++ b/storage/tokudb/ft-index/ft/tests/ftloader-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,7 +94,7 @@ PATENT RIGHTS GRANT: #include <string.h> #include <stdio.h> #include <unistd.h> -#include "ftloader-internal.h" +#include "loader/loader-internal.h" #include "memory.h" #include <portability/toku_path.h> @@ -186,7 +186,7 @@ static void test_merge_internal (int a[], int na, int b[], int nb, bool dups) { static void test_merge (void) { { int avals[]={1,2,3,4,5}; - int *bvals = NULL; //icc won't let us use a zero-sized array explicitly or by [] = {} construction. + int *bvals = NULL; test_merge_internal(avals, 5, bvals, 0, false); test_merge_internal(bvals, 0, avals, 5, false); } @@ -336,7 +336,7 @@ static void verify_dbfile(int n, int sorted_keys[], const char *sorted_vals[], c int r; CACHETABLE ct; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); TOKUTXN const null_txn = NULL; FT_HANDLE t = NULL; @@ -350,7 +350,7 @@ static void verify_dbfile(int n, int sorted_keys[], const char *sorted_vals[], c size_t userdata = 0; int i; for (i=0; i<n; i++) { - struct check_pair pair = {sizeof sorted_keys[i], &sorted_keys[i], (ITEMLEN) strlen(sorted_vals[i]), sorted_vals[i], 0}; + struct check_pair pair = {sizeof sorted_keys[i], &sorted_keys[i], (uint32_t) strlen(sorted_vals[i]), sorted_vals[i], 0}; r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_NEXT); if (r != 0) { assert(pair.call_count ==0); @@ -412,7 +412,7 @@ static void test_merge_files (const char *tf_template, const char *output_name) ft_loader_fi_close_all(&bl.file_infos); QUEUE q; - r = queue_create(&q, 0xFFFFFFFF); // infinite queue. + r = toku_queue_create(&q, 0xFFFFFFFF); // infinite queue. 
assert(r==0); r = merge_files(&fs, &bl, 0, dest_db, compare_ints, 0, q); CKERR(r); @@ -436,7 +436,7 @@ static void test_merge_files (const char *tf_template, const char *output_name) // verify the dbfile verify_dbfile(10, sorted_keys, sorted_vals, output_name); - r = queue_destroy(q); + r = toku_queue_destroy(q); assert(r==0); } diff --git a/storage/tokudb/ft-index/portability/rdtsc.h b/storage/tokudb/ft-index/ft/tests/generate-upgrade-recovery-logs.cc index e70f636e169..b221e6a4fc5 100644 --- a/storage/tokudb/ft-index/portability/rdtsc.h +++ b/storage/tokudb/ft-index/ft/tests/generate-upgrade-recovery-logs.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,40 +88,64 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -// read the processor time stamp register -#if defined __ICC +// Generate a recovery log with a checkpoint and an optional shutdown log entry. +// These logs will be used later to test recovery. -#define USE_RDTSC 1 -#define rdtsc _rdtsc +#include "test.h" -#elif defined __i386__ +static void generate_recovery_log(const char *testdir, bool do_shutdown) { + int r; -#define USE_RDTSC 1 + // setup the test dir + toku_os_recursive_delete(testdir); + r = toku_os_mkdir(testdir, S_IRWXU); + CKERR(r); -static inline unsigned long long rdtsc(void) { - unsigned long hi, lo; - __asm__ __volatile__ ("rdtsc\n" - "movl %%edx,%0\n" - "movl %%eax,%1" : "=r"(hi), "=r"(lo) : : "edx", "eax"); - return ((unsigned long long) hi << 32ULL) + (unsigned long long) lo; -} + // open the log + TOKULOGGER logger; + r = toku_logger_create(&logger); + CKERR(r); + r = toku_logger_open(testdir, logger); + CKERR(r); -#elif defined __x86_64__ + // log checkpoint + LSN beginlsn; + toku_log_begin_checkpoint(logger, &beginlsn, false, 0, 0); + toku_log_end_checkpoint(logger, nullptr, false, beginlsn, 0, 0, 0); -#define USE_RDTSC 1 + // log shutdown + if (do_shutdown) { + toku_log_shutdown(logger, nullptr, true, 0, 0); + } -static inline unsigned long long rdtsc(void) { - unsigned long long r; - __asm__ __volatile__ ("rdtsc\n" - "shl $32,%%rdx\n" - "or %%rdx,%%rax\n" - "movq %%rax,%0" : "=r"(r) : : "edx", "eax", "rdx", "rax"); - return r; + r = toku_logger_close(&logger); + CKERR(r); } -#else - -#define USE_RDTSC 0 - -#endif +int test_main(int argc, const char *argv[]) { + bool do_shutdown = true; + for (int i = 1; i < argc; i++) { + if (strcmp(argv[i], "-v") == 0) { + verbose++; + continue; + } + if (strcmp(argv[i], "-q") == 0) { + if (verbose > 0) + verbose--; + continue; + } + if (strcmp(argv[i], "--clean") == 0) { + do_shutdown = true; + continue; + } + if (strcmp(argv[i], "--dirty") == 0) { + do_shutdown = false; + continue; + } + } + char testdir[256]; + sprintf(testdir, "upgrade-recovery-logs-%d-%s", TOKU_LOG_VERSION, do_shutdown ? 
"clean" : "dirty"); + generate_recovery_log(testdir, do_shutdown); + return 0; +} diff --git a/storage/tokudb/ft-index/ft/tests/is_empty.cc b/storage/tokudb/ft-index/ft/tests/is_empty.cc index 40dfd6bb5e5..b415343fba1 100644 --- a/storage/tokudb/ft-index/ft/tests/is_empty.cc +++ b/storage/tokudb/ft-index/ft/tests/is_empty.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,7 +92,7 @@ PATENT RIGHTS GRANT: #include "test.h" #include "toku_os.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" #define FILENAME "test0.ft" diff --git a/storage/tokudb/ft-index/ft/tests/keyrange.cc b/storage/tokudb/ft-index/ft/tests/keyrange.cc index 67651ae4f21..6c191adf890 100644 --- a/storage/tokudb/ft-index/ft/tests/keyrange.cc +++ b/storage/tokudb/ft-index/ft/tests/keyrange.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -96,7 +96,6 @@ PATENT RIGHTS GRANT: #include <unistd.h> static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static const char *fname = TOKU_TEST_FILENAME; static CACHETABLE ct; @@ -111,7 +110,7 @@ static void close_ft_and_ct (void) { static void open_ft_and_ct (bool unlink_old) { int r; if (unlink_old) unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &t, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); } diff --git a/storage/tokudb/ft-index/ft/tests/keytest.cc b/storage/tokudb/ft-index/ft/tests/keytest.cc index 93896a819b4..bde2a4ca9ca 100644 --- a/storage/tokudb/ft-index/ft/tests/keytest.cc +++ b/storage/tokudb/ft-index/ft/tests/keytest.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,11 +88,12 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." + #include "test.h" -#include "key.h" +#include "ft.h" -void -toku_test_keycompare (void) { +static void +test_keycompare (void) { assert(toku_keycompare("a",1, "a",1)==0); assert(toku_keycompare("aa",2, "a",1)>0); assert(toku_keycompare("a",1, "aa",2)<0); @@ -109,7 +110,7 @@ int test_main (int argc , const char *argv[]) { default_parse_args(argc, argv); - toku_test_keycompare(); + test_keycompare(); if (verbose) printf("test ok\n"); return 0; } diff --git a/storage/tokudb/ft-index/ft/tests/le-cursor-provdel.cc b/storage/tokudb/ft-index/ft/tests/le-cursor-provdel.cc index d22b0f130e6..33729527ca1 100644 --- a/storage/tokudb/ft-index/ft/tests/le-cursor-provdel.cc +++ b/storage/tokudb/ft-index/ft/tests/le-cursor-provdel.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -91,15 +91,13 @@ PATENT RIGHTS GRANT: // test the LE_CURSOR next function with provisionally deleted rows -#include "checkpoint.h" +#include "cachetable/checkpoint.h" #include "le-cursor.h" #include "test.h" -static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static int -get_next_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen UU(), bytevec val UU(), void *extra, bool lock_only) { +get_next_callback(uint32_t keylen, const void *key, uint32_t vallen UU(), const void *val UU(), void *extra, bool lock_only) { DBT *CAST_FROM_VOIDP(key_dbt, extra); if (!lock_only) { toku_dbt_set(keylen, key, key_dbt, NULL); diff --git a/storage/tokudb/ft-index/ft/tests/le-cursor-right.cc b/storage/tokudb/ft-index/ft/tests/le-cursor-right.cc index 050a278098f..1c6ac9eab5c 100644 --- a/storage/tokudb/ft-index/ft/tests/le-cursor-right.cc +++ b/storage/tokudb/ft-index/ft/tests/le-cursor-right.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,15 +94,14 @@ PATENT RIGHTS GRANT: // - LE_CURSOR somewhere else -#include "checkpoint.h" +#include "cachetable/checkpoint.h" #include "le-cursor.h" #include "test.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static int -get_next_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen UU(), bytevec val UU(), void *extra, bool lock_only) { +get_next_callback(uint32_t keylen, const void *key, uint32_t vallen UU(), const void *val UU(), void *extra, bool lock_only) { DBT *CAST_FROM_VOIDP(key_dbt, extra); if (!lock_only) { toku_dbt_set(keylen, key, key_dbt, NULL); @@ -196,7 +195,7 @@ test_pos_infinity(const char *fname, int n) { int error; CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); FT_HANDLE ft = NULL; error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare); @@ -230,7 +229,7 @@ test_neg_infinity(const char *fname, int n) { int error; CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); FT_HANDLE ft = NULL; error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare); @@ -284,7 +283,7 @@ test_between(const char *fname, int n) { int error; CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); FT_HANDLE ft = NULL; error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_keycompare); diff --git a/storage/tokudb/ft-index/ft/tests/le-cursor-walk.cc b/storage/tokudb/ft-index/ft/tests/le-cursor-walk.cc index 0cebb50a768..af26228ddfe 100644 --- a/storage/tokudb/ft-index/ft/tests/le-cursor-walk.cc +++ b/storage/tokudb/ft-index/ft/tests/le-cursor-walk.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -91,16 +91,15 @@ PATENT RIGHTS GRANT: // test the LE_CURSOR next function -#include "checkpoint.h" +#include "cachetable/checkpoint.h" #include "le-cursor.h" #include "test.h" #include <unistd.h> static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static int -get_next_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen UU(), bytevec val UU(), void *extra, bool lock_only) { +get_next_callback(uint32_t keylen, const void *key, uint32_t vallen UU(), const void *val UU(), void *extra, bool lock_only) { DBT *CAST_FROM_VOIDP(key_dbt, extra); if (!lock_only) { toku_dbt_set(keylen, key, key_dbt, NULL); @@ -192,7 +191,7 @@ walk_tree(const char *fname, int n) { int error; CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); FT_HANDLE ft = NULL; error = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); diff --git a/storage/tokudb/ft-index/ft/tests/list-test.cc b/storage/tokudb/ft-index/ft/tests/list-test.cc index 0922b831e1f..c7286048e24 100644 --- a/storage/tokudb/ft-index/ft/tests/list-test.cc +++ b/storage/tokudb/ft-index/ft/tests/list-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/log-test-maybe-trim.cc b/storage/tokudb/ft-index/ft/tests/log-test-maybe-trim.cc index 6f2398eead4..2e12fa81579 100644 --- a/storage/tokudb/ft-index/ft/tests/log-test-maybe-trim.cc +++ b/storage/tokudb/ft-index/ft/tests/log-test-maybe-trim.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: // verify that the log file trimmer does not delete the log file containing the // begin checkpoint when the checkpoint log entries span multiple log files. -#include "logcursor.h" +#include "logger/logcursor.h" #include "test.h" int diff --git a/storage/tokudb/ft-index/ft/tests/log-test.cc b/storage/tokudb/ft-index/ft/tests/log-test.cc index 7b7005ccb2c..c21e43641d5 100644 --- a/storage/tokudb/ft-index/ft/tests/log-test.cc +++ b/storage/tokudb/ft-index/ft/tests/log-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/log-test2.cc b/storage/tokudb/ft-index/ft/tests/log-test2.cc index d5fb7bfe102..eafdd26d08b 100644 --- a/storage/tokudb/ft-index/ft/tests/log-test2.cc +++ b/storage/tokudb/ft-index/ft/tests/log-test2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/log-test3.cc b/storage/tokudb/ft-index/ft/tests/log-test3.cc index 69443377751..e9bb72fdfb0 100644 --- a/storage/tokudb/ft-index/ft/tests/log-test3.cc +++ b/storage/tokudb/ft-index/ft/tests/log-test3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. 
Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/log-test4.cc b/storage/tokudb/ft-index/ft/tests/log-test4.cc index 8a0d230983e..c4e92fe2fcc 100644 --- a/storage/tokudb/ft-index/ft/tests/log-test4.cc +++ b/storage/tokudb/ft-index/ft/tests/log-test4.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/log-test5.cc b/storage/tokudb/ft-index/ft/tests/log-test5.cc index c6ad061aa11..bbe24640d7e 100644 --- a/storage/tokudb/ft-index/ft/tests/log-test5.cc +++ b/storage/tokudb/ft-index/ft/tests/log-test5.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/log-test6.cc b/storage/tokudb/ft-index/ft/tests/log-test6.cc index 8e07365a967..5e2ff52ea75 100644 --- a/storage/tokudb/ft-index/ft/tests/log-test6.cc +++ b/storage/tokudb/ft-index/ft/tests/log-test6.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/log-test7.cc b/storage/tokudb/ft-index/ft/tests/log-test7.cc index a07e6775818..d6ac8bd688f 100644 --- a/storage/tokudb/ft-index/ft/tests/log-test7.cc +++ b/storage/tokudb/ft-index/ft/tests/log-test7.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/logcursor-bad-checksum.cc b/storage/tokudb/ft-index/ft/tests/logcursor-bad-checksum.cc index 74a9ec27bf5..d9dc5ce04df 100644 --- a/storage/tokudb/ft-index/ft/tests/logcursor-bad-checksum.cc +++ b/storage/tokudb/ft-index/ft/tests/logcursor-bad-checksum.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,7 +88,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007, 2008 Tokutek Inc. All rights reserved." -#include "logcursor.h" +#include "logger/logcursor.h" #include "test.h" // log a couple of timestamp entries and verify the log by walking diff --git a/storage/tokudb/ft-index/ft/tests/logcursor-bw.cc b/storage/tokudb/ft-index/ft/tests/logcursor-bw.cc index 00fc757e836..3d5a0d32f62 100644 --- a/storage/tokudb/ft-index/ft/tests/logcursor-bw.cc +++ b/storage/tokudb/ft-index/ft/tests/logcursor-bw.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/logcursor-empty-logdir.cc b/storage/tokudb/ft-index/ft/tests/logcursor-empty-logdir.cc index a4822f14811..cdce3881605 100644 --- a/storage/tokudb/ft-index/ft/tests/logcursor-empty-logdir.cc +++ b/storage/tokudb/ft-index/ft/tests/logcursor-empty-logdir.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,7 +88,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007, 2008 Tokutek Inc. All rights reserved." -#include "logcursor.h" +#include "logger/logcursor.h" #include "test.h" // a logcursor in an empty directory should not find any log entries diff --git a/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile-2.cc b/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile-2.cc index 5bf7269cfc9..665623788af 100644 --- a/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile-2.cc +++ b/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile-2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,7 +88,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007, 2008 Tokutek Inc. All rights reserved." -#include "logcursor.h" +#include "logger/logcursor.h" #include "test.h" const int N = 2; diff --git a/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile-3.cc b/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile-3.cc index 85cce4e7bcd..12bf4ba4859 100644 --- a/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile-3.cc +++ b/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile-3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,7 +88,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007, 2008 Tokutek Inc. All rights reserved." -#include "logcursor.h" +#include "logger/logcursor.h" #include "test.h" const int N = 2; diff --git a/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile.cc b/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile.cc index 7b6de69b061..0cc4bd34285 100644 --- a/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile.cc +++ b/storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,7 +88,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007, 2008 Tokutek Inc. All rights reserved." -#include "logcursor.h" +#include "logger/logcursor.h" #include "test.h" const int N = 2; diff --git a/storage/tokudb/ft-index/ft/tests/logcursor-fw.cc b/storage/tokudb/ft-index/ft/tests/logcursor-fw.cc index 51fa24b1078..9f0510c6118 100644 --- a/storage/tokudb/ft-index/ft/tests/logcursor-fw.cc +++ b/storage/tokudb/ft-index/ft/tests/logcursor-fw.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/logcursor-print.cc b/storage/tokudb/ft-index/ft/tests/logcursor-print.cc index 957a7d18494..902dc494a66 100644 --- a/storage/tokudb/ft-index/ft/tests/logcursor-print.cc +++ b/storage/tokudb/ft-index/ft/tests/logcursor-print.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,7 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. 
All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #include "test.h" -#include "logcursor.h" +#include "logger/logcursor.h" int test_main(int argc, const char *argv[]) { int r; diff --git a/storage/tokudb/ft-index/ft/tests/logcursor-timestamp.cc b/storage/tokudb/ft-index/ft/tests/logcursor-timestamp.cc index b79bd199e8f..a329cb49d3d 100644 --- a/storage/tokudb/ft-index/ft/tests/logcursor-timestamp.cc +++ b/storage/tokudb/ft-index/ft/tests/logcursor-timestamp.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,7 +88,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007, 2008 Tokutek Inc. All rights reserved." -#include "logcursor.h" +#include "logger/logcursor.h" #include "test.h" static uint64_t now(void) { diff --git a/storage/tokudb/ft-index/ft/tests/logfilemgr-create-destroy.cc b/storage/tokudb/ft-index/ft/tests/logfilemgr-create-destroy.cc index 2ec8071cfed..c7a06d90d41 100644 --- a/storage/tokudb/ft-index/ft/tests/logfilemgr-create-destroy.cc +++ b/storage/tokudb/ft-index/ft/tests/logfilemgr-create-destroy.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,8 +88,9 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "test.h" -#include "logfilemgr.h" + +#include "ft/tests/test.h" +#include "ft/logger/logfilemgr.h" int test_main(int argc __attribute__((unused)), const char *argv[] __attribute__((unused))) { int r; diff --git a/storage/tokudb/ft-index/ft/tests/logfilemgr-print.cc b/storage/tokudb/ft-index/ft/tests/logfilemgr-print.cc index 883d7bf0131..6a50cd3f091 100644 --- a/storage/tokudb/ft-index/ft/tests/logfilemgr-print.cc +++ b/storage/tokudb/ft-index/ft/tests/logfilemgr-print.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,8 +88,8 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#include "test.h" -#include "logfilemgr.h" +#include "ft/tests/test.h" +#include "ft/logger/logfilemgr.h" int test_main(int argc __attribute__((unused)), const char *argv[] __attribute__((unused))) { int r; diff --git a/storage/tokudb/ft-index/ft/tests/make-tree.cc b/storage/tokudb/ft-index/ft/tests/make-tree.cc index a8a04b7a6a4..51a4b66618a 100644 --- a/storage/tokudb/ft-index/ft/tests/make-tree.cc +++ b/storage/tokudb/ft-index/ft/tests/make-tree.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -125,8 +125,8 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) // apply an insert to the leaf node txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); - FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u = {.id = { &thekey, &theval }} }; - toku_ft_bn_apply_msg_once(BLB(leafnode,0), &msg, idx, NULL, &gc_info, NULL, NULL); + ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids()); + toku_ft_bn_apply_msg_once(BLB(leafnode,0), msg, idx, keylen, NULL, &gc_info, NULL, NULL); leafnode->max_msn_applied_to_node_on_disk = msn; @@ -152,7 +152,7 @@ insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, in unsigned int key = htonl(val); DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key); DBT theval; toku_fill_dbt(&theval, &val, sizeof val); - toku_ft_append_to_child_buffer(ft->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval); + toku_ft_append_to_child_buffer(ft->ft->cmp, node, childnum, FT_INSERT, msn, toku_xids_get_root_xids(), true, &thekey, &theval); node->max_msn_applied_to_node_on_disk = msn; } } @@ -209,7 +209,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { // create a cachetable CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); // create the ft TOKUTXN null_txn = NULL; @@ -222,7 +222,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey); // set the new root to point to the new tree - toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename); + toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum); ft->ft->h->max_msn_in_ft = last_dummymsn(); // capture msn of last message injected into tree diff --git a/storage/tokudb/ft-index/ft/tests/mempool-115.cc b/storage/tokudb/ft-index/ft/tests/mempool-115.cc index 3b9280364e8..11960aa4ef2 100644 --- a/storage/tokudb/ft-index/ft/tests/mempool-115.cc +++ b/storage/tokudb/ft-index/ft/tests/mempool-115.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -123,6 +123,7 @@ le_overwrite(bn_data* bn, uint32_t idx, const char *key, int keysize, const cha idx, key, keysize, + keysize, // old_keylen size_needed, // old_le_size size_needed, &r, @@ -148,7 +149,7 @@ public: // just copy this code from a previous test // don't care what it does, just want to get a node up and running sn.flags = 0x11223344; - sn.thisnodename.b = 20; + sn.blocknum.b = 20; sn.layout_version = FT_LAYOUT_VERSION; sn.layout_version_original = FT_LAYOUT_VERSION; sn.height = 0; @@ -156,9 +157,8 @@ public: sn.dirty = 1; sn.oldest_referenced_xid_known = TXNID_NONE; MALLOC_N(sn.n_children, sn.bp); - MALLOC_N(1, sn.childkeys); - toku_memdup_dbt(&sn.childkeys[0], "b", 2); - sn.totalchildkeylens = 2; + DBT pivotkey; + sn.pivotkeys.create_from_dbts(toku_fill_dbt(&pivotkey, "b", 2), 1); BP_STATE(&sn,0) = PT_AVAIL; BP_STATE(&sn,1) = PT_AVAIL; set_BLB(&sn, 0, toku_create_empty_bn()); @@ -167,8 +167,6 @@ public: le_add_to_bn(BLB_DATA(&sn, 0), 1, "b", 2, "bval", 5); le_add_to_bn(BLB_DATA(&sn, 1), 0, "x", 2, "xval", 5); - - // now this is the test. If I keep getting space for overwrite // like crazy, it should expose the bug bn_data* bnd = BLB_DATA(&sn, 0); @@ -186,15 +184,7 @@ public: // on. It may be that some algorithm has changed. assert(new_size < 5*old_size); - - for (int i = 0; i < sn.n_children-1; ++i) { - toku_free(sn.childkeys[i].data); - } - for (int i = 0; i < sn.n_children; i++) { - destroy_basement_node(BLB(&sn, i)); - } - toku_free(sn.bp); - toku_free(sn.childkeys); + toku_destroy_ftnode_internals(&sn); } }; diff --git a/storage/tokudb/ft-index/ft/tests/msnfilter.cc b/storage/tokudb/ft-index/ft/tests/msnfilter.cc index 9881f4bb1a4..41615028168 100644 --- a/storage/tokudb/ft-index/ft/tests/msnfilter.cc +++ b/storage/tokudb/ft-index/ft/tests/msnfilter.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
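The mempool-115.cc hunks above illustrate the node-construction changes: the hand-built node's block number field is now blocknum rather than thisnodename, pivot keys are owned by a pivotkeys member instead of a manually allocated childkeys array, and the per-child teardown loops collapse into toku_destroy_ftnode_internals(). A condensed sketch taken from the + lines (sn is the FTNODE the test builds by hand):

sn.blocknum.b = 20;                       // was sn.thisnodename.b

// One pivot key "b"; this replaces MALLOC_N(childkeys) + toku_memdup_dbt + totalchildkeylens.
DBT pivotkey;
sn.pivotkeys.create_from_dbts(toku_fill_dbt(&pivotkey, "b", 2), 1);

// ... run the overwrite loop ...

toku_destroy_ftnode_internals(&sn);       // replaces the old per-child free loops (childkeys, basement nodes, bp)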
DISCLAIMER: @@ -131,18 +131,18 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val // apply an insert to the leaf node MSN msn = next_dummymsn(); ft->ft->h->max_msn_in_ft = msn; - FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} }; + ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids()); txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); - toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &msg, &gc_info, nullptr, nullptr); + toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, msg, &gc_info, nullptr, nullptr); { int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair); assert(r==0); assert(pair.call_count==1); } - FT_MSG_S badmsg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval }} }; - toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &badmsg, &gc_info, nullptr, nullptr); + ft_msg badmsg(&thekey, &badval, FT_INSERT, msn, toku_xids_get_root_xids()); + toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, badmsg, &gc_info, nullptr, nullptr); // message should be rejected for duplicate msn, row should still have original val { @@ -154,8 +154,8 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val // now verify that message with proper msn gets through msn = next_dummymsn(); ft->ft->h->max_msn_in_ft = msn; - FT_MSG_S msg2 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &val2 }} }; - toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &msg2, &gc_info, nullptr, nullptr); + ft_msg msg2(&thekey, &val2, FT_INSERT, msn, toku_xids_get_root_xids()); + toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, msg2, &gc_info, nullptr, nullptr); // message should be accepted, val should have new value { @@ -166,8 +166,8 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val // now verify that message with lesser (older) msn is rejected msn.msn = msn.msn - 10; - FT_MSG_S msg3 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval } }}; - toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &msg3, &gc_info, nullptr, nullptr); + ft_msg msg3(&thekey, &badval, FT_INSERT, msn, toku_xids_get_root_xids()); + toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, msg3, &gc_info, nullptr, nullptr); // message should be rejected, val should still have value in pair2 { @@ -202,7 +202,7 @@ test_msnfilter(int do_verify) { // create a cachetable CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); // create the ft TOKUTXN null_txn = NULL; @@ -213,7 +213,7 @@ test_msnfilter(int do_verify) { FTNODE newroot = make_node(ft, 0); // set the new root to point to the new tree - toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename); + toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum); // KLUDGE: Unpin the new root so toku_ft_lookup() can pin it. (Pin lock is no longer a recursive // mutex.) 
Just leaving it unpinned for this test program works because it is the only diff --git a/storage/tokudb/ft-index/ft/tests/orthopush-flush.cc b/storage/tokudb/ft-index/ft/tests/orthopush-flush.cc index 7c198a3cb46..749729838e3 100644 --- a/storage/tokudb/ft-index/ft/tests/orthopush-flush.cc +++ b/storage/tokudb/ft-index/ft/tests/orthopush-flush.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,25 +94,9 @@ PATENT RIGHTS GRANT: #include "ule.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static const char *fname = TOKU_TEST_FILENAME; static txn_gc_info non_mvcc_gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); - -static int dummy_cmp(DB *db __attribute__((unused)), - const DBT *a, const DBT *b) { - int c; - if (a->size > b->size) { - c = memcmp(a->data, b->data, b->size); - } else if (a->size < b->size) { - c = memcmp(a->data, b->data, a->size); - } else { - return memcmp(a->data, b->data, a->size); - } - if (c == 0) { - c = a->size - b->size; - } - return c; -} +static toku::comparator dummy_cmp; // generate size random bytes into dest static void @@ -148,7 +132,7 @@ rand_bytes_limited(void *dest, int size) // generate a random message with xids and a key starting with pfx, insert // it in bnc, and save it in output params save and is_fresh_out static void -insert_random_message(NONLEAF_CHILDINFO bnc, FT_MSG_S **save, bool *is_fresh_out, XIDS xids, int pfx) +insert_random_message(NONLEAF_CHILDINFO bnc, ft_msg **save, bool *is_fresh_out, XIDS xids, int pfx) { int keylen = (random() % 128) + 16; int vallen = (random() % 128) + 16; @@ -160,23 +144,15 @@ insert_random_message(NONLEAF_CHILDINFO bnc, FT_MSG_S **save, bool *is_fresh_out MSN msn = next_dummymsn(); bool is_fresh = (random() & 0x100) == 0; - DBT *keydbt, *valdbt; - XMALLOC(keydbt); - XMALLOC(valdbt); - toku_fill_dbt(keydbt, key, keylen + (sizeof pfx)); - toku_fill_dbt(valdbt, val, vallen); - FT_MSG_S *XMALLOC(result); - result->type = FT_INSERT; - result->msn = msn; - result->xids = xids; - result->u.id.key = keydbt; - result->u.id.val = valdbt; - *save = result; + DBT keydbt, valdbt; + toku_fill_dbt(&keydbt, key, keylen + (sizeof pfx)); + toku_fill_dbt(&valdbt, val, vallen); + *save = new ft_msg(&keydbt, &valdbt, FT_INSERT, msn, xids); *is_fresh_out = is_fresh; toku_bnc_insert_msg(bnc, key, keylen + (sizeof pfx), val, vallen, FT_INSERT, msn, xids, is_fresh, - NULL, dummy_cmp); + dummy_cmp); } // generate a random message with xids and a key starting with pfx, insert @@ -209,17 +185,12 @@ insert_random_message_to_bn( valdbt = &valdbt_s; toku_fill_dbt(keydbt, key, (sizeof *pfxp) + keylen); toku_fill_dbt(valdbt, val, vallen); - FT_MSG_S msg; - msg.type = FT_INSERT; - msg.msn = msn; - msg.xids = xids; - msg.u.id.key = keydbt; - msg.u.id.val = valdbt; *keylenp = keydbt->size; *keyp = toku_xmemdup(keydbt->data, keydbt->size); + ft_msg msg(keydbt, valdbt, FT_INSERT, msn, xids); int64_t numbytes; - toku_le_apply_msg(&msg, NULL, NULL, 0, &non_mvcc_gc_info, save, &numbytes); - toku_ft_bn_apply_msg(t->ft->compare_fun, t->ft->update_fun, NULL, blb, &msg, &non_mvcc_gc_info, NULL, NULL); + toku_le_apply_msg(msg, NULL, NULL, 0, keydbt->size, &non_mvcc_gc_info, save, &numbytes); + toku_ft_bn_apply_msg(t->ft->cmp, t->ft->update_fun, blb, msg, &non_mvcc_gc_info, NULL, NULL); if (msn.msn > blb->max_msn_applied.msn) { blb->max_msn_applied = msn; } @@ -259,21 
+230,16 @@ insert_same_message_to_bns( valdbt = &valdbt_s; toku_fill_dbt(keydbt, key, (sizeof *pfxp) + keylen); toku_fill_dbt(valdbt, val, vallen); - FT_MSG_S msg; - msg.type = FT_INSERT; - msg.msn = msn; - msg.xids = xids; - msg.u.id.key = keydbt; - msg.u.id.val = valdbt; *keylenp = keydbt->size; *keyp = toku_xmemdup(keydbt->data, keydbt->size); + ft_msg msg(keydbt, valdbt, FT_INSERT, msn, xids); int64_t numbytes; - toku_le_apply_msg(&msg, NULL, NULL, 0, &non_mvcc_gc_info, save, &numbytes); - toku_ft_bn_apply_msg(t->ft->compare_fun, t->ft->update_fun, NULL, blb1, &msg, &non_mvcc_gc_info, NULL, NULL); + toku_le_apply_msg(msg, NULL, NULL, 0, keydbt->size, &non_mvcc_gc_info, save, &numbytes); + toku_ft_bn_apply_msg(t->ft->cmp, t->ft->update_fun, blb1, msg, &non_mvcc_gc_info, NULL, NULL); if (msn.msn > blb1->max_msn_applied.msn) { blb1->max_msn_applied = msn; } - toku_ft_bn_apply_msg(t->ft->compare_fun, t->ft->update_fun, NULL, blb2, &msg, &non_mvcc_gc_info, NULL, NULL); + toku_ft_bn_apply_msg(t->ft->cmp, t->ft->update_fun, blb2, msg, &non_mvcc_gc_info, NULL, NULL); if (msn.msn > blb2->max_msn_applied.msn) { blb2->max_msn_applied = msn; } @@ -300,7 +266,7 @@ orthopush_flush_update_fun(DB * UU(db), const DBT *UU(key), const DBT *UU(old_va // the update message will overwrite the value with something generated // here, and add one to the int pointed to by applied static void -insert_random_update_message(NONLEAF_CHILDINFO bnc, FT_MSG_S **save, bool is_fresh, XIDS xids, int pfx, int *applied, MSN *max_msn) +insert_random_update_message(NONLEAF_CHILDINFO bnc, ft_msg **save, bool is_fresh, XIDS xids, int pfx, int *applied, MSN *max_msn) { int keylen = (random() % 16) + 16; int vallen = (random() % 16) + 16; @@ -313,48 +279,38 @@ insert_random_update_message(NONLEAF_CHILDINFO bnc, FT_MSG_S **save, bool is_fre update_extra->num_applications = applied; MSN msn = next_dummymsn(); - DBT *keydbt, *valdbt; - XMALLOC(keydbt); - XMALLOC(valdbt); - toku_fill_dbt(keydbt, key, keylen + (sizeof pfx)); - toku_fill_dbt(valdbt, update_extra, sizeof *update_extra); - FT_MSG_S *XMALLOC(result); - result->type = FT_UPDATE; - result->msn = msn; - result->xids = xids; - result->u.id.key = keydbt; - result->u.id.val = valdbt; - *save = result; + DBT keydbt, valdbt; + toku_fill_dbt(&keydbt, key, keylen + (sizeof pfx)); + toku_fill_dbt(&valdbt, update_extra, sizeof *update_extra); + *save = new ft_msg(&keydbt, &valdbt, FT_UPDATE, msn, xids); toku_bnc_insert_msg(bnc, key, keylen + (sizeof pfx), update_extra, sizeof *update_extra, FT_UPDATE, msn, xids, is_fresh, - NULL, dummy_cmp); + dummy_cmp); if (msn.msn > max_msn->msn) { *max_msn = msn; } } -const int M = 1024 * 1024; - // flush from one internal node to another, where both only have one // buffer static void flush_to_internal(FT_HANDLE t) { int r; - FT_MSG_S **MALLOC_N(4096,parent_messages); // 128k / 32 = 4096 - FT_MSG_S **MALLOC_N(4096,child_messages); + ft_msg **MALLOC_N(4096,parent_messages); // 128k / 32 = 4096 + ft_msg **MALLOC_N(4096,child_messages); bool *MALLOC_N(4096,parent_messages_is_fresh); bool *MALLOC_N(4096,child_messages_is_fresh); memset(parent_messages_is_fresh, 0, 4096*(sizeof parent_messages_is_fresh[0])); memset(child_messages_is_fresh, 0, 4096*(sizeof child_messages_is_fresh[0])); - XIDS xids_0 = xids_get_root_xids(); + XIDS xids_0 = toku_xids_get_root_xids(); XIDS xids_123, xids_234; - r = xids_create_child(xids_0, &xids_123, (TXNID)123); + r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123); CKERR(r); - r = xids_create_child(xids_0, 
&xids_234, (TXNID)234); + r = toku_xids_create_child(xids_0, &xids_234, (TXNID)234); CKERR(r); NONLEAF_CHILDINFO child_bnc = toku_create_empty_nl(); @@ -384,41 +340,60 @@ flush_to_internal(FT_HANDLE t) { memset(parent_messages_present, 0, sizeof parent_messages_present); memset(child_messages_present, 0, sizeof child_messages_present); - FIFO_ITERATE(child_bnc->buffer, key, keylen, val, vallen, type, msn, xids, is_fresh, - { - DBT keydbt; - DBT valdbt; - toku_fill_dbt(&keydbt, key, keylen); - toku_fill_dbt(&valdbt, val, vallen); - int found = 0; - for (i = 0; i < num_parent_messages; ++i) { - if (dummy_cmp(NULL, &keydbt, parent_messages[i]->u.id.key) == 0 && - msn.msn == parent_messages[i]->msn.msn) { - assert(parent_messages_present[i] == 0); - assert(found == 0); - assert(dummy_cmp(NULL, &valdbt, parent_messages[i]->u.id.val) == 0); - assert(type == parent_messages[i]->type); - assert(xids_get_innermost_xid(xids) == xids_get_innermost_xid(parent_messages[i]->xids)); - assert(parent_messages_is_fresh[i] == is_fresh); - parent_messages_present[i]++; - found++; - } - } - for (i = 0; i < num_child_messages; ++i) { - if (dummy_cmp(NULL, &keydbt, child_messages[i]->u.id.key) == 0 && - msn.msn == child_messages[i]->msn.msn) { - assert(child_messages_present[i] == 0); - assert(found == 0); - assert(dummy_cmp(NULL, &valdbt, child_messages[i]->u.id.val) == 0); - assert(type == child_messages[i]->type); - assert(xids_get_innermost_xid(xids) == xids_get_innermost_xid(child_messages[i]->xids)); - assert(child_messages_is_fresh[i] == is_fresh); - child_messages_present[i]++; - found++; - } - } - assert(found == 1); - }); + struct checkit_fn { + int num_parent_messages; + ft_msg **parent_messages; + int *parent_messages_present; + bool *parent_messages_is_fresh; + int num_child_messages; + ft_msg **child_messages; + int *child_messages_present; + bool *child_messages_is_fresh; + checkit_fn(int np, ft_msg **pm, int *npp, bool *pmf, int nc, ft_msg **cm, int *ncp, bool *cmf) : + num_parent_messages(np), parent_messages(pm), parent_messages_present(npp), parent_messages_is_fresh(pmf), + num_child_messages(nc), child_messages(cm), child_messages_present(ncp), child_messages_is_fresh(cmf) { + } + int operator()(const ft_msg &msg, bool is_fresh) { + DBT keydbt; + DBT valdbt; + toku_fill_dbt(&keydbt, msg.kdbt()->data, msg.kdbt()->size); + toku_fill_dbt(&valdbt, msg.vdbt()->data, msg.vdbt()->size); + int found = 0; + MSN msn = msg.msn(); + enum ft_msg_type type = msg.type(); + XIDS xids = msg.xids(); + for (int k = 0; k < num_parent_messages; ++k) { + if (dummy_cmp(&keydbt, parent_messages[k]->kdbt()) == 0 && + msn.msn == parent_messages[k]->msn().msn) { + assert(parent_messages_present[k] == 0); + assert(found == 0); + assert(dummy_cmp(&valdbt, parent_messages[k]->vdbt()) == 0); + assert(type == parent_messages[k]->type()); + assert(toku_xids_get_innermost_xid(xids) == toku_xids_get_innermost_xid(parent_messages[k]->xids())); + assert(parent_messages_is_fresh[k] == is_fresh); + parent_messages_present[k]++; + found++; + } + } + for (int k = 0; k < num_child_messages; ++k) { + if (dummy_cmp(&keydbt, child_messages[k]->kdbt()) == 0 && + msn.msn == child_messages[k]->msn().msn) { + assert(child_messages_present[k] == 0); + assert(found == 0); + assert(dummy_cmp(&valdbt, child_messages[k]->vdbt()) == 0); + assert(type == child_messages[k]->type()); + assert(toku_xids_get_innermost_xid(xids) == toku_xids_get_innermost_xid(child_messages[k]->xids())); + assert(child_messages_is_fresh[k] == is_fresh); + 
child_messages_present[k]++; + found++; + } + } + assert(found == 1); + return 0; + } + } checkit(num_parent_messages, parent_messages, parent_messages_present, parent_messages_is_fresh, + num_child_messages, child_messages, child_messages_present, child_messages_is_fresh); + child_bnc->msg_buffer.iterate(checkit); for (i = 0; i < num_parent_messages; ++i) { assert(parent_messages_present[i] == 1); @@ -427,23 +402,19 @@ flush_to_internal(FT_HANDLE t) { assert(child_messages_present[i] == 1); } - xids_destroy(&xids_0); - xids_destroy(&xids_123); - xids_destroy(&xids_234); + toku_xids_destroy(&xids_0); + toku_xids_destroy(&xids_123); + toku_xids_destroy(&xids_234); for (i = 0; i < num_parent_messages; ++i) { - toku_free(parent_messages[i]->u.id.key->data); - toku_free((DBT *) parent_messages[i]->u.id.key); - toku_free(parent_messages[i]->u.id.val->data); - toku_free((DBT *) parent_messages[i]->u.id.val); - toku_free(parent_messages[i]); + toku_free(parent_messages[i]->kdbt()->data); + toku_free(parent_messages[i]->vdbt()->data); + delete parent_messages[i]; } for (i = 0; i < num_child_messages; ++i) { - toku_free(child_messages[i]->u.id.key->data); - toku_free((DBT *) child_messages[i]->u.id.key); - toku_free(child_messages[i]->u.id.val->data); - toku_free((DBT *) child_messages[i]->u.id.val); - toku_free(child_messages[i]); + toku_free(child_messages[i]->kdbt()->data); + toku_free(child_messages[i]->vdbt()->data); + delete child_messages[i]; } destroy_nonleaf_childinfo(parent_bnc); toku_ftnode_free(&child); @@ -458,22 +429,22 @@ static void flush_to_internal_multiple(FT_HANDLE t) { int r; - FT_MSG_S **MALLOC_N(4096,parent_messages); // 128k / 32 = 4096 - FT_MSG_S **MALLOC_N(4096,child_messages); + ft_msg **MALLOC_N(4096,parent_messages); // 128k / 32 = 4096 + ft_msg **MALLOC_N(4096,child_messages); bool *MALLOC_N(4096,parent_messages_is_fresh); bool *MALLOC_N(4096,child_messages_is_fresh); memset(parent_messages_is_fresh, 0, 4096*(sizeof parent_messages_is_fresh[0])); memset(child_messages_is_fresh, 0, 4096*(sizeof child_messages_is_fresh[0])); - XIDS xids_0 = xids_get_root_xids(); + XIDS xids_0 = toku_xids_get_root_xids(); XIDS xids_123, xids_234; - r = xids_create_child(xids_0, &xids_123, (TXNID)123); + r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123); CKERR(r); - r = xids_create_child(xids_0, &xids_234, (TXNID)234); + r = toku_xids_create_child(xids_0, &xids_234, (TXNID)234); CKERR(r); NONLEAF_CHILDINFO child_bncs[8]; - FT_MSG childkeys[7]; + ft_msg *childkeys[7]; int i; for (i = 0; i < 8; ++i) { child_bncs[i] = toku_create_empty_nl(); @@ -487,7 +458,7 @@ flush_to_internal_multiple(FT_HANDLE t) { insert_random_message(child_bncs[i%8], &child_messages[i], &child_messages_is_fresh[i], xids_123, i%8); total_size += toku_bnc_memory_used(child_bncs[i%8]); if (i % 8 < 7) { - if (childkeys[i%8] == NULL || dummy_cmp(NULL, child_messages[i]->u.id.key, childkeys[i%8]->u.id.key) > 0) { + if (childkeys[i%8] == NULL || dummy_cmp(child_messages[i]->kdbt(), childkeys[i%8]->kdbt()) > 0) { childkeys[i%8] = child_messages[i]; } } @@ -508,7 +479,7 @@ flush_to_internal_multiple(FT_HANDLE t) { set_BNC(child, i, child_bncs[i]); BP_STATE(child, i) = PT_AVAIL; if (i < 7) { - toku_clone_dbt(&child->childkeys[i], *childkeys[i]->u.id.key); + child->pivotkeys.insert_at(childkeys[i]->kdbt(), i); } } @@ -525,41 +496,60 @@ flush_to_internal_multiple(FT_HANDLE t) { memset(child_messages_present, 0, sizeof child_messages_present); for (int j = 0; j < 8; ++j) { - FIFO_ITERATE(child_bncs[j]->buffer, key, keylen, 
val, vallen, type, msn, xids, is_fresh, - { - DBT keydbt; - DBT valdbt; - toku_fill_dbt(&keydbt, key, keylen); - toku_fill_dbt(&valdbt, val, vallen); - int found = 0; - for (i = 0; i < num_parent_messages; ++i) { - if (dummy_cmp(NULL, &keydbt, parent_messages[i]->u.id.key) == 0 && - msn.msn == parent_messages[i]->msn.msn) { - assert(parent_messages_present[i] == 0); - assert(found == 0); - assert(dummy_cmp(NULL, &valdbt, parent_messages[i]->u.id.val) == 0); - assert(type == parent_messages[i]->type); - assert(xids_get_innermost_xid(xids) == xids_get_innermost_xid(parent_messages[i]->xids)); - assert(parent_messages_is_fresh[i] == is_fresh); - parent_messages_present[i]++; - found++; - } - } - for (i = 0; i < num_child_messages; ++i) { - if (dummy_cmp(NULL, &keydbt, child_messages[i]->u.id.key) == 0 && - msn.msn == child_messages[i]->msn.msn) { - assert(child_messages_present[i] == 0); - assert(found == 0); - assert(dummy_cmp(NULL, &valdbt, child_messages[i]->u.id.val) == 0); - assert(type == child_messages[i]->type); - assert(xids_get_innermost_xid(xids) == xids_get_innermost_xid(child_messages[i]->xids)); - assert(child_messages_is_fresh[i] == is_fresh); - child_messages_present[i]++; - found++; - } - } - assert(found == 1); - }); + struct checkit_fn { + int num_parent_messages; + ft_msg **parent_messages; + int *parent_messages_present; + bool *parent_messages_is_fresh; + int num_child_messages; + ft_msg **child_messages; + int *child_messages_present; + bool *child_messages_is_fresh; + checkit_fn(int np, ft_msg **pm, int *npp, bool *pmf, int nc, ft_msg **cm, int *ncp, bool *cmf) : + num_parent_messages(np), parent_messages(pm), parent_messages_present(npp), parent_messages_is_fresh(pmf), + num_child_messages(nc), child_messages(cm), child_messages_present(ncp), child_messages_is_fresh(cmf) { + } + int operator()(const ft_msg &msg, bool is_fresh) { + DBT keydbt; + DBT valdbt; + toku_fill_dbt(&keydbt, msg.kdbt()->data, msg.kdbt()->size); + toku_fill_dbt(&valdbt, msg.vdbt()->data, msg.vdbt()->size); + int found = 0; + MSN msn = msg.msn(); + enum ft_msg_type type = msg.type(); + XIDS xids = msg.xids(); + for (int _i = 0; _i < num_parent_messages; ++_i) { + if (dummy_cmp(&keydbt, parent_messages[_i]->kdbt()) == 0 && + msn.msn == parent_messages[_i]->msn().msn) { + assert(parent_messages_present[_i] == 0); + assert(found == 0); + assert(dummy_cmp(&valdbt, parent_messages[_i]->vdbt()) == 0); + assert(type == parent_messages[_i]->type()); + assert(toku_xids_get_innermost_xid(xids) == toku_xids_get_innermost_xid(parent_messages[_i]->xids())); + assert(parent_messages_is_fresh[_i] == is_fresh); + parent_messages_present[_i]++; + found++; + } + } + for (int _i = 0; _i < num_child_messages; ++_i) { + if (dummy_cmp(&keydbt, child_messages[_i]->kdbt()) == 0 && + msn.msn == child_messages[_i]->msn().msn) { + assert(child_messages_present[_i] == 0); + assert(found == 0); + assert(dummy_cmp(&valdbt, child_messages[_i]->vdbt()) == 0); + assert(type == child_messages[_i]->type()); + assert(toku_xids_get_innermost_xid(xids) == toku_xids_get_innermost_xid(child_messages[_i]->xids())); + assert(child_messages_is_fresh[_i] == is_fresh); + child_messages_present[_i]++; + found++; + } + } + assert(found == 1); + return 0; + } + } checkit(num_parent_messages, parent_messages, parent_messages_present, parent_messages_is_fresh, + num_child_messages, child_messages, child_messages_present, child_messages_is_fresh); + child_bncs[j]->msg_buffer.iterate(checkit); } for (i = 0; i < num_parent_messages; ++i) { @@ 
-569,23 +559,19 @@ flush_to_internal_multiple(FT_HANDLE t) { assert(child_messages_present[i] == 1); } - xids_destroy(&xids_0); - xids_destroy(&xids_123); - xids_destroy(&xids_234); + toku_xids_destroy(&xids_0); + toku_xids_destroy(&xids_123); + toku_xids_destroy(&xids_234); for (i = 0; i < num_parent_messages; ++i) { - toku_free(parent_messages[i]->u.id.key->data); - toku_free((DBT *) parent_messages[i]->u.id.key); - toku_free(parent_messages[i]->u.id.val->data); - toku_free((DBT *) parent_messages[i]->u.id.val); - toku_free(parent_messages[i]); + toku_free(parent_messages[i]->kdbt()->data); + toku_free(parent_messages[i]->vdbt()->data); + delete parent_messages[i]; } for (i = 0; i < num_child_messages; ++i) { - toku_free(child_messages[i]->u.id.key->data); - toku_free((DBT *) child_messages[i]->u.id.key); - toku_free(child_messages[i]->u.id.val->data); - toku_free((DBT *) child_messages[i]->u.id.val); - toku_free(child_messages[i]); + toku_free(child_messages[i]->kdbt()->data); + toku_free(child_messages[i]->vdbt()->data); + delete child_messages[i]; } destroy_nonleaf_childinfo(parent_bnc); toku_ftnode_free(&child); @@ -607,7 +593,7 @@ static void flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { int r; - FT_MSG_S **MALLOC_N(4096,parent_messages); // 128k / 32 = 4096 + ft_msg **MALLOC_N(4096,parent_messages); // 128k / 32 = 4096 LEAFENTRY* child_messages = NULL; XMALLOC_N(4096,child_messages); void** key_pointers = NULL; @@ -619,11 +605,11 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { int *MALLOC_N(4096,parent_messages_applied); memset(parent_messages_applied, 0, 4096*(sizeof parent_messages_applied[0])); - XIDS xids_0 = xids_get_root_xids(); + XIDS xids_0 = toku_xids_get_root_xids(); XIDS xids_123, xids_234; - r = xids_create_child(xids_0, &xids_123, (TXNID)123); + r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123); CKERR(r); - r = xids_create_child(xids_0, &xids_234, (TXNID)234); + r = toku_xids_create_child(xids_0, &xids_234, (TXNID)234); CKERR(r); BASEMENTNODE child_blbs[8]; @@ -653,7 +639,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { total_size += child_blbs[i%8]->data_buffer.get_memory_size(); if (i % 8 < 7) { DBT keydbt; - if (childkeys[i%8].size == 0 || dummy_cmp(NULL, toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) > 0) { + if (childkeys[i%8].size == 0 || dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) > 0) { toku_fill_dbt(&childkeys[i%8], key_pointers[i], keylens[i]); } } @@ -663,7 +649,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { for (i = 0; i < num_child_messages; ++i) { DBT keydbt; if (i % 8 < 7) { - assert(dummy_cmp(NULL, toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) <= 0); + assert(dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) <= 0); } } @@ -679,13 +665,13 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { int num_parent_messages = i; for (i = 0; i < 7; ++i) { - toku_clone_dbt(&child->childkeys[i], childkeys[i]); + child->pivotkeys.insert_at(&childkeys[i], i); } if (make_leaf_up_to_date) { for (i = 0; i < num_parent_messages; ++i) { if (!parent_messages_is_fresh[i]) { - toku_ft_leaf_apply_msg(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child, -1, parent_messages[i], &non_mvcc_gc_info, NULL, NULL); + toku_ft_leaf_apply_msg(t->ft->cmp, t->ft->update_fun, child, -1, *parent_messages[i], &non_mvcc_gc_info, 
NULL, NULL); } } for (i = 0; i < 8; ++i) { @@ -717,15 +703,16 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { BP_STATE(parentnode, 0) = PT_AVAIL; parentnode->max_msn_applied_to_node_on_disk = max_parent_msn; struct ancestors ancestors = { .node = parentnode, .childnum = 0, .next = NULL }; - const struct pivot_bounds infinite_bounds = { .lower_bound_exclusive = NULL, .upper_bound_inclusive = NULL }; bool msgs_applied; - toku_apply_ancestors_messages_to_node(t, child, &ancestors, &infinite_bounds, &msgs_applied, -1); + toku_apply_ancestors_messages_to_node(t, child, &ancestors, pivot_bounds::infinite_bounds(), &msgs_applied, -1); - FIFO_ITERATE(parent_bnc->buffer, key, keylen, val, vallen, type, msn, xids, is_fresh, - { - key = key; keylen = keylen; val = val; vallen = vallen; type = type; msn = msn; xids = xids; - assert(!is_fresh); - }); + struct checkit_fn { + int operator()(const ft_msg &UU(msg), bool is_fresh) { + assert(!is_fresh); + return 0; + } + } checkit; + parent_bnc->msg_buffer.iterate(checkit); invariant(parent_bnc->fresh_message_tree.size() + parent_bnc->stale_message_tree.size() == (uint32_t) num_parent_messages); @@ -763,10 +750,10 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { } int found = 0; for (i = num_parent_messages - 1; i >= 0; --i) { - if (dummy_cmp(NULL, &keydbt, parent_messages[i]->u.id.key) == 0) { + if (dummy_cmp(&keydbt, parent_messages[i]->kdbt()) == 0) { if (found == 0) { - struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(e, parent_messages[i]->u.id.val->data); - assert(dummy_cmp(NULL, &valdbt, &e->new_val) == 0); + struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(e, parent_messages[i]->vdbt()->data); + assert(dummy_cmp(&valdbt, &e->new_val) == 0); found++; } assert(parent_messages_present[i] == 0); @@ -782,9 +769,9 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { toku_fill_dbt(&childkeydbt, key_pointers[i], keylens[i]); toku_fill_dbt(&childvaldbt, valp, vallen); } - if (dummy_cmp(NULL, &keydbt, &childkeydbt) == 0) { + if (dummy_cmp(&keydbt, &childkeydbt) == 0) { if (found == 0) { - assert(dummy_cmp(NULL, &valdbt, &childvaldbt) == 0); + assert(dummy_cmp(&valdbt, &childvaldbt) == 0); found++; } assert(child_messages_present[i] == 0); @@ -801,18 +788,16 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { assert(child_messages_present[i] == 1); } - xids_destroy(&xids_0); - xids_destroy(&xids_123); - xids_destroy(&xids_234); + toku_xids_destroy(&xids_0); + toku_xids_destroy(&xids_123); + toku_xids_destroy(&xids_234); for (i = 0; i < num_parent_messages; ++i) { - toku_free(parent_messages[i]->u.id.key->data); - toku_free((DBT *) parent_messages[i]->u.id.key); - struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(extra, parent_messages[i]->u.id.val->data); + toku_free(parent_messages[i]->kdbt()->data); + struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(extra, parent_messages[i]->vdbt()->data); toku_free(extra->new_val.data); - toku_free(parent_messages[i]->u.id.val->data); - toku_free((DBT *) parent_messages[i]->u.id.val); - toku_free(parent_messages[i]); + toku_free(parent_messages[i]->vdbt()->data); + delete parent_messages[i]; } for (i = 0; i < num_child_messages; ++i) { toku_free(child_messages[i]); @@ -837,7 +822,7 @@ static void flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) { int r; - FT_MSG_S **MALLOC_N(4096,parent_messages); // 128k / 32 = 4k + ft_msg **MALLOC_N(4096,parent_messages); // 128k / 32 = 4k 
LEAFENTRY* child_messages = NULL; XMALLOC_N(4096,child_messages); void** key_pointers = NULL; @@ -849,11 +834,11 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) { int *MALLOC_N(4096,parent_messages_applied); memset(parent_messages_applied, 0, 4096*(sizeof parent_messages_applied[0])); - XIDS xids_0 = xids_get_root_xids(); + XIDS xids_0 = toku_xids_get_root_xids(); XIDS xids_123, xids_234; - r = xids_create_child(xids_0, &xids_123, (TXNID)123); + r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123); CKERR(r); - r = xids_create_child(xids_0, &xids_234, (TXNID)234); + r = toku_xids_create_child(xids_0, &xids_234, (TXNID)234); CKERR(r); BASEMENTNODE child_blbs[8]; @@ -879,7 +864,7 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) { insert_random_message_to_bn(t, child_blbs[i%8], &key_pointers[i], &keylens[i], &child_messages[i], xids_123, i%8); total_size += child_blbs[i%8]->data_buffer.get_memory_size(); DBT keydbt; - if (childkeys[i%8].size == 0 || dummy_cmp(NULL, toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) > 0) { + if (childkeys[i%8].size == 0 || dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) > 0) { toku_fill_dbt(&childkeys[i%8], key_pointers[i], keylens[i]); } } @@ -887,7 +872,7 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) { for (i = 0; i < num_child_messages; ++i) { DBT keydbt; - assert(dummy_cmp(NULL, toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) <= 0); + assert(dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &childkeys[i%8]) <= 0); } { @@ -902,14 +887,14 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) { int num_parent_messages = i; for (i = 0; i < 7; ++i) { - toku_clone_dbt(&child->childkeys[i], childkeys[i]); + child->pivotkeys.insert_at(&childkeys[i], i); } if (make_leaf_up_to_date) { for (i = 0; i < num_parent_messages; ++i) { - if (dummy_cmp(NULL, parent_messages[i]->u.id.key, &childkeys[7]) <= 0 && + if (dummy_cmp(parent_messages[i]->kdbt(), &childkeys[7]) <= 0 && !parent_messages_is_fresh[i]) { - toku_ft_leaf_apply_msg(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child, -1, parent_messages[i], &non_mvcc_gc_info, NULL, NULL); + toku_ft_leaf_apply_msg(t->ft->cmp, t->ft->update_fun, child, -1, *parent_messages[i], &non_mvcc_gc_info, NULL, NULL); } } for (i = 0; i < 8; ++i) { @@ -923,7 +908,7 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) { for (i = 0; i < num_parent_messages; ++i) { if (make_leaf_up_to_date && - dummy_cmp(NULL, parent_messages[i]->u.id.key, &childkeys[7]) <= 0 && + dummy_cmp(parent_messages[i]->kdbt(), &childkeys[7]) <= 0 && !parent_messages_is_fresh[i]) { assert(parent_messages_applied[i] == 1); } else { @@ -940,30 +925,39 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) { parentnode->max_msn_applied_to_node_on_disk = max_parent_msn; struct ancestors ancestors = { .node = parentnode, .childnum = 0, .next = NULL }; DBT lbe, ubi; - const struct pivot_bounds bounds = { - .lower_bound_exclusive = toku_init_dbt(&lbe), - .upper_bound_inclusive = toku_clone_dbt(&ubi, childkeys[7]) - }; + toku_init_dbt(&lbe); + toku_clone_dbt(&ubi, childkeys[7]); + const pivot_bounds bounds(lbe, ubi); bool msgs_applied; - toku_apply_ancestors_messages_to_node(t, child, &ancestors, &bounds, &msgs_applied, -1); - - FIFO_ITERATE(parent_bnc->buffer, key, keylen, val, vallen, type, msn, xids, is_fresh, - { - val = val; vallen = vallen; type = 
type; msn = msn; xids = xids; - DBT keydbt; - toku_fill_dbt(&keydbt, key, keylen); - if (dummy_cmp(NULL, &keydbt, &childkeys[7]) > 0) { - for (i = 0; i < num_parent_messages; ++i) { - if (dummy_cmp(NULL, &keydbt, parent_messages[i]->u.id.key) == 0 && - msn.msn == parent_messages[i]->msn.msn) { - assert(is_fresh == parent_messages_is_fresh[i]); - break; - } - } - } else { - assert(!is_fresh); - } - }); + toku_apply_ancestors_messages_to_node(t, child, &ancestors, bounds, &msgs_applied, -1); + + struct checkit_fn { + DBT *childkeys; + int num_parent_messages; + ft_msg **parent_messages; + bool *parent_messages_is_fresh; + checkit_fn(DBT *ck, int np, ft_msg **pm, bool *pmf) : + childkeys(ck), num_parent_messages(np), parent_messages(pm), parent_messages_is_fresh(pmf) { + } + int operator()(const ft_msg &msg, bool is_fresh) { + DBT keydbt; + toku_fill_dbt(&keydbt, msg.kdbt()->data, msg.kdbt()->size); + MSN msn = msg.msn(); + if (dummy_cmp(&keydbt, &childkeys[7]) > 0) { + for (int _i = 0; _i < num_parent_messages; ++_i) { + if (dummy_cmp(&keydbt, parent_messages[_i]->kdbt()) == 0 && + msn.msn == parent_messages[_i]->msn().msn) { + assert(is_fresh == parent_messages_is_fresh[_i]); + break; + } + } + } else { + assert(!is_fresh); + } + return 0; + } + } checkit(childkeys, num_parent_messages, parent_messages, parent_messages_is_fresh); + parent_bnc->msg_buffer.iterate(checkit); toku_ftnode_free(&parentnode); @@ -974,25 +968,23 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) { assert(total_messages <= num_parent_messages + num_child_messages); for (i = 0; i < num_parent_messages; ++i) { - if (dummy_cmp(NULL, parent_messages[i]->u.id.key, &childkeys[7]) <= 0) { + if (dummy_cmp(parent_messages[i]->kdbt(), &childkeys[7]) <= 0) { assert(parent_messages_applied[i] == 1); } else { assert(parent_messages_applied[i] == 0); } } - xids_destroy(&xids_0); - xids_destroy(&xids_123); - xids_destroy(&xids_234); + toku_xids_destroy(&xids_0); + toku_xids_destroy(&xids_123); + toku_xids_destroy(&xids_234); for (i = 0; i < num_parent_messages; ++i) { - toku_free(parent_messages[i]->u.id.key->data); - toku_free((DBT *) parent_messages[i]->u.id.key); - struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(extra, parent_messages[i]->u.id.val->data); + toku_free(parent_messages[i]->kdbt()->data); + struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(extra, parent_messages[i]->vdbt()->data); toku_free(extra->new_val.data); - toku_free(parent_messages[i]->u.id.val->data); - toku_free((DBT *) parent_messages[i]->u.id.val); - toku_free(parent_messages[i]); + toku_free(parent_messages[i]->vdbt()->data); + delete parent_messages[i]; } for (i = 0; i < num_child_messages; ++i) { toku_free(child_messages[i]); @@ -1019,7 +1011,7 @@ static void compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) { int r; - FT_MSG_S **MALLOC_N(4096,parent_messages); // 128k / 32 = 4k + ft_msg **MALLOC_N(4096,parent_messages); // 128k / 32 = 4k LEAFENTRY* child_messages = NULL; XMALLOC_N(4096,child_messages); void** key_pointers = NULL; @@ -1031,11 +1023,11 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) { int *MALLOC_N(4096,parent_messages_applied); memset(parent_messages_applied, 0, 4096*(sizeof parent_messages_applied[0])); - XIDS xids_0 = xids_get_root_xids(); + XIDS xids_0 = toku_xids_get_root_xids(); XIDS xids_123, xids_234; - r = xids_create_child(xids_0, &xids_123, (TXNID)123); + r = toku_xids_create_child(xids_0, &xids_123, (TXNID)123); CKERR(r); - r = xids_create_child(xids_0, 
&xids_234, (TXNID)234); + r = toku_xids_create_child(xids_0, &xids_234, (TXNID)234); CKERR(r); BASEMENTNODE child1_blbs[8], child2_blbs[8]; @@ -1070,7 +1062,7 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) { total_size += child1_blbs[i%8]->data_buffer.get_memory_size(); if (i % 8 < 7) { DBT keydbt; - if (child1keys[i%8].size == 0 || dummy_cmp(NULL, toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &child1keys[i%8]) > 0) { + if (child1keys[i%8].size == 0 || dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &child1keys[i%8]) > 0) { toku_fill_dbt(&child1keys[i%8], key_pointers[i], keylens[i]); toku_fill_dbt(&child2keys[i%8], key_pointers[i], keylens[i]); } @@ -1081,8 +1073,8 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) { for (i = 0; i < num_child_messages; ++i) { DBT keydbt; if (i % 8 < 7) { - assert(dummy_cmp(NULL, toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &child1keys[i%8]) <= 0); - assert(dummy_cmp(NULL, toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &child2keys[i%8]) <= 0); + assert(dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &child1keys[i%8]) <= 0); + assert(dummy_cmp(toku_fill_dbt(&keydbt, key_pointers[i], keylens[i]), &child2keys[i%8]) <= 0); } } @@ -1098,15 +1090,15 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) { int num_parent_messages = i; for (i = 0; i < 7; ++i) { - toku_clone_dbt(&child1->childkeys[i], child1keys[i]); - toku_clone_dbt(&child2->childkeys[i], child2keys[i]); + child1->pivotkeys.insert_at(&child1keys[i], i); + child2->pivotkeys.insert_at(&child2keys[i], i); } if (make_leaf_up_to_date) { for (i = 0; i < num_parent_messages; ++i) { if (!parent_messages_is_fresh[i]) { - toku_ft_leaf_apply_msg(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child1, -1, parent_messages[i], &non_mvcc_gc_info, NULL, NULL); - toku_ft_leaf_apply_msg(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child2, -1, parent_messages[i], &non_mvcc_gc_info, NULL, NULL); + toku_ft_leaf_apply_msg(t->ft->cmp, t->ft->update_fun, child1, -1, *parent_messages[i], &non_mvcc_gc_info, NULL, NULL); + toku_ft_leaf_apply_msg(t->ft->cmp, t->ft->update_fun, child2, -1, *parent_messages[i], &non_mvcc_gc_info, NULL, NULL); } } for (i = 0; i < 8; ++i) { @@ -1130,15 +1122,16 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) { BP_STATE(parentnode, 0) = PT_AVAIL; parentnode->max_msn_applied_to_node_on_disk = max_parent_msn; struct ancestors ancestors = { .node = parentnode, .childnum = 0, .next = NULL }; - const struct pivot_bounds infinite_bounds = { .lower_bound_exclusive = NULL, .upper_bound_inclusive = NULL }; bool msgs_applied; - toku_apply_ancestors_messages_to_node(t, child2, &ancestors, &infinite_bounds, &msgs_applied, -1); + toku_apply_ancestors_messages_to_node(t, child2, &ancestors, pivot_bounds::infinite_bounds(), &msgs_applied, -1); - FIFO_ITERATE(parent_bnc->buffer, key, keylen, val, vallen, type, msn, xids, is_fresh, - { - key = key; keylen = keylen; val = val; vallen = vallen; type = type; msn = msn; xids = xids; - assert(!is_fresh); - }); + struct checkit_fn { + int operator()(const ft_msg &UU(msg), bool is_fresh) { + assert(!is_fresh); + return 0; + } + } checkit; + parent_bnc->msg_buffer.iterate(checkit); invariant(parent_bnc->fresh_message_tree.size() + parent_bnc->stale_message_tree.size() == (uint32_t) num_parent_messages); @@ -1170,23 +1163,21 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) { toku_fill_dbt(&key2dbt, keyp, keylen); 
toku_fill_dbt(&val2dbt, valp, vallen); } - assert(dummy_cmp(NULL, &key1dbt, &key2dbt) == 0); - assert(dummy_cmp(NULL, &val1dbt, &val2dbt) == 0); + assert(dummy_cmp(&key1dbt, &key2dbt) == 0); + assert(dummy_cmp(&val1dbt, &val2dbt) == 0); } } - xids_destroy(&xids_0); - xids_destroy(&xids_123); - xids_destroy(&xids_234); + toku_xids_destroy(&xids_0); + toku_xids_destroy(&xids_123); + toku_xids_destroy(&xids_234); for (i = 0; i < num_parent_messages; ++i) { - toku_free(parent_messages[i]->u.id.key->data); - toku_free((DBT *) parent_messages[i]->u.id.key); - struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(extra, parent_messages[i]->u.id.val->data); + toku_free(parent_messages[i]->kdbt()->data); + struct orthopush_flush_update_fun_extra *CAST_FROM_VOIDP(extra, parent_messages[i]->vdbt()->data); toku_free(extra->new_val.data); - toku_free(parent_messages[i]->u.id.val->data); - toku_free((DBT *) parent_messages[i]->u.id.val); - toku_free(parent_messages[i]); + toku_free(parent_messages[i]->vdbt()->data); + delete parent_messages[i]; } for (i = 0; i < num_child_messages; ++i) { toku_free(key_pointers[i]); @@ -1219,14 +1210,32 @@ parse_args(int argc, const char *argv[]) { } } +static int cmp_fn(DB *db __attribute__((unused)), + const DBT *a, const DBT *b) { + int c; + if (a->size > b->size) { + c = memcmp(a->data, b->data, b->size); + } else if (a->size < b->size) { + c = memcmp(a->data, b->data, a->size); + } else { + return memcmp(a->data, b->data, a->size); + } + if (c == 0) { + c = a->size - b->size; + } + return c; +} + int test_main (int argc, const char *argv[]) { parse_args(argc, argv); + dummy_cmp.create(cmp_fn, nullptr); + initialize_dummymsn(); int r; CACHETABLE ct; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); FT_HANDLE t; r = toku_open_ft_handle(fname, 1, &t, 128*1024, 4096, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -1256,5 +1265,7 @@ test_main (int argc, const char *argv[]) { r = toku_close_ft_handle_nolsn(t, 0); assert(r==0); toku_cachetable_close(&ct); + dummy_cmp.destroy(); + return 0; } diff --git a/storage/tokudb/ft-index/ft/tests/pqueue-test.cc b/storage/tokudb/ft-index/ft/tests/pqueue-test.cc index a42cf830c9e..a10fcd77483 100644 --- a/storage/tokudb/ft-index/ft/tests/pqueue-test.cc +++ b/storage/tokudb/ft-index/ft/tests/pqueue-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,8 +91,8 @@ PATENT RIGHTS GRANT: #include "test.h" -#include "ftloader-internal.h" -#include "pqueue.h" +#include "loader/loader-internal.h" +#include "loader/pqueue.h" int found_dup = -1; diff --git a/storage/tokudb/ft-index/ft/tests/quicklz-test.cc b/storage/tokudb/ft-index/ft/tests/quicklz-test.cc index 44bec12fb08..2c8b88440f9 100644 --- a/storage/tokudb/ft-index/ft/tests/quicklz-test.cc +++ b/storage/tokudb/ft-index/ft/tests/quicklz-test.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: #ident "$Id$" #include "test.h" -#include "quicklz.h" +#include "serialize/quicklz.h" static void test_qlz_random_i (int i) { if (verbose) printf("i=%d\n", i); diff --git a/storage/tokudb/ft-index/ft/tests/recovery-bad-last-entry.cc b/storage/tokudb/ft-index/ft/tests/recovery-bad-last-entry.cc index 214218f5a1a..80d0f295465 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-bad-last-entry.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-bad-last-entry.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -146,7 +146,7 @@ run_test(void) { else break; // run recovery - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, diff --git a/storage/tokudb/ft-index/ft/tests/recovery-cbegin-cend-hello.cc b/storage/tokudb/ft-index/ft/tests/recovery-cbegin-cend-hello.cc index b0da2695e47..6686ba61f64 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-cbegin-cend-hello.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-cbegin-cend-hello.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -128,7 +128,7 @@ run_test(void) { r = close(devnul); assert(r==0); // run recovery - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0); diff --git a/storage/tokudb/ft-index/ft/tests/recovery-cbegin-cend.cc b/storage/tokudb/ft-index/ft/tests/recovery-cbegin-cend.cc index b192ad64af4..ef953923813 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-cbegin-cend.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-cbegin-cend.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -113,7 +113,7 @@ run_test(void) { r = toku_logger_close(&logger); assert(r == 0); // run recovery - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0); diff --git a/storage/tokudb/ft-index/ft/tests/recovery-cbegin.cc b/storage/tokudb/ft-index/ft/tests/recovery-cbegin.cc index 411684770d0..54d69d2a605 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-cbegin.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-cbegin.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -119,7 +119,7 @@ run_test(void) { r = close(devnul); assert(r==0); - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0); diff --git a/storage/tokudb/ft-index/ft/tests/recovery-cend-cbegin.cc b/storage/tokudb/ft-index/ft/tests/recovery-cend-cbegin.cc index 8c155c35b0e..d03b95fd9f7 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-cend-cbegin.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-cend-cbegin.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -121,7 +121,7 @@ run_test(void) { } // run recovery - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, diff --git a/storage/tokudb/ft-index/ft/tests/recovery-datadir-is-file.cc b/storage/tokudb/ft-index/ft/tests/recovery-datadir-is-file.cc index 7a8108a347e..5df3b6bdca4 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-datadir-is-file.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-datadir-is-file.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -130,7 +130,7 @@ run_test(void) { strncat(buf, testfile, TOKU_PATH_MAX); r = system(buf); CKERR(r); } - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, diff --git a/storage/tokudb/ft-index/ft/tests/recovery-empty.cc b/storage/tokudb/ft-index/ft/tests/recovery-empty.cc index 33c7333bc54..37acb97e82b 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-empty.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-empty.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -116,7 +116,7 @@ run_test(void) { } // run recovery - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0); diff --git a/storage/tokudb/ft-index/ft/tests/recovery-fopen-missing-file.cc b/storage/tokudb/ft-index/ft/tests/recovery-fopen-missing-file.cc index 63a5f5a5fee..7590ea162bb 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-fopen-missing-file.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-fopen-missing-file.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -109,7 +109,7 @@ run_test(void) { toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0); toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0); - BYTESTRING iname = { (uint32_t) strlen("missing_tokudb_file"), (char *) "missing_tokudb_file" }; + BYTESTRING iname = { (uint32_t) strlen("missing_tokuft_file"), (char *) "missing_tokuft_file" }; FILENUM filenum = {42}; uint32_t treeflags = 0; toku_log_fopen(logger, NULL, true, iname, filenum, treeflags); @@ -122,7 +122,7 @@ run_test(void) { r = close(devnul); assert(r==0); // run recovery - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0); diff --git a/storage/tokudb/ft-index/ft/tests/recovery-hello.cc b/storage/tokudb/ft-index/ft/tests/recovery-hello.cc index d31698f795b..36126c576af 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-hello.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-hello.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -122,7 +122,7 @@ run_test(void) { r = close(devnul); assert(r==0); // run recovery - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0); diff --git a/storage/tokudb/ft-index/ft/tests/recovery-lsn-error-during-forward-scan.cc b/storage/tokudb/ft-index/ft/tests/recovery-lsn-error-during-forward-scan.cc index 253c674ae70..f21c307ccf5 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-lsn-error-during-forward-scan.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-lsn-error-during-forward-scan.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -152,7 +152,7 @@ run_test(void) { toku_recover_set_callback(recover_callback_at_turnaround, NULL); // run recovery - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0); diff --git a/storage/tokudb/ft-index/ft/tests/recovery-no-datadir.cc b/storage/tokudb/ft-index/ft/tests/recovery-no-datadir.cc index 222de5bdbcb..b79ea03bca5 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-no-datadir.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-no-datadir.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -116,7 +116,7 @@ run_test(void) { r = close(devnul); assert(r==0); // run recovery - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, "/junk", TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0); diff --git a/storage/tokudb/ft-index/ft/tests/recovery-no-log.cc b/storage/tokudb/ft-index/ft/tests/recovery-no-log.cc index ac71769e580..a2fd7b2e010 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-no-log.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-no-log.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -108,7 +108,7 @@ run_test(void) { r = close(devnul); assert(r==0); // run recovery - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, TOKU_TEST_FILENAME, TOKU_TEST_FILENAME, 0, 0, 0, NULL, 0); diff --git a/storage/tokudb/ft-index/ft/tests/recovery-no-logdir.cc b/storage/tokudb/ft-index/ft/tests/recovery-no-logdir.cc index ad72decd134..3e889b665a6 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-no-logdir.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-no-logdir.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -102,7 +102,7 @@ run_test(void) { r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); assert(r == 0); // run recovery - r = tokudb_recover(NULL, + r = tokuft_recover(NULL, NULL_prepared_txn_callback, NULL_keep_cachetable_callback, NULL_logger, NULL, NULL, 0, 0, 0, NULL, 0); diff --git a/storage/tokudb/ft-index/ft/tests/recovery-test5123.cc b/storage/tokudb/ft-index/ft/tests/recovery-test5123.cc index 7020ea39b24..955a842e6e9 100644 --- a/storage/tokudb/ft-index/ft/tests/recovery-test5123.cc +++ b/storage/tokudb/ft-index/ft/tests/recovery-test5123.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,7 +92,7 @@ PATENT RIGHTS GRANT: #include "test.h" #include "toku_os.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" #include "test-ft-txns.h" diff --git a/storage/tokudb/ft-index/ft/tests/shortcut.cc b/storage/tokudb/ft-index/ft/tests/shortcut.cc index 15ff6e563f9..fc08868ccf0 100644 --- a/storage/tokudb/ft-index/ft/tests/shortcut.cc +++ b/storage/tokudb/ft-index/ft/tests/shortcut.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -108,7 +108,7 @@ test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0); r = toku_ft_cursor(ft, &cursor, NULL, false, false); assert(r==0); diff --git a/storage/tokudb/ft-index/ft/tests/subblock-test-checksum.cc b/storage/tokudb/ft-index/ft/tests/subblock-test-checksum.cc index 1885ce0f55c..0ba9e88ee83 100644 --- a/storage/tokudb/ft-index/ft/tests/subblock-test-checksum.cc +++ b/storage/tokudb/ft-index/ft/tests/subblock-test-checksum.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -91,8 +91,8 @@ PATENT RIGHTS GRANT: #include "test.h" -#include "compress.h" -#include "sub_block.h" +#include "serialize/compress.h" +#include "serialize/sub_block.h" #include <toku_portability.h> #include <util/threadpool.h> diff --git a/storage/tokudb/ft-index/ft/tests/subblock-test-compression.cc b/storage/tokudb/ft-index/ft/tests/subblock-test-compression.cc index ccd7a4e521c..ba3ab4113f4 100644 --- a/storage/tokudb/ft-index/ft/tests/subblock-test-compression.cc +++ b/storage/tokudb/ft-index/ft/tests/subblock-test-compression.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,7 @@ PATENT RIGHTS GRANT: #include <errno.h> #include <string.h> -#include "sub_block.h" +#include "serialize/sub_block.h" static void test_sub_block_compression(void *buf, int total_size, int my_max_sub_blocks, int n_cores, enum toku_compression_method method) { diff --git a/storage/tokudb/ft-index/ft/tests/subblock-test-index.cc b/storage/tokudb/ft-index/ft/tests/subblock-test-index.cc index 2821429c3eb..d6e035af6d3 100644 --- a/storage/tokudb/ft-index/ft/tests/subblock-test-index.cc +++ b/storage/tokudb/ft-index/ft/tests/subblock-test-index.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,7 @@ PATENT RIGHTS GRANT: #include <errno.h> #include <string.h> -#include "sub_block.h" +#include "serialize/sub_block.h" static void test_sub_block_index(void) { diff --git a/storage/tokudb/ft-index/ft/tests/subblock-test-size.cc b/storage/tokudb/ft-index/ft/tests/subblock-test-size.cc index 5a226a4b443..e21b15f33d2 100644 --- a/storage/tokudb/ft-index/ft/tests/subblock-test-size.cc +++ b/storage/tokudb/ft-index/ft/tests/subblock-test-size.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,7 @@ PATENT RIGHTS GRANT: #include <errno.h> #include <string.h> -#include "sub_block.h" +#include "serialize/sub_block.h" static void test_sub_block_size(int total_size) { diff --git a/storage/tokudb/ft-index/ft/tests/test-assert.cc b/storage/tokudb/ft-index/ft/tests/test-assert.cc index a06b389584d..f6221c2d152 100644 --- a/storage/tokudb/ft-index/ft/tests/test-assert.cc +++ b/storage/tokudb/ft-index/ft/tests/test-assert.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/test-bjm.cc b/storage/tokudb/ft-index/ft/tests/test-bjm.cc index dc0f833992d..4969f8c4a8b 100644 --- a/storage/tokudb/ft-index/ft/tests/test-bjm.cc +++ b/storage/tokudb/ft-index/ft/tests/test-bjm.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,7 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2011-2013 Tokutek Inc. All rights reserved." 
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "background_job_manager.h" +#include "cachetable/background_job_manager.h" #include "test.h" diff --git a/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-flush.cc b/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-flush.cc index 976a5a5b958..22fbf37d500 100644 --- a/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-flush.cc +++ b/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-flush.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -96,10 +96,9 @@ PATENT RIGHTS GRANT: #include <ft-cachetable-wrappers.h> #include "ft-flusher.h" #include "ft-flusher-internal.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -184,7 +183,7 @@ doit (bool after_child_pin) { toku_flusher_thread_set_callback(flusher_callback, &after_child_pin); - toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr); unlink("foo1.ft_handle"); r = toku_open_ft_handle("foo1.ft_handle", 1, &t, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -228,8 +227,8 @@ doit (bool after_child_pin) { ); FTNODE node = NULL; - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, t->ft); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(t->ft); toku_pin_ftnode( t->ft, node_root, @@ -283,7 +282,7 @@ doit (bool after_child_pin) { // // now pin the root, verify that we have a message in there, and that it is clean // - fill_bfe_for_full_read(&bfe, c_ft->ft); + bfe.create_for_full_read(c_ft->ft); toku_pin_ftnode( c_ft->ft, node_root, diff --git a/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-merge.cc b/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-merge.cc index d3950ee746a..0ad417f2712 100644 --- a/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-merge.cc +++ b/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-merge.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -96,10 +96,9 @@ PATENT RIGHTS GRANT: #include <ft-cachetable-wrappers.h> #include "ft-flusher.h" #include "ft-flusher-internal.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -175,7 +174,7 @@ doit (int state) { toku_flusher_thread_set_callback(flusher_callback, &state); - toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr); unlink("foo2.ft_handle"); unlink("bar2.ft_handle"); // note the basement node size is 5 times the node size @@ -246,8 +245,8 @@ doit (int state) { toku_unpin_ftnode(t->ft, node); - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, t->ft); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(t->ft); toku_pin_ftnode_with_dep_nodes( t->ft, node_root, @@ -306,7 +305,7 @@ doit (int state) { // // now pin the root, verify that the state is what we expect // - fill_bfe_for_full_read(&bfe, c_ft->ft); + bfe.create_for_full_read(c_ft->ft); toku_pin_ftnode_with_dep_nodes( c_ft->ft, node_root, diff --git a/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-rebalance.cc b/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-rebalance.cc index 0a78e260e34..7870cd2fa58 100644 --- a/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-rebalance.cc +++ b/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-rebalance.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -96,10 +96,9 @@ PATENT RIGHTS GRANT: #include <ft-cachetable-wrappers.h> #include "ft-flusher.h" #include "ft-flusher-internal.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -175,7 +174,7 @@ doit (int state) { toku_flusher_thread_set_callback(flusher_callback, &state); - toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr); unlink("foo3.ft_handle"); unlink("bar3.ft_handle"); // note the basement node size is 5 times the node size @@ -266,8 +265,8 @@ doit (int state) { toku_unpin_ftnode(t->ft, node); - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, t->ft); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(t->ft); toku_pin_ftnode( t->ft, node_root, @@ -322,7 +321,7 @@ doit (int state) { // // now pin the root, verify that the state is what we expect // - fill_bfe_for_full_read(&bfe, c_ft->ft); + bfe.create_for_full_read(c_ft->ft); toku_pin_ftnode( c_ft->ft, node_root, diff --git a/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-split.cc b/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-split.cc index c1f7f0d9b1f..8e24ae2bb43 100644 --- a/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-split.cc +++ b/storage/tokudb/ft-index/ft/tests/test-checkpoint-during-split.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -96,10 +96,9 @@ PATENT RIGHTS GRANT: #include <ft-cachetable-wrappers.h> #include "ft-flusher.h" #include "ft-flusher-internal.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -184,7 +183,7 @@ doit (bool after_split) { toku_flusher_thread_set_callback(flusher_callback, &after_split); - toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr); unlink("foo4.ft_handle"); unlink("bar4.ft_handle"); // note the basement node size is 5 times the node size @@ -242,8 +241,8 @@ doit (bool after_split) { ); FTNODE node = NULL; - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, t->ft); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(t->ft); toku_pin_ftnode( t->ft, node_root, @@ -298,7 +297,7 @@ doit (bool after_split) { // // now pin the root, verify that we have a message in there, and that it is clean // - fill_bfe_for_full_read(&bfe, c_ft->ft); + bfe.create_for_full_read(c_ft->ft); toku_pin_ftnode( c_ft->ft, node_root, diff --git a/storage/tokudb/ft-index/ft/tests/test-del-inorder.cc b/storage/tokudb/ft-index/ft/tests/test-del-inorder.cc index 9054661fa0e..75a1c255bd9 100644 --- a/storage/tokudb/ft-index/ft/tests/test-del-inorder.cc +++ b/storage/tokudb/ft-index/ft/tests/test-del-inorder.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,6 @@ PATENT RIGHTS GRANT: static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -109,7 +108,7 @@ doit (void) { int r; - toku_cachetable_create(&ct, 16*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 16*1024, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, NODESIZE, NODESIZE, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); diff --git a/storage/tokudb/ft-index/ft/tests/test-dirty-flushes-on-cleaner.cc b/storage/tokudb/ft-index/ft/tests/test-dirty-flushes-on-cleaner.cc index 39b835e278f..a88c07c0ca1 100644 --- a/storage/tokudb/ft-index/ft/tests/test-dirty-flushes-on-cleaner.cc +++ b/storage/tokudb/ft-index/ft/tests/test-dirty-flushes-on-cleaner.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,11 +94,10 @@ PATENT RIGHTS GRANT: #include <ft-cachetable-wrappers.h> #include "ft-flusher.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -132,7 +131,7 @@ doit (void) { int r; - toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -238,8 +237,8 @@ doit (void) { // now lock and release the leaf node to make sure it is what we expect it to be. 
FTNODE node = NULL; - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, ft->ft); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(ft->ft); toku_pin_ftnode_with_dep_nodes( ft->ft, node_leaf, @@ -269,7 +268,7 @@ doit (void) { // node is in memory and another is // on disk // - fill_bfe_for_min_read(&bfe, ft->ft); + bfe.create_for_min_read(ft->ft); toku_pin_ftnode_with_dep_nodes( ft->ft, node_leaf, @@ -290,7 +289,7 @@ doit (void) { // // now let us induce a clean on the internal node // - fill_bfe_for_min_read(&bfe, ft->ft); + bfe.create_for_min_read(ft->ft); toku_pin_ftnode_with_dep_nodes( ft->ft, node_internal, @@ -315,7 +314,7 @@ doit (void) { ); // verify that node_internal's buffer is empty - fill_bfe_for_min_read(&bfe, ft->ft); + bfe.create_for_min_read(ft->ft); toku_pin_ftnode_with_dep_nodes( ft->ft, node_internal, diff --git a/storage/tokudb/ft-index/ft/tests/test-dump-ft.cc b/storage/tokudb/ft-index/ft/tests/test-dump-ft.cc index f18723c525e..f1c76d0bd13 100644 --- a/storage/tokudb/ft-index/ft/tests/test-dump-ft.cc +++ b/storage/tokudb/ft-index/ft/tests/test-dump-ft.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,7 +94,6 @@ PATENT RIGHTS GRANT: #include "test.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; int test_main(int argc, const char *argv[]) { @@ -106,7 +105,7 @@ test_main(int argc, const char *argv[]) { FILE *f = fopen("test-dump-ft.out", "w"); unlink(n); assert(f); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(n, 1, &t, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); int i; for (i=0; i<10000; i++) { diff --git a/storage/tokudb/ft-index/ft/tests/test-flushes-on-cleaner.cc b/storage/tokudb/ft-index/ft/tests/test-flushes-on-cleaner.cc index 32b03496e5e..fa00100d3ed 100644 --- a/storage/tokudb/ft-index/ft/tests/test-flushes-on-cleaner.cc +++ b/storage/tokudb/ft-index/ft/tests/test-flushes-on-cleaner.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,11 +94,10 @@ PATENT RIGHTS GRANT: #include <ft-cachetable-wrappers.h> #include "ft-flusher.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -132,7 +131,7 @@ doit (bool keep_other_bn_in_memory) { int r; - toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -244,8 +243,8 @@ doit (bool keep_other_bn_in_memory) { assert_zero(r); // now lock and release the leaf node to make sure it is what we expect it to be. FTNODE node = NULL; - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, ft->ft); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(ft->ft); toku_pin_ftnode( ft->ft, node_leaf, @@ -281,7 +280,7 @@ doit (bool keep_other_bn_in_memory) { // but only one should have broadcast message // applied. 
// - fill_bfe_for_full_read(&bfe, ft->ft); + bfe.create_for_full_read(ft->ft); } else { // @@ -290,7 +289,7 @@ doit (bool keep_other_bn_in_memory) { // node is in memory and another is // on disk // - fill_bfe_for_min_read(&bfe, ft->ft); + bfe.create_for_min_read(ft->ft); } toku_pin_ftnode( ft->ft, @@ -315,7 +314,7 @@ doit (bool keep_other_bn_in_memory) { // // now let us induce a clean on the internal node // - fill_bfe_for_min_read(&bfe, ft->ft); + bfe.create_for_min_read(ft->ft); toku_pin_ftnode( ft->ft, node_internal, @@ -338,7 +337,7 @@ doit (bool keep_other_bn_in_memory) { ); // verify that node_internal's buffer is empty - fill_bfe_for_min_read(&bfe, ft->ft); + bfe.create_for_min_read(ft->ft); toku_pin_ftnode( ft->ft, node_internal, diff --git a/storage/tokudb/ft-index/ft/tests/test-ft-overflow.cc b/storage/tokudb/ft-index/ft/tests/test-ft-overflow.cc index dee6dd36496..d8e51b5ab7c 100644 --- a/storage/tokudb/ft-index/ft/tests/test-ft-overflow.cc +++ b/storage/tokudb/ft-index/ft/tests/test-ft-overflow.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -97,7 +97,6 @@ PATENT RIGHTS GRANT: static const char *fname = TOKU_TEST_FILENAME; static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static void test_overflow (void) { @@ -106,7 +105,7 @@ test_overflow (void) { uint32_t nodesize = 1<<20; int r; unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &t, nodesize, nodesize / 8, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); DBT k,v; diff --git a/storage/tokudb/ft-index/ft/tests/test-ft-txns.h b/storage/tokudb/ft-index/ft/tests/test-ft-txns.h index bc887391589..04b2cfdf8bf 100644 --- a/storage/tokudb/ft-index/ft/tests/test-ft-txns.h +++ b/storage/tokudb/ft-index/ft/tests/test-ft-txns.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TEST_FT_TXNS_H -#define TEST_FT_TXNS_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,6 +87,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -136,7 +136,7 @@ static inline void test_setup_and_recover(const char *envdir, TOKULOGGER *logger CKERR(r); DB_ENV *CAST_FROM_VOIDP(ctv, (void *) &ct); // Use intermediate to avoid compiler warning. 
- r = tokudb_recover(ctv, + r = tokuft_recover(ctv, NULL_prepared_txn_callback, xid_lsn_keep_cachetable_callback, logger, @@ -179,5 +179,3 @@ static inline void shutdown_after_recovery(TOKULOGGER *loggerp, CACHETABLE *ctp) int r = toku_logger_close(loggerp); CKERR(r); } - -#endif /* TEST_FT_TXNS_H */ diff --git a/storage/tokudb/ft-index/ft/tests/test-hot-with-bounds.cc b/storage/tokudb/ft-index/ft/tests/test-hot-with-bounds.cc index b6eaab3073a..419cbd2cb51 100644 --- a/storage/tokudb/ft-index/ft/tests/test-hot-with-bounds.cc +++ b/storage/tokudb/ft-index/ft/tests/test-hot-with-bounds.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -96,10 +96,9 @@ PATENT RIGHTS GRANT: #include <ft-cachetable-wrappers.h> #include "ft-flusher.h" #include "ft-flusher-internal.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -113,7 +112,7 @@ doit (void) { int r; - toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr); unlink(TOKU_TEST_FILENAME); r = toku_open_ft_handle(TOKU_TEST_FILENAME, 1, &t, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -181,8 +180,8 @@ doit (void) { // the root, one in each buffer, let's verify this. FTNODE node = NULL; - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, t->ft); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(t->ft); toku_pin_ftnode( t->ft, node_root, @@ -211,7 +210,7 @@ doit (void) { // at this point, we have should have flushed // only the middle buffer, let's verify this. node = NULL; - fill_bfe_for_min_read(&bfe, t->ft); + bfe.create_for_min_read(t->ft); toku_pin_ftnode( t->ft, node_root, diff --git a/storage/tokudb/ft-index/ft/tests/test-inc-split.cc b/storage/tokudb/ft-index/ft/tests/test-inc-split.cc index cafcb496f7a..13510855cb0 100644 --- a/storage/tokudb/ft-index/ft/tests/test-inc-split.cc +++ b/storage/tokudb/ft-index/ft/tests/test-inc-split.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -120,7 +120,6 @@ PATENT RIGHTS GRANT: static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -137,7 +136,7 @@ doit (int ksize __attribute__((__unused__))) { int i; int r; - toku_cachetable_create(&ct, 16*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 16*1024, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, NODESIZE, NODESIZE, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); diff --git a/storage/tokudb/ft-index/ft/tests/test-leafentry-child-txn.cc b/storage/tokudb/ft-index/ft/tests/test-leafentry-child-txn.cc index e55b20d6a3f..30dd15d3e39 100644 --- a/storage/tokudb/ft-index/ft/tests/test-leafentry-child-txn.cc +++ b/storage/tokudb/ft-index/ft/tests/test-leafentry-child-txn.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -91,10 +91,9 @@ PATENT RIGHTS GRANT: #include <string.h> #include "test.h" -#include "fttypes.h" -#include "ule.h" -#include "ule-internal.h" +#include "ft/ule.h" +#include "ft/ule-internal.h" static void init_empty_ule(ULE ule) { ule->num_cuxrs = 0; @@ -111,17 +110,6 @@ static void add_committed_entry(ULE ule, DBT *val, TXNID xid) { ule->uxrs[index].xid = xid; } -static FT_MSG_S -msg_init(enum ft_msg_type type, XIDS xids, - DBT *key, DBT *val) { - FT_MSG_S msg; - msg.type = type; - msg.xids = xids; - msg.u.id.key = key; - msg.u.id.val = val; - return msg; -} - //Test all the different things that can happen to a //committed leafentry (logical equivalent of a committed insert). static void @@ -144,14 +132,14 @@ run_test(void) { // test case where we apply a message and the innermost child_id // is the same as the innermost committed TXNID - XIDS root_xids = xids_get_root_xids(); + XIDS root_xids = toku_xids_get_root_xids(); TXNID root_txnid = 1000; TXNID child_id = 10; XIDS msg_xids_1; XIDS msg_xids_2; - r = xids_create_child(root_xids, &msg_xids_1, root_txnid); + r = toku_xids_create_child(root_xids, &msg_xids_1, root_txnid); assert(r==0); - r = xids_create_child(msg_xids_1, &msg_xids_2, child_id); + r = toku_xids_create_child(msg_xids_1, &msg_xids_2, child_id); assert(r==0); init_empty_ule(&ule_initial); @@ -161,45 +149,49 @@ run_test(void) { add_committed_entry(&ule_initial, &val, 10); // now do the application of xids to the ule - FT_MSG_S msg; // do a commit - msg = msg_init(FT_COMMIT_ANY, msg_xids_2, &key, &val); - test_msg_modify_ule(&ule_initial, &msg); - assert(ule->num_cuxrs == 2); - assert(ule->uxrs[0].xid == TXNID_NONE); - assert(ule->uxrs[1].xid == 10); - assert(ule->uxrs[0].valp == &val_data_one); - assert(ule->uxrs[1].valp == &val_data_two); + { + ft_msg msg(&key, &val, FT_COMMIT_ANY, ZERO_MSN, msg_xids_2); + test_msg_modify_ule(&ule_initial, msg); + assert(ule->num_cuxrs == 2); + assert(ule->uxrs[0].xid == TXNID_NONE); + assert(ule->uxrs[1].xid == 10); + assert(ule->uxrs[0].valp == &val_data_one); + assert(ule->uxrs[1].valp == &val_data_two); + } // do an abort - msg = msg_init(FT_ABORT_ANY, msg_xids_2, &key, &val); - test_msg_modify_ule(&ule_initial, &msg); - assert(ule->num_cuxrs == 2); - assert(ule->uxrs[0].xid == TXNID_NONE); - assert(ule->uxrs[1].xid == 10); - assert(ule->uxrs[0].valp == &val_data_one); - assert(ule->uxrs[1].valp == &val_data_two); + { + ft_msg msg(&key, &val, FT_ABORT_ANY, ZERO_MSN, msg_xids_2); + test_msg_modify_ule(&ule_initial, msg); + assert(ule->num_cuxrs == 2); + assert(ule->uxrs[0].xid == TXNID_NONE); + assert(ule->uxrs[1].xid == 10); + assert(ule->uxrs[0].valp == &val_data_one); + assert(ule->uxrs[1].valp == &val_data_two); + } // do an insert val.data = &val_data_three; - msg = msg_init(FT_INSERT, msg_xids_2, &key, &val); - test_msg_modify_ule(&ule_initial, &msg); - // now that message applied, verify that things are good - assert(ule->num_cuxrs == 2); - assert(ule->num_puxrs == 2); - assert(ule->uxrs[0].xid == TXNID_NONE); - assert(ule->uxrs[1].xid == 10); - assert(ule->uxrs[2].xid == 1000); - assert(ule->uxrs[3].xid == 10); - assert(ule->uxrs[0].valp == &val_data_one); - assert(ule->uxrs[1].valp == &val_data_two); - assert(ule->uxrs[2].type == XR_PLACEHOLDER); - assert(ule->uxrs[3].valp == &val_data_three); - - - xids_destroy(&msg_xids_2); - xids_destroy(&msg_xids_1); - xids_destroy(&root_xids); + { + ft_msg msg(&key, &val, FT_INSERT, ZERO_MSN, msg_xids_2); + test_msg_modify_ule(&ule_initial, msg); + // now that message 
applied, verify that things are good + assert(ule->num_cuxrs == 2); + assert(ule->num_puxrs == 2); + assert(ule->uxrs[0].xid == TXNID_NONE); + assert(ule->uxrs[1].xid == 10); + assert(ule->uxrs[2].xid == 1000); + assert(ule->uxrs[3].xid == 10); + assert(ule->uxrs[0].valp == &val_data_one); + assert(ule->uxrs[1].valp == &val_data_two); + assert(ule->uxrs[2].type == XR_PLACEHOLDER); + assert(ule->uxrs[3].valp == &val_data_three); + } + + toku_xids_destroy(&msg_xids_2); + toku_xids_destroy(&msg_xids_1); + toku_xids_destroy(&root_xids); } diff --git a/storage/tokudb/ft-index/ft/tests/test-leafentry-nested.cc b/storage/tokudb/ft-index/ft/tests/test-leafentry-nested.cc index 0335c284cb3..9253ff814c9 100644 --- a/storage/tokudb/ft-index/ft/tests/test-leafentry-nested.cc +++ b/storage/tokudb/ft-index/ft/tests/test-leafentry-nested.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,10 +91,9 @@ PATENT RIGHTS GRANT: #include <string.h> #include "test.h" -#include "fttypes.h" -#include "ule.h" -#include "ule-internal.h" +#include "ft/ule.h" +#include "ft/ule-internal.h" enum {MAX_SIZE = 256}; static XIDS nested_xids[MAX_TRANSACTION_RECORDS]; @@ -213,7 +212,7 @@ test_le_offsets (void) { static void test_ule_packs_to_nothing (ULE ule) { LEAFENTRY le; - int r = le_pack(ule, NULL, 0, NULL, 0, 0, &le, nullptr); + int r = le_pack(ule, NULL, 0, NULL, 0, 0, 0, &le, nullptr); assert(r==0); assert(le==NULL); } @@ -319,7 +318,7 @@ test_le_pack_committed (void) { size_t memsize; LEAFENTRY le; - int r = le_pack(&ule, nullptr, 0, nullptr, 0, 0, &le, nullptr); + int r = le_pack(&ule, nullptr, 0, nullptr, 0, 0, 0, &le, nullptr); assert(r==0); assert(le!=NULL); memsize = le_memsize_from_ule(&ule); @@ -329,7 +328,7 @@ test_le_pack_committed (void) { verify_ule_equal(&ule, &tmp_ule); LEAFENTRY tmp_le; size_t tmp_memsize; - r = le_pack(&tmp_ule, nullptr, 0, nullptr, 0, 0, &tmp_le, nullptr); + r = le_pack(&tmp_ule, nullptr, 0, nullptr, 0, 0, 0, &tmp_le, nullptr); tmp_memsize = le_memsize_from_ule(&tmp_ule); assert(r==0); assert(tmp_memsize == memsize); @@ -377,7 +376,7 @@ test_le_pack_uncommitted (uint8_t committed_type, uint8_t prov_type, int num_pla size_t memsize; LEAFENTRY le; - int r = le_pack(&ule, nullptr, 0, nullptr, 0, 0, &le, nullptr); + int r = le_pack(&ule, nullptr, 0, nullptr, 0, 0, 0, &le, nullptr); assert(r==0); assert(le!=NULL); memsize = le_memsize_from_ule(&ule); @@ -387,7 +386,7 @@ test_le_pack_uncommitted (uint8_t committed_type, uint8_t prov_type, int num_pla verify_ule_equal(&ule, &tmp_ule); LEAFENTRY tmp_le; size_t tmp_memsize; - r = le_pack(&tmp_ule, nullptr, 0, nullptr, 0, 0, &tmp_le, nullptr); + r = le_pack(&tmp_ule, nullptr, 0, nullptr, 0, 0, 0, &tmp_le, nullptr); tmp_memsize = le_memsize_from_ule(&tmp_ule); assert(r==0); assert(tmp_memsize == memsize); @@ -442,13 +441,13 @@ test_le_pack (void) { } static void -test_le_apply(ULE ule_initial, FT_MSG msg, ULE ule_expected) { +test_le_apply(ULE ule_initial, const ft_msg &msg, ULE ule_expected) { int r; LEAFENTRY le_initial; LEAFENTRY le_expected; LEAFENTRY le_result; - r = le_pack(ule_initial, nullptr, 0, nullptr, 0, 0, &le_initial, nullptr); + r = le_pack(ule_initial, nullptr, 0, nullptr, 0, 0, 0, &le_initial, nullptr); CKERR(r); size_t result_memsize = 0; @@ -458,6 +457,7 @@ test_le_apply(ULE ule_initial, FT_MSG msg, ULE ule_expected) { le_initial, nullptr, 0, + 0, &gc_info, 
&le_result, &ignoreme); @@ -467,7 +467,7 @@ test_le_apply(ULE ule_initial, FT_MSG msg, ULE ule_expected) { } size_t expected_memsize = 0; - r = le_pack(ule_expected, nullptr, 0, nullptr, 0, 0, &le_expected, nullptr); + r = le_pack(ule_expected, nullptr, 0, nullptr, 0, 0, 0, &le_expected, nullptr); CKERR(r); if (le_expected) { expected_memsize = leafentry_memsize(le_expected); @@ -495,17 +495,6 @@ static const ULE_S ule_committed_delete = { .uxrs = (UXR_S *)ule_committed_delete.uxrs_static }; -static FT_MSG_S -msg_init(enum ft_msg_type type, XIDS xids, - DBT *key, DBT *val) { - FT_MSG_S msg; - msg.type = type; - msg.xids = xids; - msg.u.id.key = key; - msg.u.id.val = val; - return msg; -} - static uint32_t next_nesting_level(uint32_t current) { uint32_t rval = current + 1; @@ -530,13 +519,13 @@ generate_committed_for(ULE ule, DBT *val) { } static void -generate_provpair_for(ULE ule, FT_MSG msg) { +generate_provpair_for(ULE ule, const ft_msg &msg) { uint32_t level; - XIDS xids = msg->xids; + XIDS xids = msg.xids(); ule->uxrs = ule->uxrs_static; ule->num_cuxrs = 1; - ule->num_puxrs = xids_get_num_xids(xids); + ule->num_puxrs = toku_xids_get_num_xids(xids); uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs; ule->uxrs[0].type = XR_DELETE; ule->uxrs[0].vallen = 0; @@ -546,12 +535,12 @@ generate_provpair_for(ULE ule, FT_MSG msg) { ule->uxrs[level].type = XR_PLACEHOLDER; ule->uxrs[level].vallen = 0; ule->uxrs[level].valp = NULL; - ule->uxrs[level].xid = xids_get_xid(xids, level-1); + ule->uxrs[level].xid = toku_xids_get_xid(xids, level-1); } ule->uxrs[num_uxrs - 1].type = XR_INSERT; - ule->uxrs[num_uxrs - 1].vallen = msg->u.id.val->size; - ule->uxrs[num_uxrs - 1].valp = msg->u.id.val->data; - ule->uxrs[num_uxrs - 1].xid = xids_get_innermost_xid(xids); + ule->uxrs[num_uxrs - 1].vallen = msg.vdbt()->size; + ule->uxrs[num_uxrs - 1].valp = msg.vdbt()->data; + ule->uxrs[num_uxrs - 1].xid = toku_xids_get_innermost_xid(xids); } //Test all the different things that can happen to a @@ -559,7 +548,6 @@ generate_provpair_for(ULE ule, FT_MSG msg) { static void test_le_empty_apply(void) { ULE_S ule_initial = ule_committed_delete; - FT_MSG_S msg; DBT key; DBT val; @@ -584,34 +572,41 @@ test_le_empty_apply(void) { //Abort/commit of an empty le is an empty le ULE_S ule_expected = ule_committed_delete; - msg = msg_init(FT_COMMIT_ANY, msg_xids, &key, &val); - test_le_apply(&ule_initial, &msg, &ule_expected); - msg = msg_init(FT_COMMIT_BROADCAST_TXN, msg_xids, &key, &val); - test_le_apply(&ule_initial, &msg, &ule_expected); - - msg = msg_init(FT_ABORT_ANY, msg_xids, &key, &val); - test_le_apply(&ule_initial, &msg, &ule_expected); - msg = msg_init(FT_ABORT_BROADCAST_TXN, msg_xids, &key, &val); - test_le_apply(&ule_initial, &msg, &ule_expected); + { + ft_msg msg(&key, &val, FT_COMMIT_ANY, ZERO_MSN, msg_xids); + test_le_apply(&ule_initial, msg, &ule_expected); + } + { + ft_msg msg(&key, &val, FT_COMMIT_BROADCAST_TXN, ZERO_MSN, msg_xids); + test_le_apply(&ule_initial, msg, &ule_expected); + } + { + ft_msg msg(&key, &val, FT_ABORT_ANY, ZERO_MSN, msg_xids); + test_le_apply(&ule_initial, msg, &ule_expected); + } + { + ft_msg msg(&key, &val, FT_ABORT_BROADCAST_TXN, ZERO_MSN, msg_xids); + test_le_apply(&ule_initial, msg, &ule_expected); + } } { //delete of an empty le is an empty le ULE_S ule_expected = ule_committed_delete; - msg = msg_init(FT_DELETE_ANY, msg_xids, &key, &val); - test_le_apply(&ule_initial, &msg, &ule_expected); + ft_msg msg(&key, &val, FT_DELETE_ANY, ZERO_MSN, msg_xids); + test_le_apply(&ule_initial, 
msg, &ule_expected); } { - msg = msg_init(FT_INSERT, msg_xids, &key, &val); + ft_msg msg(&key, &val, FT_INSERT, ZERO_MSN, msg_xids); ULE_S ule_expected; - generate_provpair_for(&ule_expected, &msg); - test_le_apply(&ule_initial, &msg, &ule_expected); + generate_provpair_for(&ule_expected, msg); + test_le_apply(&ule_initial, msg, &ule_expected); } { - msg = msg_init(FT_INSERT_NO_OVERWRITE, msg_xids, &key, &val); + ft_msg msg(&key, &val, FT_INSERT_NO_OVERWRITE, ZERO_MSN, msg_xids); ULE_S ule_expected; - generate_provpair_for(&ule_expected, &msg); - test_le_apply(&ule_initial, &msg, &ule_expected); + generate_provpair_for(&ule_expected, msg); + test_le_apply(&ule_initial, msg, &ule_expected); } } } @@ -619,36 +614,36 @@ test_le_empty_apply(void) { } static void -generate_provdel_for(ULE ule, FT_MSG msg) { +generate_provdel_for(ULE ule, const ft_msg &msg) { uint32_t level; - XIDS xids = msg->xids; + XIDS xids = msg.xids(); ule->num_cuxrs = 1; - ule->num_puxrs = xids_get_num_xids(xids); + ule->num_puxrs = toku_xids_get_num_xids(xids); uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs; ule->uxrs[0].type = XR_INSERT; - ule->uxrs[0].vallen = msg->u.id.val->size; - ule->uxrs[0].valp = msg->u.id.val->data; + ule->uxrs[0].vallen = msg.vdbt()->size; + ule->uxrs[0].valp = msg.vdbt()->data; ule->uxrs[0].xid = TXNID_NONE; for (level = ule->num_cuxrs; level < ule->num_cuxrs + ule->num_puxrs - 1; level++) { ule->uxrs[level].type = XR_PLACEHOLDER; ule->uxrs[level].vallen = 0; ule->uxrs[level].valp = NULL; - ule->uxrs[level].xid = xids_get_xid(xids, level-1); + ule->uxrs[level].xid = toku_xids_get_xid(xids, level-1); } ule->uxrs[num_uxrs - 1].type = XR_DELETE; ule->uxrs[num_uxrs - 1].vallen = 0; ule->uxrs[num_uxrs - 1].valp = NULL; - ule->uxrs[num_uxrs - 1].xid = xids_get_innermost_xid(xids); + ule->uxrs[num_uxrs - 1].xid = toku_xids_get_innermost_xid(xids); } static void -generate_both_for(ULE ule, DBT *oldval, FT_MSG msg) { +generate_both_for(ULE ule, DBT *oldval, const ft_msg &msg) { uint32_t level; - XIDS xids = msg->xids; + XIDS xids = msg.xids(); ule->num_cuxrs = 1; - ule->num_puxrs = xids_get_num_xids(xids); + ule->num_puxrs = toku_xids_get_num_xids(xids); uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs; ule->uxrs[0].type = XR_INSERT; ule->uxrs[0].vallen = oldval->size; @@ -658,12 +653,12 @@ generate_both_for(ULE ule, DBT *oldval, FT_MSG msg) { ule->uxrs[level].type = XR_PLACEHOLDER; ule->uxrs[level].vallen = 0; ule->uxrs[level].valp = NULL; - ule->uxrs[level].xid = xids_get_xid(xids, level-1); + ule->uxrs[level].xid = toku_xids_get_xid(xids, level-1); } ule->uxrs[num_uxrs - 1].type = XR_INSERT; - ule->uxrs[num_uxrs - 1].vallen = msg->u.id.val->size; - ule->uxrs[num_uxrs - 1].valp = msg->u.id.val->data; - ule->uxrs[num_uxrs - 1].xid = xids_get_innermost_xid(xids); + ule->uxrs[num_uxrs - 1].vallen = msg.vdbt()->size; + ule->uxrs[num_uxrs - 1].valp = msg.vdbt()->data; + ule->uxrs[num_uxrs - 1].xid = toku_xids_get_innermost_xid(xids); } //Test all the different things that can happen to a @@ -672,7 +667,6 @@ static void test_le_committed_apply(void) { ULE_S ule_initial; ule_initial.uxrs = ule_initial.uxrs_static; - FT_MSG_S msg; DBT key; DBT val; @@ -695,23 +689,30 @@ test_le_committed_apply(void) { if (nesting_level > 0) { //Commit/abort will not change a committed le ULE_S ule_expected = ule_initial; - msg = msg_init(FT_COMMIT_ANY, msg_xids, &key, &val); - test_le_apply(&ule_initial, &msg, &ule_expected); - msg = msg_init(FT_COMMIT_BROADCAST_TXN, msg_xids, &key, &val); - 
test_le_apply(&ule_initial, &msg, &ule_expected); - - msg = msg_init(FT_ABORT_ANY, msg_xids, &key, &val); - test_le_apply(&ule_initial, &msg, &ule_expected); - msg = msg_init(FT_ABORT_BROADCAST_TXN, msg_xids, &key, &val); - test_le_apply(&ule_initial, &msg, &ule_expected); + { + ft_msg msg(&key, &val, FT_COMMIT_ANY, ZERO_MSN, msg_xids); + test_le_apply(&ule_initial, msg, &ule_expected); + } + { + ft_msg msg(&key, &val, FT_COMMIT_BROADCAST_TXN, ZERO_MSN, msg_xids); + test_le_apply(&ule_initial, msg, &ule_expected); + } + { + ft_msg msg(&key, &val, FT_ABORT_ANY, ZERO_MSN, msg_xids); + test_le_apply(&ule_initial, msg, &ule_expected); + } + { + ft_msg msg(&key, &val, FT_ABORT_BROADCAST_TXN, ZERO_MSN, msg_xids); + test_le_apply(&ule_initial, msg, &ule_expected); + } } { - msg = msg_init(FT_DELETE_ANY, msg_xids, &key, &val); + ft_msg msg(&key, &val, FT_DELETE_ANY, ZERO_MSN, msg_xids); ULE_S ule_expected; ule_expected.uxrs = ule_expected.uxrs_static; - generate_provdel_for(&ule_expected, &msg); - test_le_apply(&ule_initial, &msg, &ule_expected); + generate_provdel_for(&ule_expected, msg); + test_le_apply(&ule_initial, msg, &ule_expected); } { @@ -720,11 +721,11 @@ test_le_committed_apply(void) { fillrandom(valbuf2, valsize2); DBT val2; toku_fill_dbt(&val2, valbuf2, valsize2); - msg = msg_init(FT_INSERT, msg_xids, &key, &val2); + ft_msg msg(&key, &val2, FT_INSERT, ZERO_MSN, msg_xids); ULE_S ule_expected; ule_expected.uxrs = ule_expected.uxrs_static; - generate_both_for(&ule_expected, &val, &msg); - test_le_apply(&ule_initial, &msg, &ule_expected); + generate_both_for(&ule_expected, &val, msg); + test_le_apply(&ule_initial, msg, &ule_expected); } { //INSERT_NO_OVERWRITE will not change a committed insert @@ -734,8 +735,8 @@ test_le_committed_apply(void) { fillrandom(valbuf2, valsize2); DBT val2; toku_fill_dbt(&val2, valbuf2, valsize2); - msg = msg_init(FT_INSERT_NO_OVERWRITE, msg_xids, &key, &val2); - test_le_apply(&ule_initial, &msg, &ule_expected); + ft_msg msg(&key, &val2, FT_INSERT_NO_OVERWRITE, ZERO_MSN, msg_xids); + test_le_apply(&ule_initial, msg, &ule_expected); } } } @@ -749,7 +750,7 @@ test_le_apply_messages(void) { static bool ule_worth_running_garbage_collection(ULE ule, TXNID oldest_referenced_xid_known) { LEAFENTRY le; - int r = le_pack(ule, nullptr, 0, nullptr, 0, 0, &le, nullptr); CKERR(r); + int r = le_pack(ule, nullptr, 0, nullptr, 0, 0, 0, &le, nullptr); CKERR(r); invariant_notnull(le); txn_gc_info gc_info(nullptr, oldest_referenced_xid_known, oldest_referenced_xid_known, true); bool worth_running = toku_le_worth_running_garbage_collection(le, &gc_info); @@ -854,7 +855,6 @@ static void test_le_garbage_collection_birdie(void) { } static void test_le_optimize(void) { - FT_MSG_S msg; DBT key; DBT val; ULE_S ule_initial; @@ -868,11 +868,11 @@ static void test_le_optimize(void) { TXNID optimize_txnid = 1000; memset(&key, 0, sizeof(key)); memset(&val, 0, sizeof(val)); - XIDS root_xids = xids_get_root_xids(); + XIDS root_xids = toku_xids_get_root_xids(); XIDS msg_xids; - int r = xids_create_child(root_xids, &msg_xids, optimize_txnid); + int r = toku_xids_create_child(root_xids, &msg_xids, optimize_txnid); assert(r==0); - msg = msg_init(FT_OPTIMIZE, msg_xids, &key, &val); + ft_msg msg(&key, &val, FT_OPTIMIZE, ZERO_MSN, msg_xids); // // create the key @@ -897,8 +897,8 @@ static void test_le_optimize(void) { ule_expected.uxrs[0].vallen = valsize; ule_expected.uxrs[0].valp = valbuf; - test_msg_modify_ule(&ule_initial,&msg); - verify_ule_equal(&ule_initial,&ule_expected); + 
test_msg_modify_ule(&ule_initial, msg); + verify_ule_equal(&ule_initial, &ule_expected); // // add another committed entry and ensure no effect @@ -915,8 +915,8 @@ static void test_le_optimize(void) { ule_expected.uxrs[1].vallen = 0; ule_expected.uxrs[1].valp = NULL; - test_msg_modify_ule(&ule_initial,&msg); - verify_ule_equal(&ule_initial,&ule_expected); + test_msg_modify_ule(&ule_initial, msg); + verify_ule_equal(&ule_initial, &ule_expected); // // now test when there is one provisional, three cases, after, equal, and before FT_OPTIMIZE's transaction @@ -928,20 +928,20 @@ static void test_le_optimize(void) { ule_expected.num_cuxrs = 1; ule_expected.num_puxrs = 1; ule_expected.uxrs[1].xid = 1500; - test_msg_modify_ule(&ule_initial,&msg); - verify_ule_equal(&ule_initial,&ule_expected); + test_msg_modify_ule(&ule_initial, msg); + verify_ule_equal(&ule_initial, &ule_expected); ule_initial.uxrs[1].xid = 1000; ule_expected.uxrs[1].xid = 1000; - test_msg_modify_ule(&ule_initial,&msg); - verify_ule_equal(&ule_initial,&ule_expected); + test_msg_modify_ule(&ule_initial, msg); + verify_ule_equal(&ule_initial, &ule_expected); ule_initial.uxrs[1].xid = 500; ule_expected.uxrs[1].xid = 500; ule_expected.num_cuxrs = 2; ule_expected.num_puxrs = 0; - test_msg_modify_ule(&ule_initial,&msg); - verify_ule_equal(&ule_initial,&ule_expected); + test_msg_modify_ule(&ule_initial, msg); + verify_ule_equal(&ule_initial, &ule_expected); // // now test cases with two provisional @@ -962,13 +962,13 @@ static void test_le_optimize(void) { ule_expected.uxrs[2].vallen = valsize; ule_expected.uxrs[2].valp = valbuf; ule_expected.uxrs[1].xid = 1200; - test_msg_modify_ule(&ule_initial,&msg); - verify_ule_equal(&ule_initial,&ule_expected); + test_msg_modify_ule(&ule_initial, msg); + verify_ule_equal(&ule_initial, &ule_expected); ule_initial.uxrs[1].xid = 1000; ule_expected.uxrs[1].xid = 1000; - test_msg_modify_ule(&ule_initial,&msg); - verify_ule_equal(&ule_initial,&ule_expected); + test_msg_modify_ule(&ule_initial, msg); + verify_ule_equal(&ule_initial, &ule_expected); ule_initial.uxrs[1].xid = 800; ule_expected.uxrs[1].xid = 800; @@ -977,12 +977,12 @@ static void test_le_optimize(void) { ule_expected.uxrs[1].type = ule_initial.uxrs[2].type; ule_expected.uxrs[1].valp = ule_initial.uxrs[2].valp; ule_expected.uxrs[1].vallen = ule_initial.uxrs[2].vallen; - test_msg_modify_ule(&ule_initial,&msg); - verify_ule_equal(&ule_initial,&ule_expected); + test_msg_modify_ule(&ule_initial, msg); + verify_ule_equal(&ule_initial, &ule_expected); - xids_destroy(&msg_xids); - xids_destroy(&root_xids); + toku_xids_destroy(&msg_xids); + toku_xids_destroy(&root_xids); } //TODO: #1125 tests: @@ -1020,9 +1020,9 @@ static void test_le_optimize(void) { static void init_xids(void) { uint32_t i; - nested_xids[0] = xids_get_root_xids(); + nested_xids[0] = toku_xids_get_root_xids(); for (i = 1; i < MAX_TRANSACTION_RECORDS; i++) { - int r = xids_create_child(nested_xids[i-1], &nested_xids[i], i * 37 + random() % 36); + int r = toku_xids_create_child(nested_xids[i-1], &nested_xids[i], i * 37 + random() % 36); assert(r==0); } } @@ -1031,7 +1031,7 @@ static void destroy_xids(void) { uint32_t i; for (i = 0; i < MAX_TRANSACTION_RECORDS; i++) { - xids_destroy(&nested_xids[i]); + toku_xids_destroy(&nested_xids[i]); } } diff --git a/storage/tokudb/ft-index/ft/tests/test-merges-on-cleaner.cc b/storage/tokudb/ft-index/ft/tests/test-merges-on-cleaner.cc index 142a41ae6c9..f67cfa78734 100644 --- a/storage/tokudb/ft-index/ft/tests/test-merges-on-cleaner.cc +++ 
b/storage/tokudb/ft-index/ft/tests/test-merges-on-cleaner.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,10 +94,9 @@ PATENT RIGHTS GRANT: #include <ft-cachetable-wrappers.h> #include "ft-flusher.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -131,7 +130,7 @@ doit (void) { int r; - toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &ft, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -230,8 +229,8 @@ doit (void) { r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair); assert(r==0); - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, ft->ft); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(ft->ft); toku_pin_ftnode( ft->ft, node_internal, @@ -253,7 +252,7 @@ doit (void) { ); // verify that node_internal's buffer is empty - fill_bfe_for_min_read(&bfe, ft->ft); + bfe.create_for_min_read(ft->ft); toku_pin_ftnode( ft->ft, node_internal, diff --git a/storage/tokudb/ft-index/ft/tests/test-oldest-referenced-xid-flush.cc b/storage/tokudb/ft-index/ft/tests/test-oldest-referenced-xid-flush.cc index 60728582389..fc642eab8df 100644 --- a/storage/tokudb/ft-index/ft/tests/test-oldest-referenced-xid-flush.cc +++ b/storage/tokudb/ft-index/ft/tests/test-oldest-referenced-xid-flush.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -131,7 +131,7 @@ static void test_oldest_referenced_xid_gets_propogated(void) { FT_HANDLE t; BLOCKNUM grandchild_leaf_blocknum, child_nonleaf_blocknum, root_blocknum; - toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr); unlink("foo1.ft_handle"); r = toku_open_ft_handle("foo1.ft_handle", 1, &t, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, nullptr, toku_builtin_compare_fun); assert(r==0); @@ -167,8 +167,8 @@ static void test_oldest_referenced_xid_gets_propogated(void) { // first verify the child FTNODE node = NULL; - struct ftnode_fetch_extra bfe; - fill_bfe_for_min_read(&bfe, t->ft); + ftnode_fetch_extra bfe; + bfe.create_for_min_read(t->ft); toku_pin_ftnode( t->ft, child_nonleaf_blocknum, diff --git a/storage/tokudb/ft-index/ft/tests/test-pick-child-to-flush.cc b/storage/tokudb/ft-index/ft/tests/test-pick-child-to-flush.cc index 96482177a31..d2fe0ef9469 100644 --- a/storage/tokudb/ft-index/ft/tests/test-pick-child-to-flush.cc +++ b/storage/tokudb/ft-index/ft/tests/test-pick-child-to-flush.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -97,10 +97,9 @@ PATENT RIGHTS GRANT: #include "ft-flusher.h" #include "ft-flusher-internal.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -165,7 +164,7 @@ doit (void) { BLOCKNUM node_leaf[2]; int r; - toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -245,7 +244,7 @@ doit (void) { // what we say and flushes the child we pick FTNODE node = NULL; toku_pin_node_with_min_bfe(&node, node_internal, t); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); assert(node->n_children == 2); assert(!node->dirty); assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) > 0); @@ -268,7 +267,7 @@ doit (void) { assert(num_flushes_called == 1); toku_pin_node_with_min_bfe(&node, node_internal, t); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); assert(node->dirty); assert(node->n_children == 2); // child 0 should have empty buffer because it flushed @@ -287,7 +286,7 @@ doit (void) { toku_pin_node_with_min_bfe(&node, node_internal, t); assert(node->dirty); - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); assert(node->n_children == 2); // both buffers should be empty now assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) == 0); @@ -305,7 +304,7 @@ doit (void) { toku_pin_node_with_min_bfe(&node, node_internal, t); assert(node->dirty); // nothing was flushed, but since we were trying to flush to a leaf, both become dirty - toku_assert_entire_node_in_memory(node); + toku_ftnode_assert_fully_in_memory(node); assert(node->n_children == 2); // both buffers should be empty now assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) == 0); @@ -326,7 +325,7 @@ doit (void) { // use a for loop so to get us down both paths for (int i = 0; i < 2; i++) { toku_pin_node_with_min_bfe(&node, node_root, t); - toku_assert_entire_node_in_memory(node); // entire root is in memory + toku_ftnode_assert_fully_in_memory(node); // entire root is in memory curr_child_to_flush = i; num_flushes_called = 0; toku_ft_flush_some_child(t->ft, node, &fa); @@ -376,7 +375,7 @@ doit (void) { //now let's do the same test as above toku_pin_node_with_min_bfe(&node, node_root, t); - toku_assert_entire_node_in_memory(node); // entire root is in memory + toku_ftnode_assert_fully_in_memory(node); // entire root is in memory curr_child_to_flush = 0; num_flushes_called = 0; toku_ft_flush_some_child(t->ft, node, &fa); diff --git a/storage/tokudb/ft-index/ft/tests/test-txn-child-manager.cc b/storage/tokudb/ft-index/ft/tests/test-txn-child-manager.cc index 6ce44f0b3d3..8a67df8aa97 100644 --- a/storage/tokudb/ft-index/ft/tests/test-txn-child-manager.cc +++ b/storage/tokudb/ft-index/ft/tests/test-txn-child-manager.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -92,7 +92,7 @@ PATENT RIGHTS GRANT: #include "test.h" #include "toku_os.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" #include "test-ft-txns.h" diff --git a/storage/tokudb/ft-index/ft/key.cc b/storage/tokudb/ft-index/ft/tests/test-upgrade-recovery-logs.cc index 3940e1e274a..528e7889599 100644 --- a/storage/tokudb/ft-index/ft/key.cc +++ b/storage/tokudb/ft-index/ft/tests/test-upgrade-recovery-logs.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,101 +89,105 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "key.h" -#include "fttypes.h" -#include <memory.h> - -#if 0 -int toku_keycompare (bytevec key1b, ITEMLEN key1len, bytevec key2b, ITEMLEN key2len) { - const unsigned char *key1 = key1b; - const unsigned char *key2 = key2b; - while (key1len > 0 && key2len > 0) { - unsigned char b1 = key1[0]; - unsigned char b2 = key2[0]; - if (b1<b2) return -1; - if (b1>b2) return 1; - key1len--; key1++; - key2len--; key2++; +// Test that recovery works correctly on a recovery log in a log directory. + +#include "test.h" +#include <libgen.h> + +static void run_recovery(const char *testdir) { + int r; + + int log_version; + char shutdown[32+1]; + r = sscanf(testdir, "upgrade-recovery-logs-%d-%32s", &log_version, shutdown); + assert(r == 2); + + char **logfiles = nullptr; + int n_logfiles = 0; + r = toku_logger_find_logfiles(testdir, &logfiles, &n_logfiles); + CKERR(r); + assert(n_logfiles > 0); + + FILE *f = fopen(logfiles[n_logfiles-1], "r"); + assert(f); + uint32_t real_log_version; + r = toku_read_logmagic(f, &real_log_version); + CKERR(r); + assert((uint32_t)log_version == (uint32_t)real_log_version); + r = fclose(f); + CKERR(r); + + toku_logger_free_logfiles(logfiles, n_logfiles); + + // test needs recovery + r = tokuft_needs_recovery(testdir, false); + if (strcmp(shutdown, "clean") == 0) { + CKERR(r); // clean does not need recovery + } else if (strncmp(shutdown, "dirty", 5) == 0) { + CKERR2(r, 1); // dirty needs recovery + } else { + CKERR(EINVAL); } - if (key1len<key2len) return -1; - if (key1len>key2len) return 1; - return 0; -} -#elif 0 -int toku_keycompare (bytevec key1, ITEMLEN key1len, bytevec key2, ITEMLEN key2len) { - if (key1len==key2len) { - return memcmp(key1,key2,key1len); - } else if (key1len<key2len) { - int r = memcmp(key1,key2,key1len); - if (r<=0) return -1; /* If the keys are the same up to 1's length, then return -1, since key1 is shorter than key2. 
*/ - else return 1; + // test maybe upgrade log + LSN lsn_of_clean_shutdown; + bool upgrade_in_progress; + r = toku_maybe_upgrade_log(testdir, testdir, &lsn_of_clean_shutdown, &upgrade_in_progress); + if (strcmp(shutdown, "dirty") == 0 && log_version <= 24) { + CKERR2(r, TOKUDB_UPGRADE_FAILURE); // we dont support dirty upgrade from versions <= 24 + return; } else { - return -toku_keycompare(key2,key2len,key1,key1len); + CKERR(r); } -} -#elif 0 - -int toku_keycompare (bytevec key1, ITEMLEN key1len, bytevec key2, ITEMLEN key2len) { - if (key1len==key2len) { - return memcmp(key1,key2,key1len); - } else if (key1len<key2len) { - int r = memcmp(key1,key2,key1len); - if (r<=0) return -1; /* If the keys are the same up to 1's length, then return -1, since key1 is shorter than key2. */ - else return 1; - } else { - int r = memcmp(key1,key2,key2len); - if (r>=0) return 1; /* If the keys are the same up to 2's length, then return 1 since key1 is longer than key2 */ - else return -1; + + if (!verbose) { + // redirect stderr + int devnul = open(DEV_NULL_FILE, O_WRONLY); + assert(devnul >= 0); + int rr = toku_dup2(devnul, fileno(stderr)); + assert(rr == fileno(stderr)); + rr = close(devnul); + assert(rr == 0); } -} -#elif 0 -/* This one looks tighter, but it does use memcmp... */ -int toku_keycompare (bytevec key1, ITEMLEN key1len, bytevec key2, ITEMLEN key2len) { - int comparelen = key1len<key2len ? key1len : key2len; - const unsigned char *k1; - const unsigned char *k2; - for (k1=key1, k2=key2; - comparelen>0; - k1++, k2++, comparelen--) { - if (*k1 != *k2) { - return (int)*k1-(int)*k2; - } + + // run recovery + if (r == 0) { + r = tokuft_recover(NULL, + NULL_prepared_txn_callback, + NULL_keep_cachetable_callback, + NULL_logger, testdir, testdir, 0, 0, 0, NULL, 0); + CKERR(r); } - if (key1len<key2len) return -1; - if (key1len>key2len) return 1; - return 0; } -#else -/* unroll that one four times */ -// when a and b are chars, return a-b is safe here because return type is int. No over/underflow possible. -int toku_keycompare (bytevec key1, ITEMLEN key1len, bytevec key2, ITEMLEN key2len) { - int comparelen = key1len<key2len ? 
key1len : key2len; - const unsigned char *k1; - const unsigned char *k2; - for (CAST_FROM_VOIDP(k1, key1), CAST_FROM_VOIDP(k2, key2); - comparelen>4; - k1+=4, k2+=4, comparelen-=4) { - { int v1=k1[0], v2=k2[0]; if (v1!=v2) return v1-v2; } - { int v1=k1[1], v2=k2[1]; if (v1!=v2) return v1-v2; } - { int v1=k1[2], v2=k2[2]; if (v1!=v2) return v1-v2; } - { int v1=k1[3], v2=k2[3]; if (v1!=v2) return v1-v2; } + +int test_main(int argc, const char *argv[]) { + int i = 0; + for (i = 1; i < argc; i++) { + if (strcmp(argv[i], "-v") == 0) { + verbose++; + continue; + } + if (strcmp(argv[i], "-q") == 0) { + if (verbose > 0) + verbose--; + continue; + } + break; } - for (; - comparelen>0; - k1++, k2++, comparelen--) { - if (*k1 != *k2) { - return (int)*k1-(int)*k2; - } + if (i < argc) { + const char *full_test_dir = argv[i]; + const char *test_dir = basename((char *)full_test_dir); + if (strcmp(full_test_dir, test_dir) != 0) { + int r; + char cmd[32 + strlen(full_test_dir) + strlen(test_dir)]; + sprintf(cmd, "rm -rf %s", test_dir); + r = system(cmd); + CKERR(r); + sprintf(cmd, "cp -r %s %s", full_test_dir, test_dir); + r = system(cmd); + CKERR(r); + } + run_recovery(test_dir); } - if (key1len<key2len) return -1; - if (key1len>key2len) return 1; return 0; } - -#endif - -int -toku_builtin_compare_fun (DB *db __attribute__((__unused__)), const DBT *a, const DBT*b) { - return toku_keycompare(a->data, a->size, b->data, b->size); -} diff --git a/storage/tokudb/ft-index/ft/tests/test.h b/storage/tokudb/ft-index/ft/tests/test.h index f22d8cdbf67..3170146a120 100644 --- a/storage/tokudb/ft-index/ft/tests/test.h +++ b/storage/tokudb/ft-index/ft/tests/test.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,6 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
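For reference, the deleted toku_keycompare variants above all implement the same ordering: plain lexicographic byte comparison, with the shorter key sorting first when one key is a prefix of the other. A self-contained sketch of that contract follows; it is not the TokuFT source, and the function name is invented.

#include <cassert>
#include <cstdint>
#include <cstring>

// Lexicographic byte compare: the shared prefix decides first; on an
// identical prefix the shorter key sorts first.
static int keycompare_sketch(const void *a, uint32_t alen,
                             const void *b, uint32_t blen) {
    uint32_t n = alen < blen ? alen : blen;
    int c = memcmp(a, b, n);        // compare the common prefix
    if (c != 0) return c;
    if (alen < blen) return -1;     // "abc" sorts before "abcd"
    if (alen > blen) return +1;
    return 0;
}

int main(void) {
    assert(keycompare_sketch("abc", 3, "abd", 3) < 0);
    assert(keycompare_sketch("abc", 3, "abcd", 4) < 0);
    assert(keycompare_sketch("abc", 3, "abc", 3) == 0);
    return 0;
}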
@@ -99,15 +101,19 @@ PATENT RIGHTS GRANT: #include <string.h> #include <portability/toku_path.h> -#include "ft.h" -#include "key.h" -#include "block_table.h" -#include "log-internal.h" -#include "logger.h" -#include "fttypes.h" -#include "ft-ops.h" -#include "cachetable.h" -#include "cachetable-internal.h" +#include "ft/serialize/block_allocator.h" +#include "ft/serialize/block_table.h" +#include "ft/cachetable/cachetable.h" +#include "ft/cachetable/cachetable-internal.h" +#include "ft/cursor.h" +#include "ft/ft.h" +#include "ft/ft-ops.h" +#include "ft/serialize/ft-serialize.h" +#include "ft/serialize/ft_node-serialize.h" +#include "ft/logger/log-internal.h" +#include "ft/logger/logger.h" +#include "ft/node.h" +#include "util/bytestring.h" #define CKERR(r) ({ int __r = r; if (__r!=0) fprintf(stderr, "%s:%d error %d %s\n", __FILE__, __LINE__, __r, strerror(r)); assert(__r==0); }) #define CKERR2(r,r2) do { if (r!=r2) fprintf(stderr, "%s:%d error %d %s, expected %d\n", __FILE__, __LINE__, r, strerror(r), r2); assert(r==r2); } while (0) @@ -118,15 +124,17 @@ PATENT RIGHTS GRANT: fflush(stderr); \ } while (0) -const ITEMLEN len_ignore = 0xFFFFFFFF; +const uint32_t len_ignore = 0xFFFFFFFF; +static const prepared_txn_callback_t NULL_prepared_txn_callback __attribute__((__unused__)) = NULL; +static const keep_cachetable_callback_t NULL_keep_cachetable_callback __attribute__((__unused__)) = NULL; +static const TOKULOGGER NULL_logger __attribute__((__unused__)) = NULL; // dummymsn needed to simulate msn because test messages are injected at a lower level than toku_ft_root_put_msg() #define MIN_DUMMYMSN ((MSN) {(uint64_t)1<<62}) static MSN dummymsn; static int dummymsn_initialized = 0; - static void initialize_dummymsn(void) { if (dummymsn_initialized == 0) { @@ -150,14 +158,14 @@ last_dummymsn(void) { struct check_pair { - ITEMLEN keylen; // A keylen equal to 0xFFFFFFFF means don't check the keylen or the key. - bytevec key; // A NULL key means don't check the key. - ITEMLEN vallen; // Similarly for vallen and null val. - bytevec val; + uint32_t keylen; // A keylen equal to 0xFFFFFFFF means don't check the keylen or the key. + const void *key; // A NULL key means don't check the key. + uint32_t vallen; // Similarly for vallen and null val. 
+ const void *val; int call_count; }; static int -lookup_checkf (ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *pair_v, bool lock_only) { +lookup_checkf (uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *pair_v, bool lock_only) { if (!lock_only) { struct check_pair *pair = (struct check_pair *) pair_v; if (key!=NULL) { @@ -182,8 +190,8 @@ ft_lookup_and_check_nodup (FT_HANDLE t, const char *keystring, const char *valst { DBT k; toku_fill_dbt(&k, keystring, strlen(keystring) + 1); - struct check_pair pair = {(ITEMLEN) (1+strlen(keystring)), keystring, - (ITEMLEN) (1+strlen(valstring)), valstring, + struct check_pair pair = {(uint32_t) (1+strlen(keystring)), keystring, + (uint32_t) (1+strlen(valstring)), valstring, 0}; int r = toku_ft_lookup(t, &k, lookup_checkf, &pair); assert(r==0); @@ -195,7 +203,7 @@ ft_lookup_and_fail_nodup (FT_HANDLE t, char *keystring) { DBT k; toku_fill_dbt(&k, keystring, strlen(keystring) + 1); - struct check_pair pair = {(ITEMLEN) (1+strlen(keystring)), keystring, + struct check_pair pair = {(uint32_t) (1+strlen(keystring)), keystring, 0, 0, 0}; int r = toku_ft_lookup(t, &k, lookup_checkf, &pair); @@ -392,4 +400,3 @@ main(int argc, const char *argv[]) { toku_ft_layer_destroy(); return r; } - diff --git a/storage/tokudb/ft-index/ft/tests/test1308a.cc b/storage/tokudb/ft-index/ft/tests/test1308a.cc index a39953ad354..ddbc43de7dc 100644 --- a/storage/tokudb/ft-index/ft/tests/test1308a.cc +++ b/storage/tokudb/ft-index/ft/tests/test1308a.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/test3681.cc b/storage/tokudb/ft-index/ft/tests/test3681.cc index 44f522d059f..db5e8232cd4 100644 --- a/storage/tokudb/ft-index/ft/tests/test3681.cc +++ b/storage/tokudb/ft-index/ft/tests/test3681.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,19 +95,18 @@ PATENT RIGHTS GRANT: // * Thread 1 calls apply_msg_to_in_memory_leaves, calls get_and_pin_if_in_memory, tries to get a read lock on the root node and blocks on the rwlock because there is a write request on the lock. -#include "checkpoint.h" +#include "cachetable/checkpoint.h" #include "test.h" CACHETABLE ct; FT_HANDLE t; -static DB * const null_db = 0; static TOKUTXN const null_txn = 0; volatile bool done = false; static void setup (void) { - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); const char *fname = TOKU_TEST_FILENAME; unlink(fname); { int r = toku_open_ft_handle(fname, 1, &t, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); } diff --git a/storage/tokudb/ft-index/ft/tests/test3856.cc b/storage/tokudb/ft-index/ft/tests/test3856.cc index 6a8b1155f51..c0b693e3421 100644 --- a/storage/tokudb/ft-index/ft/tests/test3856.cc +++ b/storage/tokudb/ft-index/ft/tests/test3856.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -90,7 +90,7 @@ PATENT RIGHTS GRANT: // it used to be the case that we copied the left and right keys of a // range to be prelocked but never freed them, this test checks that they -// are freed (as of this time, this happens in destroy_bfe_for_prefetch) +// are freed (as of this time, this happens in ftnode_fetch_extra::destroy()) #include "test.h" @@ -99,7 +99,6 @@ PATENT RIGHTS GRANT: static const char *fname = TOKU_TEST_FILENAME; static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static int const nodesize = 1<<12, basementnodesize = 1<<9; static const enum toku_compression_method compression_method = TOKU_DEFAULT_COMPRESSION_METHOD; static int const count = 1000; @@ -111,7 +110,7 @@ string_cmp(DB* UU(db), const DBT *a, const DBT *b) } static int -found(ITEMLEN UU(keylen), bytevec key, ITEMLEN UU(vallen), bytevec UU(val), void *UU(extra), bool lock_only) +found(uint32_t UU(keylen), const void *key, uint32_t UU(vallen), const void *UU(val), void *UU(extra), bool lock_only) { assert(key != NULL && !lock_only); return 0; @@ -123,7 +122,7 @@ test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute_ CACHETABLE ct; FT_HANDLE t; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); unlink(fname); int r = toku_open_ft_handle(fname, 1, &t, nodesize, basementnodesize, compression_method, ct, null_txn, string_cmp); assert(r==0); @@ -137,7 +136,7 @@ test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute_ r = toku_close_ft_handle_nolsn(t, 0); assert(r == 0); toku_cachetable_close(&ct); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &t, nodesize, basementnodesize, compression_method, ct, null_txn, string_cmp); assert(r == 0); for (int n = 0; n < count/100; ++n) { diff --git a/storage/tokudb/ft-index/ft/tests/test3884.cc b/storage/tokudb/ft-index/ft/tests/test3884.cc index 7fc3059800d..a4a9e8568cf 100644 --- a/storage/tokudb/ft-index/ft/tests/test3884.cc +++ b/storage/tokudb/ft-index/ft/tests/test3884.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -90,7 +90,7 @@ PATENT RIGHTS GRANT: // it used to be the case that we copied the left and right keys of a // range to be prelocked but never freed them, this test checks that they -// are freed (as of this time, this happens in destroy_bfe_for_prefetch) +// are freed (as of this time, this happens in ftnode_fetch_extra::destroy()) #include "test.h" @@ -111,7 +111,6 @@ static const int vallen = 64 - sizeof(long) - (sizeof(((LEAFENTRY)NULL)->type) #define dummy_msn_3884 ((MSN) { (uint64_t) 3884 * MIN_MSN.msn }) static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; static const char *fname = TOKU_TEST_FILENAME; static void @@ -154,12 +153,11 @@ static void setup_ftnode_header(struct ftnode *node) { node->flags = 0x11223344; - node->thisnodename.b = 20; + node->blocknum.b = 20; node->layout_version = FT_LAYOUT_VERSION; node->layout_version_original = FT_LAYOUT_VERSION; node->height = 0; node->dirty = 1; - node->totalchildkeylens = 0; node->oldest_referenced_xid_known = TXNID_NONE; } @@ -169,12 +167,12 @@ setup_ftnode_partitions(struct ftnode *node, int n_children, const MSN msn, size node->n_children = n_children; node->max_msn_applied_to_node_on_disk = msn; MALLOC_N(node->n_children, node->bp); - MALLOC_N(node->n_children - 1, node->childkeys); for (int bn = 0; bn < node->n_children; ++bn) { BP_STATE(node, bn) = PT_AVAIL; set_BLB(node, bn, toku_create_empty_bn()); BLB_MAX_MSN_APPLIED(node, bn) = msn; } + node->pivotkeys.create_empty(); } static void @@ -186,7 +184,7 @@ verify_basement_node_msns(FTNODE node, MSN expected) } // -// Maximum node size according to the BRT: 1024 (expected node size after split) +// Maximum node size according to the FT: 1024 (expected node size after split) // Maximum basement node size: 256 // Actual node size before split: 2048 // Actual basement node size before split: 256 @@ -210,15 +208,15 @@ test_split_on_boundary(void) insert_dummy_value(&sn, bn, k, i); } if (bn < sn.n_children - 1) { - toku_memdup_dbt(&sn.childkeys[bn], &k, sizeof k); - sn.totalchildkeylens += (sizeof k); + DBT pivotkey; + sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn); } } unlink(fname); CACHETABLE ct; FT_HANDLE ft; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); FTNODE nodea, nodeb; @@ -233,15 +231,12 @@ test_split_on_boundary(void) r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0); toku_cachetable_close(&ct); - if (splitk.data) { - toku_free(splitk.data); - } - + toku_destroy_dbt(&splitk); toku_destroy_ftnode_internals(&sn); } // -// Maximum node size according to the BRT: 1024 (expected node size after split) +// Maximum node size according to the FT: 1024 (expected node size after split) // Maximum basement node size: 256 (except the last) // Actual node size before split: 4095 // Actual basement node size before split: 256 (except the last, of size 2K) @@ -270,8 +265,8 @@ test_split_with_everything_on_the_left(void) k = bn * eltsperbn + i; big_val_size += insert_dummy_value(&sn, bn, k, i); } - toku_memdup_dbt(&sn.childkeys[bn], &k, sizeof k); - sn.totalchildkeylens += (sizeof k); + DBT pivotkey; + sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn); } else { k = bn * eltsperbn; // we want this to be as big as the rest of our data and a @@ -288,7 +283,7 @@ test_split_with_everything_on_the_left(void) 
unlink(fname); CACHETABLE ct; FT_HANDLE ft; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); FTNODE nodea, nodeb; @@ -300,16 +295,13 @@ test_split_with_everything_on_the_left(void) r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0); toku_cachetable_close(&ct); - if (splitk.data) { - toku_free(splitk.data); - } - + toku_destroy_dbt(&splitk); toku_destroy_ftnode_internals(&sn); } // -// Maximum node size according to the BRT: 1024 (expected node size after split) +// Maximum node size according to the FT: 1024 (expected node size after split) // Maximum basement node size: 256 (except the last) // Actual node size before split: 4095 // Actual basement node size before split: 256 (except the last, of size 2K) @@ -339,8 +331,8 @@ test_split_on_boundary_of_last_node(void) k = bn * eltsperbn + i; big_val_size += insert_dummy_value(&sn, bn, k, i); } - toku_memdup_dbt(&sn.childkeys[bn], &k, sizeof k); - sn.totalchildkeylens += (sizeof k); + DBT pivotkey; + sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn); } else { k = bn * eltsperbn; // we want this to be slightly smaller than all the rest of @@ -360,7 +352,7 @@ test_split_on_boundary_of_last_node(void) unlink(fname); CACHETABLE ct; FT_HANDLE ft; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); FTNODE nodea, nodeb; @@ -372,10 +364,7 @@ test_split_on_boundary_of_last_node(void) r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0); toku_cachetable_close(&ct); - if (splitk.data) { - toku_free(splitk.data); - } - + toku_destroy_dbt(&splitk); toku_destroy_ftnode_internals(&sn); } @@ -405,8 +394,8 @@ test_split_at_begin(void) totalbytes += insert_dummy_value(&sn, bn, k, i-1); } if (bn < sn.n_children - 1) { - toku_memdup_dbt(&sn.childkeys[bn], &k, sizeof k); - sn.totalchildkeylens += (sizeof k); + DBT pivotkey; + sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn); } } { // now add the first element @@ -424,7 +413,7 @@ test_split_at_begin(void) unlink(fname); CACHETABLE ct; FT_HANDLE ft; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); FTNODE nodea, nodeb; @@ -436,10 +425,7 @@ test_split_at_begin(void) r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0); toku_cachetable_close(&ct); - if (splitk.data) { - toku_free(splitk.data); - } - + toku_destroy_dbt(&splitk); toku_destroy_ftnode_internals(&sn); } @@ -476,15 +462,15 @@ test_split_at_end(void) } } if (bn < sn.n_children - 1) { - toku_memdup_dbt(&sn.childkeys[bn], &k, sizeof k); - sn.totalchildkeylens += (sizeof k); + DBT pivotkey; + sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn); } } unlink(fname); CACHETABLE ct; FT_HANDLE ft; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); FTNODE nodea, nodeb; @@ -496,14 +482,11 @@ 
test_split_at_end(void) r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0); toku_cachetable_close(&ct); - if (splitk.data) { - toku_free(splitk.data); - } - + toku_destroy_dbt(&splitk); toku_destroy_ftnode_internals(&sn); } -// Maximum node size according to the BRT: 1024 (expected node size after split) +// Maximum node size according to the FT: 1024 (expected node size after split) // Maximum basement node size: 256 // Actual node size before split: 2048 // Actual basement node size before split: 256 @@ -530,15 +513,15 @@ test_split_odd_nodes(void) insert_dummy_value(&sn, bn, k, i); } if (bn < sn.n_children - 1) { - toku_memdup_dbt(&sn.childkeys[bn], &k, sizeof k); - sn.totalchildkeylens += (sizeof k); + DBT pivotkey; + sn.pivotkeys.insert_at(toku_fill_dbt(&pivotkey, &k, sizeof(k)), bn); } } unlink(fname); CACHETABLE ct; FT_HANDLE ft; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &ft, nodesize, bnsize, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); FTNODE nodea, nodeb; @@ -553,10 +536,7 @@ test_split_odd_nodes(void) r = toku_close_ft_handle_nolsn(ft, NULL); assert(r == 0); toku_cachetable_close(&ct); - if (splitk.data) { - toku_free(splitk.data); - } - + toku_destroy_dbt(&splitk); toku_destroy_ftnode_internals(&sn); } diff --git a/storage/tokudb/ft-index/ft/tests/test4115.cc b/storage/tokudb/ft-index/ft/tests/test4115.cc index 5f1d041896f..e24696af057 100644 --- a/storage/tokudb/ft-index/ft/tests/test4115.cc +++ b/storage/tokudb/ft-index/ft/tests/test4115.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -96,7 +96,6 @@ PATENT RIGHTS GRANT: #include <unistd.h> static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; const char *fname = TOKU_TEST_FILENAME; CACHETABLE ct; @@ -117,7 +116,7 @@ static void close_ft_and_ct (void) { static void open_ft_and_ct (bool unlink_old) { int r; if (unlink_old) unlink(fname); - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 1, &t, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); toku_ft_set_bt_compare(t, dont_allow_prefix); } diff --git a/storage/tokudb/ft-index/ft/tests/test4244.cc b/storage/tokudb/ft-index/ft/tests/test4244.cc index 10810c7710e..3c2728e941e 100644 --- a/storage/tokudb/ft-index/ft/tests/test4244.cc +++ b/storage/tokudb/ft-index/ft/tests/test4244.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -96,7 +96,6 @@ PATENT RIGHTS GRANT: #include <ft-cachetable-wrappers.h> static TOKUTXN const null_txn = 0; -static DB * const null_db = 0; enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 }; @@ -110,7 +109,7 @@ doit (void) { int r; - toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 500*1024*1024, ZERO_LSN, nullptr); unlink(fname); r = toku_open_ft_handle(fname, 1, &t, NODESIZE, NODESIZE/2, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0); @@ -148,8 +147,8 @@ doit (void) { // then node_internal should be huge // we pin it and verify that it is not FTNODE node; - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, t->ft); + ftnode_fetch_extra bfe; + bfe.create_for_full_read(t->ft); toku_pin_ftnode( t->ft, node_internal, diff --git a/storage/tokudb/ft-index/ft/tests/test_block_allocator_merge.cc b/storage/tokudb/ft-index/ft/tests/test_block_allocator_merge.cc deleted file mode 100644 index af66c7408bf..00000000000 --- a/storage/tokudb/ft-index/ft/tests/test_block_allocator_merge.cc +++ /dev/null @@ -1,236 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. 
- - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2009-2013 Tokutek Inc. All rights reserved." -#include "../block_allocator.h" -#include <memory.h> -#include <assert.h> -// Test the merger. - -int verbose = 0; - -static void -print_array (uint64_t n, const struct block_allocator_blockpair a[/*n*/]) { - printf("{"); - for (uint64_t i=0; i<n; i++) printf(" %016lx", (long)a[i].offset); - printf("}\n"); -} - -static int -compare_blockpairs (const void *av, const void *bv) { - const struct block_allocator_blockpair *CAST_FROM_VOIDP(a, av); - const struct block_allocator_blockpair *CAST_FROM_VOIDP(b, bv); - if (a->offset < b->offset) return -1; - if (a->offset > b->offset) return +1; - return 0; -} - -static void -test_merge (uint64_t an, const struct block_allocator_blockpair a[/*an*/], - uint64_t bn, const struct block_allocator_blockpair b[/*bn*/]) { - if (verbose>1) { printf("a:"); print_array(an, a); } - if (verbose>1) { printf("b:"); print_array(bn, b); } - struct block_allocator_blockpair *MALLOC_N(an+bn, q); - struct block_allocator_blockpair *MALLOC_N(an+bn, m); - if (q==0 || m==0) { - fprintf(stderr, "malloc failed, continuing\n"); - goto malloc_failed; - } - for (uint64_t i=0; i<an; i++) { - q[i] = m[i] = a[i]; - } - for (uint64_t i=0; i<bn; i++) { - q[an+i] = b[i]; - } - if (verbose) printf("qsort\n"); - qsort(q, an+bn, sizeof(*q), compare_blockpairs); - if (verbose>1) { printf("q:"); print_array(an+bn, q); } - if (verbose) printf("merge\n"); - block_allocator_merge_blockpairs_into(an, m, bn, b); - if (verbose) printf("compare\n"); - if (verbose>1) { printf("m:"); print_array(an+bn, m); } - for (uint64_t i=0; i<an+bn; i++) { - assert(q[i].offset == m[i].offset); - } - malloc_failed: - toku_free(q); - toku_free(m); -} - -static uint64_t -compute_a (uint64_t i, int mode) { - if (mode==0) return (((uint64_t)random()) << 32) + i; - if (mode==1) return 2*i; - if (mode==2) return i; - if (mode==3) return (1LL<<50) + i; - abort(); -} 
-static uint64_t -compute_b (uint64_t i, int mode) { - if (mode==0) return (((uint64_t)random()) << 32) + i; - if (mode==1) return 2*i+1; - if (mode==2) return (1LL<<50) + i; - if (mode==3) return i; - abort(); -} - - -static void -test_merge_n_m (uint64_t n, uint64_t m, int mode) -{ - struct block_allocator_blockpair *MALLOC_N(n, na); - struct block_allocator_blockpair *MALLOC_N(m, ma); - if (na==0 || ma==0) { - fprintf(stderr, "malloc failed, continuing\n"); - goto malloc_failed; - } - if (verbose) printf("Filling a[%" PRIu64 "]\n", n); - for (uint64_t i=0; i<n; i++) { - na[i].offset = compute_a(i, mode); - } - if (verbose) printf("Filling b[%" PRIu64 "]\n", m); - for (uint64_t i=0; i<m; i++) { - if (verbose && i % (1+m/10) == 0) { printf("."); fflush(stdout); } - ma[i].offset = compute_b(i, mode); - } - qsort(na, n, sizeof(*na), compare_blockpairs); - qsort(ma, m, sizeof(*ma), compare_blockpairs); - if (verbose) fprintf(stderr, "\ntest_merge\n"); - test_merge(n, na, m, ma); - malloc_failed: - toku_free(na); - toku_free(ma); -} - -static void -test_big_merge (void) { - uint64_t G = 1024LL * 1024LL * 1024LL; - if (toku_os_get_phys_memory_size() < 40 * G) { - fprintf(stderr, "Skipping big merge because there is only %4.1fGiB physical memory\n", toku_os_get_phys_memory_size()/(1024.0*1024.0*1024.0)); - } else { - uint64_t twoG = 2*G; - - uint64_t an = twoG; - uint64_t bn = 1; - struct block_allocator_blockpair *MALLOC_N(an+bn, a); - struct block_allocator_blockpair *MALLOC_N(bn, b); - if (a == nullptr) { - fprintf(stderr, "%s:%u malloc failed, continuing\n", __FUNCTION__, __LINE__); - goto malloc_failed; - } - if (b == nullptr) { - fprintf(stderr, "%s:%u malloc failed, continuing\n", __FUNCTION__, __LINE__); - goto malloc_failed; - } - assert(a); - assert(b); - for (uint64_t i=0; i<an; i++) a[i].offset=i+1; - b[0].offset = 0; - block_allocator_merge_blockpairs_into(an, a, bn, b); - for (uint64_t i=0; i<an+bn; i++) assert(a[i].offset == i); - malloc_failed: - toku_free(a); - toku_free(b); - } -} - -int main (int argc __attribute__((__unused__)), char *argv[] __attribute__((__unused__))) { - test_merge_n_m(4, 4, 0); - test_merge_n_m(16, 16, 0); - test_merge_n_m(0, 100, 0); - test_merge_n_m(100, 0, 0); - test_merge_n_m(1000000, 1000000, 0); - // Cannot run this on my laptop, or even on pointy -#if 0 - uint64_t too_big = 1024LL * 1024LL * 1024LL * 2; - test_merge_n_m(too_big, too_big); - test_merge_n_m(1, too_big, 0); -#endif - test_big_merge(); - return 0; -} diff --git a/storage/tokudb/ft-index/ft/tests/test_logcursor.cc b/storage/tokudb/ft-index/ft/tests/test_logcursor.cc index 7b3f46e3d38..41644a02d07 100644 --- a/storage/tokudb/ft-index/ft/tests/test_logcursor.cc +++ b/storage/tokudb/ft-index/ft/tests/test_logcursor.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -90,9 +90,8 @@ PATENT RIGHTS GRANT: #include <toku_portability.h> #include <string.h> -#include "logcursor.h" +#include "logger/logcursor.h" #include "test.h" -#include "fttypes.h" #if defined(HAVE_LIMITS_H) # include <limits.h> @@ -105,7 +104,6 @@ const char LOGDIR[100] = "./dir.test_logcursor"; const int FSYNC = 1; const int NO_FSYNC = 0; -const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN; const char *namea="a.db"; const char *nameb="b.db"; const char *a="a"; diff --git a/storage/tokudb/ft-index/ft/tests/test_oexcl.cc b/storage/tokudb/ft-index/ft/tests/test_oexcl.cc index b6eacd92362..72fd01c2c89 100644 --- a/storage/tokudb/ft-index/ft/tests/test_oexcl.cc +++ b/storage/tokudb/ft-index/ft/tests/test_oexcl.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_seqinsert_heuristic.cc b/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_seqinsert_heuristic.cc new file mode 100644 index 00000000000..69962153979 --- /dev/null +++ b/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_seqinsert_heuristic.cc @@ -0,0 +1,183 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2014 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. 
+ + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2014 Tokutek Inc. All rights reserved." + +#include "test.h" + +#include <util/dbt.h> +#include <ft/ft-cachetable-wrappers.h> + +// Each FT maintains a sequential insert heuristic to determine if its +// worth trying to insert directly into a well-known rightmost leaf node. +// +// The heuristic is only maintained when a rightmost leaf node is known. +// +// This test verifies that sequential inserts increase the seqinsert score +// and that a single non-sequential insert resets the score. + +static void test_seqinsert_heuristic(void) { + int r = 0; + char name[TOKU_PATH_MAX + 1]; + toku_path_join(name, 2, TOKU_TEST_FILENAME, "ftdata"); + toku_os_recursive_delete(TOKU_TEST_FILENAME); + r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); CKERR(r); + + FT_HANDLE ft_handle; + CACHETABLE ct; + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); + r = toku_open_ft_handle(name, 1, &ft_handle, + 4*1024*1024, 64*1024, + TOKU_DEFAULT_COMPRESSION_METHOD, ct, NULL, + toku_builtin_compare_fun); CKERR(r); + FT ft = ft_handle->ft; + + int k; + DBT key, val; + const int val_size = 1024 * 1024; + char *XMALLOC_N(val_size, val_buf); + memset(val_buf, 'x', val_size); + toku_fill_dbt(&val, val_buf, val_size); + + // Insert many rows sequentially. This is enough data to: + // - force the root to split (the righmost leaf will then be known) + // - raise the seqinsert score high enough to enable direct rightmost injections + const int rows_to_insert = 200; + for (int i = 0; i < rows_to_insert; i++) { + k = toku_htonl(i); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + } + invariant(ft->rightmost_blocknum.b != RESERVED_BLOCKNUM_NULL); + invariant(ft->seqinsert_score == FT_SEQINSERT_SCORE_THRESHOLD); + + // Insert on the left extreme. The seq insert score is high enough + // that we will attempt to insert into the rightmost leaf. 
We won't + // be successful because key 0 won't be in the bounds of the rightmost leaf. + // This failure should reset the seqinsert score back to 0. + k = toku_htonl(0); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + invariant(ft->seqinsert_score == 0); + + // Insert in the middle. The score should not go up. + k = toku_htonl(rows_to_insert / 2); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + invariant(ft->seqinsert_score == 0); + + // Insert on the right extreme. The score should go up. + k = toku_htonl(rows_to_insert); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + invariant(ft->seqinsert_score == 1); + + // Insert again on the right extreme again, the score should go up. + k = toku_htonl(rows_to_insert + 1); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + invariant(ft->seqinsert_score == 2); + + // Insert close to, but not at, the right extreme. The score should reset. + // -- the magic number 4 derives from the fact that vals are 1mb and nodes are 4mb + k = toku_htonl(rows_to_insert - 4); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + invariant(ft->seqinsert_score == 0); + + toku_free(val_buf); + toku_ft_handle_close(ft_handle); + toku_cachetable_close(&ct); + toku_os_recursive_delete(TOKU_TEST_FILENAME); +} + +int test_main(int argc, const char *argv[]) { + default_parse_args(argc, argv); + test_seqinsert_heuristic(); + return 0; +} diff --git a/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_split_merge.cc b/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_split_merge.cc new file mode 100644 index 00000000000..29515d9925f --- /dev/null +++ b/storage/tokudb/ft-index/ft/tests/test_rightmost_leaf_split_merge.cc @@ -0,0 +1,213 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2014 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
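The new test_rightmost_leaf_seqinsert_heuristic.cc added above asserts a simple scoring rule: right-extreme inserts raise the score up to a threshold, and any other insert resets it to zero. A toy sketch of that rule follows, with invented names and an invented threshold; it is an illustration only, not the FT's actual bookkeeping.

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the real score threshold constant.
static const uint32_t kScoreThreshold = 4;

struct SeqInsertTracker {
    uint32_t score = 0;
    uint64_t last_key = 0;

    // Bump the score for a key larger than anything seen so far,
    // reset it otherwise (the reset behaviour the new test asserts).
    void observe(uint64_t key) {
        if (key > last_key) {
            if (score < kScoreThreshold) score++;
            last_key = key;
        } else {
            score = 0;
        }
    }
    bool try_rightmost_insert() const { return score >= kScoreThreshold; }
};

int main(void) {
    SeqInsertTracker t;
    for (uint64_t k = 1; k <= 10; k++) t.observe(k);   // sequential inserts
    assert(t.try_rightmost_insert());
    t.observe(0);                                      // out-of-order insert
    assert(t.score == 0 && !t.try_rightmost_insert());
    return 0;
}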
+ +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2014 Tokutek Inc. All rights reserved." + +#include "test.h" + +#include <util/dbt.h> +#include <ft/ft-cachetable-wrappers.h> +#include <ft/ft-flusher.h> + +// Promotion tracks the rightmost blocknum in the FT when a message +// is successfully promoted to a non-root leaf node on the right extreme. +// +// This test verifies that a split or merge of the rightmost leaf properly +// maintains the rightmost blocknum (which is constant - the pair's swap values, +// like the root blocknum). + +static void test_split_merge(void) { + int r = 0; + char name[TOKU_PATH_MAX + 1]; + toku_path_join(name, 2, TOKU_TEST_FILENAME, "ftdata"); + toku_os_recursive_delete(TOKU_TEST_FILENAME); + r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU); CKERR(r); + + FT_HANDLE ft_handle; + CACHETABLE ct; + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); + r = toku_open_ft_handle(name, 1, &ft_handle, + 4*1024*1024, 64*1024, + TOKU_DEFAULT_COMPRESSION_METHOD, ct, NULL, + toku_builtin_compare_fun); CKERR(r); + + // We have a root blocknum, but no rightmost blocknum yet. 
+ FT ft = ft_handle->ft; + invariant(ft->h->root_blocknum.b != RESERVED_BLOCKNUM_NULL); + invariant(ft->rightmost_blocknum.b == RESERVED_BLOCKNUM_NULL); + + int k; + DBT key, val; + const int val_size = 1 * 1024 * 1024; + char *XMALLOC_N(val_size, val_buf); + memset(val_buf, 'x', val_size); + toku_fill_dbt(&val, val_buf, val_size); + + // Insert 16 rows (should induce a few splits) + const int rows_to_insert = 16; + for (int i = 0; i < rows_to_insert; i++) { + k = toku_htonl(i); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_insert(ft_handle, &key, &val, NULL); + } + + // rightmost blocknum should be set, because the root split and promotion + // did a rightmost insertion directly into the rightmost leaf, lazily + // initializing the rightmost blocknum. + invariant(ft->rightmost_blocknum.b != RESERVED_BLOCKNUM_NULL); + + BLOCKNUM root_blocknum = ft->h->root_blocknum; + FTNODE root_node; + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); + toku_pin_ftnode(ft, root_blocknum, + toku_cachetable_hash(ft->cf, ft->h->root_blocknum), + &bfe, PL_WRITE_EXPENSIVE, &root_node, true); + // root blocknum should be consistent + invariant(root_node->blocknum.b == ft->h->root_blocknum.b); + // root should have split at least once, and it should now be at height 1 + invariant(root_node->n_children > 1); + invariant(root_node->height == 1); + // rightmost blocknum should no longer be the root, since the root split + invariant(ft->h->root_blocknum.b != ft->rightmost_blocknum.b); + // the right child should have the rightmost blocknum + invariant(BP_BLOCKNUM(root_node, root_node->n_children - 1).b == ft->rightmost_blocknum.b); + + BLOCKNUM rightmost_blocknum_before_merge = ft->rightmost_blocknum; + const int num_children_before_merge = root_node->n_children; + + // delete the last 6 rows. 
+ // - 1mb each, so 6mb deleted + // - should be enough to delete the entire rightmost leaf + some of its neighbor + const int rows_to_delete = 6; + toku_unpin_ftnode(ft, root_node); + for (int i = 0; i < rows_to_delete; i++) { + k = toku_htonl(rows_to_insert - i); + toku_fill_dbt(&key, &k, sizeof(k)); + toku_ft_delete(ft_handle, &key, NULL); + } + toku_pin_ftnode(ft, root_blocknum, + toku_cachetable_hash(ft->cf, root_blocknum), + &bfe, PL_WRITE_EXPENSIVE, &root_node, true); + + // - rightmost leaf should be fusible after those deletes (which were promoted directly to the leaf) + FTNODE rightmost_leaf; + toku_pin_ftnode(ft, rightmost_blocknum_before_merge, + toku_cachetable_hash(ft->cf, rightmost_blocknum_before_merge), + &bfe, PL_WRITE_EXPENSIVE, &rightmost_leaf, true); + invariant(toku_ftnode_get_reactivity(ft, rightmost_leaf) == RE_FUSIBLE); + toku_unpin_ftnode(ft, rightmost_leaf); + + // - merge the rightmost child now that it's fusible + toku_ft_merge_child(ft, root_node, root_node->n_children - 1); + toku_pin_ftnode(ft, root_blocknum, + toku_cachetable_hash(ft->cf, root_blocknum), + &bfe, PL_WRITE_EXPENSIVE, &root_node, true); + + // the merge should have worked, and the root should still be at height 1 + invariant(root_node->n_children < num_children_before_merge); + invariant(root_node->height == 1); + // the rightmost child of the root has the rightmost blocknum + invariant(BP_BLOCKNUM(root_node, root_node->n_children - 1).b == ft->rightmost_blocknum.b); + // the value for rightmost blocknum itself should not have changed + // (we keep it constant, like the root blocknum) + invariant(rightmost_blocknum_before_merge.b == ft->rightmost_blocknum.b); + + toku_unpin_ftnode(ft, root_node); + + toku_free(val_buf); + toku_ft_handle_close(ft_handle); + toku_cachetable_close(&ct); + toku_os_recursive_delete(TOKU_TEST_FILENAME); +} + +int test_main(int argc, const char *argv[]) { + default_parse_args(argc, argv); + test_split_merge(); + return 0; +} diff --git a/storage/tokudb/ft-index/ft/tests/test_toku_malloc_plain_free.cc b/storage/tokudb/ft-index/ft/tests/test_toku_malloc_plain_free.cc index e7188bb0402..7f166fda836 100644 --- a/storage/tokudb/ft-index/ft/tests/test_toku_malloc_plain_free.cc +++ b/storage/tokudb/ft-index/ft/tests/test_toku_malloc_plain_free.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/upgrade_test_simple.cc b/storage/tokudb/ft-index/ft/tests/upgrade_test_simple.cc index 31811527aa2..3496df47e7c 100644 --- a/storage/tokudb/ft-index/ft/tests/upgrade_test_simple.cc +++ b/storage/tokudb/ft-index/ft/tests/upgrade_test_simple.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -97,13 +97,12 @@ PATENT RIGHTS GRANT: #include "ft-flusher.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" static TOKUTXN const null_txn = NULL; -static DB * const null_db = NULL; static int -noop_getf(ITEMLEN UU(keylen), bytevec UU(key), ITEMLEN UU(vallen), bytevec UU(val), void *extra, bool UU(lock_only)) +noop_getf(uint32_t UU(keylen), const void *UU(key), uint32_t UU(vallen), const void *UU(val), void *extra, bool UU(lock_only)) { int *CAST_FROM_VOIDP(calledp, extra); (*calledp)++; @@ -176,7 +175,7 @@ with_open_tree(const char *fname, tree_cb cb, void *cb_extra) FT_HANDLE t; CACHETABLE ct; - toku_cachetable_create(&ct, 16*(1<<20), ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 16*(1<<20), ZERO_LSN, nullptr); r = toku_open_ft_handle(fname, 0, &t, diff --git a/storage/tokudb/ft-index/ft/tests/verify-bad-msn.cc b/storage/tokudb/ft-index/ft/tests/verify-bad-msn.cc index 5eef196f611..a7e3beea1e5 100644 --- a/storage/tokudb/ft-index/ft/tests/verify-bad-msn.cc +++ b/storage/tokudb/ft-index/ft/tests/verify-bad-msn.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -127,9 +127,9 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) MSN msn = next_dummymsn(); // apply an insert to the leaf node - FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} }; + ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids()); txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); - toku_ft_bn_apply_msg_once(BLB(leafnode, 0), &msg, idx, NULL, &gc_info, NULL, NULL); + toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL); // Create bad tree (don't do following): // leafnode->max_msn_applied_to_node = msn; @@ -156,7 +156,7 @@ insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, in unsigned int key = htonl(val); DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key); DBT theval; toku_fill_dbt(&theval, &val, sizeof val); - toku_ft_append_to_child_buffer(ft->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval); + toku_ft_append_to_child_buffer(ft->ft->cmp, node, childnum, FT_INSERT, msn, toku_xids_get_root_xids(), true, &thekey, &theval); // Create bad tree (don't do following): // node->max_msn_applied_to_node = msn; @@ -212,7 +212,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { // create a cachetable CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); // create the ft TOKUTXN null_txn = NULL; @@ -225,7 +225,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey); // set the new root to point to the new tree - toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename); + toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum); // Create bad tree (don't do following): // newroot->max_msn_applied_to_node = last_dummymsn(); // capture msn of last message injected into tree diff --git a/storage/tokudb/ft-index/ft/tests/verify-bad-pivots.cc b/storage/tokudb/ft-index/ft/tests/verify-bad-pivots.cc index 0d477ea22c4..f36ae77a973 100644 --- a/storage/tokudb/ft-index/ft/tests/verify-bad-pivots.cc +++ 
b/storage/tokudb/ft-index/ft/tests/verify-bad-pivots.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -115,9 +115,9 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) // apply an insert to the leaf node MSN msn = next_dummymsn(); - FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} }; + ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids()); txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); - toku_ft_bn_apply_msg_once(BLB(leafnode, 0), &msg, idx, NULL, &gc_info, NULL, NULL); + toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL); // dont forget to dirty the node leafnode->dirty = 1; @@ -182,7 +182,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { // create a cachetable CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); // create the ft TOKUTXN null_txn = NULL; @@ -195,7 +195,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey); // discard the old root block - toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename); + toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum); // unpin the new root toku_unpin_ftnode(ft->ft, newroot); diff --git a/storage/tokudb/ft-index/ft/tests/verify-dup-in-leaf.cc b/storage/tokudb/ft-index/ft/tests/verify-dup-in-leaf.cc index ee0256b4882..a2c6567fdb4 100644 --- a/storage/tokudb/ft-index/ft/tests/verify-dup-in-leaf.cc +++ b/storage/tokudb/ft-index/ft/tests/verify-dup-in-leaf.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -116,9 +116,9 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) // apply an insert to the leaf node MSN msn = next_dummymsn(); - FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} }; + ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids()); txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); - toku_ft_bn_apply_msg_once(BLB(leafnode, 0), &msg, idx, NULL, &gc_info, NULL, NULL); + toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL); // dont forget to dirty the node leafnode->dirty = 1; @@ -140,7 +140,7 @@ test_dup_in_leaf(int do_verify) { // create a cachetable CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); // create the ft TOKUTXN null_txn = NULL; @@ -155,7 +155,7 @@ test_dup_in_leaf(int do_verify) { populate_leaf(newroot, htonl(2), 2); // set the new root to point to the new tree - toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename); + toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum); // unpin the new root toku_unpin_ftnode(ft->ft, newroot); diff --git a/storage/tokudb/ft-index/ft/tests/verify-dup-pivots.cc b/storage/tokudb/ft-index/ft/tests/verify-dup-pivots.cc index f2b74dc3891..4dc42a06c82 100644 --- a/storage/tokudb/ft-index/ft/tests/verify-dup-pivots.cc +++ b/storage/tokudb/ft-index/ft/tests/verify-dup-pivots.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -115,9 +115,9 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) // apply an insert to the leaf node MSN msn = next_dummymsn(); - FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} }; + ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids()); txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); - toku_ft_bn_apply_msg_once(BLB(leafnode, 0), &msg, idx, NULL, &gc_info, NULL, NULL); + toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL); // dont forget to dirty the node leafnode->dirty = 1; @@ -185,7 +185,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { // create a cachetable CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); // create the ft TOKUTXN null_txn = NULL; @@ -199,7 +199,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { // discard the old root block // set the new root to point to the new tree - toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename); + toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum); // unpin the new root toku_unpin_ftnode(ft->ft, newroot); diff --git a/storage/tokudb/ft-index/ft/tests/verify-misrouted-msgs.cc b/storage/tokudb/ft-index/ft/tests/verify-misrouted-msgs.cc index 7efdd374f01..d671dd7a7f3 100644 --- a/storage/tokudb/ft-index/ft/tests/verify-misrouted-msgs.cc +++ b/storage/tokudb/ft-index/ft/tests/verify-misrouted-msgs.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -116,9 +116,9 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) // apply an insert to the leaf node MSN msn = next_dummymsn(); - FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} }; + ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids()); txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); - toku_ft_bn_apply_msg_once(BLB(leafnode,0), &msg, idx, NULL, &gc_info, NULL, NULL); + toku_ft_bn_apply_msg_once(BLB(leafnode,0), msg, idx, keylen, NULL, &gc_info, NULL, NULL); // dont forget to dirty the node leafnode->dirty = 1; @@ -144,7 +144,7 @@ insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, in DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key); DBT theval; toku_fill_dbt(&theval, &val, sizeof val); MSN msn = next_dummymsn(); - toku_ft_append_to_child_buffer(ft->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval); + toku_ft_append_to_child_buffer(ft->ft->cmp, node, childnum, FT_INSERT, msn, toku_xids_get_root_xids(), true, &thekey, &theval); } } @@ -197,7 +197,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { // create a cachetable CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); // create the ft TOKUTXN null_txn = NULL; @@ -211,7 +211,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { // discard the old root block // set the new root to point to the new tree - toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename); + toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum); // unpin the new root toku_unpin_ftnode(ft->ft, newroot); diff --git a/storage/tokudb/ft-index/ft/tests/verify-unsorted-leaf.cc b/storage/tokudb/ft-index/ft/tests/verify-unsorted-leaf.cc index 92d68dd56ee..6a1fe6d0e23 100644 --- a/storage/tokudb/ft-index/ft/tests/verify-unsorted-leaf.cc +++ b/storage/tokudb/ft-index/ft/tests/verify-unsorted-leaf.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -118,9 +118,9 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) // apply an insert to the leaf node MSN msn = next_dummymsn(); - FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} }; + ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids()); txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); - toku_ft_bn_apply_msg_once(BLB(leafnode, 0), &msg, idx, NULL, &gc_info, NULL, NULL); + toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL); // dont forget to dirty the node leafnode->dirty = 1; @@ -142,7 +142,7 @@ test_dup_in_leaf(int do_verify) { // create a cachetable CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); // create the ft TOKUTXN null_txn = NULL; @@ -156,7 +156,7 @@ test_dup_in_leaf(int do_verify) { populate_leaf(newroot, htonl(1), 2); // set the new root to point to the new tree - toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename); + toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum); // unpin the new root toku_unpin_ftnode(ft->ft, newroot); diff --git a/storage/tokudb/ft-index/ft/tests/verify-unsorted-pivots.cc b/storage/tokudb/ft-index/ft/tests/verify-unsorted-pivots.cc index e1b9d9aba22..bb20733f3e1 100644 --- a/storage/tokudb/ft-index/ft/tests/verify-unsorted-pivots.cc +++ b/storage/tokudb/ft-index/ft/tests/verify-unsorted-pivots.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -115,9 +115,9 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen) // apply an insert to the leaf node MSN msn = next_dummymsn(); - FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} }; + ft_msg msg(&thekey, &theval, FT_INSERT, msn, toku_xids_get_root_xids()); txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); - toku_ft_bn_apply_msg_once(BLB(leafnode, 0), &msg, idx, NULL, &gc_info, NULL, NULL); + toku_ft_bn_apply_msg_once(BLB(leafnode, 0), msg, idx, keylen, NULL, &gc_info, NULL, NULL); // dont forget to dirty the node leafnode->dirty = 1; @@ -182,7 +182,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { // create a cachetable CACHETABLE ct = NULL; - toku_cachetable_create(&ct, 0, ZERO_LSN, NULL_LOGGER); + toku_cachetable_create(&ct, 0, ZERO_LSN, nullptr); // create the ft TOKUTXN null_txn = NULL; @@ -195,7 +195,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { FTNODE newroot = make_tree(ft, height, fanout, nperleaf, &seq, &minkey, &maxkey); // discard the old root block - toku_ft_set_new_root_blocknum(ft->ft, newroot->thisnodename); + toku_ft_set_new_root_blocknum(ft->ft, newroot->blocknum); // unpin the new root toku_unpin_ftnode(ft->ft, newroot); diff --git a/storage/tokudb/ft-index/ft/tests/xid_lsn_independent.cc b/storage/tokudb/ft-index/ft/tests/xid_lsn_independent.cc index 93762525a09..545fcf4d927 100644 --- a/storage/tokudb/ft-index/ft/tests/xid_lsn_independent.cc +++ b/storage/tokudb/ft-index/ft/tests/xid_lsn_independent.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -92,7 +92,7 @@ PATENT RIGHTS GRANT: #include "test.h" #include "toku_os.h" -#include "checkpoint.h" +#include "cachetable/checkpoint.h" #define ENVDIR TOKU_TEST_FILENAME #include "test-ft-txns.h" diff --git a/storage/tokudb/ft-index/ft/tests/ybt-test.cc b/storage/tokudb/ft-index/ft/tests/ybt-test.cc index 5e3c6f4b1a7..d53c03718e9 100644 --- a/storage/tokudb/ft-index/ft/tests/ybt-test.cc +++ b/storage/tokudb/ft-index/ft/tests/ybt-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -111,11 +111,11 @@ static void ybt_test0 (void) { toku_init_dbt(&t0); toku_init_dbt(&t1); { - bytevec temp1 = "hello"; + const void *temp1 = "hello"; toku_dbt_set(6, temp1, &t0, &v0); } { - bytevec temp2 = "foo"; + const void *temp2 = "foo"; toku_dbt_set( 4, temp2, &t1, &v1); } assert(t0.size==6); @@ -124,7 +124,7 @@ static void ybt_test0 (void) { assert(strcmp((char*)t1.data, "foo")==0); { - bytevec temp3 = "byebye"; + const void *temp3 = "byebye"; toku_dbt_set(7, temp3, &t1, &v0); /* Use v0, not v1 */ } // This assertion would be wrong, since v0 may have been realloc'd, and t0.data may now point @@ -141,7 +141,7 @@ static void ybt_test0 (void) { t0.flags = DB_DBT_USERMEM; t0.ulen = 0; { - bytevec temp4 = "hello"; + const void *temp4 = "hello"; toku_dbt_set(6, temp4, &t0, 0); } assert(t0.data==0); @@ -152,7 +152,7 @@ static void ybt_test0 (void) { t0.flags = DB_DBT_REALLOC; cleanup(&v0); { - bytevec temp5 = "internationalization"; + const void *temp5 = "internationalization"; toku_dbt_set(21, temp5, &t0, &v0); } assert(v0.data==0); /* Didn't change v0 */ @@ -160,7 +160,7 @@ static void ybt_test0 (void) { assert(strcmp((char*)t0.data, "internationalization")==0); { - bytevec temp6 = "provincial"; + const void *temp6 = "provincial"; toku_dbt_set(11, temp6, &t0, &v0); } assert(t0.size==11); diff --git a/storage/tokudb/ft-index/ft/txn.h b/storage/tokudb/ft-index/ft/txn.h deleted file mode 100644 index 5e83d6511a2..00000000000 --- a/storage/tokudb/ft-index/ft/txn.h +++ /dev/null @@ -1,225 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKUTXN_H -#define TOKUTXN_H - -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. 
- -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." -#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." - -#include "txn_manager.h" - -void txn_status_init(void); -void txn_status_destroy(void); - - -inline bool txn_pair_is_none(TXNID_PAIR txnid) { - return txnid.parent_id64 == TXNID_NONE && txnid.child_id64 == TXNID_NONE; -} - -inline bool txn_needs_snapshot(TXN_SNAPSHOT_TYPE snapshot_type, TOKUTXN parent) { - // we need a snapshot if the snapshot type is a child or - // if the snapshot type is root and we have no parent. 
- // Cases that we don't need a snapshot: when snapshot type is NONE - // or when it is ROOT and we have a parent - return (snapshot_type != TXN_SNAPSHOT_NONE && (parent==NULL || snapshot_type == TXN_SNAPSHOT_CHILD)); -} - -void toku_txn_lock(TOKUTXN txn); -void toku_txn_unlock(TOKUTXN txn); - -uint64_t toku_txn_get_root_id(TOKUTXN txn); -bool txn_declared_read_only(TOKUTXN txn); - -int toku_txn_begin_txn ( - DB_TXN *container_db_txn, - TOKUTXN parent_tokutxn, - TOKUTXN *tokutxn, - TOKULOGGER logger, - TXN_SNAPSHOT_TYPE snapshot_type, - bool read_only - ); - -DB_TXN * toku_txn_get_container_db_txn (TOKUTXN tokutxn); -void toku_txn_set_container_db_txn (TOKUTXN, DB_TXN*); - -// toku_txn_begin_with_xid is called from recovery and has no containing DB_TXN -int toku_txn_begin_with_xid ( - TOKUTXN parent_tokutxn, - TOKUTXN *tokutxn, - TOKULOGGER logger, - TXNID_PAIR xid, - TXN_SNAPSHOT_TYPE snapshot_type, - DB_TXN *container_db_txn, - bool for_recovery, - bool read_only - ); - -void toku_txn_update_xids_in_txn(TOKUTXN txn, TXNID xid); - -int toku_txn_load_txninfo (TOKUTXN txn, TXNINFO info); - -int toku_txn_commit_txn (TOKUTXN txn, int nosync, - TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra); -int toku_txn_commit_with_lsn(TOKUTXN txn, int nosync, LSN oplsn, - TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra); - -int toku_txn_abort_txn(TOKUTXN txn, - TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra); -int toku_txn_abort_with_lsn(TOKUTXN txn, LSN oplsn, - TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra); - -void toku_txn_prepare_txn (TOKUTXN txn, TOKU_XA_XID *xid); -// Effect: Do the internal work of preparing a transaction (does not log the prepare record). - -void toku_txn_get_prepared_xa_xid (TOKUTXN, TOKU_XA_XID *); -// Effect: Fill in the XID information for a transaction. The caller allocates the XID and the function fills in values. - -void toku_txn_maybe_fsync_log(TOKULOGGER logger, LSN do_fsync_lsn, bool do_fsync); - -void toku_txn_get_fsync_info(TOKUTXN ttxn, bool* do_fsync, LSN* do_fsync_lsn); - -// Complete and destroy a txn -void toku_txn_close_txn(TOKUTXN txn); - -// Remove a txn from any live txn lists -void toku_txn_complete_txn(TOKUTXN txn); - -// Free the memory of a txn -void toku_txn_destroy_txn(TOKUTXN txn); - -XIDS toku_txn_get_xids (TOKUTXN); - -// Force fsync on commit -void toku_txn_force_fsync_on_commit(TOKUTXN txn); - -typedef enum { - TXN_BEGIN, // total number of transactions begun (does not include recovered txns) - TXN_READ_BEGIN, // total number of read only transactions begun (does not include recovered txns) - TXN_COMMIT, // successful commits - TXN_ABORT, - TXN_STATUS_NUM_ROWS -} txn_status_entry; - -typedef struct { - bool initialized; - TOKU_ENGINE_STATUS_ROW_S status[TXN_STATUS_NUM_ROWS]; -} TXN_STATUS_S, *TXN_STATUS; - -void toku_txn_get_status(TXN_STATUS s); - -bool toku_is_txn_in_live_root_txn_list(const xid_omt_t &live_root_txn_list, TXNID xid); - -TXNID toku_get_oldest_in_live_root_txn_list(TOKUTXN txn); - -#include "txn_state.h" - -TOKUTXN_STATE toku_txn_get_state(TOKUTXN txn); - -struct tokulogger_preplist { - TOKU_XA_XID xid; - DB_TXN *txn; -}; -int toku_logger_recover_txn (TOKULOGGER logger, struct tokulogger_preplist preplist[/*count*/], long count, /*out*/ long *retp, uint32_t flags); - -void toku_maybe_log_begin_txn_for_write_operation(TOKUTXN txn); - -// Return whether txn (or it's descendents) have done no work. 
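The remaining declarations of the removed header follow below. Since txn_needs_snapshot() above compresses the snapshot policy into a single boolean (and the same rule is re-added under ft/txn/txn.h later in this patch), here is a hypothetical standalone mirror of it with its truth table; the toy_* names are stand-ins, not the real TXN_SNAPSHOT_TYPE or TOKUTXN declarations:

// Hypothetical standalone mirror of the rule above; names are stand-ins.
#include <cstdio>

enum toy_snapshot_type { TOY_SNAPSHOT_NONE, TOY_SNAPSHOT_ROOT, TOY_SNAPSHOT_CHILD };

// Snapshot needed for CHILD at every nesting level, for ROOT only at the root
// transaction (no parent), never for NONE.
static bool toy_needs_snapshot(toy_snapshot_type t, bool has_parent) {
    return t != TOY_SNAPSHOT_NONE && (!has_parent || t == TOY_SNAPSHOT_CHILD);
}

int main() {
    printf("NONE,  root txn  -> %d\n", toy_needs_snapshot(TOY_SNAPSHOT_NONE, false));  // 0
    printf("ROOT,  root txn  -> %d\n", toy_needs_snapshot(TOY_SNAPSHOT_ROOT, false));  // 1
    printf("ROOT,  child txn -> %d\n", toy_needs_snapshot(TOY_SNAPSHOT_ROOT, true));   // 0
    printf("CHILD, child txn -> %d\n", toy_needs_snapshot(TOY_SNAPSHOT_CHILD, true));  // 1
    return 0;
}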
-bool toku_txn_is_read_only(TOKUTXN txn); - -void toku_txn_lock_state(TOKUTXN txn); -void toku_txn_unlock_state(TOKUTXN txn); -void toku_txn_pin_live_txn_unlocked(TOKUTXN txn); -void toku_txn_unpin_live_txn(TOKUTXN txn); - -bool toku_txn_has_spilled_rollback(TOKUTXN txn); - -uint64_t toku_txn_get_client_id(TOKUTXN txn); -void toku_txn_set_client_id(TOKUTXN txn, uint64_t client_id); - -#endif //TOKUTXN_H diff --git a/storage/tokudb/ft-index/ft/roll.cc b/storage/tokudb/ft-index/ft/txn/roll.cc index 2c8e1d9307c..affa9fa802c 100644 --- a/storage/tokudb/ft-index/ft/roll.cc +++ b/storage/tokudb/ft-index/ft/txn/roll.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,14 +91,13 @@ PATENT RIGHTS GRANT: /* rollback and rollforward routines. */ -#include <ft/log_header.h> -#include "ft.h" -#include "ft-ops.h" -#include "log-internal.h" -//#include "txn_manager.h" -#include "xids.h" -#include "rollback-apply.h" +#include "ft/ft.h" +#include "ft/ft-ops.h" +#include "ft/log_header.h" +#include "ft/logger/log-internal.h" +#include "ft/txn/xids.h" +#include "ft/txn/rollback-apply.h" // functionality provided by roll.c is exposed by an autogenerated // header file, logheader.h @@ -220,9 +219,9 @@ done: return 0; } -int find_ft_from_filenum (const FT &h, const FILENUM &filenum); -int find_ft_from_filenum (const FT &h, const FILENUM &filenum) { - FILENUM thisfnum = toku_cachefile_filenum(h->cf); +int find_ft_from_filenum (const FT &ft, const FILENUM &filenum); +int find_ft_from_filenum (const FT &ft, const FILENUM &filenum) { + FILENUM thisfnum = toku_cachefile_filenum(ft->cf); if (thisfnum.fileid<filenum.fileid) return -1; if (thisfnum.fileid>filenum.fileid) return +1; return 0; @@ -236,9 +235,8 @@ static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key, bool reset_root_xid_that_created) { int r = 0; //printf("%s:%d committing insert %s %s\n", __FILE__, __LINE__, key.data, data.data); - FT h; - h = NULL; - r = txn->open_fts.find_zero<FILENUM, find_ft_from_filenum>(filenum, &h, NULL); + FT ft = nullptr; + r = txn->open_fts.find_zero<FILENUM, find_ft_from_filenum>(filenum, &ft, NULL); if (r == DB_NOTFOUND) { assert(txn->for_recovery); r = 0; @@ -247,7 +245,7 @@ static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key, assert(r==0); if (oplsn.lsn != 0) { // if we are executing the recovery algorithm - LSN treelsn = toku_ft_checkpoint_lsn(h); + LSN treelsn = toku_ft_checkpoint_lsn(ft); if (oplsn.lsn <= treelsn.lsn) { // if operation was already applied to tree ... r = 0; // ... do not apply it again. goto done; @@ -258,13 +256,11 @@ static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key, XIDS xids; xids = toku_txn_get_xids(txn); { - FT_MSG_S ftmsg = { type, ZERO_MSN, xids, - .u = { .id = { (key.len > 0) - ? toku_fill_dbt(&key_dbt, key.data, key.len) - : toku_init_dbt(&key_dbt), - data - ? toku_fill_dbt(&data_dbt, data->data, data->len) - : toku_init_dbt(&data_dbt) } } }; + const DBT *kdbt = key.len > 0 ? toku_fill_dbt(&key_dbt, key.data, key.len) : + toku_init_dbt(&key_dbt); + const DBT *vdbt = data ? 
toku_fill_dbt(&data_dbt, data->data, data->len) : + toku_init_dbt(&data_dbt); + ft_msg msg(kdbt, vdbt, type, ZERO_MSN, xids); TXN_MANAGER txn_manager = toku_logger_get_txn_manager(txn->logger); txn_manager_state txn_state_for_gc(txn_manager); @@ -275,10 +271,10 @@ static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key, // no messages above us, we can implicitly promote uxrs based on this xid oldest_referenced_xid_estimate, !txn->for_recovery); - toku_ft_root_put_msg(h, &ftmsg, &gc_info); + toku_ft_root_put_msg(ft, msg, &gc_info); if (reset_root_xid_that_created) { - TXNID new_root_xid_that_created = xids_get_outermost_xid(xids); - toku_reset_root_xid_that_created(h, new_root_xid_that_created); + TXNID new_root_xid_that_created = toku_xids_get_outermost_xid(xids); + toku_reset_root_xid_that_created(ft, new_root_xid_that_created); } } done: @@ -579,15 +575,15 @@ toku_rollback_dictionary_redirect (FILENUM old_filenum, CACHEFILE new_cf = NULL; r = toku_cachefile_of_filenum(txn->logger->ct, new_filenum, &new_cf); assert(r == 0); - FT CAST_FROM_VOIDP(new_h, toku_cachefile_get_userdata(new_cf)); + FT CAST_FROM_VOIDP(new_ft, toku_cachefile_get_userdata(new_cf)); CACHEFILE old_cf = NULL; r = toku_cachefile_of_filenum(txn->logger->ct, old_filenum, &old_cf); assert(r == 0); - FT CAST_FROM_VOIDP(old_h, toku_cachefile_get_userdata(old_cf)); + FT CAST_FROM_VOIDP(old_ft, toku_cachefile_get_userdata(old_cf)); //Redirect back from new to old. - r = toku_dictionary_redirect_abort(old_h, new_h, txn); + r = toku_dictionary_redirect_abort(old_ft, new_ft, txn); assert(r==0); } return r; diff --git a/storage/tokudb/ft-index/ft/rollback-apply.cc b/storage/tokudb/ft-index/ft/txn/rollback-apply.cc index 1dd3062b33e..258994223cc 100644 --- a/storage/tokudb/ft-index/ft/rollback-apply.cc +++ b/storage/tokudb/ft-index/ft/txn/rollback-apply.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,12 +89,10 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#include "fttypes.h" -#include "log-internal.h" -#include "rollback-apply.h" +#include "ft/logger/log-internal.h" +#include "ft/txn/rollback-apply.h" -static void -poll_txn_progress_function(TOKUTXN txn, uint8_t is_commit, uint8_t stall_for_checkpoint) { +static void poll_txn_progress_function(TOKUTXN txn, uint8_t is_commit, uint8_t stall_for_checkpoint) { if (txn->progress_poll_fun) { TOKU_TXN_PROGRESS_S progress = { .entries_total = txn->roll_info.num_rollentries, @@ -125,17 +123,14 @@ int toku_abort_rollback_item (TOKUTXN txn, struct roll_entry *item, LSN lsn) { return r; } -int -note_ft_used_in_txns_parent(const FT &ft, uint32_t UU(index), TOKUTXN const child); -int -note_ft_used_in_txns_parent(const FT &ft, uint32_t UU(index), TOKUTXN const child) { +int note_ft_used_in_txns_parent(const FT &ft, uint32_t UU(index), TOKUTXN const child); +int note_ft_used_in_txns_parent(const FT &ft, uint32_t UU(index), TOKUTXN const child) { TOKUTXN parent = child->parent; toku_txn_maybe_note_ft(parent, ft); return 0; } -static int -apply_txn(TOKUTXN txn, LSN lsn, apply_rollback_item func) { +static int apply_txn(TOKUTXN txn, LSN lsn, apply_rollback_item func) { int r = 0; // do the commit/abort calls and free everything // we do the commit/abort calls in reverse order too. @@ -258,9 +253,9 @@ int toku_rollback_commit(TOKUTXN txn, LSN lsn) { } child_log->newest_logentry = child_log->oldest_logentry = 0; // Put all the memarena data into the parent. - if (toku_memarena_total_size_in_use(child_log->rollentry_arena) > 0) { + if (child_log->rollentry_arena.total_size_in_use() > 0) { // If there are no bytes to move, then just leave things alone, and let the memory be reclaimed on txn is closed. - toku_memarena_move_buffers(parent_log->rollentry_arena, child_log->rollentry_arena); + child_log->rollentry_arena.move_memory(&parent_log->rollentry_arena); } // each txn tries to give back at most one rollback log node // to the cache. All other rollback log nodes for this child @@ -303,3 +298,9 @@ int toku_rollback_abort(TOKUTXN txn, LSN lsn) { assert(r==0); return r; } + +int toku_rollback_discard(TOKUTXN txn) { + txn->roll_info.current_rollback = ROLLBACK_NONE; + return 0; +} + diff --git a/storage/tokudb/ft-index/ft/rollback-apply.h b/storage/tokudb/ft-index/ft/txn/rollback-apply.h index 50e53ea6d24..3d91c154a32 100644 --- a/storage/tokudb/ft-index/ft/rollback-apply.h +++ b/storage/tokudb/ft-index/ft/txn/rollback-apply.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef ROLLBACK_APPLY_H -#define ROLLBACK_APPLY_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,16 +87,15 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
- typedef int(*apply_rollback_item)(TOKUTXN txn, struct roll_entry *item, LSN lsn); int toku_commit_rollback_item (TOKUTXN txn, struct roll_entry *item, LSN lsn); int toku_abort_rollback_item (TOKUTXN txn, struct roll_entry *item, LSN lsn); int toku_rollback_commit(TOKUTXN txn, LSN lsn); int toku_rollback_abort(TOKUTXN txn, LSN lsn); - - -#endif // ROLLBACK_APPLY_H +int toku_rollback_discard(TOKUTXN txn); diff --git a/storage/tokudb/ft-index/ft/rollback-ct-callbacks.cc b/storage/tokudb/ft-index/ft/txn/rollback-ct-callbacks.cc index 7083e17bd0d..bb60e787735 100644 --- a/storage/tokudb/ft-index/ft/rollback-ct-callbacks.cc +++ b/storage/tokudb/ft-index/ft/txn/rollback-ct-callbacks.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,15 +89,16 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <toku_portability.h> -#include <memory.h> +#include "portability/memory.h" +#include "portability/toku_portability.h" -#include "ft-internal.h" -#include "fttypes.h" -#include "rollback.h" -#include "rollback-ct-callbacks.h" +#include "ft/serialize/block_table.h" +#include "ft/ft-internal.h" +#include "ft/serialize/ft_node-serialize.h" +#include "ft/txn/rollback.h" +#include "ft/txn/rollback-ct-callbacks.h" -#include <util/memarena.h> +#include "util/memarena.h" // Address used as a sentinel. Otherwise unused. static struct serialized_rollback_log_node cloned_rollback; @@ -125,8 +126,7 @@ toku_rollback_flush_unused_log( { if (write_me) { DISKOFF offset; - toku_blocknum_realloc_on_disk(ft->blocktable, logname, 0, &offset, - ft, fd, for_checkpoint); + ft->blocktable.realloc_on_disk(logname, 0, &offset, ft, fd, for_checkpoint, INT_MAX); } if (!keep_me && !is_clone) { toku_free(log); diff --git a/storage/tokudb/ft-index/ft/rollback-ct-callbacks.h b/storage/tokudb/ft-index/ft/txn/rollback-ct-callbacks.h index aeb4650e17d..aee13f2e94d 100644 --- a/storage/tokudb/ft-index/ft/rollback-ct-callbacks.h +++ b/storage/tokudb/ft-index/ft/txn/rollback-ct-callbacks.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef ROLLBACK_CT_CALLBACKS_H -#define ROLLBACK_CT_CALLBACKS_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,12 +87,12 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
- -#include "cachetable.h" -#include "fttypes.h" +#include "ft/cachetable/cachetable.h" void toku_rollback_flush_callback(CACHEFILE cachefile, int fd, BLOCKNUM logname, void *rollback_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool UU(is_clone)); int toku_rollback_fetch_callback(CACHEFILE cachefile, PAIR p, int fd, BLOCKNUM logname, uint32_t fullhash, void **rollback_pv, void** UU(disk_data), PAIR_ATTR *sizep, int * UU(dirtyp), void *extraargs); @@ -123,7 +121,7 @@ int toku_rollback_cleaner_callback ( void* UU(extraargs) ); -static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT h) { +static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT ft) { CACHETABLE_WRITE_CALLBACK wc; wc.flush_callback = toku_rollback_flush_callback; wc.pe_est_callback = toku_rollback_pe_est_callback; @@ -131,9 +129,6 @@ static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT wc.cleaner_callback = toku_rollback_cleaner_callback; wc.clone_callback = toku_rollback_clone_callback; wc.checkpoint_complete_callback = nullptr; - wc.write_extraargs = h; + wc.write_extraargs = ft; return wc; } - - -#endif // ROLLBACK_CT_CALLBACKS_H diff --git a/storage/tokudb/ft-index/ft/rollback.cc b/storage/tokudb/ft-index/ft/txn/rollback.cc index 1b1a99d908e..54a7d9b58ae 100644 --- a/storage/tokudb/ft-index/ft/rollback.cc +++ b/storage/tokudb/ft-index/ft/txn/rollback.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,25 +91,21 @@ PATENT RIGHTS GRANT: #include <toku_stdint.h> -#include "ft.h" -#include "log-internal.h" -#include "rollback-ct-callbacks.h" +#include "ft/serialize/block_table.h" +#include "ft/ft.h" +#include "ft/logger/log-internal.h" +#include "ft/txn/rollback-ct-callbacks.h" static void rollback_unpin_remove_callback(CACHEKEY* cachekey, bool for_checkpoint, void* extra) { - FT CAST_FROM_VOIDP(h, extra); - toku_free_blocknum( - h->blocktable, - cachekey, - h, - for_checkpoint - ); + FT CAST_FROM_VOIDP(ft, extra); + ft->blocktable.free_blocknum(cachekey, ft, for_checkpoint); } void toku_rollback_log_unpin_and_remove(TOKUTXN txn, ROLLBACK_LOG_NODE log) { int r; CACHEFILE cf = txn->logger->rollback_cachefile; - FT CAST_FROM_VOIDP(h, toku_cachefile_get_userdata(cf)); - r = toku_cachetable_unpin_and_remove (cf, log->ct_pair, rollback_unpin_remove_callback, h); + FT CAST_FROM_VOIDP(ft, toku_cachefile_get_userdata(cf)); + r = toku_cachetable_unpin_and_remove (cf, log->ct_pair, rollback_unpin_remove_callback, ft); assert(r == 0); } @@ -120,13 +116,17 @@ toku_find_xid_by_xid (const TXNID &xid, const TXNID &xidfind) { return 0; } +// TODO: fix this name +// toku_rollback_malloc void *toku_malloc_in_rollback(ROLLBACK_LOG_NODE log, size_t size) { - return toku_memarena_malloc(log->rollentry_arena, size); + return log->rollentry_arena.malloc_from_arena(size); } +// TODO: fix this name +// toku_rollback_memdup void *toku_memdup_in_rollback(ROLLBACK_LOG_NODE log, const void *v, size_t len) { - void *r=toku_malloc_in_rollback(log, len); - memcpy(r,v,len); + void *r = toku_malloc_in_rollback(log, len); + memcpy(r, v, len); return r; } @@ -145,8 +145,8 @@ static inline PAIR_ATTR make_rollback_pair_attr(long size) { PAIR_ATTR rollback_memory_size(ROLLBACK_LOG_NODE log) { size_t size = sizeof(*log); - if 
(log->rollentry_arena) { - size += toku_memarena_total_footprint(log->rollentry_arena); + if (&log->rollentry_arena) { + size += log->rollentry_arena.total_footprint(); } return make_rollback_pair_attr(size); } @@ -175,12 +175,10 @@ void rollback_empty_log_init(ROLLBACK_LOG_NODE log) { log->previous = make_blocknum(0); log->oldest_logentry = NULL; log->newest_logentry = NULL; - log->rollentry_arena = NULL; + log->rollentry_arena.create(0); log->rollentry_resident_bytecount = 0; } - - static void rollback_initialize_for_txn( ROLLBACK_LOG_NODE log, TOKUTXN txn, @@ -192,13 +190,14 @@ static void rollback_initialize_for_txn( log->previous = previous; log->oldest_logentry = NULL; log->newest_logentry = NULL; - log->rollentry_arena = toku_memarena_create(); + log->rollentry_arena.create(1024); log->rollentry_resident_bytecount = 0; log->dirty = true; } +// TODO: fix this name void make_rollback_log_empty(ROLLBACK_LOG_NODE log) { - toku_memarena_destroy(&log->rollentry_arena); + log->rollentry_arena.destroy(); rollback_empty_log_init(log); } @@ -217,7 +216,7 @@ static void rollback_log_create ( CACHEFILE cf = txn->logger->rollback_cachefile; FT CAST_FROM_VOIDP(ft, toku_cachefile_get_userdata(cf)); rollback_initialize_for_txn(log, txn, previous); - toku_allocate_blocknum(ft->blocktable, &log->blocknum, ft); + ft->blocktable.allocate_blocknum(&log->blocknum, ft); const uint32_t hash = toku_cachetable_hash(ft->cf, log->blocknum); *result = log; toku_cachetable_put(cf, log->blocknum, hash, diff --git a/storage/tokudb/ft-index/ft/rollback.h b/storage/tokudb/ft-index/ft/txn/rollback.h index 2e9493b0e6b..c9f779e677b 100644 --- a/storage/tokudb/ft-index/ft/rollback.h +++ b/storage/tokudb/ft-index/ft/txn/rollback.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_ROLLBACK_H -#define TOKU_ROLLBACK_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,11 +87,19 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <util/omt.h> -#include "sub_block.h" +#include "ft/cachetable/cachetable.h" +#include "ft/serialize/sub_block.h" +#include "ft/txn/txn.h" + +#include "util/memarena.h" + +typedef struct rollback_log_node *ROLLBACK_LOG_NODE; +typedef struct serialized_rollback_log_node *SERIALIZED_ROLLBACK_LOG_NODE; void toku_poll_txn_progress_function(TOKUTXN txn, uint8_t is_commit, uint8_t stall_for_checkpoint); @@ -132,7 +138,7 @@ void *toku_memdup_in_rollback(ROLLBACK_LOG_NODE log, const void *v, size_t len); // if necessary. 
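The spill-related declarations continue below. For context on the arena calls in the hunks above (rollentry_arena.create(), malloc_from_arena(), total_footprint(), destroy()), here is a hypothetical sketch of that embedded-arena lifecycle; toy_arena and its methods are stand-ins, not util/memarena, and error handling is omitted:

// Hypothetical illustration only; toy_arena is a stand-in, not util/memarena.
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>

class toy_arena {
    char *_buf = nullptr;
    size_t _size = 0, _used = 0;
public:
    void create(size_t initial_size) {                    // call before first allocation
        _size = initial_size ? initial_size : 64;
        _buf = static_cast<char *>(malloc(_size));
        _used = 0;
    }
    void *malloc_from_arena(size_t n) {
        while (_used + n > _size) {                       // grow by doubling when full
            _size *= 2;
            _buf = static_cast<char *>(realloc(_buf, _size));
        }
        void *p = _buf + _used;
        _used += n;
        return p;
    }
    size_t total_footprint() const { return _size; }      // coarse memory accounting
    void destroy() { free(_buf); _buf = nullptr; _size = _used = 0; }
};

int main() {
    toy_arena arena;
    arena.create(1024);                                   // like rollback_initialize_for_txn()
    const char entry[] = "a rollback entry";
    void *copy = arena.malloc_from_arena(sizeof entry);   // like toku_memdup_in_rollback()
    memcpy(copy, entry, sizeof entry);
    assert(arena.total_footprint() >= 1024);
    arena.destroy();                                      // like make_rollback_log_empty()
    return 0;
}

Individual entries are never freed one by one; the whole arena is dropped in a single call, which matches make_rollback_log_empty() above destroying rollentry_arena outright.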
void toku_maybe_spill_rollbacks(TOKUTXN txn, ROLLBACK_LOG_NODE log); -void toku_txn_maybe_note_ft (TOKUTXN txn, FT h); +void toku_txn_maybe_note_ft (TOKUTXN txn, struct ft *ft); int toku_logger_txn_rollback_stats(TOKUTXN txn, struct txn_stat *txn_stat); int toku_find_xid_by_xid (const TXNID &xid, const TXNID &xidfind); @@ -160,7 +166,7 @@ struct rollback_log_node { BLOCKNUM previous; struct roll_entry *oldest_logentry; struct roll_entry *newest_logentry; - MEMARENA rollentry_arena; + memarena rollentry_arena; size_t rollentry_resident_bytecount; // How many bytes for the rollentries that are stored in main memory. PAIR ct_pair; }; @@ -172,6 +178,7 @@ struct serialized_rollback_log_node { BLOCKNUM blocknum; struct sub_block sub_block[max_sub_blocks]; }; +typedef struct serialized_rollback_log_node *SERIALIZED_ROLLBACK_LOG_NODE; static inline void toku_static_serialized_rollback_log_destroy(SERIALIZED_ROLLBACK_LOG_NODE log) { @@ -190,6 +197,3 @@ void make_rollback_log_empty(ROLLBACK_LOG_NODE log); static inline bool rollback_log_is_unused(ROLLBACK_LOG_NODE log) { return (log->txnid.parent_id64 == TXNID_NONE); } - - -#endif // TOKU_ROLLBACK_H diff --git a/storage/tokudb/ft-index/ft/rollback_log_node_cache.cc b/storage/tokudb/ft-index/ft/txn/rollback_log_node_cache.cc index d3ea3471489..95a54d6fd76 100644 --- a/storage/tokudb/ft-index/ft/rollback_log_node_cache.cc +++ b/storage/tokudb/ft-index/ft/txn/rollback_log_node_cache.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,7 +92,7 @@ PATENT RIGHTS GRANT: #include <memory.h> #include <portability/toku_portability.h> -#include "rollback_log_node_cache.h" +#include "txn/rollback_log_node_cache.h" void rollback_log_node_cache::init (uint32_t max_num_avail_nodes) { XMALLOC_N(max_num_avail_nodes, m_avail_blocknums); diff --git a/storage/tokudb/ft-index/ft/rollback_log_node_cache.h b/storage/tokudb/ft-index/ft/txn/rollback_log_node_cache.h index 0db99faf23b..4aa9daee207 100644 --- a/storage/tokudb/ft-index/ft/rollback_log_node_cache.h +++ b/storage/tokudb/ft-index/ft/txn/rollback_log_node_cache.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_ROLLBACK_LOG_NODE_CACHE_H -#define TOKU_ROLLBACK_LOG_NODE_CACHE_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,10 +87,12 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#include "rollback.h" +#include "ft/txn/rollback.h" class rollback_log_node_cache { public: @@ -115,5 +115,3 @@ private: }; ENSURE_POD(rollback_log_node_cache); - -#endif // TOKU_ROLLBACK_LOG_NODE_CACHE_H diff --git a/storage/tokudb/ft-index/ft/txn.cc b/storage/tokudb/ft-index/ft/txn/txn.cc index 403c8e92c45..7b475c2c975 100644 --- a/storage/tokudb/ft-index/ft/txn.cc +++ b/storage/tokudb/ft-index/ft/txn/txn.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -90,15 +90,14 @@ PATENT RIGHTS GRANT: #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "ft.h" -#include "txn.h" -#include "log-internal.h" -#include "checkpoint.h" -#include "ule.h" -#include "rollback-apply.h" -#include "txn_manager.h" -#include "txn_child_manager.h" -#include <util/status.h> +#include "ft/cachetable/checkpoint.h" +#include "ft/ft.h" +#include "ft/logger/log-internal.h" +#include "ft/ule.h" +#include "ft/txn/rollback-apply.h" +#include "ft/txn/txn.h" +#include "ft/txn/txn_manager.h" +#include "util/status.h" /////////////////////////////////////////////////////////////////////////////////// // Engine status @@ -108,7 +107,7 @@ PATENT RIGHTS GRANT: static TXN_STATUS_S txn_status; -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(txn_status, k, c, t, "txn: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(txn_status, k, c, t, "txn: " l, inc) void txn_status_init(void) { @@ -189,13 +188,13 @@ txn_create_xids(TOKUTXN txn, TOKUTXN parent) { XIDS xids; XIDS parent_xids; if (parent == NULL) { - parent_xids = xids_get_root_xids(); + parent_xids = toku_xids_get_root_xids(); } else { parent_xids = parent->xids; } - xids_create_unknown_child(parent_xids, &xids); + toku_xids_create_unknown_child(parent_xids, &xids); TXNID finalized_xid = (parent == NULL) ? txn->txnid.parent_id64 : txn->txnid.child_id64; - xids_finalize_with_child(xids, finalized_xid); + toku_xids_finalize_with_child(xids, finalized_xid); txn->xids = xids; } @@ -218,7 +217,7 @@ toku_txn_begin_with_xid ( TOKUTXN txn; // check for case where we are trying to // create too many nested transactions - if (!read_only && parent && !xids_can_create_child(parent->xids)) { + if (!read_only && parent && !toku_xids_can_create_child(parent->xids)) { r = EINVAL; goto exit; } @@ -388,7 +387,7 @@ toku_txn_update_xids_in_txn(TOKUTXN txn, TXNID xid) //Used on recovery to recover a transaction. int -toku_txn_load_txninfo (TOKUTXN txn, TXNINFO info) { +toku_txn_load_txninfo (TOKUTXN txn, struct txninfo *info) { txn->roll_info.rollentry_raw_count = info->rollentry_raw_count; uint32_t i; for (i = 0; i < info->num_fts; i++) { @@ -618,7 +617,7 @@ int remove_txn (const FT &h, const uint32_t UU(idx), TOKUTXN const UU(txn)) return 0; } -// for every BRT in txn, remove it. +// for every ft in txn, remove it. 
static void note_txn_closing (TOKUTXN txn) { txn->open_fts.iterate<struct tokutxn, remove_txn>(txn); } @@ -628,7 +627,7 @@ void toku_txn_complete_txn(TOKUTXN txn) { assert(txn->roll_info.spilled_rollback_tail.b == ROLLBACK_NONE.b); assert(txn->roll_info.current_rollback.b == ROLLBACK_NONE.b); assert(txn->num_pin == 0); - assert(txn->state == TOKUTXN_COMMITTING || txn->state == TOKUTXN_ABORTING); + assert(txn->state == TOKUTXN_COMMITTING || txn->state == TOKUTXN_ABORTING || txn->state == TOKUTXN_PREPARING); if (txn->parent) { toku_txn_manager_handle_snapshot_destroy_for_child_txn( txn, @@ -649,7 +648,7 @@ void toku_txn_complete_txn(TOKUTXN txn) { void toku_txn_destroy_txn(TOKUTXN txn) { txn->open_fts.destroy(); if (txn->xids) { - xids_destroy(&txn->xids); + toku_xids_destroy(&txn->xids); } toku_mutex_destroy(&txn->txn_lock); toku_mutex_destroy(&txn->state_lock); @@ -658,7 +657,7 @@ void toku_txn_destroy_txn(TOKUTXN txn) { } XIDS toku_txn_get_xids (TOKUTXN txn) { - if (txn==0) return xids_get_root_xids(); + if (txn==0) return toku_xids_get_root_xids(); else return txn->xids; } @@ -786,6 +785,26 @@ void toku_txn_set_client_id(TOKUTXN txn, uint64_t client_id) { txn->client_id = client_id; } +int toku_txn_reads_txnid(TXNID txnid, TOKUTXN txn) { + int r = 0; + TXNID oldest_live_in_snapshot = toku_get_oldest_in_live_root_txn_list(txn); + if (oldest_live_in_snapshot == TXNID_NONE && txnid < txn->snapshot_txnid64) { + r = TOKUDB_ACCEPT; + } else if (txnid < oldest_live_in_snapshot || txnid == txn->txnid.parent_id64) { + r = TOKUDB_ACCEPT; + } else if (txnid > txn->snapshot_txnid64 || toku_is_txn_in_live_root_txn_list(*txn->live_root_txn_list, txnid)) { + r = 0; + } else { + r = TOKUDB_ACCEPT; + } + return r; +} + +int toku_txn_discard_txn(TOKUTXN txn) { + int r = toku_rollback_discard(txn); + return r; +} + #include <toku_race_tools.h> void __attribute__((__constructor__)) toku_txn_status_helgrind_ignore(void); void toku_txn_status_helgrind_ignore(void) { diff --git a/storage/tokudb/ft-index/ft/txn/txn.h b/storage/tokudb/ft-index/ft/txn/txn.h new file mode 100644 index 00000000000..c458df3b5b2 --- /dev/null +++ b/storage/tokudb/ft-index/ft/txn/txn.h @@ -0,0 +1,435 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. 
+ +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#pragma once + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
+ +#include "portability/toku_stdint.h" + +#include "ft/txn/txn_state.h" +#include "ft/serialize/block_table.h" +#include "util/omt.h" + +typedef uint64_t TXNID; + +typedef struct tokutxn *TOKUTXN; + +#define TXNID_NONE_LIVING ((TXNID)0) +#define TXNID_NONE ((TXNID)0) +#define TXNID_MAX ((TXNID)-1) + +typedef struct txnid_pair_s { + TXNID parent_id64; + TXNID child_id64; +} TXNID_PAIR; + +static const TXNID_PAIR TXNID_PAIR_NONE = { .parent_id64 = TXNID_NONE, .child_id64 = TXNID_NONE }; + +// We include the child manager here beacuse it uses the TXNID / TOKUTXN types +#include "ft/txn/txn_child_manager.h" + +/* Log Sequence Number (LSN) + * Make the LSN be a struct instead of an integer so that we get better type checking. */ +typedef struct __toku_lsn { uint64_t lsn; } LSN; +static const LSN ZERO_LSN = { .lsn = 0 }; +static const LSN MAX_LSN = { .lsn = UINT64_MAX }; + +// +// Types of snapshots that can be taken by a tokutxn +// - TXN_SNAPSHOT_NONE: means that there is no snapshot. Reads do not use snapshot reads. +// used for SERIALIZABLE and READ UNCOMMITTED +// - TXN_SNAPSHOT_ROOT: means that all tokutxns use their root transaction's snapshot +// used for REPEATABLE READ +// - TXN_SNAPSHOT_CHILD: means that each child tokutxn creates its own snapshot +// used for READ COMMITTED +// + +typedef enum __TXN_SNAPSHOT_TYPE { + TXN_SNAPSHOT_NONE=0, + TXN_SNAPSHOT_ROOT=1, + TXN_SNAPSHOT_CHILD=2 +} TXN_SNAPSHOT_TYPE; + +typedef toku::omt<struct tokutxn *> txn_omt_t; +typedef toku::omt<TXNID> xid_omt_t; +typedef toku::omt<struct referenced_xid_tuple, struct referenced_xid_tuple *> rx_omt_t; + +inline bool txn_pair_is_none(TXNID_PAIR txnid) { + return txnid.parent_id64 == TXNID_NONE && txnid.child_id64 == TXNID_NONE; +} + +inline bool txn_needs_snapshot(TXN_SNAPSHOT_TYPE snapshot_type, struct tokutxn *parent) { + // we need a snapshot if the snapshot type is a child or + // if the snapshot type is root and we have no parent. + // Cases that we don't need a snapshot: when snapshot type is NONE + // or when it is ROOT and we have a parent + return (snapshot_type != TXN_SNAPSHOT_NONE && (parent==NULL || snapshot_type == TXN_SNAPSHOT_CHILD)); +} + +struct tokulogger; + +struct txn_roll_info { + // these are number of rollback nodes and rollback entries for this txn. + // + // the current rollback node below has sequence number num_rollback_nodes - 1 + // (because they are numbered 0...num-1). often, the current rollback is + // already set to this block num, which means it exists and is available to + // log some entries. if the current rollback is NONE and the number of + // rollback nodes for this transaction is non-zero, then we will use + // the number of rollback nodes to know which sequence number to assign + // to a new one we create + uint64_t num_rollback_nodes; + uint64_t num_rollentries; + uint64_t num_rollentries_processed; + uint64_t rollentry_raw_count; // the total count of every byte in the transaction and all its children. + + // spilled rollback nodes are rollback nodes that were gorged by this + // transaction, retired, and saved in a list. + + // the spilled rollback head is the block number of the first rollback node + // that makes up the rollback log chain + BLOCKNUM spilled_rollback_head; + + // the spilled rollback is the block number of the last rollback node that + // makes up the rollback log chain. + BLOCKNUM spilled_rollback_tail; + + // the current rollback node block number we may use. 
if this is ROLLBACK_NONE, + // then we need to create one and set it here before using it. + BLOCKNUM current_rollback; +}; + +struct tokutxn { + // These don't change after create: + + TXNID_PAIR txnid; + + uint64_t snapshot_txnid64; // this is the lsn of the snapshot + const TXN_SNAPSHOT_TYPE snapshot_type; + const bool for_recovery; + struct tokulogger *const logger; + struct tokutxn *const parent; + // The child txn is protected by the child_txn_manager lock + // and by the user contract. The user contract states (and is + // enforced at the ydb layer) that a child txn should not be created + // while another child exists. The txn_child_manager will protect + // other threads from trying to read this value while another + // thread commits/aborts the child + struct tokutxn *child; + + // statically allocated child manager, if this + // txn is a root txn, this manager will be used and set to + // child_manager for this transaction and all of its children + txn_child_manager child_manager_s; + + // child manager for this transaction, all of its children, + // and all of its ancestors + txn_child_manager* child_manager; + + // These don't change but they're created in a way that's hard to make + // strictly const. + DB_TXN *container_db_txn; // reference to DB_TXN that contains this tokutxn + xid_omt_t *live_root_txn_list; // the root txns live when the root ancestor (self if a root) started. + struct XIDS_S *xids; // Represents the xid list + + struct tokutxn *snapshot_next; + struct tokutxn *snapshot_prev; + + bool begin_was_logged; + bool declared_read_only; // true if the txn was declared read only when began + + // These are not read until a commit, prepare, or abort starts, and + // they're "monotonic" (only go false->true) during operation: + bool do_fsync; + bool force_fsync_on_commit; //This transaction NEEDS an fsync once (if) it commits. (commit means root txn) + + // Not used until commit, prepare, or abort starts: + LSN do_fsync_lsn; + TOKU_XA_XID xa_xid; // for prepared transactions + TXN_PROGRESS_POLL_FUNCTION progress_poll_fun; + void *progress_poll_fun_extra; + + toku_mutex_t txn_lock; + // Protected by the txn lock: + toku::omt<struct ft*> open_fts; // a collection of the fts that we touched. Indexed by filenum. 
+ struct txn_roll_info roll_info; // Info used to manage rollback entries + + // mutex that protects the transition of the state variable + // the rest of the variables are used by the txn code and + // hot indexing to ensure that when hot indexing is processing a + // leafentry, a TOKUTXN cannot dissappear or change state out from + // underneath it + toku_mutex_t state_lock; + toku_cond_t state_cond; + TOKUTXN_STATE state; + uint32_t num_pin; // number of threads (all hot indexes) that want this + // txn to not transition to commit or abort + uint64_t client_id; +}; +typedef struct tokutxn *TOKUTXN; + +void toku_txn_lock(struct tokutxn *txn); +void toku_txn_unlock(struct tokutxn *txn); + +uint64_t toku_txn_get_root_id(struct tokutxn *txn); +bool txn_declared_read_only(struct tokutxn *txn); + +int toku_txn_begin_txn ( + DB_TXN *container_db_txn, + struct tokutxn *parent_tokutxn, + struct tokutxn **tokutxn, + struct tokulogger *logger, + TXN_SNAPSHOT_TYPE snapshot_type, + bool read_only + ); + +DB_TXN * toku_txn_get_container_db_txn (struct tokutxn *tokutxn); +void toku_txn_set_container_db_txn(struct tokutxn *txn, DB_TXN *db_txn); + +// toku_txn_begin_with_xid is called from recovery and has no containing DB_TXN +int toku_txn_begin_with_xid ( + struct tokutxn *parent_tokutxn, + struct tokutxn **tokutxn, + struct tokulogger *logger, + TXNID_PAIR xid, + TXN_SNAPSHOT_TYPE snapshot_type, + DB_TXN *container_db_txn, + bool for_recovery, + bool read_only + ); + +void toku_txn_update_xids_in_txn(struct tokutxn *txn, TXNID xid); + +int toku_txn_load_txninfo (struct tokutxn *txn, struct txninfo *info); + +int toku_txn_commit_txn (struct tokutxn *txn, int nosync, + TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra); +int toku_txn_commit_with_lsn(struct tokutxn *txn, int nosync, LSN oplsn, + TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra); + +int toku_txn_abort_txn(struct tokutxn *txn, + TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra); +int toku_txn_abort_with_lsn(struct tokutxn *txn, LSN oplsn, + TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra); + +int toku_txn_discard_txn(struct tokutxn *txn); + +void toku_txn_prepare_txn (struct tokutxn *txn, TOKU_XA_XID *xid); +// Effect: Do the internal work of preparing a transaction (does not log the prepare record). + +void toku_txn_get_prepared_xa_xid(struct tokutxn *txn, TOKU_XA_XID *xa_xid); +// Effect: Fill in the XID information for a transaction. The caller allocates the XID and the function fills in values. 
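(Editorial sketch, not part of the patch.) The declarations above lay out the tokutxn lifecycle introduced by the relocated ft/txn/txn.h: begin a transaction against a logger, commit or abort it, then close it. The fragment below is a minimal illustration under assumptions: it passes a null container DB_TXN and no parent, skips the log-fsync handling real callers at the ydb layer perform, and omits most error handling.

    // Hedged sketch of the lifecycle implied by the API above (C++).
    // Assumes the translation unit already includes "ft/txn/txn.h" and the
    // db.h typedefs it relies on; 'logger' is an already-open tokulogger.
    static void example_txn_roundtrip(struct tokulogger *logger) {
        struct tokutxn *txn = nullptr;
        int r = toku_txn_begin_txn(nullptr,            // container DB_TXN (illustrative)
                                   nullptr,            // no parent: this is a root txn
                                   &txn, logger,
                                   TXN_SNAPSHOT_ROOT,  // repeatable-read style snapshot
                                   false);             // not read only
        if (r != 0) return;

        // ... operate on fts under this txn ...

        (void) toku_txn_commit_txn(txn, false /* nosync */, nullptr, nullptr);
        // Real callers check the result and consult toku_txn_get_fsync_info() /
        // toku_txn_maybe_fsync_log() before closing; elided in this sketch.
        toku_txn_close_txn(txn);   // "Complete and destroy a txn"
    }

Whether a snapshot is actually taken follows txn_needs_snapshot() above: TXN_SNAPSHOT_ROOT takes one only for root transactions, TXN_SNAPSHOT_CHILD takes one per child, and TXN_SNAPSHOT_NONE never does.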
+ +void toku_txn_maybe_fsync_log(struct tokulogger *logger, LSN do_fsync_lsn, bool do_fsync); + +void toku_txn_get_fsync_info(struct tokutxn *ttxn, bool* do_fsync, LSN* do_fsync_lsn); + +// Complete and destroy a txn +void toku_txn_close_txn(struct tokutxn *txn); + +// Remove a txn from any live txn lists +void toku_txn_complete_txn(struct tokutxn *txn); + +// Free the memory of a txn +void toku_txn_destroy_txn(struct tokutxn *txn); + +struct XIDS_S *toku_txn_get_xids(struct tokutxn *txn); + +// Force fsync on commit +void toku_txn_force_fsync_on_commit(struct tokutxn *txn); + +typedef enum { + TXN_BEGIN, // total number of transactions begun (does not include recovered txns) + TXN_READ_BEGIN, // total number of read only transactions begun (does not include recovered txns) + TXN_COMMIT, // successful commits + TXN_ABORT, + TXN_STATUS_NUM_ROWS +} txn_status_entry; + +typedef struct { + bool initialized; + TOKU_ENGINE_STATUS_ROW_S status[TXN_STATUS_NUM_ROWS]; +} TXN_STATUS_S, *TXN_STATUS; + +void toku_txn_get_status(TXN_STATUS s); + +bool toku_is_txn_in_live_root_txn_list(const xid_omt_t &live_root_txn_list, TXNID xid); + +TXNID toku_get_oldest_in_live_root_txn_list(struct tokutxn *txn); + +TOKUTXN_STATE toku_txn_get_state(struct tokutxn *txn); + +struct tokulogger_preplist { + TOKU_XA_XID xid; + DB_TXN *txn; +}; +int toku_logger_recover_txn (struct tokulogger *logger, struct tokulogger_preplist preplist[/*count*/], long count, /*out*/ long *retp, uint32_t flags); + +void toku_maybe_log_begin_txn_for_write_operation(struct tokutxn *txn); + +// Return whether txn (or it's descendents) have done no work. +bool toku_txn_is_read_only(struct tokutxn *txn); + +void toku_txn_lock_state(struct tokutxn *txn); +void toku_txn_unlock_state(struct tokutxn *txn); +void toku_txn_pin_live_txn_unlocked(struct tokutxn *txn); +void toku_txn_unpin_live_txn(struct tokutxn *txn); + +bool toku_txn_has_spilled_rollback(struct tokutxn *txn); + +uint64_t toku_txn_get_client_id(struct tokutxn *txn); +void toku_txn_set_client_id(struct tokutxn *txn, uint64_t client_id); + +// +// This function is used by the leafentry iterators. +// returns TOKUDB_ACCEPT if live transaction context is allowed to read a value +// that is written by transaction with LSN of id +// live transaction context may read value if either id is the root ancestor of context, or if +// id was committed before context's snapshot was taken. 
+// For id to be committed before context's snapshot was taken, the following must be true: +// - id < context->snapshot_txnid64 AND id is not in context's live root transaction list +// For the above to NOT be true: +// - id > context->snapshot_txnid64 OR id is in context's live root transaction list +// +int toku_txn_reads_txnid(TXNID txnid, struct tokutxn *txn); + +void txn_status_init(void); + +void txn_status_destroy(void); + +// For serialize / deserialize + +#include "ft/serialize/wbuf.h" + +static inline void wbuf_TXNID(struct wbuf *wb, TXNID txnid) { + wbuf_ulonglong(wb, txnid); +} + +static inline void wbuf_nocrc_TXNID(struct wbuf *wb, TXNID txnid) { + wbuf_nocrc_ulonglong(wb, txnid); +} + +static inline void wbuf_nocrc_TXNID_PAIR(struct wbuf *wb, TXNID_PAIR txnid) { + wbuf_nocrc_ulonglong(wb, txnid.parent_id64); + wbuf_nocrc_ulonglong(wb, txnid.child_id64); +} + +static inline void wbuf_nocrc_LSN(struct wbuf *wb, LSN lsn) { + wbuf_nocrc_ulonglong(wb, lsn.lsn); +} + +static inline void wbuf_LSN(struct wbuf *wb, LSN lsn) { + wbuf_ulonglong(wb, lsn.lsn); +} + +#include "ft/serialize/rbuf.h" + +static inline void rbuf_TXNID(struct rbuf *rb, TXNID *txnid) { + *txnid = rbuf_ulonglong(rb); +} + +static inline void rbuf_TXNID_PAIR(struct rbuf *rb, TXNID_PAIR *txnid) { + txnid->parent_id64 = rbuf_ulonglong(rb); + txnid->child_id64 = rbuf_ulonglong(rb); +} + +static inline void rbuf_ma_TXNID(struct rbuf *rb, memarena *UU(ma), TXNID *txnid) { + rbuf_TXNID(rb, txnid); +} + +static inline void rbuf_ma_TXNID_PAIR (struct rbuf *r, memarena *ma __attribute__((__unused__)), TXNID_PAIR *txnid) { + rbuf_TXNID_PAIR(r, txnid); +} + +static inline LSN rbuf_LSN(struct rbuf *rb) { + LSN lsn = { .lsn = rbuf_ulonglong(rb) }; + return lsn; +} diff --git a/storage/tokudb/ft-index/ft/txn_child_manager.cc b/storage/tokudb/ft-index/ft/txn/txn_child_manager.cc index bb74a1cb8ae..3a006285e20 100644 --- a/storage/tokudb/ft-index/ft/txn_child_manager.cc +++ b/storage/tokudb/ft-index/ft/txn/txn_child_manager.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,8 +89,8 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
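(Editorial sketch, not part of the patch.) The comment block above toku_txn_reads_txnid spells out the snapshot-visibility rule; later in this patch the function is wired in as the leafentry-iterator callback (see le_val_is_del / le_extract_val in ule.cc). As a standalone illustration of the rule, a reader-side check reduces to:

    // Hedged sketch: may 'reader' see a value written by transaction 'writer_xid'?
    // toku_txn_reads_txnid() returns TOKUDB_ACCEPT when the write is visible in
    // the reader's snapshot, 0 otherwise; the wrapper name is invented here.
    static bool value_visible_to(TXNID writer_xid, struct tokutxn *reader) {
        return toku_txn_reads_txnid(writer_xid, reader) == TOKUDB_ACCEPT;
    }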
-#include "log-internal.h" -#include "txn_child_manager.h" +#include "ft/logger/log-internal.h" +#include "ft/txn/txn_child_manager.h" // // initialized a txn_child_manager, diff --git a/storage/tokudb/ft-index/ft/txn_child_manager.h b/storage/tokudb/ft-index/ft/txn/txn_child_manager.h index 07cf2ee3b5e..99d98e2fe59 100644 --- a/storage/tokudb/ft-index/ft/txn_child_manager.h +++ b/storage/tokudb/ft-index/ft/txn/txn_child_manager.h @@ -1,9 +1,7 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_TXN_CHILD_MANAGER_H -#define TOKU_TXN_CHILD_MANAGER_H -#ident "$Id: rollback.h 49033 2012-10-17 18:48:30Z zardosht $" +#ident "$Id: txn/rollback.h 49033 2012-10-17 18:48:30Z zardosht $" /* COPYING CONDITIONS NOTICE: @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,11 +87,15 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + +// We should be including ft/txn/txn.h here but that header includes this one, +// so we don't. +#include "portability/toku_pthread.h" + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "txn_manager.h" - class txn_child_manager { public: void init (TOKUTXN root); @@ -104,17 +106,15 @@ public: void suspend(); void resume(); void find_tokutxn_by_xid_unlocked(TXNID_PAIR xid, TOKUTXN* result); - int iterate(txn_mgr_iter_callback cb, void* extra); + int iterate(int (*cb)(TOKUTXN txn, void *extra), void* extra); private: TXNID m_last_xid; TOKUTXN m_root; toku_mutex_t m_mutex; -friend class txn_child_manager_unit_test; + friend class txn_child_manager_unit_test; }; ENSURE_POD(txn_child_manager); - -#endif // TOKU_TXN_CHILD_MANAGER_H diff --git a/storage/tokudb/ft-index/ft/txn_manager.cc b/storage/tokudb/ft-index/ft/txn/txn_manager.cc index 12c6ba4e887..570174f9b9f 100644 --- a/storage/tokudb/ft-index/ft/txn_manager.cc +++ b/storage/tokudb/ft-index/ft/txn/txn_manager.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,16 +89,15 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#include <toku_race_tools.h> +#include "portability/toku_race_tools.h" -#include <util/omt.h> - -#include "log-internal.h" -#include "txn.h" -#include "checkpoint.h" -#include "ule.h" -#include "txn_manager.h" -#include "rollback.h" +#include "ft/cachetable/checkpoint.h" +#include "ft/logger/log-internal.h" +#include "ft/ule.h" +#include "ft/txn/txn.h" +#include "ft/txn/txn_manager.h" +#include "ft/txn/rollback.h" +#include "util/omt.h" bool garbage_collection_debug = false; @@ -339,7 +338,11 @@ int live_root_txn_list_iter(const TOKUTXN &live_xid, const uint32_t UU(index), T // Create list of root transactions that were live when this txn began. static inline void setup_live_root_txn_list(xid_omt_t* live_root_txnid, xid_omt_t* live_root_txn_list) { - live_root_txn_list->clone(*live_root_txnid); + if (live_root_txnid->size() > 0) { + live_root_txn_list->clone(*live_root_txnid); + } else { + live_root_txn_list->create_no_array(); + } } //Heaviside function to search through an OMT by a TXNID diff --git a/storage/tokudb/ft-index/ft/txn_manager.h b/storage/tokudb/ft-index/ft/txn/txn_manager.h index 12267297a0e..5df1e23115c 100644 --- a/storage/tokudb/ft-index/ft/txn_manager.h +++ b/storage/tokudb/ft-index/ft/txn/txn_manager.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKUTXN_MANAGER_H -#define TOKUTXN_MANAGER_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,14 +87,17 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <toku_portability.h> -#include <util/omt.h> -#include "fttypes.h" -#include <portability/toku_pthread.h> -#include <util/omt.h> +#include "portability/toku_portability.h" +#include "portability/toku_pthread.h" + +#include "ft/txn/txn.h" + +typedef struct txn_manager *TXN_MANAGER; struct referenced_xid_tuple { TXNID begin_id; @@ -104,10 +105,6 @@ struct referenced_xid_tuple { uint32_t references; }; -typedef toku::omt<TOKUTXN> txn_omt_t; -typedef toku::omt<TXNID> xid_omt_t; -typedef toku::omt<struct referenced_xid_tuple, struct referenced_xid_tuple *> rx_omt_t; - struct txn_manager { toku_mutex_t txn_manager_lock; // a lock protecting this object txn_omt_t live_root_txns; // a sorted tree. 
@@ -123,6 +120,7 @@ struct txn_manager { TXNID last_xid_seen_for_recover; TXNID last_calculated_oldest_referenced_xid; }; +typedef struct txn_manager *TXN_MANAGER; struct txn_manager_state { txn_manager_state(TXN_MANAGER mgr) : @@ -268,5 +266,3 @@ bool toku_txn_manager_txns_exist(TXN_MANAGER mgr); void toku_txn_manager_increase_last_xid(TXN_MANAGER mgr, uint64_t increment); TXNID toku_get_youngest_live_list_txnid_for(TXNID xc, const xid_omt_t &snapshot_txnids, const rx_omt_t &referenced_xids); - -#endif // TOKUTXN_MANAGER_H diff --git a/storage/tokudb/ft-index/ft/txn_state.h b/storage/tokudb/ft-index/ft/txn/txn_state.h index d8e192edec3..75c3f51ce79 100644 --- a/storage/tokudb/ft-index/ft/txn_state.h +++ b/storage/tokudb/ft-index/ft/txn/txn_state.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,10 +86,10 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#if !defined(TOKUTXN_STATE_H) -#define TOKUTXN_STATE_H // this is a separate file so that the hotindexing tests can see the txn states @@ -101,5 +101,3 @@ enum tokutxn_state { TOKUTXN_RETIRED, // txn no longer exists }; typedef enum tokutxn_state TOKUTXN_STATE; - -#endif diff --git a/storage/tokudb/ft-index/ft/xids.cc b/storage/tokudb/ft-index/ft/txn/xids.cc index 5733a10550f..6308f3c0368 100644 --- a/storage/tokudb/ft-index/ft/xids.cc +++ b/storage/tokudb/ft-index/ft/txn/xids.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -101,18 +101,15 @@ PATENT RIGHTS GRANT: * host order. */ - #include <errno.h> #include <string.h> -#include <toku_portability.h> -#include "fttypes.h" -#include "xids.h" -#include "xids-internal.h" -#include "toku_assert.h" -#include "memory.h" -#include <toku_htod.h> +#include "portability/memory.h" +#include "portability/toku_assert.h" +#include "portability/toku_htod.h" +#include "portability/toku_portability.h" +#include "ft/txn/xids.h" ///////////////////////////////////////////////////////////////////////////////// // This layer of abstraction (xids_xxx) understands xids<> and nothing else. @@ -124,22 +121,20 @@ PATENT RIGHTS GRANT: // the variable num_xids. // // The xids struct is immutable. The caller gets an initial version of XIDS -// by calling xids_get_root_xids(), which returns the constant struct +// by calling toku_xids_get_root_xids(), which returns the constant struct // representing the root transaction (id 0). When a transaction begins, // a new XIDS is created with the id of the current transaction appended to // the list. // // - // This is the xids list for a transactionless environment. // It is also the initial state of any xids list created for // nested transactions. 
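(Editorial sketch, not part of the patch.) The comments above describe the immutable xids<> stack: a caller starts from the constant root list and derives a longer list whenever a nested transaction begins. Under the renamed toku_xids_* API, the two-step path used by txn_create_xids() in txn.cc looks roughly like this; the MAX_TRANSACTION_RECORDS edge case and most error handling are abbreviated, and the function name is invented for the example.

    // Hedged sketch of deriving a child xids list from a parent's list.
    // Assumes the translation unit includes "ft/txn/xids.h".
    static XIDS example_child_xids(XIDS parent_xids, TXNID child_xid) {
        XIDS child = nullptr;
        if (toku_xids_can_create_child(parent_xids)) {
            // Same effect as toku_xids_create_child(), split into the
            // "unknown child" + "finalize" steps used when the child xid
            // is not known at creation time.
            int r = toku_xids_create_unknown_child(parent_xids, &child);
            if (r != 0) {
                return nullptr;
            }
            toku_xids_finalize_with_child(child, child_xid);
        }
        return child;   // eventually released with toku_xids_destroy(&child)
    }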
- XIDS -xids_get_root_xids(void) { - static const struct xids_t root_xids = { +toku_xids_get_root_xids(void) { + static const struct XIDS_S root_xids = { .num_xids = 0 }; @@ -148,14 +143,13 @@ xids_get_root_xids(void) { } bool -xids_can_create_child(XIDS xids) { +toku_xids_can_create_child(XIDS xids) { invariant(xids->num_xids < MAX_TRANSACTION_RECORDS); return (xids->num_xids + 1) != MAX_TRANSACTION_RECORDS; } - int -xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) { +toku_xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) { // Postcondition: // xids_p points to an xids that is an exact copy of parent_xids, but with room for one more xid. int rval; @@ -174,9 +168,9 @@ xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) { } void -xids_finalize_with_child(XIDS xids, TXNID this_xid) { +toku_xids_finalize_with_child(XIDS xids, TXNID this_xid) { // Precondition: - // - xids was created by xids_create_unknown_child + // - xids was created by toku_xids_create_unknown_child TXNID this_xid_disk = toku_htod64(this_xid); uint32_t num_child_xids = ++xids->num_xids; xids->ids[num_child_xids - 1] = this_xid_disk; @@ -185,21 +179,21 @@ xids_finalize_with_child(XIDS xids, TXNID this_xid) { // xids is immutable. This function creates a new xids by copying the // parent's list and then appending the xid of the new transaction. int -xids_create_child(XIDS parent_xids, // xids list for parent transaction - XIDS * xids_p, // xids list created - TXNID this_xid) { // xid of this transaction (new innermost) - bool can_create_child = xids_can_create_child(parent_xids); +toku_xids_create_child(XIDS parent_xids, // xids list for parent transaction + XIDS *xids_p, // xids list created + TXNID this_xid) { // xid of this transaction (new innermost) + bool can_create_child = toku_xids_can_create_child(parent_xids); if (!can_create_child) { return EINVAL; } - xids_create_unknown_child(parent_xids, xids_p); - xids_finalize_with_child(*xids_p, this_xid); + toku_xids_create_unknown_child(parent_xids, xids_p); + toku_xids_finalize_with_child(*xids_p, this_xid); return 0; } void -xids_create_from_buffer(struct rbuf *rb, // xids list for parent transaction - XIDS * xids_p) { // xids list created +toku_xids_create_from_buffer(struct rbuf *rb, // xids list for parent transaction + XIDS *xids_p) { // xids list created uint8_t num_xids = rbuf_char(rb); invariant(num_xids < MAX_TRANSACTION_RECORDS); XIDS CAST_FROM_VOIDP(xids, toku_xmalloc(sizeof(*xids) + num_xids*sizeof(xids->ids[0]))); @@ -211,61 +205,59 @@ xids_create_from_buffer(struct rbuf *rb, // xids list for parent transaction *xids_p = xids; } - void -xids_destroy(XIDS *xids_p) { - if (*xids_p != xids_get_root_xids()) toku_free(*xids_p); +toku_xids_destroy(XIDS *xids_p) { + if (*xids_p != toku_xids_get_root_xids()) toku_free(*xids_p); *xids_p = NULL; } - // Return xid at requested position. // If requesting an xid out of range (which will be the case if xids array is empty) // then return 0, the xid of the root transaction. 
TXNID -xids_get_xid(XIDS xids, uint8_t index) { - invariant(index < xids_get_num_xids(xids)); +toku_xids_get_xid(XIDS xids, uint8_t index) { + invariant(index < toku_xids_get_num_xids(xids)); TXNID rval = xids->ids[index]; rval = toku_dtoh64(rval); return rval; } uint8_t -xids_get_num_xids(XIDS xids) { +toku_xids_get_num_xids(XIDS xids) { uint8_t rval = xids->num_xids; return rval; } - // Return innermost xid TXNID -xids_get_innermost_xid(XIDS xids) { +toku_xids_get_innermost_xid(XIDS xids) { TXNID rval = TXNID_NONE; - if (xids_get_num_xids(xids)) { + if (toku_xids_get_num_xids(xids)) { // if clause above makes this cast ok - uint8_t innermost_xid = (uint8_t)(xids_get_num_xids(xids)-1); - rval = xids_get_xid(xids, innermost_xid); + uint8_t innermost_xid = (uint8_t) (toku_xids_get_num_xids(xids) - 1); + rval = toku_xids_get_xid(xids, innermost_xid); } return rval; } TXNID -xids_get_outermost_xid(XIDS xids) { +toku_xids_get_outermost_xid(XIDS xids) { TXNID rval = TXNID_NONE; - if (xids_get_num_xids(xids)) - rval = xids_get_xid(xids, 0); + if (toku_xids_get_num_xids(xids)) { + rval = toku_xids_get_xid(xids, 0); + } return rval; } void -xids_cpy(XIDS target, XIDS source) { - size_t size = xids_get_size(source); +toku_xids_cpy(XIDS target, XIDS source) { + size_t size = toku_xids_get_size(source); memcpy(target, source, size); } // return size in bytes uint32_t -xids_get_size(XIDS xids){ +toku_xids_get_size(XIDS xids) { uint32_t rval; uint8_t num_xids = xids->num_xids; rval = sizeof(*xids) + num_xids * sizeof(xids->ids[0]); @@ -273,7 +265,7 @@ xids_get_size(XIDS xids){ } uint32_t -xids_get_serialize_size(XIDS xids){ +toku_xids_get_serialize_size(XIDS xids) { uint32_t rval; uint8_t num_xids = xids->num_xids; rval = 1 + //num xids @@ -281,9 +273,8 @@ xids_get_serialize_size(XIDS xids){ return rval; } - unsigned char * -xids_get_end_of_array(XIDS xids) { +toku_xids_get_end_of_array(XIDS xids) { TXNID *r = xids->ids + xids->num_xids; return (unsigned char*)r; } @@ -297,13 +288,13 @@ void wbuf_nocrc_xids(struct wbuf *wb, XIDS xids) { } void -xids_fprintf(FILE* fp, XIDS xids) { +toku_xids_fprintf(FILE *fp, XIDS xids) { uint8_t index; - unsigned num_xids = xids_get_num_xids(xids); + unsigned num_xids = toku_xids_get_num_xids(xids); fprintf(fp, "[|%u| ", num_xids); - for (index = 0; index < xids_get_num_xids(xids); index++) { + for (index = 0; index < toku_xids_get_num_xids(xids); index++) { if (index) fprintf(fp, ","); - fprintf(fp, "%" PRIx64, xids_get_xid(xids, index)); + fprintf(fp, "%" PRIx64, toku_xids_get_xid(xids, index)); } fprintf(fp, "]"); } diff --git a/storage/tokudb/ft-index/ft/xids.h b/storage/tokudb/ft-index/ft/txn/xids.h index 45246785775..5b0e95c2eee 100644 --- a/storage/tokudb/ft-index/ft/xids.h +++ b/storage/tokudb/ft-index/ft/txn/xids.h @@ -12,9 +12,6 @@ * TokuWiki/Imp/TransactionsOverview. */ -#ifndef XIDS_H -#define XIDS_H - #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -44,7 +41,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -101,51 +98,73 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 
11/760379 and to the patents and/or patent applications resulting from it." -#include "rbuf.h" -#include "wbuf.h" -#include "tokuconst.h" +#include "ft/txn/txn.h" +#include "ft/serialize/rbuf.h" +#include "ft/serialize/wbuf.h" + +/* The number of transaction ids stored in the xids structure is + * represented by an 8-bit value. The value 255 is reserved. + * The constant MAX_NESTED_TRANSACTIONS is one less because + * one slot in the packed leaf entry is used for the implicit + * root transaction (id 0). + */ +enum { + MAX_NESTED_TRANSACTIONS = 253, + MAX_TRANSACTION_RECORDS = MAX_NESTED_TRANSACTIONS + 1 +}; + +// Variable size list of transaction ids (known in design doc as xids<>). +// ids[0] is the outermost transaction. +// ids[num_xids - 1] is the innermost transaction. +// Should only be accessed by accessor functions toku_xids_xxx, not directly. -//Retrieve an XIDS representing the root transaction. -XIDS xids_get_root_xids(void); +// If the xids struct is unpacked, the compiler aligns the ids[] and we waste a lot of space +struct __attribute__((__packed__)) XIDS_S { + // maximum value of MAX_TRANSACTION_RECORDS - 1 because transaction 0 is implicit + uint8_t num_xids; + TXNID ids[]; +}; +typedef struct XIDS_S *XIDS; -bool xids_can_create_child(XIDS xids); +// Retrieve an XIDS representing the root transaction. +XIDS toku_xids_get_root_xids(void); -void xids_cpy(XIDS target, XIDS source); +bool toku_xids_can_create_child(XIDS xids); + +void toku_xids_cpy(XIDS target, XIDS source); //Creates an XIDS representing this transaction. //You must pass in an XIDS representing the parent of this transaction. -int xids_create_child(XIDS parent_xids, XIDS *xids_p, TXNID this_xid); +int toku_xids_create_child(XIDS parent_xids, XIDS *xids_p, TXNID this_xid); -// The following two functions (in order) are equivalent to xids_create child, +// The following two functions (in order) are equivalent to toku_xids_create child, // but allow you to do most of the work without knowing the new xid. 
-int xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p); -void xids_finalize_with_child(XIDS xids, TXNID this_xid); +int toku_xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p); +void toku_xids_finalize_with_child(XIDS xids, TXNID this_xid); -void xids_create_from_buffer(struct rbuf *rb, XIDS * xids_p); +void toku_xids_create_from_buffer(struct rbuf *rb, XIDS *xids_p); -void xids_destroy(XIDS *xids_p); +void toku_xids_destroy(XIDS *xids_p); -TXNID xids_get_xid(XIDS xids, uint8_t index); +TXNID toku_xids_get_xid(XIDS xids, uint8_t index); -uint8_t xids_get_num_xids(XIDS xids); +uint8_t toku_xids_get_num_xids(XIDS xids); -TXNID xids_get_innermost_xid(XIDS xids); -TXNID xids_get_outermost_xid(XIDS xids); +TXNID toku_xids_get_innermost_xid(XIDS xids); +TXNID toku_xids_get_outermost_xid(XIDS xids); // return size in bytes -uint32_t xids_get_size(XIDS xids); +uint32_t toku_xids_get_size(XIDS xids); -uint32_t xids_get_serialize_size(XIDS xids); +uint32_t toku_xids_get_serialize_size(XIDS xids); -unsigned char *xids_get_end_of_array(XIDS xids); +unsigned char *toku_xids_get_end_of_array(XIDS xids); void wbuf_nocrc_xids(struct wbuf *wb, XIDS xids); -void xids_fprintf(FILE* fp, XIDS xids); - - - -#endif +void toku_xids_fprintf(FILE* fp, XIDS xids); diff --git a/storage/tokudb/ft-index/ft/ule-internal.h b/storage/tokudb/ft-index/ft/ule-internal.h index 00b9847a13f..9a42ead3d90 100644 --- a/storage/tokudb/ft-index/ft/ule-internal.h +++ b/storage/tokudb/ft-index/ft/ule-internal.h @@ -5,9 +5,6 @@ * ule mechanisms that do not belong in the public interface. */ -#ifndef TOKU_ULE_INTERNAL_H -#define TOKU_ULE_INTERNAL_H - #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -37,7 +34,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,6 +91,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -136,7 +135,7 @@ typedef struct ule { // unpacked leaf entry -void test_msg_modify_ule(ULE ule, FT_MSG msg); +void test_msg_modify_ule(ULE ule, const ft_msg &msg); ////////////////////////////////////////////////////////////////////////////////////// @@ -148,6 +147,7 @@ le_pack(ULE ule, // data to be packed into new leafentry uint32_t idx, void* keyp, uint32_t keylen, + uint32_t old_keylen, uint32_t old_le_size, LEAFENTRY * const new_leafentry_p, // this is what this function creates void **const maybe_free @@ -156,7 +156,3 @@ le_pack(ULE ule, // data to be packed into new leafentry size_t le_memsize_from_ule (ULE ule); void ule_cleanup(ULE ule); - - -#endif // TOKU_ULE_H - diff --git a/storage/tokudb/ft-index/ft/ule.cc b/storage/tokudb/ft-index/ft/ule.cc index c364fc4603e..03ec452cbd2 100644 --- a/storage/tokudb/ft-index/ft/ule.cc +++ b/storage/tokudb/ft-index/ft/ule.cc @@ -31,7 +31,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -102,27 +102,27 @@ PATENT RIGHTS GRANT: // See design documentation for nested transactions at // TokuWiki/Imp/TransactionsOverview. -#include <toku_portability.h> -#include "fttypes.h" -#include "ft-internal.h" - -#include <util/omt.h> - -#include "leafentry.h" -#include "xids.h" -#include "ft_msg.h" -#include "ule.h" -#include "txn_manager.h" -#include "ule-internal.h" -#include <util/status.h> -#include <util/scoped_malloc.h> -#include <util/partitioned_counter.h> +#include "portability/toku_portability.h" + +#include "ft/ft-internal.h" +#include "ft/leafentry.h" +#include "ft/logger/logger.h" +#include "ft/msg.h" +#include "ft/txn/txn.h" +#include "ft/txn/txn_manager.h" +#include "ft/ule.h" +#include "ft/ule-internal.h" +#include "ft/txn/xids.h" +#include "util/bytestring.h" +#include "util/omt.h" +#include "util/partitioned_counter.h" +#include "util/scoped_malloc.h" +#include "util/status.h" #define ULE_DEBUG 0 static uint32_t ule_get_innermost_numbytes(ULE ule, uint32_t keylen); - /////////////////////////////////////////////////////////////////////////////////// // Engine status // @@ -131,7 +131,7 @@ static uint32_t ule_get_innermost_numbytes(ULE ule, uint32_t keylen); static LE_STATUS_S le_status; -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(le_status, k, c, t, "le: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(le_status, k, c, t, "le: " l, inc) void toku_ule_status_init(void) { // Note, this function initializes the keyname, type, and legend fields. @@ -216,7 +216,7 @@ const UXR_S committed_delete = { // Local functions: static void msg_init_empty_ule(ULE ule); -static void msg_modify_ule(ULE ule, FT_MSG msg); +static void msg_modify_ule(ULE ule, const ft_msg &msg); static void ule_init_empty_ule(ULE ule); static void ule_do_implicit_promotions(ULE ule, XIDS xids); static void ule_try_promote_provisional_outermost(ULE ule, TXNID oldest_possible_live_xid); @@ -256,6 +256,7 @@ static void get_space_for_le( uint32_t idx, void* keyp, uint32_t keylen, + uint32_t old_keylen, uint32_t old_le_size, size_t size, LEAFENTRY* new_le_space, @@ -268,7 +269,7 @@ static void get_space_for_le( else { // this means we are overwriting something if (old_le_size > 0) { - data_buffer->get_space_for_overwrite(idx, keyp, keylen, old_le_size, size, new_le_space, maybe_free); + data_buffer->get_space_for_overwrite(idx, keyp, keylen, old_keylen, old_le_size, size, new_le_space, maybe_free); } // this means we are inserting something new else { @@ -327,11 +328,11 @@ xid_reads_committed_xid(TXNID tl1, TXNID xc, const xid_omt_t &snapshot_txnids, c // static void ule_simple_garbage_collection(ULE ule, txn_gc_info *gc_info) { - uint32_t curr_index = 0; - uint32_t num_entries; if (ule->num_cuxrs == 1) { - goto done; + return; } + + uint32_t curr_index = 0; if (gc_info->mvcc_needed) { // starting at the top of the committed stack, find the first // uxr with a txnid that is less than oldest_referenced_xid @@ -341,37 +342,34 @@ ule_simple_garbage_collection(ULE ule, txn_gc_info *gc_info) { break; } } - } - else { + } else { // if mvcc is not needed, we can need the top committed // value and nothing else curr_index = ule->num_cuxrs - 1; } + // curr_index is now set to the youngest uxr older than oldest_referenced_xid - if (curr_index == 0) { - goto done; + // so if it's not the bottom of the stack.. 
+ if (curr_index != 0) { + // ..then we need to get rid of the entries below curr_index + uint32_t num_entries = ule->num_cuxrs + ule->num_puxrs - curr_index; + memmove(&ule->uxrs[0], &ule->uxrs[curr_index], num_entries * sizeof(ule->uxrs[0])); + ule->uxrs[0].xid = TXNID_NONE; // New 'bottom of stack' loses its TXNID + ule->num_cuxrs -= curr_index; } - - // now get rid of the entries below curr_index - num_entries = ule->num_cuxrs + ule->num_puxrs - curr_index; - memmove(&ule->uxrs[0], &ule->uxrs[curr_index], num_entries * sizeof(ule->uxrs[0])); - ule->uxrs[0].xid = TXNID_NONE; //New 'bottom of stack' loses its TXNID - ule->num_cuxrs -= curr_index; - -done:; } +// TODO: Clean this up +extern bool garbage_collection_debug; + static void ule_garbage_collect(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &referenced_xids, const xid_omt_t &live_root_txns) { - if (ule->num_cuxrs == 1) goto done; - // will fail if too many num_cuxrs - bool necessary_static[MAX_TRANSACTION_RECORDS]; - bool *necessary; - necessary = necessary_static; - if (ule->num_cuxrs >= MAX_TRANSACTION_RECORDS) { - XMALLOC_N(ule->num_cuxrs, necessary); + if (ule->num_cuxrs == 1) { + return; } - memset(necessary, 0, sizeof(necessary[0])*ule->num_cuxrs); + + toku::scoped_calloc necessary_buf(ule->num_cuxrs * sizeof(bool)); + bool *necessary = reinterpret_cast<bool *>(necessary_buf.get()); uint32_t curr_committed_entry; curr_committed_entry = ule->num_cuxrs - 1; @@ -401,24 +399,21 @@ ule_garbage_collect(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &ref } tl1 = toku_get_youngest_live_list_txnid_for(xc, snapshot_xids, referenced_xids); - if (tl1 == xc) { - // if tl1 == xc, that means xc should be live and show up in - // live_root_txns, which we check above. So, if we get - // here, something is wrong. - assert(false); - } + + // if tl1 == xc, that means xc should be live and show up in live_root_txns, which we check above. + invariant(tl1 != xc); + if (tl1 == TXNID_NONE) { // set tl1 to youngest live transaction older than ule->uxrs[curr_committed_entry]->xid tl1 = get_next_older_txnid(xc, snapshot_xids); if (tl1 == TXNID_NONE) { - //Remainder is garbage, we're done + // remainder is garbage, we're done break; } } - if (garbage_collection_debug) - { + if (garbage_collection_debug) { int r = snapshot_xids.find_zero<TXNID, toku_find_xid_by_xid>(tl1, nullptr, nullptr); - invariant(r==0); //make sure that the txn you are claiming is live is actually live + invariant_zero(r); // make sure that the txn you are claiming is live is actually live } // // tl1 should now be set @@ -432,30 +427,23 @@ ule_garbage_collect(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &ref curr_committed_entry--; } } - uint32_t first_free; - first_free = 0; - uint32_t i; - for (i = 0; i < ule->num_cuxrs; i++) { - //Shift values to 'delete' garbage values. + uint32_t first_free = 0; + for (uint32_t i = 0; i < ule->num_cuxrs; i++) { + // Shift values to 'delete' garbage values. 
if (necessary[i]) { ule->uxrs[first_free] = ule->uxrs[i]; first_free++; } } - uint32_t saved; - saved = first_free; + uint32_t saved = first_free; invariant(saved <= ule->num_cuxrs); invariant(saved >= 1); ule->uxrs[0].xid = TXNID_NONE; //New 'bottom of stack' loses its TXNID if (first_free != ule->num_cuxrs) { - //Shift provisional values + // Shift provisional values memmove(&ule->uxrs[first_free], &ule->uxrs[ule->num_cuxrs], ule->num_puxrs * sizeof(ule->uxrs[0])); } ule->num_cuxrs = saved; - if (necessary != necessary_static) { - toku_free(necessary); - } -done:; } static size_t ule_packed_memsize(ULE ule) { @@ -492,10 +480,11 @@ enum { // Otehrwise the new_leafentry_p points at the new leaf entry. // As of October 2011, this function always returns 0. void -toku_le_apply_msg(FT_MSG msg, +toku_le_apply_msg(const ft_msg &msg, LEAFENTRY old_leafentry, // NULL if there was no stored data. bn_data* data_buffer, // bn_data storing leafentry, if NULL, means there is no bn_data uint32_t idx, // index in data_buffer where leafentry is stored (and should be replaced + uint32_t old_keylen, // length of the any key in data_buffer txn_gc_info *gc_info, LEAFENTRY *new_leafentry_p, int64_t * numbytes_delta_p) { // change in total size of key and val, not including any overhead @@ -505,7 +494,7 @@ toku_le_apply_msg(FT_MSG msg, int64_t oldnumbytes = 0; int64_t newnumbytes = 0; uint64_t oldmemsize = 0; - uint32_t keylen = ft_msg_get_keylen(msg); + uint32_t keylen = msg.kdbt()->size; if (old_leafentry == NULL) { msg_init_empty_ule(&ule); @@ -550,8 +539,9 @@ toku_le_apply_msg(FT_MSG msg, &ule, // create packed leafentry data_buffer, idx, - ft_msg_get_key(msg), // contract of this function is caller has this set, always + msg.kdbt()->data, // contract of this function is caller has this set, always keylen, // contract of this function is caller has this set, always + old_keylen, oldmemsize, new_leafentry_p, &maybe_free @@ -655,6 +645,7 @@ toku_le_garbage_collect(LEAFENTRY old_leaf_entry, idx, keyp, keylen, + keylen, // old_keylen, same because the key isn't going to change for gc old_mem_size, new_leaf_entry, &maybe_free @@ -686,10 +677,10 @@ msg_init_empty_ule(ULE ule) { // Purpose is to modify the unpacked leafentry in our private workspace. // static void -msg_modify_ule(ULE ule, FT_MSG msg) { - XIDS xids = ft_msg_get_xids(msg); - invariant(xids_get_num_xids(xids) < MAX_TRANSACTION_RECORDS); - enum ft_msg_type type = ft_msg_get_type(msg); +msg_modify_ule(ULE ule, const ft_msg &msg) { + XIDS xids = msg.xids(); + invariant(toku_xids_get_num_xids(xids) < MAX_TRANSACTION_RECORDS); + enum ft_msg_type type = msg.type(); if (type != FT_OPTIMIZE && type != FT_OPTIMIZE_FOR_UPGRADE) { ule_do_implicit_promotions(ule, xids); } @@ -702,9 +693,9 @@ msg_modify_ule(ULE ule, FT_MSG msg) { //fall through to FT_INSERT on purpose. } case FT_INSERT: { - uint32_t vallen = ft_msg_get_vallen(msg); + uint32_t vallen = msg.vdbt()->size; invariant(IS_VALID_LEN(vallen)); - void * valp = ft_msg_get_val(msg); + void * valp = msg.vdbt()->data; ule_apply_insert(ule, xids, vallen, valp); break; } @@ -731,25 +722,23 @@ msg_modify_ule(ULE ule, FT_MSG msg) { assert(false); // These messages don't get this far. Instead they get translated (in setval_fun in do_update) into FT_INSERT messages. 
break; default: - assert(false /* illegal FT_MSG.type */); + assert(false); /* illegal ft msg type */ break; } } -void -test_msg_modify_ule(ULE ule, FT_MSG msg){ +void test_msg_modify_ule(ULE ule, const ft_msg &msg){ msg_modify_ule(ule,msg); } - static void ule_optimize(ULE ule, XIDS xids) { if (ule->num_puxrs) { TXNID uncommitted = ule->uxrs[ule->num_cuxrs].xid; // outermost uncommitted TXNID oldest_living_xid = TXNID_NONE; - uint32_t num_xids = xids_get_num_xids(xids); + uint32_t num_xids = toku_xids_get_num_xids(xids); if (num_xids > 0) { invariant(num_xids==1); - oldest_living_xid = xids_get_xid(xids, 0); + oldest_living_xid = toku_xids_get_xid(xids, 0); } if (oldest_living_xid == TXNID_NONE || uncommitted < oldest_living_xid) { ule_promote_provisional_innermost_to_committed(ule); @@ -974,6 +963,7 @@ le_pack(ULE ule, // data to be packed into new leafentry uint32_t idx, void* keyp, uint32_t keylen, + uint32_t old_keylen, uint32_t old_le_size, LEAFENTRY * const new_leafentry_p, // this is what this function creates void **const maybe_free @@ -996,7 +986,8 @@ le_pack(ULE ule, // data to be packed into new leafentry } } if (data_buffer && old_le_size > 0) { - data_buffer->delete_leafentry(idx, keylen, old_le_size); + // must pass old_keylen and old_le_size, since that's what is actually stored in data_buffer + data_buffer->delete_leafentry(idx, old_keylen, old_le_size); } *new_leafentry_p = NULL; rval = 0; @@ -1005,7 +996,7 @@ le_pack(ULE ule, // data to be packed into new leafentry found_insert: memsize = le_memsize_from_ule(ule); LEAFENTRY new_leafentry; - get_space_for_le(data_buffer, idx, keyp, keylen, old_le_size, memsize, &new_leafentry, maybe_free); + get_space_for_le(data_buffer, idx, keyp, keylen, old_keylen, old_le_size, memsize, &new_leafentry, maybe_free); //p always points to first unused byte after leafentry we are packing uint8_t *p; @@ -1343,9 +1334,9 @@ int le_latest_is_del(LEAFENTRY le) { bool le_has_xids(LEAFENTRY le, XIDS xids) { //Read num_uxrs - uint32_t num_xids = xids_get_num_xids(xids); + uint32_t num_xids = toku_xids_get_num_xids(xids); invariant(num_xids > 0); //Disallow checking for having TXNID_NONE - TXNID xid = xids_get_xid(xids, 0); + TXNID xid = toku_xids_get_xid(xids, 0); invariant(xid!=TXNID_NONE); bool rval = (le_outermost_uncommitted_xid(le) == xid); @@ -1595,13 +1586,13 @@ ule_do_implicit_promotions(ULE ule, XIDS xids) { //Optimization for (most) common case. //No commits necessary if everything is already committed. if (ule->num_puxrs > 0) { - int num_xids = xids_get_num_xids(xids); + int num_xids = toku_xids_get_num_xids(xids); invariant(num_xids>0); uint32_t max_index = ule->num_cuxrs + min_i32(ule->num_puxrs, num_xids) - 1; uint32_t ica_index = max_index; uint32_t index; for (index = ule->num_cuxrs; index <= max_index; index++) { - TXNID current_msg_xid = xids_get_xid(xids, index - ule->num_cuxrs); + TXNID current_msg_xid = toku_xids_get_xid(xids, index - ule->num_cuxrs); TXNID current_ule_xid = ule_get_xid(ule, index); if (current_msg_xid != current_ule_xid) { //ica is innermost transaction with matching xids. 
@@ -1691,7 +1682,7 @@ ule_promote_provisional_innermost_to_index(ULE ule, uint32_t index) { static void ule_apply_insert(ULE ule, XIDS xids, uint32_t vallen, void * valp) { ule_prepare_for_new_uxr(ule, xids); - TXNID this_xid = xids_get_innermost_xid(xids); // xid of transaction doing this insert + TXNID this_xid = toku_xids_get_innermost_xid(xids); // xid of transaction doing this insert ule_push_insert_uxr(ule, this_xid == TXNID_NONE, this_xid, vallen, valp); } @@ -1699,7 +1690,7 @@ ule_apply_insert(ULE ule, XIDS xids, uint32_t vallen, void * valp) { static void ule_apply_delete(ULE ule, XIDS xids) { ule_prepare_for_new_uxr(ule, xids); - TXNID this_xid = xids_get_innermost_xid(xids); // xid of transaction doing this delete + TXNID this_xid = toku_xids_get_innermost_xid(xids); // xid of transaction doing this delete ule_push_delete_uxr(ule, this_xid == TXNID_NONE, this_xid); } @@ -1710,7 +1701,7 @@ ule_apply_delete(ULE ule, XIDS xids) { // with placeholders. static void ule_prepare_for_new_uxr(ULE ule, XIDS xids) { - TXNID this_xid = xids_get_innermost_xid(xids); + TXNID this_xid = toku_xids_get_innermost_xid(xids); //This is for LOADER_USE_PUTS or transactionless environment //where messages use XIDS of 0 if (this_xid == TXNID_NONE && ule_get_innermost_xid(ule) == TXNID_NONE) { @@ -1735,7 +1726,7 @@ ule_prepare_for_new_uxr(ULE ule, XIDS xids) { // Remember, the innermost uxr can only be an insert or a delete, not a placeholder. static void ule_apply_abort(ULE ule, XIDS xids) { - TXNID this_xid = xids_get_innermost_xid(xids); // xid of transaction doing this abort + TXNID this_xid = toku_xids_get_innermost_xid(xids); // xid of transaction doing this abort invariant(this_xid!=TXNID_NONE); UXR innermost = ule_get_innermost_uxr(ule); // need to check for provisional entries in ule, otherwise @@ -1766,7 +1757,7 @@ ule_apply_broadcast_commit_all (ULE ule) { // If this transaction did modify the leafentry, then promote whatever it did. // Remember, the innermost uxr can only be an insert or a delete, not a placeholder. void ule_apply_commit(ULE ule, XIDS xids) { - TXNID this_xid = xids_get_innermost_xid(xids); // xid of transaction committing + TXNID this_xid = toku_xids_get_innermost_xid(xids); // xid of transaction committing invariant(this_xid!=TXNID_NONE); // need to check for provisional entries in ule, otherwise // there is nothing to abort, not checking this may result @@ -1908,7 +1899,7 @@ ule_add_placeholders(ULE ule, XIDS xids) { //Placeholders can be placed on top of the committed uxr. 
invariant(ule->num_cuxrs > 0); - uint32_t num_xids = xids_get_num_xids(xids); + uint32_t num_xids = toku_xids_get_num_xids(xids); // we assume that implicit promotion has happened // when we get this call, so the number of xids MUST // be greater than the number of provisional entries @@ -1916,12 +1907,12 @@ ule_add_placeholders(ULE ule, XIDS xids) { // make sure that the xids stack matches up to a certain amount // this first for loop is just debug code for (uint32_t i = 0; i < ule->num_puxrs; i++) { - TXNID current_msg_xid = xids_get_xid(xids, i); + TXNID current_msg_xid = toku_xids_get_xid(xids, i); TXNID current_ule_xid = ule_get_xid(ule, i + ule->num_cuxrs); invariant(current_msg_xid == current_ule_xid); } for (uint32_t i = ule->num_puxrs; i < num_xids-1; i++) { - TXNID current_msg_xid = xids_get_xid(xids, i); + TXNID current_msg_xid = toku_xids_get_xid(xids, i); ule_push_placeholder_uxr(ule, current_msg_xid); } } @@ -2073,7 +2064,7 @@ ule_verify_xids(ULE ule, uint32_t interesting, TXNID *xids) { // is_delp - output parameter that returns answer // context - parameter for f // -int +static int le_iterate_is_del(LEAFENTRY le, LE_ITERATE_CALLBACK f, bool *is_delp, TOKUTXN context) { #if ULE_DEBUG ULE_S ule; @@ -2142,6 +2133,27 @@ cleanup: } // +// Returns true if the value that is to be read is empty. +// +int le_val_is_del(LEAFENTRY le, bool is_snapshot_read, TOKUTXN txn) { + int rval; + if (is_snapshot_read) { + bool is_del = false; + le_iterate_is_del( + le, + toku_txn_reads_txnid, + &is_del, + txn + ); + rval = is_del; + } + else { + rval = le_latest_is_del(le); + } + return rval; +} + +// // Iterates over "possible" TXNIDs in a leafentry's stack, until one is accepted by 'f'. Set // valpp and vallenp to value and length associated with accepted TXNID // The "possible" TXNIDs are: @@ -2261,6 +2273,27 @@ cleanup: return r; } +void le_extract_val(LEAFENTRY le, + // should we return the entire leafentry as the val? + bool is_leaf_mode, bool is_snapshot_read, + TOKUTXN ttxn, uint32_t *vallen, void **val) { + if (is_leaf_mode) { + *val = le; + *vallen = leafentry_memsize(le); + } else if (is_snapshot_read) { + int r = le_iterate_val( + le, + toku_txn_reads_txnid, + val, + vallen, + ttxn + ); + lazy_assert_zero(r); + } else { + *val = le_latest_val_and_len(le, vallen); + } +} + // This is an on-disk format. static_asserts verify everything is packed and aligned correctly. struct __attribute__ ((__packed__)) leafentry_13 { struct leafentry_committed_13 { @@ -2467,6 +2500,7 @@ toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry, nullptr, //only matters if we are passing in a bn_data 0, //only matters if we are passing in a bn_data 0, //only matters if we are passing in a bn_data + 0, //only matters if we are passing in a bn_data new_leafentry_p, nullptr //only matters if we are passing in a bn_data ); diff --git a/storage/tokudb/ft-index/ft/ule.h b/storage/tokudb/ft-index/ft/ule.h index 0dd34212ff1..337abf25a5f 100644 --- a/storage/tokudb/ft-index/ft/ule.h +++ b/storage/tokudb/ft-index/ft/ule.h @@ -6,9 +6,6 @@ * requirements of the nested transaction logic belongs here. */ -#ifndef TOKU_ULE_H -#define TOKU_ULE_H - #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -38,7 +35,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,11 +92,13 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. 
All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #include "leafentry.h" -#include "txn_manager.h" +#include "txn/txn_manager.h" #include <util/mempool.h> void toku_ule_status_init(void); @@ -130,5 +129,3 @@ TXNID uxr_get_txnid(UXRHANDLE uxr); //1 does much slower debugging #define GARBAGE_COLLECTION_DEBUG 0 - -#endif // TOKU_ULE_H diff --git a/storage/tokudb/ft-index/ft/valgrind.suppressions b/storage/tokudb/ft-index/ft/valgrind.suppressions index b1ee1662079..d8b9b09bd1f 100644 --- a/storage/tokudb/ft-index/ft/valgrind.suppressions +++ b/storage/tokudb/ft-index/ft/valgrind.suppressions @@ -281,3 +281,16 @@ fun:_dl_start obj:/lib/x86_64-linux-gnu/ld-2.17.so } +{ + <ld_is_not_clean_on_arch_linux_june_2014> + Memcheck:Leak + match-leak-kinds: reachable + fun:calloc + obj:/usr/lib/libdl-2.19.so + fun:dlsym + fun:_Z19toku_memory_startupv + fun:call_init.part.0 + fun:_dl_init + obj:/usr/lib/ld-2.19.so +} + diff --git a/storage/tokudb/ft-index/locktree/concurrent_tree.cc b/storage/tokudb/ft-index/locktree/concurrent_tree.cc index 37fa8eee0cb..b472be23111 100644 --- a/storage/tokudb/ft-index/locktree/concurrent_tree.cc +++ b/storage/tokudb/ft-index/locktree/concurrent_tree.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: #include <toku_assert.h> -void concurrent_tree::create(comparator *cmp) { +void concurrent_tree::create(const comparator *cmp) { // start with an empty root node. we do this instead of // setting m_root to null so there's always a root to lock m_root.create_root(cmp); diff --git a/storage/tokudb/ft-index/locktree/concurrent_tree.h b/storage/tokudb/ft-index/locktree/concurrent_tree.h index 740a5f1311c..82977bbf5f1 100644 --- a/storage/tokudb/ft-index/locktree/concurrent_tree.h +++ b/storage/tokudb/ft-index/locktree/concurrent_tree.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef CONCURRENT_TREE_H -#define CONCURRENT_TREE_H - #include <ft/comparator.h> #include "treenode.h" @@ -174,7 +173,7 @@ public: }; // effect: initialize the tree to an empty state - void create(comparator *cmp); + void create(const comparator *cmp); // effect: destroy the tree. 
// requires: tree is empty @@ -203,5 +202,3 @@ private: #include "concurrent_tree.cc" } /* namespace toku */ - -#endif /* CONCURRENT_TREE_H */ diff --git a/storage/tokudb/ft-index/locktree/keyrange.cc b/storage/tokudb/ft-index/locktree/keyrange.cc index 0bf9790196c..c7cb19a597f 100644 --- a/storage/tokudb/ft-index/locktree/keyrange.cc +++ b/storage/tokudb/ft-index/locktree/keyrange.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,165 +91,165 @@ PATENT RIGHTS GRANT: #include "keyrange.h" -#include <ft/ybt.h> +#include <util/dbt.h> namespace toku { -// create a keyrange by borrowing the left and right dbt -// pointers. no memory is copied. no checks for infinity needed. -void keyrange::create(const DBT *left, const DBT *right) { - init_empty(); - m_left_key = left; - m_right_key = right; -} - -// destroy the key copies. if they were never set, then destroy does nothing. -void keyrange::destroy(void) { - toku_destroy_dbt(&m_left_key_copy); - toku_destroy_dbt(&m_right_key_copy); -} - -// create a keyrange by copying the keys from the given range. -void keyrange::create_copy(const keyrange &range) { - // start with an initialized, empty range - init_empty(); - - // optimize the case where the left and right keys are the same. - // we'd like to only have one copy of the data. - if (toku_dbt_equals(range.get_left_key(), range.get_right_key())) { - set_both_keys(range.get_left_key()); - } else { - // replace our empty left and right keys with - // copies of the range's left and right keys - replace_left_key(range.get_left_key()); - replace_right_key(range.get_right_key()); + // create a keyrange by borrowing the left and right dbt + // pointers. no memory is copied. no checks for infinity needed. + void keyrange::create(const DBT *left, const DBT *right) { + init_empty(); + m_left_key = left; + m_right_key = right; } -} - -// extend this keyrange by choosing the leftmost and rightmost -// endpoints between this range and the given. replaced keys -// in this range are freed and inherited keys are copied. -void keyrange::extend(comparator *cmp, const keyrange &range) { - const DBT *range_left = range.get_left_key(); - const DBT *range_right = range.get_right_key(); - if (cmp->compare(range_left, get_left_key()) < 0) { - replace_left_key(range_left); + + // destroy the key copies. if they were never set, then destroy does nothing. + void keyrange::destroy(void) { + toku_destroy_dbt(&m_left_key_copy); + toku_destroy_dbt(&m_right_key_copy); } - if (cmp->compare(range_right, get_right_key()) > 0) { - replace_right_key(range_right); + + // create a keyrange by copying the keys from the given range. + void keyrange::create_copy(const keyrange &range) { + // start with an initialized, empty range + init_empty(); + + // optimize the case where the left and right keys are the same. + // we'd like to only have one copy of the data. + if (toku_dbt_equals(range.get_left_key(), range.get_right_key())) { + set_both_keys(range.get_left_key()); + } else { + // replace our empty left and right keys with + // copies of the range's left and right keys + replace_left_key(range.get_left_key()); + replace_right_key(range.get_right_key()); + } } -} - -// how much memory does this keyrange take? -// - the size of the left and right keys -// --- ignore the fact that we may have optimized the point case. -// it complicates things for little gain. 
-// - the size of the keyrange class itself -uint64_t keyrange::get_memory_size(void) const { - const DBT *left_key = get_left_key(); - const DBT *right_key = get_right_key(); - return left_key->size + right_key->size + sizeof(keyrange); -} - -// compare ranges. -keyrange::comparison keyrange::compare(comparator *cmp, const keyrange &range) const { - if (cmp->compare(get_right_key(), range.get_left_key()) < 0) { - return comparison::LESS_THAN; - } else if (cmp->compare(get_left_key(), range.get_right_key()) > 0) { - return comparison::GREATER_THAN; - } else if (cmp->compare(get_left_key(), range.get_left_key()) == 0 && - cmp->compare(get_right_key(), range.get_right_key()) == 0) { - return comparison::EQUALS; - } else { - return comparison::OVERLAPS; + + // extend this keyrange by choosing the leftmost and rightmost + // endpoints between this range and the given. replaced keys + // in this range are freed and inherited keys are copied. + void keyrange::extend(const comparator &cmp, const keyrange &range) { + const DBT *range_left = range.get_left_key(); + const DBT *range_right = range.get_right_key(); + if (cmp(range_left, get_left_key()) < 0) { + replace_left_key(range_left); + } + if (cmp(range_right, get_right_key()) > 0) { + replace_right_key(range_right); + } } -} - -bool keyrange::overlaps(comparator *cmp, const keyrange &range) const { - // equality is a stronger form of overlapping. - // so two ranges "overlap" if they're either equal or just overlapping. - comparison c = compare(cmp, range); - return c == comparison::EQUALS || c == comparison::OVERLAPS; -} - -keyrange keyrange::get_infinite_range(void) { - keyrange range; - range.create(toku_dbt_negative_infinity(), toku_dbt_positive_infinity()); - return range; -} - -void keyrange::init_empty(void) { - m_left_key = nullptr; - m_right_key = nullptr; - toku_init_dbt(&m_left_key_copy); - toku_init_dbt(&m_right_key_copy); - m_point_range = false; -} - -const DBT *keyrange::get_left_key(void) const { - if (m_left_key) { - return m_left_key; - } else { - return &m_left_key_copy; + + // how much memory does this keyrange take? + // - the size of the left and right keys + // --- ignore the fact that we may have optimized the point case. + // it complicates things for little gain. + // - the size of the keyrange class itself + uint64_t keyrange::get_memory_size(void) const { + const DBT *left_key = get_left_key(); + const DBT *right_key = get_right_key(); + return left_key->size + right_key->size + sizeof(keyrange); } -} -const DBT *keyrange::get_right_key(void) const { - if (m_right_key) { - return m_right_key; - } else { - return &m_right_key_copy; + // compare ranges. + keyrange::comparison keyrange::compare(const comparator &cmp, const keyrange &range) const { + if (cmp(get_right_key(), range.get_left_key()) < 0) { + return comparison::LESS_THAN; + } else if (cmp(get_left_key(), range.get_right_key()) > 0) { + return comparison::GREATER_THAN; + } else if (cmp(get_left_key(), range.get_left_key()) == 0 && + cmp(get_right_key(), range.get_right_key()) == 0) { + return comparison::EQUALS; + } else { + return comparison::OVERLAPS; + } } -} - -// copy the given once and set both the left and right pointers. -// optimization for point ranges, so the left and right ranges -// are not copied twice. 
-void keyrange::set_both_keys(const DBT *key) { - if (toku_dbt_is_infinite(key)) { - m_left_key = key; - m_right_key = key; - } else { - toku_clone_dbt(&m_left_key_copy, *key); - toku_copyref_dbt(&m_right_key_copy, m_left_key_copy); + + bool keyrange::overlaps(const comparator &cmp, const keyrange &range) const { + // equality is a stronger form of overlapping. + // so two ranges "overlap" if they're either equal or just overlapping. + comparison c = compare(cmp, range); + return c == comparison::EQUALS || c == comparison::OVERLAPS; } - m_point_range = true; -} - -// destroy the current left key. set and possibly copy the new one -void keyrange::replace_left_key(const DBT *key) { - // a little magic: - // - // if this is a point range, then the left and right keys share - // one copy of the data, and it lives in the left key copy. so - // if we're replacing the left key, move the real data to the - // right key copy instead of destroying it. now, the memory is - // owned by the right key and the left key may be replaced. - if (m_point_range) { - m_right_key_copy = m_left_key_copy; - } else { - toku_destroy_dbt(&m_left_key_copy); + + keyrange keyrange::get_infinite_range(void) { + keyrange range; + range.create(toku_dbt_negative_infinity(), toku_dbt_positive_infinity()); + return range; } - if (toku_dbt_is_infinite(key)) { - m_left_key = key; - } else { - toku_clone_dbt(&m_left_key_copy, *key); + void keyrange::init_empty(void) { m_left_key = nullptr; - } - m_point_range = false; -} - -// destroy the current right key. set and possibly copy the new one -void keyrange::replace_right_key(const DBT *key) { - toku_destroy_dbt(&m_right_key_copy); - if (toku_dbt_is_infinite(key)) { - m_right_key = key; - } else { - toku_clone_dbt(&m_right_key_copy, *key); m_right_key = nullptr; + toku_init_dbt(&m_left_key_copy); + toku_init_dbt(&m_right_key_copy); + m_point_range = false; + } + + const DBT *keyrange::get_left_key(void) const { + if (m_left_key) { + return m_left_key; + } else { + return &m_left_key_copy; + } + } + + const DBT *keyrange::get_right_key(void) const { + if (m_right_key) { + return m_right_key; + } else { + return &m_right_key_copy; + } + } + + // copy the given once and set both the left and right pointers. + // optimization for point ranges, so the left and right ranges + // are not copied twice. + void keyrange::set_both_keys(const DBT *key) { + if (toku_dbt_is_infinite(key)) { + m_left_key = key; + m_right_key = key; + } else { + toku_clone_dbt(&m_left_key_copy, *key); + toku_copyref_dbt(&m_right_key_copy, m_left_key_copy); + } + m_point_range = true; + } + + // destroy the current left key. set and possibly copy the new one + void keyrange::replace_left_key(const DBT *key) { + // a little magic: + // + // if this is a point range, then the left and right keys share + // one copy of the data, and it lives in the left key copy. so + // if we're replacing the left key, move the real data to the + // right key copy instead of destroying it. now, the memory is + // owned by the right key and the left key may be replaced. + if (m_point_range) { + m_right_key_copy = m_left_key_copy; + } else { + toku_destroy_dbt(&m_left_key_copy); + } + + if (toku_dbt_is_infinite(key)) { + m_left_key = key; + } else { + toku_clone_dbt(&m_left_key_copy, *key); + m_left_key = nullptr; + } + m_point_range = false; + } + + // destroy the current right key. 
set and possibly copy the new one + void keyrange::replace_right_key(const DBT *key) { + toku_destroy_dbt(&m_right_key_copy); + if (toku_dbt_is_infinite(key)) { + m_right_key = key; + } else { + toku_clone_dbt(&m_right_key_copy, *key); + m_right_key = nullptr; + } + m_point_range = false; } - m_point_range = false; -} } /* namespace toku */ diff --git a/storage/tokudb/ft-index/locktree/keyrange.h b/storage/tokudb/ft-index/locktree/keyrange.h index cab5866a5da..8b8e1a743e2 100644 --- a/storage/tokudb/ft-index/locktree/keyrange.h +++ b/storage/tokudb/ft-index/locktree/keyrange.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef KEYRANGE_H -#define KEYRANGE_H - #include <ft/comparator.h> namespace toku { @@ -118,7 +117,7 @@ public: // effect: extends the keyrange by choosing the leftmost and rightmost // endpoints from this range and the given range. // replaced keys in this range are freed, new keys are copied. - void extend(comparator *cmp, const keyrange &range); + void extend(const comparator &cmp, const keyrange &range); // returns: the amount of memory this keyrange takes. does not account // for point optimizations or malloc overhead. @@ -144,10 +143,10 @@ public: // EQUALS if given range has the same left and right endpoints // OVERLAPS if at least one of the given range's endpoints falls // between this range's endpoints - comparison compare(comparator *cmp, const keyrange &range) const; + comparison compare(const comparator &cmp, const keyrange &range) const; // returns: true if the range and the given range are equal or overlapping - bool overlaps(comparator *cmp, const keyrange &range) const; + bool overlaps(const comparator &cmp, const keyrange &range) const; // returns: a keyrange representing -inf, +inf static keyrange get_infinite_range(void); @@ -184,5 +183,3 @@ private: }; } /* namespace toku */ - -#endif /* KEYRANGE_H */ diff --git a/storage/tokudb/ft-index/locktree/lock_request.cc b/storage/tokudb/ft-index/locktree/lock_request.cc index 2f43e8960db..97fa780bb04 100644 --- a/storage/tokudb/ft-index/locktree/lock_request.cc +++ b/storage/tokudb/ft-index/locktree/lock_request.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,12 +89,12 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#include <toku_race_tools.h> +#include "portability/toku_race_tools.h" -#include <ft/ybt.h> - -#include "locktree.h" -#include "lock_request.h" +#include "ft/txn/txn.h" +#include "locktree/locktree.h" +#include "locktree/lock_request.h" +#include "util/dbt.h" namespace toku { diff --git a/storage/tokudb/ft-index/locktree/lock_request.h b/storage/tokudb/ft-index/locktree/lock_request.h index 0916a6529e0..d1a4c2822e0 100644 --- a/storage/tokudb/ft-index/locktree/lock_request.h +++ b/storage/tokudb/ft-index/locktree/lock_request.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,21 +86,19 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef TOKU_LOCK_REQUEST_H -#define TOKU_LOCK_REQUEST_H - #include <db.h> -#include <toku_pthread.h> -#include <ft/fttypes.h> -#include <ft/comparator.h> +#include "portability/toku_pthread.h" -#include "locktree.h" -#include "txnid_set.h" -#include "wfg.h" +#include "locktree/locktree.h" +#include "locktree/txnid_set.h" +#include "locktree/wfg.h" +#include "ft/comparator.h" namespace toku { @@ -243,5 +241,3 @@ private: ENSURE_POD(lock_request); } /* namespace toku */ - -#endif /* TOKU_LOCK_REQUEST_H */ diff --git a/storage/tokudb/ft-index/locktree/locktree.cc b/storage/tokudb/ft-index/locktree/locktree.cc index 2deb8c2ad78..eb9be825f48 100644 --- a/storage/tokudb/ft-index/locktree/locktree.cc +++ b/storage/tokudb/ft-index/locktree/locktree.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -116,19 +116,16 @@ namespace toku { // but does nothing based on the value of the reference count - it is // up to the user of the locktree to destroy it when it sees fit. -void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id, - DESCRIPTOR desc, ft_compare_func cmp) { +void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id, const comparator &cmp) { m_mgr = mgr; m_dict_id = dict_id; - // the only reason m_cmp is malloc'd here is to prevent gdb from printing - // out an entire DB struct every time you inspect a locktree. 
- XCALLOC(m_cmp); - m_cmp->create(cmp, desc); + m_cmp.create_from(cmp); m_reference_count = 1; m_userdata = nullptr; + XCALLOC(m_rangetree); - m_rangetree->create(m_cmp); + m_rangetree->create(&m_cmp); m_sto_txnid = TXNID_NONE; m_sto_buffer.create(); @@ -155,11 +152,10 @@ void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id, void locktree::destroy(void) { invariant(m_reference_count == 0); + m_cmp.destroy(); m_rangetree->destroy(); - toku_free(m_cmp); toku_free(m_rangetree); m_sto_buffer.destroy(); - m_lock_request_info.pending_lock_requests.destroy(); } @@ -258,18 +254,18 @@ void locktree::sto_append(const DBT *left_key, const DBT *right_key) { keyrange range; range.create(left_key, right_key); - buffer_mem = m_sto_buffer.get_num_bytes(); + buffer_mem = m_sto_buffer.total_memory_size(); m_sto_buffer.append(left_key, right_key); - delta = m_sto_buffer.get_num_bytes() - buffer_mem; + delta = m_sto_buffer.total_memory_size() - buffer_mem; if (m_mgr != nullptr) { m_mgr->note_mem_used(delta); } } void locktree::sto_end(void) { - uint64_t num_bytes = m_sto_buffer.get_num_bytes(); + uint64_t mem_size = m_sto_buffer.total_memory_size(); if (m_mgr != nullptr) { - m_mgr->note_mem_released(num_bytes); + m_mgr->note_mem_released(mem_size); } m_sto_buffer.destroy(); m_sto_buffer.create(); @@ -299,12 +295,11 @@ void locktree::sto_migrate_buffer_ranges_to_tree(void *prepared_lkr) { concurrent_tree sto_rangetree; concurrent_tree::locked_keyrange sto_lkr; - sto_rangetree.create(m_cmp); + sto_rangetree.create(&m_cmp); // insert all of the ranges from the single txnid buffer into a new rangtree - range_buffer::iterator iter; + range_buffer::iterator iter(&m_sto_buffer); range_buffer::iterator::record rec; - iter.create(&m_sto_buffer); while (iter.current(&rec)) { sto_lkr.prepare(&sto_rangetree); int r = acquire_lock_consolidated(&sto_lkr, @@ -439,7 +434,7 @@ int locktree::try_acquire_lock(bool is_write_request, txnid_set *conflicts, bool big_txn) { // All ranges in the locktree must have left endpoints <= right endpoints. // Range comparisons rely on this fact, so we make a paranoid invariant here. - paranoid_invariant(m_cmp->compare(left_key, right_key) <= 0); + paranoid_invariant(m_cmp(left_key, right_key) <= 0); int r = m_mgr == nullptr ? 0 : m_mgr->check_current_lock_constraints(big_txn); if (r == 0) { @@ -575,15 +570,14 @@ void locktree::release_locks(TXNID txnid, const range_buffer *ranges) { // locks are already released, otherwise we need to do it here. bool released = sto_try_release(txnid); if (!released) { - range_buffer::iterator iter; + range_buffer::iterator iter(ranges); range_buffer::iterator::record rec; - iter.create(ranges); while (iter.current(&rec)) { const DBT *left_key = rec.get_left_key(); const DBT *right_key = rec.get_right_key(); // All ranges in the locktree must have left endpoints <= right endpoints. // Range comparisons rely on this fact, so we make a paranoid invariant here. 
- paranoid_invariant(m_cmp->compare(left_key, right_key) <= 0); + paranoid_invariant(m_cmp(left_key, right_key) <= 0); remove_overlapping_locks_for_txnid(txnid, left_key, right_key); iter.next(); } @@ -647,10 +641,10 @@ struct txnid_range_buffer { TXNID txnid; range_buffer buffer; - static int find_by_txnid(const struct txnid_range_buffer &other_buffer, const TXNID &txnid) { - if (txnid < other_buffer.txnid) { + static int find_by_txnid(struct txnid_range_buffer *const &other_buffer, const TXNID &txnid) { + if (txnid < other_buffer->txnid) { return -1; - } else if (other_buffer.txnid == txnid) { + } else if (other_buffer->txnid == txnid) { return 0; } else { return 1; @@ -666,7 +660,7 @@ struct txnid_range_buffer { // has locks in a random/alternating order, then this does // not work so well. void locktree::escalate(lt_escalate_cb after_escalate_callback, void *after_escalate_callback_extra) { - omt<struct txnid_range_buffer, struct txnid_range_buffer *> range_buffers; + omt<struct txnid_range_buffer *, struct txnid_range_buffer *> range_buffers; range_buffers.create(); // prepare and acquire a locked keyrange on the entire locktree @@ -716,7 +710,6 @@ void locktree::escalate(lt_escalate_cb after_escalate_callback, void *after_esca // Try to find a range buffer for the current txnid. Create one if it doesn't exist. // Then, append the new escalated range to the buffer. uint32_t idx; - struct txnid_range_buffer new_range_buffer; struct txnid_range_buffer *existing_range_buffer; int r = range_buffers.find_zero<TXNID, txnid_range_buffer::find_by_txnid>( current_txnid, @@ -724,9 +717,10 @@ void locktree::escalate(lt_escalate_cb after_escalate_callback, void *after_esca &idx ); if (r == DB_NOTFOUND) { - new_range_buffer.txnid = current_txnid; - new_range_buffer.buffer.create(); - new_range_buffer.buffer.append(escalated_left_key, escalated_right_key); + struct txnid_range_buffer *XMALLOC(new_range_buffer); + new_range_buffer->txnid = current_txnid; + new_range_buffer->buffer.create(); + new_range_buffer->buffer.append(escalated_left_key, escalated_right_key); range_buffers.insert_at(new_range_buffer, idx); } else { invariant_zero(r); @@ -754,9 +748,8 @@ void locktree::escalate(lt_escalate_cb after_escalate_callback, void *after_esca invariant_zero(r); const TXNID current_txnid = current_range_buffer->txnid; - range_buffer::iterator iter; + range_buffer::iterator iter(¤t_range_buffer->buffer); range_buffer::iterator::record rec; - iter.create(¤t_range_buffer->buffer); while (iter.current(&rec)) { keyrange range; range.create(rec.get_left_key(), rec.get_right_key()); @@ -771,6 +764,15 @@ void locktree::escalate(lt_escalate_cb after_escalate_callback, void *after_esca } current_range_buffer->buffer.destroy(); } + + while (range_buffers.size() > 0) { + struct txnid_range_buffer *buffer; + int r = range_buffers.fetch(0, &buffer); + invariant_zero(r); + r = range_buffers.delete_at(0); + invariant_zero(r); + toku_free(buffer); + } range_buffers.destroy(); lkr.release(); @@ -788,8 +790,8 @@ struct lt_lock_request_info *locktree::get_lock_request_info(void) { return &m_lock_request_info; } -void locktree::set_descriptor(DESCRIPTOR desc) { - m_cmp->set_descriptor(desc); +void locktree::set_comparator(const comparator &cmp) { + m_cmp.inherit(cmp); } locktree_manager *locktree::get_manager(void) const { diff --git a/storage/tokudb/ft-index/locktree/locktree.h b/storage/tokudb/ft-index/locktree/locktree.h index 2f8dcef6668..3e613aba7a4 100644 --- a/storage/tokudb/ft-index/locktree/locktree.h +++ 
b/storage/tokudb/ft-index/locktree/locktree.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,7 @@ PATENT RIGHTS GRANT: #include <toku_time.h> #include <toku_pthread.h> -#include <ft/fttypes.h> +#include <ft/ft-ops.h> // just for DICTIONARY_ID.. #include <ft/comparator.h> #include <util/omt.h> @@ -137,7 +137,6 @@ namespace toku { class locktree; class locktree_manager; class lock_request; - class memory_tracker; class concurrent_tree; typedef int (*lt_create_cb)(locktree *lt, void *extra); @@ -184,10 +183,10 @@ namespace toku { // effect: Get a locktree from the manager. If a locktree exists with the given // dict_id, it is referenced and then returned. If one did not exist, it - // is created. It will use the given descriptor and comparison function - // for comparing keys, and the on_create callback passed to locktree_manager::create() - // will be called with the given extra parameter. - locktree *get_lt(DICTIONARY_ID dict_id, DESCRIPTOR desc, ft_compare_func cmp, void *on_create_extra); + // is created. It will use the comparator for comparing keys. The on_create + // callback (passed to locktree_manager::create()) will be called with the + // given extra parameter. + locktree *get_lt(DICTIONARY_ID dict_id, const comparator &cmp, void *on_create_extra); void reference_lt(locktree *lt); @@ -246,7 +245,6 @@ namespace toku { // tracks the current number of locks and lock memory uint64_t m_max_lock_memory; uint64_t m_current_lock_memory; - memory_tracker *m_mem_tracker; struct lt_counters m_lt_counters; @@ -309,8 +307,7 @@ namespace toku { // A locktree represents the set of row locks owned by all transactions // over an open dictionary. Read and write ranges are represented as - // a left and right key which are compared with the given descriptor - // and comparison fn. + // a left and right key which are compared with the given comparator // // Locktrees are not created and destroyed by the user. Instead, they are // referenced and released using the locktree manager. @@ -325,10 +322,8 @@ namespace toku { // - Destroy the manager. class locktree { public: - // effect: Creates a locktree that uses the given memory tracker - // to report memory usage and honor memory constraints. - void create(locktree_manager *mgr, DICTIONARY_ID dict_id, - DESCRIPTOR desc, ft_compare_func cmp); + // effect: Creates a locktree + void create(locktree_manager *mgr, DICTIONARY_ID dict_id, const comparator &cmp); void destroy(void); @@ -374,7 +369,7 @@ namespace toku { locktree_manager *get_manager(void) const; - void set_descriptor(DESCRIPTOR desc); + void set_comparator(const comparator &cmp); int compare(const locktree *lt) const; @@ -392,16 +387,14 @@ namespace toku { DICTIONARY_ID m_dict_id; uint32_t m_reference_count; - // use a comparator object that encapsulates an ft compare - // function and a descriptor in a fake db. this way we can - // pass it around for easy key comparisons. + // Since the memory referenced by this comparator is not owned by the + // locktree, the user must guarantee it will outlive the locktree. // - // since this comparator will store a pointer to a descriptor, - // the user of the locktree needs to make sure that the descriptor - // is valid for as long as the locktree. this is currently - // implemented by opening an ft_handle for this locktree and - // storing it as userdata below. 
- comparator *m_cmp; + // The ydb API accomplishes this by opening an ft_handle in the on_create + // callback, which will keep the underlying FT (and its descriptor) in memory + // for as long as the handle is open. The ft_handle is stored opaquely in the + // userdata pointer below. see locktree_manager::get_lt w/ on_create_extra + comparator m_cmp; concurrent_tree *m_rangetree; diff --git a/storage/tokudb/ft-index/locktree/manager.cc b/storage/tokudb/ft-index/locktree/manager.cc index b1bc5da2fe9..5f69c46f7da 100644 --- a/storage/tokudb/ft-index/locktree/manager.cc +++ b/storage/tokudb/ft-index/locktree/manager.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -183,8 +183,8 @@ void locktree_manager::locktree_map_remove(locktree *lt) { invariant_zero(r); } -locktree *locktree_manager::get_lt(DICTIONARY_ID dict_id, DESCRIPTOR desc, - ft_compare_func cmp, void *on_create_extra) { +locktree *locktree_manager::get_lt(DICTIONARY_ID dict_id, + const comparator &cmp, void *on_create_extra) { // hold the mutex around searching and maybe // inserting into the locktree map @@ -193,7 +193,7 @@ locktree *locktree_manager::get_lt(DICTIONARY_ID dict_id, DESCRIPTOR desc, locktree *lt = locktree_map_find(dict_id); if (lt == nullptr) { XCALLOC(lt); - lt->create(this, dict_id, desc, cmp); + lt->create(this, dict_id, cmp); // new locktree created - call the on_create callback // and put it in the locktree map @@ -483,7 +483,7 @@ void locktree_manager::locktree_escalator::run(locktree_manager *mgr, void (*esc mgr->add_escalator_wait_time(t1 - t0); } -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(status, k, c, t, "locktree: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(status, k, c, t, "locktree: " l, inc) void locktree_manager::status_init(void) { STATUS_INIT(LTM_SIZE_CURRENT, LOCKTREE_MEMORY_SIZE, UINT64, "memory size", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS); @@ -530,33 +530,32 @@ void locktree_manager::get_status(LTM_STATUS statp) { STATUS_VALUE(LTM_LONG_WAIT_ESCALATION_COUNT) = m_long_wait_escalation_count; STATUS_VALUE(LTM_LONG_WAIT_ESCALATION_TIME) = m_long_wait_escalation_time; - mutex_lock(); - uint64_t lock_requests_pending = 0; uint64_t sto_num_eligible = 0; uint64_t sto_end_early_count = 0; tokutime_t sto_end_early_time = 0; - - struct lt_counters lt_counters = m_lt_counters; - - size_t num_locktrees = m_locktree_map.size(); - for (size_t i = 0; i < num_locktrees; i++) { - locktree *lt; - int r = m_locktree_map.fetch(i, <); - invariant_zero(r); - - toku_mutex_lock(<->m_lock_request_info.mutex); - lock_requests_pending += lt->m_lock_request_info.pending_lock_requests.size(); - lt_counters.add(lt->get_lock_request_info()->counters); - toku_mutex_unlock(<->m_lock_request_info.mutex); - - sto_num_eligible += lt->sto_txnid_is_valid_unsafe() ? 
1 : 0; - sto_end_early_count += lt->m_sto_end_early_count; - sto_end_early_time += lt->m_sto_end_early_time; + size_t num_locktrees = 0; + struct lt_counters lt_counters = {}; + + if (toku_mutex_trylock(&m_mutex) == 0) { + lt_counters = m_lt_counters; + num_locktrees = m_locktree_map.size(); + for (size_t i = 0; i < num_locktrees; i++) { + locktree *lt; + int r = m_locktree_map.fetch(i, <); + invariant_zero(r); + if (toku_mutex_trylock(<->m_lock_request_info.mutex) == 0) { + lock_requests_pending += lt->m_lock_request_info.pending_lock_requests.size(); + lt_counters.add(lt->get_lock_request_info()->counters); + toku_mutex_unlock(<->m_lock_request_info.mutex); + } + sto_num_eligible += lt->sto_txnid_is_valid_unsafe() ? 1 : 0; + sto_end_early_count += lt->m_sto_end_early_count; + sto_end_early_time += lt->m_sto_end_early_time; + } + mutex_unlock(); } - mutex_unlock(); - STATUS_VALUE(LTM_NUM_LOCKTREES) = num_locktrees; STATUS_VALUE(LTM_LOCK_REQUESTS_PENDING) = lock_requests_pending; STATUS_VALUE(LTM_STO_NUM_ELIGIBLE) = sto_num_eligible; diff --git a/storage/tokudb/ft-index/locktree/range_buffer.cc b/storage/tokudb/ft-index/locktree/range_buffer.cc index 5fd86a631c9..cc7bbd90afc 100644 --- a/storage/tokudb/ft-index/locktree/range_buffer.cc +++ b/storage/tokudb/ft-index/locktree/range_buffer.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,215 +89,210 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <memory.h> #include <string.h> -#include <ft/ybt.h> -#include "range_buffer.h" +#include "portability/memory.h" + +#include "locktree/range_buffer.h" +#include "util/dbt.h" namespace toku { -bool range_buffer::record_header::left_is_infinite(void) const { - return left_neg_inf || left_pos_inf; -} - -bool range_buffer::record_header::right_is_infinite(void) const { - return right_neg_inf || right_pos_inf; -} - -void range_buffer::record_header::init(const DBT *left_key, const DBT *right_key) { - left_neg_inf = left_key == toku_dbt_negative_infinity(); - left_pos_inf = left_key == toku_dbt_positive_infinity(); - left_key_size = toku_dbt_is_infinite(left_key) ? 0 : left_key->size; - if (right_key) { - right_neg_inf = right_key == toku_dbt_negative_infinity(); - right_pos_inf = right_key == toku_dbt_positive_infinity(); - right_key_size = toku_dbt_is_infinite(right_key) ? 
0 : right_key->size; - } else { - right_neg_inf = left_neg_inf; - right_pos_inf = left_pos_inf; - right_key_size = 0; + bool range_buffer::record_header::left_is_infinite(void) const { + return left_neg_inf || left_pos_inf; + } + + bool range_buffer::record_header::right_is_infinite(void) const { + return right_neg_inf || right_pos_inf; } -} - -const DBT *range_buffer::iterator::record::get_left_key(void) const { - if (m_header.left_neg_inf) { - return toku_dbt_negative_infinity(); - } else if (m_header.left_pos_inf) { - return toku_dbt_positive_infinity(); - } else { - return &m_left_key; + + void range_buffer::record_header::init(const DBT *left_key, const DBT *right_key) { + left_neg_inf = left_key == toku_dbt_negative_infinity(); + left_pos_inf = left_key == toku_dbt_positive_infinity(); + left_key_size = toku_dbt_is_infinite(left_key) ? 0 : left_key->size; + if (right_key) { + right_neg_inf = right_key == toku_dbt_negative_infinity(); + right_pos_inf = right_key == toku_dbt_positive_infinity(); + right_key_size = toku_dbt_is_infinite(right_key) ? 0 : right_key->size; + } else { + right_neg_inf = left_neg_inf; + right_pos_inf = left_pos_inf; + right_key_size = 0; + } + } + + const DBT *range_buffer::iterator::record::get_left_key(void) const { + if (_header.left_neg_inf) { + return toku_dbt_negative_infinity(); + } else if (_header.left_pos_inf) { + return toku_dbt_positive_infinity(); + } else { + return &_left_key; + } } -} - -const DBT *range_buffer::iterator::record::get_right_key(void) const { - if (m_header.right_neg_inf) { - return toku_dbt_negative_infinity(); - } else if (m_header.right_pos_inf) { - return toku_dbt_positive_infinity(); - } else { - return &m_right_key; + + const DBT *range_buffer::iterator::record::get_right_key(void) const { + if (_header.right_neg_inf) { + return toku_dbt_negative_infinity(); + } else if (_header.right_pos_inf) { + return toku_dbt_positive_infinity(); + } else { + return &_right_key; + } } -} -size_t range_buffer::iterator::record::size(void) const { - return sizeof(record_header) + m_header.left_key_size + m_header.right_key_size; -} + size_t range_buffer::iterator::record::size(void) const { + return sizeof(record_header) + _header.left_key_size + _header.right_key_size; + } -void range_buffer::iterator::record::deserialize(const char *buf) { - size_t current = 0; + void range_buffer::iterator::record::deserialize(const char *buf) { + size_t current = 0; - // deserialize the header - memcpy(&m_header, buf, sizeof(record_header)); - current += sizeof(record_header); + // deserialize the header + memcpy(&_header, buf, sizeof(record_header)); + current += sizeof(record_header); - // deserialize the left key if necessary - if (!m_header.left_is_infinite()) { - // point the left DBT's buffer into ours - toku_fill_dbt(&m_left_key, buf + current, m_header.left_key_size); - current += m_header.left_key_size; - } + // deserialize the left key if necessary + if (!_header.left_is_infinite()) { + // point the left DBT's buffer into ours + toku_fill_dbt(&_left_key, buf + current, _header.left_key_size); + current += _header.left_key_size; + } - // deserialize the right key if necessary - if (!m_header.right_is_infinite()) { - if (m_header.right_key_size == 0) { - toku_copyref_dbt(&m_right_key, m_left_key); - } else { - toku_fill_dbt(&m_right_key, buf + current, m_header.right_key_size); + // deserialize the right key if necessary + if (!_header.right_is_infinite()) { + if (_header.right_key_size == 0) { + toku_copyref_dbt(&_right_key, _left_key); + } 
else { + toku_fill_dbt(&_right_key, buf + current, _header.right_key_size); + } } } -} - -void range_buffer::iterator::create(const range_buffer *buffer) { - m_buffer = buffer; - m_current_offset = 0; - m_current_size = 0; -} - -bool range_buffer::iterator::current(record *rec) { - if (m_current_offset < m_buffer->m_buf_current) { - rec->deserialize(m_buffer->m_buf + m_current_offset); - m_current_size = rec->size(); - return true; - } else { - return false; + + toku::range_buffer::iterator::iterator() : + _ma_chunk_iterator(nullptr), + _current_chunk_base(nullptr), + _current_chunk_offset(0), _current_chunk_max(0), + _current_rec_size(0) { } -} - -// move the iterator to the next record in the buffer -void range_buffer::iterator::next(void) { - invariant(m_current_offset < m_buffer->m_buf_current); - invariant(m_current_size > 0); - - // the next record is m_current_size bytes forward - // now, we don't know how big the current is, set it to 0. - m_current_offset += m_current_size; - m_current_size = 0; -} - -void range_buffer::create(void) { - // allocate buffer space lazily instead of on creation. this way, - // no malloc/free is done if the transaction ends up taking no locks. - m_buf = nullptr; - m_buf_size = 0; - m_buf_current = 0; - m_num_ranges = 0; -} - -void range_buffer::append(const DBT *left_key, const DBT *right_key) { - // if the keys are equal, then only one copy is stored. - if (toku_dbt_equals(left_key, right_key)) { - append_point(left_key); - } else { - append_range(left_key, right_key); + + toku::range_buffer::iterator::iterator(const range_buffer *buffer) : + _ma_chunk_iterator(&buffer->_arena), + _current_chunk_base(nullptr), + _current_chunk_offset(0), _current_chunk_max(0), + _current_rec_size(0) { + reset_current_chunk(); } - m_num_ranges++; -} -bool range_buffer::is_empty(void) const { - return m_buf == nullptr; -} + void range_buffer::iterator::reset_current_chunk() { + _current_chunk_base = _ma_chunk_iterator.current(&_current_chunk_max); + _current_chunk_offset = 0; + } -uint64_t range_buffer::get_num_bytes(void) const { - return m_buf_current; -} + bool range_buffer::iterator::current(record *rec) { + if (_current_chunk_offset < _current_chunk_max) { + const char *buf = reinterpret_cast<const char *>(_current_chunk_base); + rec->deserialize(buf + _current_chunk_offset); + _current_rec_size = rec->size(); + return true; + } else { + return false; + } + } -int range_buffer::get_num_ranges(void) const { - return m_num_ranges; -} + // move the iterator to the next record in the buffer + void range_buffer::iterator::next(void) { + invariant(_current_chunk_offset < _current_chunk_max); + invariant(_current_rec_size > 0); + + // the next record is _current_rec_size bytes forward + _current_chunk_offset += _current_rec_size; + // now, we don't know how big the current is, set it to 0. + _current_rec_size = 0; + + if (_current_chunk_offset >= _current_chunk_max) { + // current chunk is exhausted, try moving to the next one + if (_ma_chunk_iterator.more()) { + _ma_chunk_iterator.next(); + reset_current_chunk(); + } + } + } -void range_buffer::destroy(void) { - if (m_buf) { - toku_free(m_buf); + void range_buffer::create(void) { + // allocate buffer space lazily instead of on creation. this way, + // no malloc/free is done if the transaction ends up taking no locks. 
+ _arena.create(0); + _num_ranges = 0; } -} -void range_buffer::append_range(const DBT *left_key, const DBT *right_key) { - maybe_grow(sizeof(record_header) + left_key->size + right_key->size); + void range_buffer::append(const DBT *left_key, const DBT *right_key) { + // if the keys are equal, then only one copy is stored. + if (toku_dbt_equals(left_key, right_key)) { + invariant(left_key->size <= MAX_KEY_SIZE); + append_point(left_key); + } else { + invariant(left_key->size <= MAX_KEY_SIZE); + invariant(right_key->size <= MAX_KEY_SIZE); + append_range(left_key, right_key); + } + _num_ranges++; + } - record_header h; - h.init(left_key, right_key); + bool range_buffer::is_empty(void) const { + return total_memory_size() == 0; + } - // serialize the header - memcpy(m_buf + m_buf_current, &h, sizeof(record_header)); - m_buf_current += sizeof(record_header); + uint64_t range_buffer::total_memory_size(void) const { + return _arena.total_size_in_use(); + } - // serialize the left key if necessary - if (!h.left_is_infinite()) { - memcpy(m_buf + m_buf_current, left_key->data, left_key->size); - m_buf_current += left_key->size; + int range_buffer::get_num_ranges(void) const { + return _num_ranges; } - // serialize the right key if necessary - if (!h.right_is_infinite()) { - memcpy(m_buf + m_buf_current, right_key->data, right_key->size); - m_buf_current += right_key->size; + void range_buffer::destroy(void) { + _arena.destroy(); } -} -void range_buffer::append_point(const DBT *key) { - maybe_grow(sizeof(record_header) + key->size); + void range_buffer::append_range(const DBT *left_key, const DBT *right_key) { + size_t record_length = sizeof(record_header) + left_key->size + right_key->size; + char *buf = reinterpret_cast<char *>(_arena.malloc_from_arena(record_length)); - record_header h; - h.init(key, nullptr); + record_header h; + h.init(left_key, right_key); - // serialize the header - memcpy(m_buf + m_buf_current, &h, sizeof(record_header)); - m_buf_current += sizeof(record_header); + // serialize the header + memcpy(buf, &h, sizeof(record_header)); + buf += sizeof(record_header); - // serialize the key if necessary - if (!h.left_is_infinite()) { - memcpy(m_buf + m_buf_current, key->data, key->size); - m_buf_current += key->size; - } -} - -void range_buffer::maybe_grow(size_t size) { - static const size_t initial_size = 4096; - static const size_t aggressive_growth_threshold = 128 * 1024; - const size_t needed = m_buf_current + size; - if (m_buf_size < needed) { - if (m_buf_size == 0) { - m_buf_size = initial_size; + // serialize the left key if necessary + if (!h.left_is_infinite()) { + memcpy(buf, left_key->data, left_key->size); + buf += left_key->size; } - // aggressively grow the range buffer to the threshold, - // but only additivately increase the size after that. 
- while (m_buf_size < needed && m_buf_size < aggressive_growth_threshold) { - m_buf_size <<= 1; - } - while (m_buf_size < needed) { - m_buf_size += aggressive_growth_threshold; + + // serialize the right key if necessary + if (!h.right_is_infinite()) { + memcpy(buf, right_key->data, right_key->size); } - XREALLOC(m_buf, m_buf_size); } -} -size_t range_buffer::get_initial_size(size_t n) const { - size_t r = 4096; - while (r < n) { - r *= 2; + void range_buffer::append_point(const DBT *key) { + size_t record_length = sizeof(record_header) + key->size; + char *buf = reinterpret_cast<char *>(_arena.malloc_from_arena(record_length)); + + record_header h; + h.init(key, nullptr); + + // serialize the header + memcpy(buf, &h, sizeof(record_header)); + buf += sizeof(record_header); + + // serialize the key if necessary + if (!h.left_is_infinite()) { + memcpy(buf, key->data, key->size); + } } - return r; -} } /* namespace toku */ diff --git a/storage/tokudb/ft-index/locktree/range_buffer.h b/storage/tokudb/ft-index/locktree/range_buffer.h index ac019ba18ce..7b1beb90329 100644 --- a/storage/tokudb/ft-index/locktree/range_buffer.h +++ b/storage/tokudb/ft-index/locktree/range_buffer.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,136 +86,126 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef RANGE_BUFFER_H -#define RANGE_BUFFER_H - -#include <toku_stdint.h> +#include "portability/toku_stdint.h" -#include <ft/ybt.h> +#include "util/dbt.h" +#include "util/memarena.h" namespace toku { -// a key range buffer represents a set of key ranges that can -// be stored, iterated over, and then destroyed all at once. - -class range_buffer { -// Private in spirit: We fail POD asserts when we try to store range_buffers in an omt. -// So make it all public, but don't touch. -public: -//private: - - // the key range buffer is a bunch of records in a row. - // each record has the following header, followed by the - // left key and right key data payload, if applicable. - - struct record_header { - bool left_neg_inf; - bool left_pos_inf; - bool right_pos_inf; - bool right_neg_inf; - uint32_t left_key_size; - uint32_t right_key_size; + // a key range buffer represents a set of key ranges that can + // be stored, iterated over, and then destroyed all at once. + class range_buffer { + private: - bool left_is_infinite(void) const; + // the key range buffer is a bunch of records in a row. + // each record has the following header, followed by the + // left key and right key data payload, if applicable. + // we limit keys to be 2^16, since we store lengths as 2 bytes. 
+ static const size_t MAX_KEY_SIZE = 1 << 16; - bool right_is_infinite(void) const; + struct record_header { + bool left_neg_inf; + bool left_pos_inf; + bool right_pos_inf; + bool right_neg_inf; + uint16_t left_key_size; + uint16_t right_key_size; - void init(const DBT *left_key, const DBT *right_key); - }; - static_assert(sizeof(record_header) == 12, "record header format is off"); - -public: + bool left_is_infinite(void) const; - // the iterator abstracts reading over a buffer of variable length - // records one by one until there are no more left. + bool right_is_infinite(void) const; - class iterator { + void init(const DBT *left_key, const DBT *right_key); + }; + static_assert(sizeof(record_header) == 8, "record header format is off"); + public: - // a record represents the user-view of a serialized key range. - // it handles positive and negative infinity and the optimized - // point range case, where left and right points share memory. - - class record { + // the iterator abstracts reading over a buffer of variable length + // records one by one until there are no more left. + class iterator { public: - // get a read-only pointer to the left key of this record's range - const DBT *get_left_key(void) const; - - // get a read-only pointer to the right key of this record's range - const DBT *get_right_key(void) const; + iterator(); + iterator(const range_buffer *buffer); - // how big is this record? this tells us where the next record is - size_t size(void) const; + // a record represents the user-view of a serialized key range. + // it handles positive and negative infinity and the optimized + // point range case, where left and right points share memory. + class record { + public: + // get a read-only pointer to the left key of this record's range + const DBT *get_left_key(void) const; - // populate a record header and point our DBT's - // buffers into ours if they are not infinite. - void deserialize(const char *buf); + // get a read-only pointer to the right key of this record's range + const DBT *get_right_key(void) const; - private: - record_header m_header; - DBT m_left_key; - DBT m_right_key; - }; - - void create(const range_buffer *buffer); + // how big is this record? this tells us where the next record is + size_t size(void) const; - // populate the given record object with the current - // the memory referred to by record is valid for only - // as long as the record exists. - bool current(record *rec); + // populate a record header and point our DBT's + // buffers into ours if they are not infinite. + void deserialize(const char *buf); - // move the iterator to the next record in the buffer - void next(void); + private: + record_header _header; + DBT _left_key; + DBT _right_key; + }; - private: - // the key range buffer we are iterating over, the current - // offset in that buffer, and the size of the current record. - const range_buffer *m_buffer; - size_t m_current_offset; - size_t m_current_size; - }; + // populate the given record object with the current + // the memory referred to by record is valid for only + // as long as the record exists. + bool current(record *rec); - // allocate buffer space lazily instead of on creation. this way, - // no malloc/free is done if the transaction ends up taking no locks. - void create(void); + // move the iterator to the next record in the buffer + void next(void); - // append a left/right key range to the buffer. - // if the keys are equal, then only one copy is stored. 
- void append(const DBT *left_key, const DBT *right_key); + private: + void reset_current_chunk(); + + // the key range buffer we are iterating over, the current + // offset in that buffer, and the size of the current record. + memarena::chunk_iterator _ma_chunk_iterator; + const void *_current_chunk_base; + size_t _current_chunk_offset; + size_t _current_chunk_max; + size_t _current_rec_size; + }; - // is this range buffer empty? - bool is_empty(void) const; + // allocate buffer space lazily instead of on creation. this way, + // no malloc/free is done if the transaction ends up taking no locks. + void create(void); - // how many bytes are stored in this range buffer? - uint64_t get_num_bytes(void) const; + // append a left/right key range to the buffer. + // if the keys are equal, then only one copy is stored. + void append(const DBT *left_key, const DBT *right_key); - // how many ranges are stored in this range buffer? - int get_num_ranges(void) const; + // is this range buffer empty? + bool is_empty(void) const; - void destroy(void); + // how much memory is being used by this range buffer? + uint64_t total_memory_size(void) const; -//private: - char *m_buf; - size_t m_buf_size; - size_t m_buf_current; - int m_num_ranges; + // how many ranges are stored in this range buffer? + int get_num_ranges(void) const; - void append_range(const DBT *left_key, const DBT *right_key); + void destroy(void); - // append a point to the buffer. this is the space/time saving - // optimization for key ranges where left == right. - void append_point(const DBT *key); + private: + memarena _arena; + int _num_ranges; - void maybe_grow(size_t size); + void append_range(const DBT *left_key, const DBT *right_key); - // the initial size of the buffer is the next power of 2 - // greater than the first entry we insert into the buffer. - size_t get_initial_size(size_t n) const; -}; + // append a point to the buffer. this is the space/time saving + // optimization for key ranges where left == right. + void append_point(const DBT *key); + }; } /* namespace toku */ - -#endif /* RANGE_BUFFER_H */ diff --git a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_create_destroy.cc b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_create_destroy.cc index f6bb3987d1f..a1187d6e0cc 100644 --- a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_create_destroy.cc +++ b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_create_destroy.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_acquire_release.cc b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_acquire_release.cc index ecf683ed8f8..002df28ff9e 100644 --- a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_acquire_release.cc +++ b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_acquire_release.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -126,19 +126,19 @@ void concurrent_tree_unit_test::test_lkr_acquire_release(void) { // if the subtree root does not overlap then one of its children // must exist and have an overlapping range. 
- if (!lkr.m_subtree->m_range.overlaps(&cmp, range)) { + if (!lkr.m_subtree->m_range.overlaps(cmp, range)) { treenode *left = lkr.m_subtree->m_left_child.ptr; treenode *right = lkr.m_subtree->m_right_child.ptr; if (left != nullptr) { // left exists, so if it does not overlap then the right must - if (!left->m_range.overlaps(&cmp, range)) { + if (!left->m_range.overlaps(cmp, range)) { invariant_notnull(right); - invariant(right->m_range.overlaps(&cmp, range)); + invariant(right->m_range.overlaps(cmp, range)); } } else { // no left child, so the right must exist and be overlapping invariant_notnull(right); - invariant(right->m_range.overlaps(&cmp, range)); + invariant(right->m_range.overlaps(cmp, range)); } } @@ -160,6 +160,8 @@ void concurrent_tree_unit_test::test_lkr_acquire_release(void) { lkr.release(); tree.destroy(); } + + cmp.destroy(); } } /* namespace toku */ diff --git a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_insert_remove.cc b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_insert_remove.cc index ae71cda4526..a4c3f01f419 100644 --- a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_insert_remove.cc +++ b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_insert_remove.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -117,17 +117,17 @@ static void verify_unique_keys(void) { } static uint64_t check_for_range_and_count(concurrent_tree::locked_keyrange *lkr, - comparator *cmp, const keyrange &range, bool range_should_exist) { + const comparator &cmp, const keyrange &range, bool range_should_exist) { struct check_fn_obj { - comparator *cmp; + const comparator *cmp; uint64_t count; keyrange target_range; bool target_range_found; bool fn(const keyrange &query_range, TXNID txnid) { (void) txnid; - if (query_range.compare(cmp, target_range) == keyrange::comparison::EQUALS) { + if (query_range.compare(*cmp, target_range) == keyrange::comparison::EQUALS) { invariant(!target_range_found); target_range_found = true; } @@ -135,7 +135,7 @@ static uint64_t check_for_range_and_count(concurrent_tree::locked_keyrange *lkr, return true; } } check_fn; - check_fn.cmp = cmp; + check_fn.cmp = &cmp; check_fn.count = 0; check_fn.target_range = range; check_fn.target_range_found = false; @@ -174,14 +174,14 @@ void concurrent_tree_unit_test::test_lkr_insert_remove(void) { // insert an element. it should exist and the // count should be correct. lkr.insert(range, i); - n = check_for_range_and_count(&lkr, &cmp, range, true); + n = check_for_range_and_count(&lkr, cmp, range, true); if (i >= cap) { invariant(n == cap + 1); // remove an element previously inserted. it should // no longer exist and the count should be correct. 
range.create(get_ith_key_from_set(i - cap), get_ith_key_from_set(i - cap)); lkr.remove(range); - n = check_for_range_and_count(&lkr, &cmp, range, false); + n = check_for_range_and_count(&lkr, cmp, range, false); invariant(n == cap); } else { invariant(n == i + 1); @@ -193,12 +193,13 @@ void concurrent_tree_unit_test::test_lkr_insert_remove(void) { keyrange range; range.create(get_ith_key_from_set(num_keys - i - 1), get_ith_key_from_set(num_keys - i - 1)); lkr.remove(range); - n = check_for_range_and_count(&lkr, &cmp, range, false); + n = check_for_range_and_count(&lkr, cmp, range, false); invariant(n == (cap - i - 1)); } lkr.release(); tree.destroy(); + cmp.destroy(); } } /* namespace toku */ diff --git a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_insert_serial_large.cc b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_insert_serial_large.cc index 5f0f81dc275..1b3da34c904 100644 --- a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_insert_serial_large.cc +++ b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_insert_serial_large.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -136,6 +136,7 @@ void concurrent_tree_unit_test::test_lkr_insert_serial_large(void) { lkr.release(); tree.destroy(); + cmp.destroy(); } } /* namespace toku */ diff --git a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_remove_all.cc b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_remove_all.cc index c7d5f4d3204..9fc67dbf5ef 100644 --- a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_remove_all.cc +++ b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_remove_all.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -132,6 +132,8 @@ void concurrent_tree_unit_test::test_lkr_remove_all(void) { lkr.release(); tree.destroy(); } + + cmp.destroy(); } } /* namespace toku */ diff --git a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_unit_test.h b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_unit_test.h index bda34978e50..132dbf24cce 100644 --- a/storage/tokudb/ft-index/locktree/tests/concurrent_tree_unit_test.h +++ b/storage/tokudb/ft-index/locktree/tests/concurrent_tree_unit_test.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,6 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
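The locktree, keyrange, concurrent_tree and test hunks above all make the same API move: a comparator is now passed as a const reference and invoked with function-call syntax instead of going through a comparator*/DESCRIPTOR/ft_compare_func triple, and the range_buffer is backed by a memarena whose iterator is constructed directly from the buffer rather than set up with iter.create(). The sketch below is only an illustration of that post-patch calling convention, not code from the patch; it assumes the renamed TokuFT headers shown in the diff, a stand-in ft_compare_func named compare_dbts, and that toku::comparator::create(ft_compare_func, DESCRIPTOR) still exists as it did in the pre-patch locktree::create.

// Minimal usage sketch (not part of the patch): the reference-based comparator API
// and the memarena-backed range_buffer, as they look after this change.
#include "locktree/locktree.h"
#include "locktree/range_buffer.h"
#include "ft/comparator.h"
#include "util/dbt.h"

static void locktree_comparator_sketch(void) {
    toku::comparator cmp;
    cmp.create(compare_dbts, nullptr);          // assumed creation path, see note above

    DICTIONARY_ID dict_id = { 1 };
    toku::locktree lt;
    lt.create(nullptr, dict_id, cmp);           // was: lt.create(nullptr, dict_id, desc, compare_func)

    // comparators are now invoked with call syntax instead of cmp->compare(a, b)
    int k1 = 1, k2 = 2;
    DBT left_key, right_key;
    toku_fill_dbt(&left_key, &k1, sizeof k1);
    toku_fill_dbt(&right_key, &k2, sizeof k2);
    invariant(cmp(&left_key, &right_key) <= 0);

    // the reworked range_buffer allocates records from a memarena; its iterator is
    // constructed from the buffer instead of being initialized with iter.create()
    toku::range_buffer buffer;
    buffer.create();
    buffer.append(&left_key, &right_key);       // keys must fit MAX_KEY_SIZE (2^16 bytes)
    toku::range_buffer::iterator iter(&buffer);
    toku::range_buffer::iterator::record rec;
    while (iter.current(&rec)) {
        // rec.get_left_key()/get_right_key() point into the buffer's arena
        iter.next();
    }
    buffer.destroy();

    // locktree teardown omitted: a locktree is normally released through its manager
    cmp.destroy();
}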
diff --git a/storage/tokudb/ft-index/locktree/tests/lock_request_create_set.cc b/storage/tokudb/ft-index/locktree/tests/lock_request_create_set.cc index b309d9b6fd8..d88976add4f 100644 --- a/storage/tokudb/ft-index/locktree/tests/lock_request_create_set.cc +++ b/storage/tokudb/ft-index/locktree/tests/lock_request_create_set.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/locktree/tests/lock_request_get_set_keys.cc b/storage/tokudb/ft-index/locktree/tests/lock_request_get_set_keys.cc index 60300a138df..55bb483114b 100644 --- a/storage/tokudb/ft-index/locktree/tests/lock_request_get_set_keys.cc +++ b/storage/tokudb/ft-index/locktree/tests/lock_request_get_set_keys.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/locktree/tests/lock_request_killed.cc b/storage/tokudb/ft-index/locktree/tests/lock_request_killed.cc index 18fcd873423..3c2a6a35562 100644 --- a/storage/tokudb/ft-index/locktree/tests/lock_request_killed.cc +++ b/storage/tokudb/ft-index/locktree/tests/lock_request_killed.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -120,7 +120,7 @@ void lock_request_unit_test::test_wait_time_callback(void) { locktree lt; DICTIONARY_ID dict_id = { 1 }; - lt.create(nullptr, dict_id, nullptr, compare_dbts); + lt.create(nullptr, dict_id, dbt_comparator); TXNID txnid_a = 1001; lock_request request_a; diff --git a/storage/tokudb/ft-index/locktree/tests/lock_request_not_killed.cc b/storage/tokudb/ft-index/locktree/tests/lock_request_not_killed.cc index abee11052f4..96bd2869fcf 100644 --- a/storage/tokudb/ft-index/locktree/tests/lock_request_not_killed.cc +++ b/storage/tokudb/ft-index/locktree/tests/lock_request_not_killed.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -117,7 +117,7 @@ void lock_request_unit_test::test_wait_time_callback(void) { locktree lt; DICTIONARY_ID dict_id = { 1 }; - lt.create(nullptr, dict_id, nullptr, compare_dbts); + lt.create(nullptr, dict_id, dbt_comparator); TXNID txnid_a = 1001; lock_request request_a; diff --git a/storage/tokudb/ft-index/locktree/tests/lock_request_start_deadlock.cc b/storage/tokudb/ft-index/locktree/tests/lock_request_start_deadlock.cc index 4710e19551b..af28b06b682 100644 --- a/storage/tokudb/ft-index/locktree/tests/lock_request_start_deadlock.cc +++ b/storage/tokudb/ft-index/locktree/tests/lock_request_start_deadlock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -102,7 +102,7 @@ void lock_request_unit_test::test_start_deadlock(void) { const uint64_t lock_wait_time = 10; DICTIONARY_ID dict_id = { 1 }; - lt.create(nullptr, dict_id, nullptr, compare_dbts); + lt.create(nullptr, dict_id, dbt_comparator); TXNID txnid_a = 1001; TXNID txnid_b = 2001; diff --git a/storage/tokudb/ft-index/locktree/tests/lock_request_start_pending.cc b/storage/tokudb/ft-index/locktree/tests/lock_request_start_pending.cc index 54d630078ac..a719da64114 100644 --- a/storage/tokudb/ft-index/locktree/tests/lock_request_start_pending.cc +++ b/storage/tokudb/ft-index/locktree/tests/lock_request_start_pending.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -101,7 +101,7 @@ void lock_request_unit_test::test_start_pending(void) { lock_request request; DICTIONARY_ID dict_id = { 1 }; - lt.create(nullptr, dict_id, nullptr, compare_dbts); + lt.create(nullptr, dict_id, dbt_comparator); TXNID txnid_a = 1001; TXNID txnid_b = 2001; diff --git a/storage/tokudb/ft-index/locktree/tests/lock_request_unit_test.h b/storage/tokudb/ft-index/locktree/tests/lock_request_unit_test.h index 3183bf2b734..8fc4a3f8df8 100644 --- a/storage/tokudb/ft-index/locktree/tests/lock_request_unit_test.h +++ b/storage/tokudb/ft-index/locktree/tests/lock_request_unit_test.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef TOKU_LOCK_REQUEST_UNIT_TEST_H -#define TOKU_LOCK_REQUEST_UNIT_TEST_H - #include "test.h" #include "locktree_unit_test.h" @@ -132,5 +131,3 @@ private: }; } - -#endif diff --git a/storage/tokudb/ft-index/locktree/tests/lock_request_wait_time_callback.cc b/storage/tokudb/ft-index/locktree/tests/lock_request_wait_time_callback.cc index bc67bac7465..b583e32e117 100644 --- a/storage/tokudb/ft-index/locktree/tests/lock_request_wait_time_callback.cc +++ b/storage/tokudb/ft-index/locktree/tests/lock_request_wait_time_callback.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -101,7 +101,7 @@ void lock_request_unit_test::test_wait_time_callback(void) { locktree lt; DICTIONARY_ID dict_id = { 1 }; - lt.create(nullptr, dict_id, nullptr, compare_dbts); + lt.create(nullptr, dict_id, dbt_comparator); TXNID txnid_a = 1001; lock_request request_a; diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_conflicts.cc b/storage/tokudb/ft-index/locktree/tests/locktree_conflicts.cc index 3eb7bd3c3d4..716000d4753 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_conflicts.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_conflicts.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -108,7 +108,7 @@ void locktree_unit_test::test_conflicts(void) { locktree lt; DICTIONARY_ID dict_id = { 1 }; - lt.create(nullptr, dict_id, nullptr, compare_dbts); + lt.create(nullptr, dict_id, dbt_comparator); int r; TXNID txnid_a = 1001; diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_create_destroy.cc b/storage/tokudb/ft-index/locktree/tests/locktree_create_destroy.cc index b3b1fb77629..93bdea239cc 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_create_destroy.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_create_destroy.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -98,7 +98,7 @@ void locktree_unit_test::test_create_destroy(void) { locktree lt; DICTIONARY_ID dict_id = { 1 }; - lt.create(nullptr, dict_id, nullptr, compare_dbts); + lt.create(nullptr, dict_id, dbt_comparator); lt_lock_request_info *info = lt.get_lock_request_info(); invariant_notnull(info); diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_escalation_1big7lt_1small.cc b/storage/tokudb/ft-index/locktree/tests/locktree_escalation_1big7lt_1small.cc index 26e286eb8ca..02784f52bfa 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_escalation_1big7lt_1small.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_escalation_1big7lt_1small.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -247,11 +247,11 @@ int main(int argc, const char *argv[]) { locktree *big_lt[n_big]; for (int i = 0; i < n_big; i++) { dict_id = { next_dict_id }; next_dict_id++; - big_lt[i] = mgr.get_lt(dict_id, nullptr, compare_dbts, nullptr); + big_lt[i] = mgr.get_lt(dict_id, dbt_comparator, nullptr); } dict_id = { next_dict_id }; next_dict_id++; - locktree *small_lt = mgr.get_lt(dict_id, nullptr, compare_dbts, nullptr); + locktree *small_lt = mgr.get_lt(dict_id, dbt_comparator, nullptr); // create the worker threads struct big_arg big_arg = { &mgr, big_lt, n_big, 1000 }; diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_escalation_2big_1lt.cc b/storage/tokudb/ft-index/locktree/tests/locktree_escalation_2big_1lt.cc index 8f6c697970e..9509224a15f 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_escalation_2big_1lt.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_escalation_2big_1lt.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. 
Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -210,13 +210,10 @@ int main(int argc, const char *argv[]) { mgr.set_max_lock_memory(max_lock_memory); // create lock trees - DESCRIPTOR desc[n_lt]; - DICTIONARY_ID dict_id[n_lt]; locktree *lt[n_big]; for (int i = 0; i < n_lt; i++) { - desc[i] = nullptr; - dict_id[i] = { (uint64_t)i }; - lt[i] = mgr.get_lt(dict_id[i], desc[i], compare_dbts, nullptr); + DICTIONARY_ID dict_id = { .dictid = (uint64_t) i }; + lt[i] = mgr.get_lt(dict_id, dbt_comparator, nullptr); assert(lt[i]); } diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_escalation_2big_2lt.cc b/storage/tokudb/ft-index/locktree/tests/locktree_escalation_2big_2lt.cc index 576208f1dcb..5e315edda78 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_escalation_2big_2lt.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_escalation_2big_2lt.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -210,13 +210,10 @@ int main(int argc, const char *argv[]) { mgr.set_max_lock_memory(max_lock_memory); // create lock trees - DESCRIPTOR desc[n_lt]; - DICTIONARY_ID dict_id[n_lt]; locktree *lt[n_big]; for (int i = 0; i < n_lt; i++) { - desc[i] = nullptr; - dict_id[i] = { (uint64_t)i }; - lt[i] = mgr.get_lt(dict_id[i], desc[i], compare_dbts, nullptr); + DICTIONARY_ID dict_id = { .dictid = (uint64_t)i }; + lt[i] = mgr.get_lt(dict_id, dbt_comparator, nullptr); assert(lt[i]); } diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_escalation_impossible.cc b/storage/tokudb/ft-index/locktree/tests/locktree_escalation_impossible.cc index 4ee79b4f573..a7d84aaf650 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_escalation_impossible.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_escalation_impossible.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -167,9 +167,8 @@ int main(int argc, const char *argv[]) { const TXNID txn_b = 100; // create lock trees - DESCRIPTOR desc = nullptr; - DICTIONARY_ID dict_id = { 1 }; - locktree *lt = mgr.get_lt(dict_id, desc, compare_dbts, nullptr); + DICTIONARY_ID dict_id = { .dictid = 1 }; + locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr); int64_t last_i = -1; for (int64_t i = 0; ; i++) { diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_escalation_stalls.cc b/storage/tokudb/ft-index/locktree/tests/locktree_escalation_stalls.cc index 4fd102e2d49..9228e627e9a 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_escalation_stalls.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_escalation_stalls.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -228,13 +228,11 @@ int main(int argc, const char *argv[]) { mgr.set_max_lock_memory(max_lock_memory); // create lock trees - DESCRIPTOR desc_0 = nullptr; - DICTIONARY_ID dict_id_0 = { 1 }; - locktree *lt_0 = mgr.get_lt(dict_id_0, desc_0, compare_dbts, nullptr); + DICTIONARY_ID dict_id_0 = { .dictid = 1 }; + locktree *lt_0 = mgr.get_lt(dict_id_0, dbt_comparator, nullptr); - DESCRIPTOR desc_1 = nullptr; - DICTIONARY_ID dict_id_1 = { 2 }; - locktree *lt_1 = mgr.get_lt(dict_id_1, desc_1, compare_dbts, nullptr); + DICTIONARY_ID dict_id_1 = { .dictid = 2 }; + locktree *lt_1 = mgr.get_lt(dict_id_1, dbt_comparator, nullptr); // create the worker threads struct arg big_arg = { &mgr, lt_0, 1000 }; diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_infinity.cc b/storage/tokudb/ft-index/locktree/tests/locktree_infinity.cc index b4e0d0765bc..ef490b59cc2 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_infinity.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_infinity.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -98,7 +98,7 @@ void locktree_unit_test::test_infinity(void) { locktree lt; DICTIONARY_ID dict_id = { 1 }; - lt.create(nullptr, dict_id, nullptr, compare_dbts); + lt.create(nullptr, dict_id, dbt_comparator); int r; TXNID txnid_a = 1001; diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_misc.cc b/storage/tokudb/ft-index/locktree/tests/locktree_misc.cc index 72906cca983..67d616867bc 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_misc.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_misc.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -109,7 +109,9 @@ static int my_compare_dbts(DB *db, const DBT *a, const DBT *b) { void locktree_unit_test::test_misc(void) { locktree lt; DICTIONARY_ID dict_id = { 1 }; - lt.create(nullptr, dict_id, nullptr, my_compare_dbts); + toku::comparator my_dbt_comparator; + my_dbt_comparator.create(my_compare_dbts, nullptr); + lt.create(nullptr, dict_id, my_dbt_comparator); invariant(lt.get_userdata() == nullptr); int userdata; @@ -124,19 +126,27 @@ void locktree_unit_test::test_misc(void) { expected_a = &dbt_a; expected_b = &dbt_b; + toku::comparator cmp_d1, cmp_d2; + cmp_d1.create(my_compare_dbts, &d1); + cmp_d2.create(my_compare_dbts, &d2); + // make sure the comparator object has the correct // descriptor when we set the locktree's descriptor - lt.set_descriptor(&d1); + lt.set_comparator(cmp_d1); expected_descriptor = &d1; - r = lt.m_cmp->compare(&dbt_a, &dbt_b); + r = lt.m_cmp(&dbt_a, &dbt_b); invariant(r == expected_comparison_magic); - lt.set_descriptor(&d2); + lt.set_comparator(cmp_d2); expected_descriptor = &d2; - r = lt.m_cmp->compare(&dbt_a, &dbt_b); + r = lt.m_cmp(&dbt_a, &dbt_b); invariant(r == expected_comparison_magic); lt.release_reference(); lt.destroy(); + + cmp_d1.destroy(); + cmp_d2.destroy(); + my_dbt_comparator.destroy(); } } /* namespace toku */ diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_overlapping_relock.cc b/storage/tokudb/ft-index/locktree/tests/locktree_overlapping_relock.cc index 6b412f214b4..4cf950e7037 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_overlapping_relock.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_overlapping_relock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -101,7 +101,7 @@ void locktree_unit_test::test_overlapping_relock(void) { locktree lt; DICTIONARY_ID dict_id = { 1 }; - lt.create(nullptr, dict_id, nullptr, compare_dbts); + lt.create(nullptr, dict_id, dbt_comparator); const DBT *zero = get_dbt(0); const DBT *one = get_dbt(1); @@ -143,7 +143,7 @@ void locktree_unit_test::test_overlapping_relock(void) { bool saw_the_other; TXNID expected_txnid; keyrange *expected_range; - comparator *cmp; + const comparator *cmp; bool fn(const keyrange &range, TXNID txnid) { if (txnid == the_other_txnid) { invariant(!saw_the_other); @@ -151,12 +151,12 @@ void locktree_unit_test::test_overlapping_relock(void) { return true; } invariant(txnid == expected_txnid); - keyrange::comparison c = range.compare(cmp, *expected_range); + keyrange::comparison c = range.compare(*cmp, *expected_range); invariant(c == keyrange::comparison::EQUALS); return true; } } verify_fn; - verify_fn.cmp = lt.m_cmp; + verify_fn.cmp = &lt.m_cmp; #define do_verify() \ do { verify_fn.saw_the_other = false; locktree_iterate<verify_fn_obj>(&lt, &verify_fn); } while (0) diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_simple_lock.cc b/storage/tokudb/ft-index/locktree/tests/locktree_simple_lock.cc index 2a4de0f7b77..c4ebb45537d 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_simple_lock.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_simple_lock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -98,8 +98,8 @@ void locktree_unit_test::test_simple_lock(void) { locktree_manager mgr; mgr.create(nullptr, nullptr, nullptr, nullptr); - DICTIONARY_ID dict_id = { 1 }; - locktree *lt = mgr.get_lt(dict_id, nullptr, compare_dbts, nullptr); + DICTIONARY_ID dict_id = { .dictid = 1 }; + locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr); int r; TXNID txnid_a = 1001; diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_single_txnid_optimization.cc b/storage/tokudb/ft-index/locktree/tests/locktree_single_txnid_optimization.cc index 9da0eff51ce..17ebc3c86f9 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_single_txnid_optimization.cc +++ b/storage/tokudb/ft-index/locktree/tests/locktree_single_txnid_optimization.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -101,7 +101,7 @@ void locktree_unit_test::test_single_txnid_optimization(void) { locktree lt; DICTIONARY_ID dict_id = { 1 }; - lt.create(nullptr, dict_id, nullptr, compare_dbts); + lt.create(nullptr, dict_id, dbt_comparator); const DBT *zero = get_dbt(0); const DBT *one = get_dbt(1); @@ -149,15 +149,15 @@ void locktree_unit_test::test_single_txnid_optimization(void) { struct verify_fn_obj { TXNID expected_txnid; keyrange *expected_range; - comparator *cmp; + const comparator *cmp; bool fn(const keyrange &range, TXNID txnid) { invariant(txnid == expected_txnid); - keyrange::comparison c = range.compare(cmp, *expected_range); + keyrange::comparison c = range.compare(*cmp, *expected_range); invariant(c == keyrange::comparison::EQUALS); return true; } } verify_fn; - verify_fn.cmp = lt.m_cmp; + verify_fn.cmp = &lt.m_cmp; keyrange range; range.create(one, one); diff --git a/storage/tokudb/ft-index/locktree/tests/locktree_unit_test.h b/storage/tokudb/ft-index/locktree/tests/locktree_unit_test.h index b074cc837ba..34dbc3a7e59 100644 --- a/storage/tokudb/ft-index/locktree/tests/locktree_unit_test.h +++ b/storage/tokudb/ft-index/locktree/tests/locktree_unit_test.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef TOKU_LOCKTREE_UNIT_TEST_H -#define TOKU_LOCKTREE_UNIT_TEST_H - #include "test.h" #include "locktree.h" @@ -157,5 +156,3 @@ private: }; } /* namespace toku */ - -#endif /* TOKU_LOCKTREE_UNIT_TEST_H */ diff --git a/storage/tokudb/ft-index/locktree/tests/manager_create_destroy.cc b/storage/tokudb/ft-index/locktree/tests/manager_create_destroy.cc index 5b4eef82b30..07c00c5d7b7 100644 --- a/storage/tokudb/ft-index/locktree/tests/manager_create_destroy.cc +++ b/storage/tokudb/ft-index/locktree/tests/manager_create_destroy.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/locktree/tests/manager_locktree_map.cc b/storage/tokudb/ft-index/locktree/tests/manager_locktree_map.cc index bd35ba93fc9..82cf1dc9f5a 100644 --- a/storage/tokudb/ft-index/locktree/tests/manager_locktree_map.cc +++ b/storage/tokudb/ft-index/locktree/tests/manager_locktree_map.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/locktree/tests/manager_params.cc b/storage/tokudb/ft-index/locktree/tests/manager_params.cc index 1fbaf2cf330..7376d91a064 100644 --- a/storage/tokudb/ft-index/locktree/tests/manager_params.cc +++ b/storage/tokudb/ft-index/locktree/tests/manager_params.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/locktree/tests/manager_reference_release_lt.cc b/storage/tokudb/ft-index/locktree/tests/manager_reference_release_lt.cc index 65a2ee478e8..c2fdee49ffe 100644 --- a/storage/tokudb/ft-index/locktree/tests/manager_reference_release_lt.cc +++ b/storage/tokudb/ft-index/locktree/tests/manager_reference_release_lt.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -107,9 +107,15 @@ static void destroy_cb(locktree *lt) { (*k) = false; } +static int my_cmp(DB *UU(db), const DBT *UU(a), const DBT *UU(b)) { + return 0; +} + void manager_unit_test::test_reference_release_lt(void) { locktree_manager mgr; mgr.create(create_cb, destroy_cb, nullptr, nullptr); + toku::comparator my_comparator; + my_comparator.create(my_cmp, nullptr); DICTIONARY_ID a = { 0 }; DICTIONARY_ID b = { 1 }; @@ -117,18 +123,12 @@ void manager_unit_test::test_reference_release_lt(void) { bool aok = false; bool bok = false; bool cok = false; - - int d = 5; - DESCRIPTOR_S desc_s; - desc_s.dbt.data = &d; - desc_s.dbt.size = desc_s.dbt.ulen = sizeof(d); - desc_s.dbt.flags = DB_DBT_USERMEM; - locktree *alt = mgr.get_lt(a, &desc_s, nullptr, &aok); + locktree *alt = mgr.get_lt(a, my_comparator, &aok); invariant_notnull(alt); - locktree *blt = mgr.get_lt(b, &desc_s, nullptr, &bok); + locktree *blt = mgr.get_lt(b, my_comparator, &bok); invariant_notnull(alt); - locktree *clt = mgr.get_lt(c, &desc_s, nullptr, &cok); + locktree *clt = mgr.get_lt(c, my_comparator, &cok); invariant_notnull(alt); // three distinct locktrees should have been returned @@ -152,9 +152,9 @@ void manager_unit_test::test_reference_release_lt(void) { // get another handle on a and b, they shoudl be the same // as the original alt and blt - locktree *blt2 = mgr.get_lt(b, &desc_s, nullptr, &bok); + locktree *blt2 = mgr.get_lt(b, my_comparator, &bok); invariant(blt2 == blt); - locktree *alt2 = mgr.get_lt(a, &desc_s, nullptr, &aok); + locktree *alt2 = mgr.get_lt(a, my_comparator, &aok); invariant(alt2 == alt); // remove one ref from everything. c should die. a and b are ok. 
@@ -171,6 +171,7 @@ void manager_unit_test::test_reference_release_lt(void) { invariant(!aok); invariant(!bok); + my_comparator.destroy(); mgr.destroy(); } diff --git a/storage/tokudb/ft-index/locktree/tests/manager_status.cc b/storage/tokudb/ft-index/locktree/tests/manager_status.cc index e73814d8169..b2f1560736a 100644 --- a/storage/tokudb/ft-index/locktree/tests/manager_status.cc +++ b/storage/tokudb/ft-index/locktree/tests/manager_status.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -120,9 +120,8 @@ void manager_unit_test::test_status(void) { assert_status(&status, "LTM_WAIT_COUNT", 0); assert_status(&status, "LTM_TIMEOUT_COUNT", 0); - DESCRIPTOR desc = nullptr; - DICTIONARY_ID dict_id = { 1 }; - locktree *lt = mgr.get_lt(dict_id, desc, compare_dbts, nullptr); + DICTIONARY_ID dict_id = { .dictid = 1 }; + locktree *lt = mgr.get_lt(dict_id, dbt_comparator, nullptr); int r; TXNID txnid_a = 1001; TXNID txnid_b = 2001; diff --git a/storage/tokudb/ft-index/locktree/tests/manager_unit_test.h b/storage/tokudb/ft-index/locktree/tests/manager_unit_test.h index ba38b97989e..cec640e0c2c 100644 --- a/storage/tokudb/ft-index/locktree/tests/manager_unit_test.h +++ b/storage/tokudb/ft-index/locktree/tests/manager_unit_test.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef TOKU_MANAGER_TEST_H -#define TOKU_MANAGER_TEST_H - #include <toku_assert.h> #include <locktree/locktree.h> @@ -111,5 +110,3 @@ public: }; } /* namespace toku */ - -#endif /* TOKU_MANAGER_TEST_H */ diff --git a/storage/tokudb/ft-index/locktree/tests/range_buffer_test.cc b/storage/tokudb/ft-index/locktree/tests/range_buffer_test.cc index 38ed2469b69..61d14888229 100644 --- a/storage/tokudb/ft-index/locktree/tests/range_buffer_test.cc +++ b/storage/tokudb/ft-index/locktree/tests/range_buffer_test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -121,9 +121,8 @@ static void test_points(void) { } size_t i = 0; - range_buffer::iterator iter; + range_buffer::iterator iter(&buffer); range_buffer::iterator::record rec; - iter.create(&buffer); while (iter.current(&rec)) { const DBT *expected_point = get_dbt_by_iteration(i); invariant(compare_dbts(nullptr, expected_point, rec.get_left_key()) == 0); @@ -151,9 +150,8 @@ static void test_ranges(void) { } size_t i = 0; - range_buffer::iterator iter; + range_buffer::iterator iter(&buffer); range_buffer::iterator::record rec; - iter.create(&buffer); while (iter.current(&rec)) { const DBT *expected_left = get_dbt_by_iteration(i); const DBT *expected_right = get_dbt_by_iteration(i + 1); @@ -187,9 +185,8 @@ static void test_mixed(void) { } size_t i = 0; - range_buffer::iterator iter; + range_buffer::iterator iter(&buffer); range_buffer::iterator::record rec; - iter.create(&buffer); while (iter.current(&rec)) { const DBT *expected_left = get_dbt_by_iteration(i); const DBT *expected_right = get_dbt_by_iteration(i + 1); @@ -232,10 +229,10 @@ static void test_small_and_large_points(void) { // Append a small dbt, the buf should be able to fit it. buffer.append(&small_dbt, &small_dbt); - invariant(buffer.m_buf_size >= small_dbt.size); + invariant(buffer.total_memory_size() >= small_dbt.size); // Append a large dbt, the buf should be able to fit it. buffer.append(&large_dbt, &large_dbt); - invariant(buffer.m_buf_size >= (small_dbt.size + large_dbt.size)); + invariant(buffer.total_memory_size() >= (small_dbt.size + large_dbt.size)); toku_free(small_buf); toku_free(large_buf); diff --git a/storage/tokudb/ft-index/locktree/tests/test.h b/storage/tokudb/ft-index/locktree/tests/test.h index cf9a805543c..904d0d03415 100644 --- a/storage/tokudb/ft-index/locktree/tests/test.h +++ b/storage/tokudb/ft-index/locktree/tests/test.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,79 +86,90 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#ifndef TOKU_TEST_H -#define TOKU_TEST_H - -#include <ft/ybt.h> #include <limits.h> +#include "ft/comparator.h" +#include "util/dbt.h" + namespace toku { -__attribute__((__unused__)) -static DBT min_dbt(void) { - static int64_t min = INT_MIN; - DBT dbt; - toku_fill_dbt(&dbt, &min, sizeof(int64_t)); - dbt.flags = DB_DBT_USERMEM; - return dbt; -} - -__attribute__((__unused__)) -static DBT max_dbt(void) { - static int64_t max = INT_MAX; - DBT dbt; - toku_fill_dbt(&dbt, &max, sizeof(int64_t)); - dbt.flags = DB_DBT_USERMEM; - return dbt; -} - -__attribute__((__unused__)) -static const DBT *get_dbt(int64_t key) { - static const int NUM_DBTS = 1000; - static bool initialized; - static int64_t static_ints[NUM_DBTS]; - static DBT static_dbts[NUM_DBTS]; - invariant(key < NUM_DBTS); - if (!initialized) { - for (int i = 0; i < NUM_DBTS; i++) { - static_ints[i] = i; - toku_fill_dbt(&static_dbts[i], - &static_ints[i], - sizeof(int64_t)); - static_dbts[i].flags = DB_DBT_USERMEM; + __attribute__((__unused__)) + static DBT min_dbt(void) { + static int64_t min = INT_MIN; + DBT dbt; + toku_fill_dbt(&dbt, &min, sizeof(int64_t)); + dbt.flags = DB_DBT_USERMEM; + return dbt; + } + + __attribute__((__unused__)) + static DBT max_dbt(void) { + static int64_t max = INT_MAX; + DBT dbt; + toku_fill_dbt(&dbt, &max, sizeof(int64_t)); + dbt.flags = DB_DBT_USERMEM; + return dbt; + } + + __attribute__((__unused__)) + static const DBT *get_dbt(int64_t key) { + static const int NUM_DBTS = 1000; + static bool initialized; + static int64_t static_ints[NUM_DBTS]; + static DBT static_dbts[NUM_DBTS]; + invariant(key < NUM_DBTS); + if (!initialized) { + for (int i = 0; i < NUM_DBTS; i++) { + static_ints[i] = i; + toku_fill_dbt(&static_dbts[i], + &static_ints[i], + sizeof(int64_t)); + static_dbts[i].flags = DB_DBT_USERMEM; + } + initialized = true; } - initialized = true; + + invariant(key < NUM_DBTS); + return &static_dbts[key]; } - invariant(key < NUM_DBTS); - return &static_dbts[key]; -} - -__attribute__((__unused__)) -static int compare_dbts(DB *db, const DBT *key1, const DBT *key2) { - (void) db; - - // this emulates what a "infinity-aware" comparator object does - if (toku_dbt_is_infinite(key1) || toku_dbt_is_infinite(key2)) { - return toku_dbt_infinite_compare(key1, key2); - } else { - invariant(key1->size == sizeof(int64_t)); - invariant(key2->size == sizeof(int64_t)); - int64_t a = *(int64_t*) key1->data; - int64_t b = *(int64_t*) key2->data; - if (a < b) { - return -1; - } else if (a == b) { - return 0; + __attribute__((__unused__)) + static int compare_dbts(DB *db, const DBT *key1, const DBT *key2) { + (void) db; + + // this emulates what a "infinity-aware" comparator object does + if (toku_dbt_is_infinite(key1) || toku_dbt_is_infinite(key2)) { + return toku_dbt_infinite_compare(key1, key2); } else { - return 1; + invariant(key1->size == sizeof(int64_t)); + invariant(key2->size == sizeof(int64_t)); + int64_t a = *(int64_t*) key1->data; + int64_t b = *(int64_t*) key2->data; + if (a < b) { + return -1; + } else if (a == b) { + return 0; + } else { + return 1; + } } } -} -} /* namespace toku */ + __attribute__((__unused__)) comparator dbt_comparator; + + __attribute__((__constructor__)) + static void construct_dbt_comparator(void) { + dbt_comparator.create(compare_dbts, nullptr); + } -#endif + __attribute__((__destructor__)) + static void destruct_dbt_comparator(void) { + dbt_comparator.destroy(); + } + +} /* namespace toku */ diff --git a/storage/tokudb/ft-index/locktree/tests/txnid_set_test.cc 
b/storage/tokudb/ft-index/locktree/tests/txnid_set_test.cc index fe442a50683..3502b9bf049 100644 --- a/storage/tokudb/ft-index/locktree/tests/txnid_set_test.cc +++ b/storage/tokudb/ft-index/locktree/tests/txnid_set_test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/locktree/tests/wfg_test.cc b/storage/tokudb/ft-index/locktree/tests/wfg_test.cc index a7669135bf0..be3f8fa1f20 100644 --- a/storage/tokudb/ft-index/locktree/tests/wfg_test.cc +++ b/storage/tokudb/ft-index/locktree/tests/wfg_test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/locktree/treenode.cc b/storage/tokudb/ft-index/locktree/treenode.cc index 0e8953ce895..9853874776f 100644 --- a/storage/tokudb/ft-index/locktree/treenode.cc +++ b/storage/tokudb/ft-index/locktree/treenode.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -99,7 +99,7 @@ void treenode::mutex_unlock(void) { toku_mutex_unlock(&m_mutex); } -void treenode::init(comparator *cmp) { +void treenode::init(const comparator *cmp) { m_txnid = TXNID_NONE; m_is_root = false; m_is_empty = true; @@ -117,7 +117,7 @@ void treenode::init(comparator *cmp) { m_right_child.set(nullptr); } -void treenode::create_root(comparator *cmp) { +void treenode::create_root(const comparator *cmp) { init(cmp); m_is_root = true; } @@ -145,10 +145,10 @@ bool treenode::is_empty(void) { } bool treenode::range_overlaps(const keyrange &range) { - return m_range.overlaps(m_cmp, range); + return m_range.overlaps(*m_cmp, range); } -treenode *treenode::alloc(comparator *cmp, const keyrange &range, TXNID txnid) { +treenode *treenode::alloc(const comparator *cmp, const keyrange &range, TXNID txnid) { treenode *XCALLOC(node); node->init(cmp); node->set_range_and_txnid(range, txnid); @@ -190,7 +190,7 @@ treenode *treenode::find_node_with_overlapping_child(const keyrange &range, // determine which child to look at based on a comparison. if we were // given a comparison hint, use that. otherwise, compare them now. - keyrange::comparison c = cmp_hint ? *cmp_hint : range.compare(m_cmp, m_range); + keyrange::comparison c = cmp_hint ? *cmp_hint : range.compare(*m_cmp, m_range); treenode *child; if (c == keyrange::comparison::LESS_THAN) { @@ -209,7 +209,7 @@ treenode *treenode::find_node_with_overlapping_child(const keyrange &range, if (child == nullptr) { return this; } else { - c = range.compare(m_cmp, child->m_range); + c = range.compare(*m_cmp, child->m_range); if (c == keyrange::comparison::EQUALS || c == keyrange::comparison::OVERLAPS) { child->mutex_unlock(); return this; @@ -225,7 +225,7 @@ treenode *treenode::find_node_with_overlapping_child(const keyrange &range, template <class F> void treenode::traverse_overlaps(const keyrange &range, F *function) { - keyrange::comparison c = range.compare(m_cmp, m_range); + keyrange::comparison c = range.compare(*m_cmp, m_range); if (c == keyrange::comparison::EQUALS) { // Doesn't matter if fn wants to keep going, there // is nothing left, so return. 
@@ -264,7 +264,7 @@ void treenode::traverse_overlaps(const keyrange &range, F *function) { void treenode::insert(const keyrange &range, TXNID txnid) { // choose a child to check. if that child is null, then insert the new node there. // otherwise recur down that child's subtree - keyrange::comparison c = range.compare(m_cmp, m_range); + keyrange::comparison c = range.compare(*m_cmp, m_range); if (c == keyrange::comparison::LESS_THAN) { treenode *left_child = lock_and_rebalance_left(); if (left_child == nullptr) { @@ -382,7 +382,7 @@ treenode *treenode::remove(const keyrange &range) { // if the range is equal to this node's range, then just remove // the root of this subtree. otherwise search down the tree // in either the left or right children. - keyrange::comparison c = range.compare(m_cmp, m_range); + keyrange::comparison c = range.compare(*m_cmp, m_range); switch (c) { case keyrange::comparison::EQUALS: return remove_root_of_subtree(); diff --git a/storage/tokudb/ft-index/locktree/treenode.h b/storage/tokudb/ft-index/locktree/treenode.h index e48dc50d72b..7a6880a657c 100644 --- a/storage/tokudb/ft-index/locktree/treenode.h +++ b/storage/tokudb/ft-index/locktree/treenode.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,20 +86,19 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef TREENODE_H -#define TREENODE_H - -#include <memory.h> #include <string.h> -#include <ft/comparator.h> +#include "portability/memory.h" +#include "portability/toku_pthread.h" -#include <portability/toku_pthread.h> - -#include "keyrange.h" +#include "ft/comparator.h" +#include "ft/txn/txn.h" +#include "locktree/keyrange.h" namespace toku { @@ -124,7 +123,7 @@ public: // - node may be unlocked if no other thread has visibility // effect: create the root node - void create_root(comparator *cmp); + void create_root(const comparator *cmp); // effect: destroys the root node void destroy_root(void); @@ -211,7 +210,7 @@ private: child_ptr m_right_child; // comparator for ranges - comparator *m_cmp; + const comparator *m_cmp; // marked for the root node. the root node is never free()'d // when removed, but instead marked as empty. @@ -221,7 +220,7 @@ private: bool m_is_empty; // effect: initializes an empty node with the given comparator - void init(comparator *cmp); + void init(const comparator *cmp); // requires: *parent is initialized to something meaningful. 
// requires: subtree is non-empty @@ -268,7 +267,7 @@ private: treenode *maybe_rebalance(void); // returns: allocated treenode populated with a copy of the range and txnid - static treenode *alloc(comparator *cmp, const keyrange &range, TXNID txnid); + static treenode *alloc(const comparator *cmp, const keyrange &range, TXNID txnid); // requires: node is a locked root node, or an unlocked non-root node static void free(treenode *node); @@ -283,5 +282,3 @@ private: #include "treenode.cc" } /* namespace toku */ - -#endif /* TREENODE_H */ diff --git a/storage/tokudb/ft-index/locktree/txnid_set.cc b/storage/tokudb/ft-index/locktree/txnid_set.cc index 598a717f933..f6b95c9b32f 100644 --- a/storage/tokudb/ft-index/locktree/txnid_set.cc +++ b/storage/tokudb/ft-index/locktree/txnid_set.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/locktree/txnid_set.h b/storage/tokudb/ft-index/locktree/txnid_set.h index d2971c5c167..c2c84b39c07 100644 --- a/storage/tokudb/ft-index/locktree/txnid_set.h +++ b/storage/tokudb/ft-index/locktree/txnid_set.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,15 +86,14 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef TOKU_TXNID_SET_H -#define TOKU_TXNID_SET_H - -#include <ft/fttypes.h> +#include "ft/txn/txn.h" -#include <util/omt.h> +#include "util/omt.h" namespace toku { @@ -130,5 +129,3 @@ private: ENSURE_POD(txnid_set); } /* namespace toku */ - -#endif /* TOKU_TXNID_SET_H */ diff --git a/storage/tokudb/ft-index/locktree/wfg.cc b/storage/tokudb/ft-index/locktree/wfg.cc index dea97d5cd43..e18c7f4aa26 100644 --- a/storage/tokudb/ft-index/locktree/wfg.cc +++ b/storage/tokudb/ft-index/locktree/wfg.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/locktree/wfg.h b/storage/tokudb/ft-index/locktree/wfg.h index 2bfd3797f9b..99172902d2e 100644 --- a/storage/tokudb/ft-index/locktree/wfg.h +++ b/storage/tokudb/ft-index/locktree/wfg.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,17 +86,13 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#ifndef TOKU_WFG_H -#define TOKU_WFG_H - -#include <ft/fttypes.h> - -#include <util/omt.h> - -#include "txnid_set.h" +#include "locktree/txnid_set.h" +#include "util/omt.h" namespace toku { @@ -159,5 +155,3 @@ private: ENSURE_POD(wfg); } /* namespace toku */ - -#endif /* TOKU_WFG_H */ diff --git a/storage/tokudb/ft-index/portability/CMakeLists.txt b/storage/tokudb/ft-index/portability/CMakeLists.txt index 93dcf1d1675..9f84d9b03df 100644 --- a/storage/tokudb/ft-index/portability/CMakeLists.txt +++ b/storage/tokudb/ft-index/portability/CMakeLists.txt @@ -19,7 +19,6 @@ target_link_libraries(${LIBTOKUPORTABILITY} LINK_PUBLIC ${CMAKE_THREAD_LIBS_INIT add_library(tokuportability_static_conv STATIC ${tokuportability_srcs}) set_target_properties(tokuportability_static_conv PROPERTIES POSITION_INDEPENDENT_CODE ON) -add_dependencies(tokuportability_static_conv build_jemalloc) set(tokuportability_source_libs tokuportability_static_conv ${LIBJEMALLOC} ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS}) toku_merge_static_libs(${LIBTOKUPORTABILITY}_static ${LIBTOKUPORTABILITY}_static "${tokuportability_source_libs}") diff --git a/storage/tokudb/ft-index/portability/file.cc b/storage/tokudb/ft-index/portability/file.cc index b351141fe29..6919b54e81d 100644 --- a/storage/tokudb/ft-index/portability/file.cc +++ b/storage/tokudb/ft-index/portability/file.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -166,10 +166,10 @@ try_again_after_handling_write_error(int fd, size_t len, ssize_t r_write) { ssize_t n = readlink(fname, symname, MY_MAX_PATH); if ((int)n == -1) - fprintf(stderr, "%.24s Tokudb No space when writing %" PRIu64 " bytes to fd=%d ", tstr, (uint64_t) len, fd); + fprintf(stderr, "%.24s TokuFT No space when writing %" PRIu64 " bytes to fd=%d ", tstr, (uint64_t) len, fd); else { tstr[n] = 0; // readlink doesn't append a NUL to the end of the buffer. - fprintf(stderr, "%.24s Tokudb No space when writing %" PRIu64 " bytes to %*s ", tstr, (uint64_t) len, (int) n, symname); + fprintf(stderr, "%.24s TokuFT No space when writing %" PRIu64 " bytes to %*s ", tstr, (uint64_t) len, (int) n, symname); } fprintf(stderr, "retry in %d second%s\n", toku_write_enospc_sleep, toku_write_enospc_sleep > 1 ? "s" : ""); fflush(stderr); diff --git a/storage/tokudb/ft-index/portability/huge_page_detection.cc b/storage/tokudb/ft-index/portability/huge_page_detection.cc index 52f52c7120e..c90333857c3 100644 --- a/storage/tokudb/ft-index/portability/huge_page_detection.cc +++ b/storage/tokudb/ft-index/portability/huge_page_detection.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -107,7 +107,7 @@ static bool check_huge_pages_config_file(const char *fname) char *r = fgets(buf, sizeof(buf), f); assert(r != NULL); if (strstr(buf, "[always]")) { - fprintf(stderr,"TokuDB: Transparent huge pages are enabled, according to %s. TokuDB will be disabled. 
To use TokuDB disable huge pages in your kernel or, for testing, set the environment variable TOKU_HUGE_PAGES_OK to 1\n", fname); + fprintf(stderr, "Transparent huge pages are enabled, according to %s\n", fname); huge_pages_enabled = true; } else { huge_pages_enabled =false; diff --git a/storage/tokudb/ft-index/portability/memory.cc b/storage/tokudb/ft-index/portability/memory.cc index 6102aaf7c1a..568be399bb5 100644 --- a/storage/tokudb/ft-index/portability/memory.cc +++ b/storage/tokudb/ft-index/portability/memory.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/memory.h b/storage/tokudb/ft-index/portability/memory.h index e4608d6108f..837b0a70265 100644 --- a/storage/tokudb/ft-index/portability/memory.h +++ b/storage/tokudb/ft-index/portability/memory.h @@ -1,8 +1,6 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: #ident "$Id$" -#ifndef MEMORY_H -#define MEMORY_H /* COPYING CONDITIONS NOTICE: @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,12 +87,13 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include <stdlib.h> #include <toku_portability.h> - /* Tokutek memory allocation functions and macros. * These are functions for malloc and free */ @@ -225,5 +224,3 @@ typedef struct memory_status { void toku_memory_get_status(LOCAL_MEMORY_STATUS s); size_t toku_memory_footprint(void * p, size_t touched); - -#endif diff --git a/storage/tokudb/ft-index/portability/os_malloc.cc b/storage/tokudb/ft-index/portability/os_malloc.cc index 6db71e958e9..c59167bd8c4 100644 --- a/storage/tokudb/ft-index/portability/os_malloc.cc +++ b/storage/tokudb/ft-index/portability/os_malloc.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/portability.cc b/storage/tokudb/ft-index/portability/portability.cc index 8fdfb916d94..09c1ccd50be 100644 --- a/storage/tokudb/ft-index/portability/portability.cc +++ b/storage/tokudb/ft-index/portability/portability.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/rwlock_condvar.h b/storage/tokudb/ft-index/portability/tests/rwlock_condvar.h index fb592175dc8..135481f8997 100644 --- a/storage/tokudb/ft-index/portability/tests/rwlock_condvar.h +++ b/storage/tokudb/ft-index/portability/tests/rwlock_condvar.h @@ -33,7 +33,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-active-cpus.cc b/storage/tokudb/ft-index/portability/tests/test-active-cpus.cc index c8e1188cd03..ed141edd0bf 100644 --- a/storage/tokudb/ft-index/portability/tests/test-active-cpus.cc +++ b/storage/tokudb/ft-index/portability/tests/test-active-cpus.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-cache-line-boundary-fails.cc b/storage/tokudb/ft-index/portability/tests/test-cache-line-boundary-fails.cc index 6e900b15be8..eb4862c2254 100644 --- a/storage/tokudb/ft-index/portability/tests/test-cache-line-boundary-fails.cc +++ b/storage/tokudb/ft-index/portability/tests/test-cache-line-boundary-fails.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-cpu-freq-openlimit17.cc b/storage/tokudb/ft-index/portability/tests/test-cpu-freq-openlimit17.cc index ae4ec26fb97..04e58d49bf6 100644 --- a/storage/tokudb/ft-index/portability/tests/test-cpu-freq-openlimit17.cc +++ b/storage/tokudb/ft-index/portability/tests/test-cpu-freq-openlimit17.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-cpu-freq.cc b/storage/tokudb/ft-index/portability/tests/test-cpu-freq.cc index f0e991c9735..889eb29c5c1 100644 --- a/storage/tokudb/ft-index/portability/tests/test-cpu-freq.cc +++ b/storage/tokudb/ft-index/portability/tests/test-cpu-freq.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-filesystem-sizes.cc b/storage/tokudb/ft-index/portability/tests/test-filesystem-sizes.cc index e4466b83952..993eaf4fea2 100644 --- a/storage/tokudb/ft-index/portability/tests/test-filesystem-sizes.cc +++ b/storage/tokudb/ft-index/portability/tests/test-filesystem-sizes.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-flock.cc b/storage/tokudb/ft-index/portability/tests/test-flock.cc index 942dc6b0686..5ef45b1bd97 100644 --- a/storage/tokudb/ft-index/portability/tests/test-flock.cc +++ b/storage/tokudb/ft-index/portability/tests/test-flock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-fsync-directory.cc b/storage/tokudb/ft-index/portability/tests/test-fsync-directory.cc index 8d1546fcff7..a0de1a0d882 100644 --- a/storage/tokudb/ft-index/portability/tests/test-fsync-directory.cc +++ b/storage/tokudb/ft-index/portability/tests/test-fsync-directory.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-fsync.cc b/storage/tokudb/ft-index/portability/tests/test-fsync.cc index efdfd0dd4f2..4d3be11120f 100644 --- a/storage/tokudb/ft-index/portability/tests/test-fsync.cc +++ b/storage/tokudb/ft-index/portability/tests/test-fsync.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -221,7 +221,6 @@ time_fsyncs_many_files(int N, int bytes, int fds[/*N*/]) { } } -//sync() does not appear to have an analogue on windows. static void time_sync_fsyncs_many_files(int N, int bytes, int fds[/*N*/]) { if (verbose>1) { diff --git a/storage/tokudb/ft-index/portability/tests/test-gettime.cc b/storage/tokudb/ft-index/portability/tests/test-gettime.cc index 70b24cd2aaf..ce0e5cb9921 100644 --- a/storage/tokudb/ft-index/portability/tests/test-gettime.cc +++ b/storage/tokudb/ft-index/portability/tests/test-gettime.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-gettimeofday.cc b/storage/tokudb/ft-index/portability/tests/test-gettimeofday.cc index a4660d8ba4f..0ff77118d1c 100644 --- a/storage/tokudb/ft-index/portability/tests/test-gettimeofday.cc +++ b/storage/tokudb/ft-index/portability/tests/test-gettimeofday.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-hugepage.cc b/storage/tokudb/ft-index/portability/tests/test-hugepage.cc index 61adcac0a69..4aa11ee2e79 100644 --- a/storage/tokudb/ft-index/portability/tests/test-hugepage.cc +++ b/storage/tokudb/ft-index/portability/tests/test-hugepage.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-max-data.cc b/storage/tokudb/ft-index/portability/tests/test-max-data.cc index 459349460d6..f04b39d8421 100644 --- a/storage/tokudb/ft-index/portability/tests/test-max-data.cc +++ b/storage/tokudb/ft-index/portability/tests/test-max-data.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-memory-status.cc b/storage/tokudb/ft-index/portability/tests/test-memory-status.cc index 20eea248bae..87011c4e0d0 100644 --- a/storage/tokudb/ft-index/portability/tests/test-memory-status.cc +++ b/storage/tokudb/ft-index/portability/tests/test-memory-status.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-pagesize.cc b/storage/tokudb/ft-index/portability/tests/test-pagesize.cc index 5f921fe4920..f9a78742e35 100644 --- a/storage/tokudb/ft-index/portability/tests/test-pagesize.cc +++ b/storage/tokudb/ft-index/portability/tests/test-pagesize.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-pthread-rwlock-rdlock.cc b/storage/tokudb/ft-index/portability/tests/test-pthread-rwlock-rdlock.cc index a4c5dcd0128..9008262fa09 100644 --- a/storage/tokudb/ft-index/portability/tests/test-pthread-rwlock-rdlock.cc +++ b/storage/tokudb/ft-index/portability/tests/test-pthread-rwlock-rdlock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-pthread-rwlock-rwr.cc b/storage/tokudb/ft-index/portability/tests/test-pthread-rwlock-rwr.cc index b7a21b1fc06..32b38421aaf 100644 --- a/storage/tokudb/ft-index/portability/tests/test-pthread-rwlock-rwr.cc +++ b/storage/tokudb/ft-index/portability/tests/test-pthread-rwlock-rwr.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-pwrite4g.cc b/storage/tokudb/ft-index/portability/tests/test-pwrite4g.cc index 3fa21f99fa0..abd5e4ec1ac 100644 --- a/storage/tokudb/ft-index/portability/tests/test-pwrite4g.cc +++ b/storage/tokudb/ft-index/portability/tests/test-pwrite4g.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-snprintf.cc b/storage/tokudb/ft-index/portability/tests/test-snprintf.cc index 852fab29ba5..5f168c8d612 100644 --- a/storage/tokudb/ft-index/portability/tests/test-snprintf.cc +++ b/storage/tokudb/ft-index/portability/tests/test-snprintf.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-stat.cc b/storage/tokudb/ft-index/portability/tests/test-stat.cc index 8e3d18eac9a..bedf7e7e54f 100644 --- a/storage/tokudb/ft-index/portability/tests/test-stat.cc +++ b/storage/tokudb/ft-index/portability/tests/test-stat.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-toku-malloc.cc b/storage/tokudb/ft-index/portability/tests/test-toku-malloc.cc index 8c588230331..48f616dd817 100644 --- a/storage/tokudb/ft-index/portability/tests/test-toku-malloc.cc +++ b/storage/tokudb/ft-index/portability/tests/test-toku-malloc.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test-xid.cc b/storage/tokudb/ft-index/portability/tests/test-xid.cc index 140f335d0e5..9277f984b43 100644 --- a/storage/tokudb/ft-index/portability/tests/test-xid.cc +++ b/storage/tokudb/ft-index/portability/tests/test-xid.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/test.h b/storage/tokudb/ft-index/portability/tests/test.h index ff71395e7d7..a3e7994957e 100644 --- a/storage/tokudb/ft-index/portability/tests/test.h +++ b/storage/tokudb/ft-index/portability/tests/test.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/try-assert-zero.cc b/storage/tokudb/ft-index/portability/tests/try-assert-zero.cc index 6249d0b1aa5..6517f61b3c3 100644 --- a/storage/tokudb/ft-index/portability/tests/try-assert-zero.cc +++ b/storage/tokudb/ft-index/portability/tests/try-assert-zero.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/try-assert0.cc b/storage/tokudb/ft-index/portability/tests/try-assert0.cc index f2e1a99469b..89fe6941138 100644 --- a/storage/tokudb/ft-index/portability/tests/try-assert0.cc +++ b/storage/tokudb/ft-index/portability/tests/try-assert0.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/try-leak-lost.cc b/storage/tokudb/ft-index/portability/tests/try-leak-lost.cc index fa6217f39f0..57bbe3589bb 100644 --- a/storage/tokudb/ft-index/portability/tests/try-leak-lost.cc +++ b/storage/tokudb/ft-index/portability/tests/try-leak-lost.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/try-leak-reachable.cc b/storage/tokudb/ft-index/portability/tests/try-leak-reachable.cc index b17418ae67b..63c1dd4f756 100644 --- a/storage/tokudb/ft-index/portability/tests/try-leak-reachable.cc +++ b/storage/tokudb/ft-index/portability/tests/try-leak-reachable.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/tests/try-uninit.cc b/storage/tokudb/ft-index/portability/tests/try-uninit.cc index 415de3203f7..c763348ed2f 100644 --- a/storage/tokudb/ft-index/portability/tests/try-uninit.cc +++ b/storage/tokudb/ft-index/portability/tests/try-uninit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/toku_assert.cc b/storage/tokudb/ft-index/portability/toku_assert.cc index 860d11d83f4..68e16699e60 100644 --- a/storage/tokudb/ft-index/portability/toku_assert.cc +++ b/storage/tokudb/ft-index/portability/toku_assert.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/toku_assert.h b/storage/tokudb/ft-index/portability/toku_assert.h index 0214018c11f..ab5f8c1ffb4 100644 --- a/storage/tokudb/ft-index/portability/toku_assert.h +++ b/storage/tokudb/ft-index/portability/toku_assert.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,11 +86,12 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef TOKU_ASSERT_H -#define TOKU_ASSERT_H /* The problem with assert.h: If NDEBUG is set then it doesn't execute the function, if NDEBUG isn't set then we get a branch that isn't taken. */ + /* This version will complain if NDEBUG is set. */ /* It evaluates the argument and then calls a function toku_do_assert() which takes all the hits for the branches not taken. */ @@ -201,5 +202,3 @@ get_error_errno(void) } extern bool toku_gdb_dump_on_assert; - -#endif diff --git a/storage/tokudb/ft-index/portability/toku_atomic.h b/storage/tokudb/ft-index/portability/toku_atomic.h index e897d0b7e4a..075211a790c 100644 --- a/storage/tokudb/ft-index/portability/toku_atomic.h +++ b/storage/tokudb/ft-index/portability/toku_atomic.h @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -85,12 +85,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2012-2013 Tokutek Inc. All rights reserved." 
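The toku_assert.h hunk above is the first of many in this merge that drop a classic include guard in favor of #pragma once; the same change repeats through the rest of the portability headers below. A minimal sketch of the pattern, using a hypothetical header name that is not a file in this tree:

// toku_example.h -- hypothetical header, illustrative only
#pragma once                               // replaces the old #ifndef TOKU_EXAMPLE_H / #define TOKU_EXAMPLE_H pair

#include <stdint.h>

uint64_t toku_example_helper(uint64_t v);  // declarations stay exactly as before

// (no closing #endif is needed any more)

With an include guard, the guard macro must stay unique across the whole build; #pragma once hands the "include this file only once" decision to the compiler, which is why each hunk can delete both the #ifndef/#define pair at the top and the #endif at the bottom without touching anything else.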
#ident "$Id$" -#ifndef TOKU_ATOMIC_H -#define TOKU_ATOMIC_H - #include <portability/toku_config.h> #include <toku_assert.h> @@ -159,5 +158,3 @@ static inline bool toku_sync_bool_compare_and_swap(T *addr, U oldval, V newval) #pragma GCC poison __sync_synchronize #pragma GCC poison __sync_lock_test_and_set #pragma GCC poison __sync_release - -#endif // TOKU_ATOMIC_H diff --git a/storage/tokudb/ft-index/portability/toku_byteswap.h b/storage/tokudb/ft-index/portability/toku_byteswap.h index 4ddeefe2c1b..12c76b00825 100644 --- a/storage/tokudb/ft-index/portability/toku_byteswap.h +++ b/storage/tokudb/ft-index/portability/toku_byteswap.h @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -85,12 +85,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2012-2013 Tokutek Inc. All rights reserved." #ident "$Id$" -#ifndef TOKU_BYTESWAP_H -#define TOKU_BYTESWAP_H - #include <portability/toku_config.h> #if defined(HAVE_BYTESWAP_H) @@ -102,5 +101,3 @@ PATENT RIGHTS GRANT: # include <libkern/OSByteOrder.h> # define bswap_64 OSSwapInt64 #endif - -#endif /* TOKU_BYTESWAP_H */ diff --git a/storage/tokudb/ft-index/portability/toku_crash.cc b/storage/tokudb/ft-index/portability/toku_crash.cc index 2eed142229d..123746d8f7f 100644 --- a/storage/tokudb/ft-index/portability/toku_crash.cc +++ b/storage/tokudb/ft-index/portability/toku_crash.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/toku_crash.h b/storage/tokudb/ft-index/portability/toku_crash.h index 153ab26d460..a5dd959a15d 100644 --- a/storage/tokudb/ft-index/portability/toku_crash.h +++ b/storage/tokudb/ft-index/portability/toku_crash.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,10 +86,9 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#pragma once -#ifndef PORTABILITY_TOKU_CRASH_H -#define PORTABILITY_TOKU_CRASH_H +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include <stdio.h> #include <stdlib.h> @@ -192,5 +191,3 @@ toku_crash_and_dump_core_on_purpose(void) { } void toku_try_gdb_stack_trace(const char *gdb_path); - -#endif // PORTABILITY_TOKU_CRASH_H diff --git a/storage/tokudb/ft-index/portability/toku_htod.h b/storage/tokudb/ft-index/portability/toku_htod.h index c6a7a143563..d12d45a13cf 100644 --- a/storage/tokudb/ft-index/portability/toku_htod.h +++ b/storage/tokudb/ft-index/portability/toku_htod.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,8 +86,9 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#pragma once +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." 
/* Purpose of this file is to provide definitions of * Host to Disk byte transposition functions, an abstraction of @@ -106,9 +107,6 @@ PATENT RIGHTS GRANT: * HOST AND A LITTLE-ENDIAN DISK. */ -#ifndef _TOKU_HTOD_H -#define _TOKU_HTOD_H - #include <portability/toku_config.h> #if defined(HAVE_ENDIAN_H) @@ -166,8 +164,3 @@ toku_htod32(uint32_t i) { #else #error Not supported #endif - - - -#endif - diff --git a/storage/tokudb/ft-index/portability/toku_htonl.h b/storage/tokudb/ft-index/portability/toku_htonl.h index 126ba932b87..f2ba320bf1f 100644 --- a/storage/tokudb/ft-index/portability/toku_htonl.h +++ b/storage/tokudb/ft-index/portability/toku_htonl.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,14 +86,9 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." - -#ifndef _TOKU_HTONL_H -#define _TOKU_HTONL_H +#pragma once -#if !__linux__ && !__FreeBSD__ && !__sun__ -//#error -#endif +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include <toku_htod.h> #include <arpa/inet.h> @@ -105,5 +100,3 @@ static inline uint32_t toku_htonl(uint32_t i) { static inline uint32_t toku_ntohl(uint32_t i) { return ntohl(i); } - -#endif diff --git a/storage/tokudb/ft-index/portability/toku_list.h b/storage/tokudb/ft-index/portability/toku_list.h index b39d56ebd32..3fc96a671dd 100644 --- a/storage/tokudb/ft-index/portability/toku_list.h +++ b/storage/tokudb/ft-index/portability/toku_list.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef _TOKUDB_LIST_H -#define _TOKUDB_LIST_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,13 +87,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007, 2008, 2009 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -//TODO: #1378 This is not threadsafe. Make sure when splitting locks -//that we protect these calls. - - // This toku_list is intended to be embedded in other data structures. struct toku_list { struct toku_list *next, *prev; @@ -177,7 +173,3 @@ static inline void toku_list_move(struct toku_list *newhead, struct toku_list *o #else #define toku_list_struct(p, t, f) ((t*)((char*)(p) - ((char*)&((t*)0)->f))) #endif - - - -#endif diff --git a/storage/tokudb/ft-index/portability/toku_os.h b/storage/tokudb/ft-index/portability/toku_os.h index c232919f450..71576d7c1dd 100644 --- a/storage/tokudb/ft-index/portability/toku_os.h +++ b/storage/tokudb/ft-index/portability/toku_os.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. 
All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef TOKU_OS_H -#define TOKU_OS_H - #include <dirent.h> #include <sys/time.h> @@ -179,5 +178,3 @@ int toku_fstat(int fd, toku_struct_stat *statbuf) __attribute__((__visibility__( // Portable linux 'dup2' int toku_dup2(int fd, int fd2) __attribute__((__visibility__("default"))); - -#endif /* TOKU_OS_H */ diff --git a/storage/tokudb/ft-index/portability/toku_os_types.h b/storage/tokudb/ft-index/portability/toku_os_types.h index 698bb9f2524..a7053374fde 100644 --- a/storage/tokudb/ft-index/portability/toku_os_types.h +++ b/storage/tokudb/ft-index/portability/toku_os_types.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,10 +86,9 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#pragma once -#if !defined(TOKU_OS_TYPES_H) -#define TOKU_OS_TYPES_H +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include <stdbool.h> #include <sys/types.h> @@ -128,5 +127,3 @@ typedef struct stat toku_struct_stat; #if !defined(O_BINARY) #define O_BINARY 0 #endif - -#endif diff --git a/storage/tokudb/ft-index/portability/toku_path.cc b/storage/tokudb/ft-index/portability/toku_path.cc index 22264b7e799..89b106309eb 100644 --- a/storage/tokudb/ft-index/portability/toku_path.cc +++ b/storage/tokudb/ft-index/portability/toku_path.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/toku_path.h b/storage/tokudb/ft-index/portability/toku_path.h index 4c0df9660a9..3ee6736360f 100644 --- a/storage/tokudb/ft-index/portability/toku_path.h +++ b/storage/tokudb/ft-index/portability/toku_path.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef PORTABILITY_TOKU_PATH_H -#define PORTABILITY_TOKU_PATH_H - #include <stdarg.h> #include <limits.h> #include <sys/types.h> @@ -124,5 +123,3 @@ char *toku_path_join(char *dest, int n, const char *base, ...); // There are n path components, including base. 
// Returns: // dest (useful for chaining function calls) - -#endif // PORTABILITY_TOKU_PATH_H diff --git a/storage/tokudb/ft-index/portability/toku_portability.h b/storage/tokudb/ft-index/portability/toku_portability.h index dc2ac683113..9459c2d7ad3 100644 --- a/storage/tokudb/ft-index/portability/toku_portability.h +++ b/storage/tokudb/ft-index/portability/toku_portability.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,10 +86,10 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef TOKU_PORTABILITY_H -#define TOKU_PORTABILITY_H #include "toku_config.h" @@ -107,11 +107,6 @@ PATENT RIGHTS GRANT: #define DEV_NULL_FILE "/dev/null" -// HACK Poison these mcaros so no one uses them -#define TOKU_WINDOWS , -#define TOKU_WINDOWS_32 , -#define TOKU_WINDOWS_64 , - // include here, before they get deprecated #include <toku_atomic.h> @@ -188,26 +183,6 @@ extern "C" { // Deprecated functions. #if !defined(TOKU_ALLOW_DEPRECATED) -# if defined(__ICL) || defined(__ICC) // Intel Compiler -# pragma deprecated (creat, fstat, stat, getpid, syscall, sysconf, mkdir, strdup) -//# pragma poison off_t -//# pragma poison pthread_attr_t pthread_t -//# pragma poison pthread_mutexattr_t pthread_mutex_t -//# pragma poison pthread_condattr_t pthread_cond_t -//# pragma poison pthread_rwlockattr_t pthread_rwlock_t -//# pragma poison timespec -# ifndef DONT_DEPRECATE_WRITES -# pragma poison write pwrite -# endif -# ifndef DONT_DEPRECATE_MALLOC -# pragma deprecated (malloc, free, realloc) -# endif -# ifndef DONT_DEPRECATE_ERRNO -# pragma deprecated (errno) -# endif -# pragma poison dup2 -# pragma poison _dup2 -# else int creat(const char *pathname, mode_t mode) __attribute__((__deprecated__)); int fstat(int fd, struct stat *buf) __attribute__((__deprecated__)); int stat(const char *path, struct stat *buf) __attribute__((__deprecated__)); @@ -281,7 +256,6 @@ extern void *realloc(void*, size_t) __THROW __attribute__((__deprecat #pragma GCC poison __sync_synchronize #pragma GCC poison __sync_lock_test_and_set #pragma GCC poison __sync_release -# endif #endif #if defined(__cplusplus) @@ -352,17 +326,8 @@ void toku_set_func_pread (ssize_t (*)(int, void *, size_t, off_t)); int toku_portability_init(void); void toku_portability_destroy(void); -static inline uint64_t roundup_to_multiple(uint64_t alignment, uint64_t v) // Effect: Return X, where X the smallest multiple of ALIGNMENT such that X>=V. // Requires: ALIGNMENT is a power of two -{ - assert(0==(alignment&(alignment-1))); // alignment must be a power of two - uint64_t result = (v+alignment-1)&~(alignment-1); - assert(result>=v); // The result is >=V. - assert(result%alignment==0); // The result is a multiple of alignment. - assert(result<v+alignment); // The result is the smallest such multiple of alignment. 
- return result; +static inline uint64_t roundup_to_multiple(uint64_t alignment, uint64_t v) { + return (v + alignment - 1) & ~(alignment - 1); } - - -#endif /* TOKU_PORTABILITY_H */ diff --git a/storage/tokudb/ft-index/portability/toku_pthread.cc b/storage/tokudb/ft-index/portability/toku_pthread.cc index d023e9427ec..42ae9c0b1f5 100644 --- a/storage/tokudb/ft-index/portability/toku_pthread.cc +++ b/storage/tokudb/ft-index/portability/toku_pthread.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/toku_pthread.h b/storage/tokudb/ft-index/portability/toku_pthread.h index 18edad7c1fa..a9dc660b6a7 100644 --- a/storage/tokudb/ft-index/portability/toku_pthread.h +++ b/storage/tokudb/ft-index/portability/toku_pthread.h @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -87,10 +87,9 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#pragma once -#ifndef TOKU_PTHREAD_H -#define TOKU_PTHREAD_H +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include <pthread.h> #include <time.h> @@ -213,6 +212,21 @@ toku_mutex_lock(toku_mutex_t *mutex) { #endif } +static inline int +toku_mutex_trylock(toku_mutex_t *mutex) { + int r = pthread_mutex_trylock(&mutex->pmutex); +#if TOKU_PTHREAD_DEBUG + if (r == 0) { + invariant(mutex->valid); + invariant(!mutex->locked); + invariant(mutex->owner == 0); + mutex->locked = true; + mutex->owner = pthread_self(); + } +#endif + return r; +} + static inline void toku_mutex_unlock(toku_mutex_t *mutex) { #if TOKU_PTHREAD_DEBUG @@ -407,5 +421,3 @@ static inline int toku_pthread_setspecific(toku_pthread_key_t key, void *data) { return pthread_setspecific(key, data); } - -#endif /* TOKU_PTHREAD_H */ diff --git a/storage/tokudb/ft-index/portability/toku_race_tools.h b/storage/tokudb/ft-index/portability/toku_race_tools.h index 9d3795eae95..b4c83b6119d 100644 --- a/storage/tokudb/ft-index/portability/toku_race_tools.h +++ b/storage/tokudb/ft-index/portability/toku_race_tools.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,10 +86,10 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
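The toku_portability.h hunk above collapses roundup_to_multiple to a single bit trick: for a power-of-two alignment, (v + alignment - 1) & ~(alignment - 1) is the smallest multiple of alignment that is greater than or equal to v. A small stand-alone check, reusing the deleted asserts as a sanity test (check_roundup is a made-up helper for illustration):

#include <cassert>
#include <cstdint>

static inline uint64_t roundup_to_multiple(uint64_t alignment, uint64_t v) {
    return (v + alignment - 1) & ~(alignment - 1);
}

// Re-derive the invariants the old assert-heavy version enforced.
static void check_roundup(uint64_t alignment, uint64_t v) {
    assert((alignment & (alignment - 1)) == 0);   // alignment is a power of two
    uint64_t r = roundup_to_multiple(alignment, v);
    assert(r >= v);                               // result is not below v
    assert(r % alignment == 0);                   // result is a multiple of alignment
    assert(r < v + alignment);                    // and it is the smallest such multiple
}

int main() {
    check_roundup(512, 1);       // rounds up to 512
    check_roundup(512, 512);     // already aligned, stays 512
    check_roundup(4096, 4097);   // rounds up to 8192
    return 0;
}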
-#ifndef TOKU_RACE_TOOLS_H -#define TOKU_RACE_TOOLS_H #include <portability/toku_config.h> @@ -138,5 +138,3 @@ PATENT RIGHTS GRANT: # define RUNNING_ON_VALGRIND (0U) #endif - -#endif // TOKU_RACE_TOOLS_H diff --git a/storage/tokudb/ft-index/portability/toku_random.h b/storage/tokudb/ft-index/portability/toku_random.h index 956e73990a7..a350b171a3b 100644 --- a/storage/tokudb/ft-index/portability/toku_random.h +++ b/storage/tokudb/ft-index/portability/toku_random.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,10 +86,9 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#pragma once -#ifndef TOKU_RANDOM_H -#define TOKU_RANDOM_H +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include <portability/toku_config.h> #include <toku_portability.h> @@ -169,5 +168,3 @@ rand_choices(struct random_data *buf, uint32_t choices) { return result; } - -#endif // TOKU_RANDOM_H diff --git a/storage/tokudb/ft-index/portability/toku_stdint.h b/storage/tokudb/ft-index/portability/toku_stdint.h index 0105c94c50b..806e40e612b 100644 --- a/storage/tokudb/ft-index/portability/toku_stdint.h +++ b/storage/tokudb/ft-index/portability/toku_stdint.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,13 +86,9 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#pragma once -#ifndef TOKU_STDINT_H -#define TOKU_STDINT_H +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include <stdint.h> #include <inttypes.h> - -#endif - diff --git a/storage/tokudb/ft-index/portability/toku_stdlib.h b/storage/tokudb/ft-index/portability/toku_stdlib.h index 9d2a7f78778..229e3945cf9 100644 --- a/storage/tokudb/ft-index/portability/toku_stdlib.h +++ b/storage/tokudb/ft-index/portability/toku_stdlib.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,5 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." + #include <stdlib.h> diff --git a/storage/tokudb/ft-index/portability/toku_time.cc b/storage/tokudb/ft-index/portability/toku_time.cc index a20c45dfb03..97834493194 100644 --- a/storage/tokudb/ft-index/portability/toku_time.cc +++ b/storage/tokudb/ft-index/portability/toku_time.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/portability/toku_time.h b/storage/tokudb/ft-index/portability/toku_time.h index 89b8dcb8524..069e67c0d28 100644 --- a/storage/tokudb/ft-index/portability/toku_time.h +++ b/storage/tokudb/ft-index/portability/toku_time.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
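The toku_pthread.h hunk a little further up adds toku_mutex_trylock, a thin wrapper over pthread_mutex_trylock that also updates the TOKU_PTHREAD_DEBUG owner bookkeeping when, and only when, the lock is won. A usage sketch against plain pthreads — the full toku_mutex_t type is not part of this diff, so this is an approximation rather than the library's API:

#include <cstdio>
#include <pthread.h>

int main() {
    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    if (pthread_mutex_trylock(&m) == 0) {
        // Lock acquired without blocking: do the optional work, then unlock.
        std::printf("acquired\n");
        pthread_mutex_unlock(&m);
    } else {
        // Non-zero (typically EBUSY) means the lock is held elsewhere;
        // the caller must not unlock and should fall back or skip the work.
        std::printf("busy\n");
    }
    return 0;
}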
DISCLAIMER: @@ -86,10 +86,9 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#pragma once -#ifndef TOKU_TIME_H -#define TOKU_TIME_H +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include "toku_config.h" @@ -161,5 +160,3 @@ static inline uint64_t toku_current_time_microsec(void) { gettimeofday(&t, NULL); return t.tv_sec * (1UL * 1000 * 1000) + t.tv_usec; } - -#endif diff --git a/storage/tokudb/ft-index/scripts/run-nightly-coverage-tests.bash b/storage/tokudb/ft-index/scripts/run-nightly-coverage-tests.bash index 42af1748766..c96a02352ca 100755 --- a/storage/tokudb/ft-index/scripts/run-nightly-coverage-tests.bash +++ b/storage/tokudb/ft-index/scripts/run-nightly-coverage-tests.bash @@ -12,7 +12,7 @@ cd $tokudbdir if [ ! -d build ] ; then mkdir build pushd build - CC=gcc47 CXX=g++47 cmake \ + cmake \ -D CMAKE_BUILD_TYPE=Debug \ -D USE_VALGRIND=ON \ -D TOKU_DEBUG_PARANOID=ON \ @@ -20,7 +20,6 @@ if [ ! -d build ] ; then -D USE_GTAGS=OFF \ -D USE_CSCOPE=OFF \ -D USE_ETAGS=OFF \ - -D USE_BDB=OFF \ -D USE_GCOV=ON \ -D CMAKE_LINK_DEPENDS_NO_SHARED=ON \ -G Ninja \ diff --git a/storage/tokudb/ft-index/scripts/run-nightly-drd-tests.bash b/storage/tokudb/ft-index/scripts/run-nightly-drd-tests.bash index 467c47b9cd2..39d97de2185 100755 --- a/storage/tokudb/ft-index/scripts/run-nightly-drd-tests.bash +++ b/storage/tokudb/ft-index/scripts/run-nightly-drd-tests.bash @@ -12,7 +12,7 @@ cd $tokudbdir if [ ! -d build ] ; then mkdir build pushd build - CC=gcc47 CXX=g++47 cmake \ + cmake \ -D CMAKE_BUILD_TYPE=drd \ -D USE_VALGRIND=ON \ -D TOKU_DEBUG_PARANOID=ON \ @@ -20,7 +20,6 @@ if [ ! -d build ] ; then -D USE_GTAGS=OFF \ -D USE_CSCOPE=OFF \ -D USE_ETAGS=OFF \ - -D USE_BDB=OFF \ -D CMAKE_LINK_DEPENDS_NO_SHARED=ON \ -G Ninja \ -D RUN_LONG_TESTS=ON \ diff --git a/storage/tokudb/ft-index/scripts/run-nightly-release-tests.bash b/storage/tokudb/ft-index/scripts/run-nightly-release-tests.bash index 5ac3e62b216..af08894beb8 100755 --- a/storage/tokudb/ft-index/scripts/run-nightly-release-tests.bash +++ b/storage/tokudb/ft-index/scripts/run-nightly-release-tests.bash @@ -12,7 +12,7 @@ cd $tokudbdir if [ ! -d build ] ; then mkdir build pushd build - CC=gcc47 CXX=g++47 cmake \ + cmake \ -D CMAKE_BUILD_TYPE=Release \ -D USE_VALGRIND=ON \ -D TOKU_DEBUG_PARANOID=OFF \ @@ -20,7 +20,6 @@ if [ ! -d build ] ; then -D USE_GTAGS=OFF \ -D USE_CSCOPE=OFF \ -D USE_ETAGS=OFF \ - -D USE_BDB=ON \ -D CMAKE_LINK_DEPENDS_NO_SHARED=ON \ -G Ninja \ -D RUN_LONG_TESTS=ON \ @@ -41,6 +40,6 @@ ctest -j16 \ -E '/drd|/helgrind' ctest -j16 \ -D NightlyMemCheck \ - -E '^ydb/.*\.bdb|test1426\.tdb|/drd|/helgrind' + -E 'test1426\.tdb|/drd|/helgrind' set -e ctest -D NightlySubmit diff --git a/storage/tokudb/ft-index/scripts/run.db-benchmark-test.bash b/storage/tokudb/ft-index/scripts/run.db-benchmark-test.bash deleted file mode 100755 index ebd2a188f10..00000000000 --- a/storage/tokudb/ft-index/scripts/run.db-benchmark-test.bash +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/env bash - -function usage() { - echo "run db-benchmark-test" - echo "[--tokudb=$tokudb" - echo "[--revision=$revision]" - echo "[--branch=$branch]" - echo "[--suffix=$suffix]" - echo "[--commit=$commit]" - echo "[--cc=$cc]" - echo "[--n=$n]" -} - -function retry() { - local cmd - local retries - local exitcode - cmd=$* - let retries=0 - while [ $retries -le 10 ] ; do - echo `date` $cmd - bash -c "$cmd" - exitcode=$? 
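The toku_time.h hunk above carries toku_current_time_microsec in its context: the gettimeofday seconds are scaled to microseconds and the sub-second remainder is added. A minimal stand-alone version of that calculation (the surrounding program is illustrative, not a file from the tree):

#include <cstdint>
#include <cstdio>
#include <sys/time.h>

static inline uint64_t current_time_microsec(void) {
    struct timeval t;
    gettimeofday(&t, nullptr);
    return t.tv_sec * (1UL * 1000 * 1000) + t.tv_usec;   // seconds -> usec, plus remainder
}

int main() {
    uint64_t before = current_time_microsec();
    uint64_t after  = current_time_microsec();
    std::printf("elapsed: %llu usec\n", (unsigned long long)(after - before));
    return 0;
}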
- echo `date` $cmd $exitcode $retries - let retries=retries+1 - if [ $exitcode -eq 0 ] ; then break; fi - sleep 10 - done - test $exitcode = 0 -} - -n=100 -cc=gcc44 -ft_loader=cilk -branch=toku -revision=0 -tokudb=tokudb -suffix=. -commit=0 -svnserver=https://svn.tokutek.com/tokudb -basedir=$HOME/svn.build -builddir=$basedir/tokudb.build -system=`uname -s | tr [:upper:] [:lower:]` -arch=`uname -m | tr [:upper:] [:lower:]` -hostname=`hostname` -instancetype="" - -# parse the command line -while [ $# -gt 0 ] ; do - arg=$1; shift - if [[ $arg =~ --(.*)=(.*) ]] ; then - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - usage; exit 1 - fi -done - -if [ $cc = icc ] ; then - d=/opt/intel/bin - if [ -d $d ] ; then - export PATH=$d:$PATH - . $d/compilervars.sh intel64 - fi - d=/opt/intel/cilkutil/bin - if [ -d $d ] ; then - export PATH=$d:$PATH - fi -fi - -# require a revision -if [ $revision -eq 0 ] ; then exit 1; fi -if [ $branch = "." ] ; then branch="toku"; fi - -function append() { - local s=""; local x - for x in $*; do - if [ "$s" != "" ] ; then s=$s-$x; else s=$x; fi - done - echo $s -} - -# setup the branchrevision string -branchrevision="" -if [ $branch != "toku" ] ; then branchrevision=$(append $branchrevision $(basename $branch)); fi -if [ $tokudb != "tokudb" ] ; then branchrevision=$(append $branchrevision $tokudb); fi -branchrevision=$(append $branchrevision $revision) -if [ $suffix != "." ] ; then branchrevision=$(append $branchrevision $suffix); fi - -# goto the base directory -if [ ! -d $basedir ] ; then mkdir $basedir; fi - -pushd $basedir - -# update the build directory -if [ ! -d $builddir ] ; then mkdir $builddir; fi - -date=`date +%Y%m%d` -pushd $builddir - while [ ! -d $date ] ; do - svn mkdir $svnserver/mysql.build/$date -m "" - svn co -q $svnserver/mysql.build/$date - if [ $? -ne 0 ] ; then rm -rf $date; fi - done -popd -testresultsdir=$builddir/$date - -gccversion=`$cc --version|head -1|cut -f3 -d" "` - -runfile=$testresultsdir/db-benchmark-test-$branchrevision-$cc-$gccversion-$system-$arch-$hostname -if [ "$instancetype" != "" ] ; then runfile=$runfile-$instancetype; fi -rm -rf $runfile - -testresult="PASS" -testdir=db-benchmark-test-$branchrevision -rm -rf $testdir - -# checkout the tokudb branch -if [ $testresult = "PASS" ] ; then - retry svn export -q https://svn.tokutek.com/tokudb/$branch/$tokudb $testdir - exitcode=$? - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi -fi - -# build it -if [ $testresult = "PASS" ] ; then - pushd $testdir - make release -s CC=$cc GCCVERSION=$gccversion FTLOADER=$ft_loader >>$runfile 2>&1 - exitcode=$? - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - popd - pushd $testdir/db-benchmark-test - make build.tdb CC=$cc GCCVERSION=$gccversion -s >>$runfile 2>&1 - exitcode=$? - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - popd -fi - -# run tests -if [ $testresult = "PASS" ] ; then - let i=$n - pushd $testdir/db-benchmark-test - echo ./db-benchmark-test-tokudb -x $i >>$runfile 2>&1 - ./db-benchmark-test-tokudb -x $i >>$runfile 2>&1 - exitcode=$? - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - echo ./scanscan-tokudb --prelock --prelockflag >>$runfile 2>&1 - ./scanscan-tokudb --prelock --prelockflag >>$runfile 2>&1 - exitcode=$? - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - echo ./scanscan-tokudb --lwc --prelock --prelockflag >>$runfile 2>&1 - ./scanscan-tokudb --lwc --prelock --prelockflag >>$runfile 2>&1 - exitcode=$? 
- if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - popd -fi - -if [ $testresult = "PASS" ] ; then - let i=2*$n - pushd $testdir/db-benchmark-test - echo ./db-benchmark-test-tokudb -x --norandom $i >>$runfile 2>&1 - ./db-benchmark-test-tokudb -x --norandom $i >>$runfile 2>&1 - exitcode=$? - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - echo ./scanscan-tokudb --prelock --prelockflag >>$runfile 2>&1 - ./scanscan-tokudb --prelock --prelockflag >>$runfile 2>&1 - exitcode=$? - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - echo ./scanscan-tokudb --lwc --prelock --prelockflag >>$runfile 2>&1 - ./scanscan-tokudb --lwc --prelock --prelockflag >>$runfile 2>&1 - exitcode=$? - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - popd -fi - -if [ $testresult = "PASS" ] ; then - let i=2*$n - pushd $testdir/db-benchmark-test - echo ./db-benchmark-test-tokudb -x --noserial $i >>$runfile 2>&1 - ./db-benchmark-test-tokudb -x --noserial $i >>$runfile 2>&1 - exitcode=$? - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - echo ./scanscan-tokudb --prelock --prelockflag >>$runfile 2>&1 - ./scanscan-tokudb --prelock --prelockflag >>$runfile 2>&1 - exitcode=$? - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - echo ./scanscan-tokudb --lwc --prelock --prelockflag >>$runfile 2>&1 - ./scanscan-tokudb --lwc --prelock --prelockflag >>$runfile 2>&1 - exitcode=$? - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - popd -fi - -# commit results -if [ $commit != 0 ] ; then - svn add $runfile - retry svn commit -m \"$testresult db-benchmark-test $branchrevision $system $arch\" $runfile -fi - -popd - -exit 0 diff --git a/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.bash b/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.bash deleted file mode 100755 index 23900424af2..00000000000 --- a/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.bash +++ /dev/null @@ -1,460 +0,0 @@ -#!/bin/bash - -function usage() { - echo "run.fractal.tree.tests.bash - run the nightly fractal tree test suite" - echo "[--ftcc=$ftcc] [--ftcxx=$ftcxx] [--BDBVERSION=$BDBVERSION] [--ctest_model=$ctest_model]" - echo "[--commit=$commit] [--generator=$generator] [--toku_svnroot=$toku_svnroot]" - return 1 -} - -[ -f /etc/profile.d/gcc47.sh ] && . /etc/profile.d/gcc47.sh -[ -f /etc/profile.d/binutils222.sh ] && . /etc/profile.d/binutils222.sh - -set -e - -pushd $(dirname $0) &>/dev/null -SCRIPTDIR=$PWD -popd &>/dev/null -FULLTOKUDBDIR=$(dirname $SCRIPTDIR) -TOKUDBDIR=$(basename $FULLTOKUDBDIR) -BRANCHDIR=$(basename $(dirname $FULLTOKUDBDIR)) - -function make_tokudb_name() { - local tokudb_dir=$1 - local tokudb=$2 - if [ $tokudb_dir = "toku" ] ; then - echo $tokudb - else - echo $(echo $tokudb_dir-$tokudb | tr / -) - fi -} -tokudb_name=$(make_tokudb_name $BRANCHDIR $TOKUDBDIR) -export TOKUDB_NAME=$tokudb_name - -productname=$tokudb_name - -ftcc=gcc47 -ftcxx=g++47 -BDBVERSION=5.3 -ctest_model=Nightly -generator="Unix Makefiles" -toku_svnroot=$FULLTOKUDBDIR/../.. -commit=1 -while [ $# -gt 0 ] ; do - arg=$1; shift - if [[ $arg =~ --(.*)=(.*) ]] ; then - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - usage; exit 1; - fi -done - -if [[ ! 
( ( $ctest_model = Nightly ) || ( $ctest_model = Experimental ) || ( $ctest_model = Continuous ) ) ]]; then - echo "--ctest_model must be Nightly, Experimental, or Continuous" - usage -fi - -BDBDIR=/usr/local/BerkeleyDB.$BDBVERSION -if [ -d $BDBDIR ] ; then - CMAKE_PREFIX_PATH=$BDBDIR:$CMAKE_PREFIX_PATH - export CMAKE_PREFIX_PATH -fi - -# delete some characters that cygwin and osx have trouble with -function sanitize() { - tr -d '[/:\\\\()]' -} - -# gather some info -svnserver=https://svn.tokutek.com/tokudb -nodename=$(uname -n) -system=$(uname -s | tr '[:upper:]' '[:lower:]' | sanitize) -release=$(uname -r | sanitize) -arch=$(uname -m | sanitize) -date=$(date +%Y%m%d) -ncpus=$([ -f /proc/cpuinfo ] && (grep bogomips /proc/cpuinfo | wc -l) || sysctl -n hw.ncpu) -njobs=$(if [ $ncpus -gt 8 ] ; then echo "$ncpus / 3" | bc ; else echo "$ncpus" ; fi) - -GCCVERSION=$($ftcc --version|head -1|cut -f3 -d" ") -export GCCVERSION -CC=$ftcc -export CC -CXX=$ftcxx -export CXX - -function retry() { - local cmd - local retries - local exitcode - cmd=$* - let retries=0 - while [ $retries -le 10 ] ; do - echo `date` $cmd - bash -c "$cmd" - exitcode=$? - echo `date` $cmd $exitcode $retries - let retries=retries+1 - if [ $exitcode -eq 0 ] ; then break; fi - sleep 10 - done - test $exitcode = 0 -} - -if [[ $commit -eq 1 ]]; then - svnbase=~/svn.build - if [ ! -d $svnbase ] ; then mkdir $svnbase ; fi - - # checkout the build dir - buildbase=$svnbase/tokudb.build - if [ ! -d $buildbase ] ; then - mkdir $buildbase - fi - - # make the build directory, possibly on multiple machines simultaneously, there can be only one - builddir=$buildbase/$date - pushd $buildbase - set +e - svn mkdir $svnserver/tokudb.build/$date -m "" || true - retry svn co -q $svnserver/tokudb.build/$date - if [ ! -d $date ] ; then - exit 1 - fi - set -e - popd - - tracefilepfx=$builddir/$productname+$ftcc-$GCCVERSION+bdb-$BDBVERSION+$nodename+$system+$release+$arch -else - tracefilepfx=$FULLTOKUDBDIR/test-trace -fi - -function getsysinfo() { - tracefile=$1; shift - set +e - uname -a >$tracefile 2>&1 - ulimit -a >>$tracefile 2>&1 - cmake --version >>$tracefile 2>&1 - $ftcc -v >>$tracefile 2>&1 - $ftcxx -v >>$tracefile 2>&1 - valgrind --version >>$tracefile 2>&1 - cat /etc/issue >>$tracefile 2>&1 - cat /proc/version >>$tracefile 2>&1 - cat /proc/cpuinfo >>$tracefile 2>&1 - env >>$tracefile 2>&1 - set -e -} - -function get_latest_svn_revision() { - svn info $1 | awk -v ORS="" '/Last Changed Rev:/ { print $4 }' -} - -function my_mktemp() { - mktemp /tmp/$(whoami).$1.XXXXXXXXXX -} - -yesterday="$(date -u -d yesterday +%F) 03:59:00 +0000" - -if [[ $commit -eq 1 ]]; then - # hack to make long tests run nightly but not when run in experimental mode - longtests=ON -else - longtests=OFF -fi -################################################################################ -## run normal and valgrind on optimized build -resultsdir=$tracefilepfx-Release -mkdir $resultsdir -tracefile=$tracefilepfx-Release/trace - -getsysinfo $tracefile - -mkdir -p $FULLTOKUDBDIR/opt >/dev/null 2>&1 -cd $FULLTOKUDBDIR/opt -cmake \ - -D CMAKE_BUILD_TYPE=Release \ - -D USE_VALGRIND=ON \ - -D USE_BDB=ON \ - -D RUN_LONG_TESTS=$longtests \ - -D USE_CTAGS=OFF \ - -D USE_GTAGS=OFF \ - -D USE_ETAGS=OFF \ - -D USE_CSCOPE=OFF \ - -D TOKU_SVNROOT="$toku_svnroot" \ - -G "$generator" \ - .. 2>&1 | tee -a $tracefile -cmake --system-information $resultsdir/sysinfo -make clean -# update to yesterday exactly just before ctest does nightly update -svn up -q -r "{$yesterday}" .. 
-set +e -ctest -j$njobs \ - -D ${ctest_model}Start \ - -D ${ctest_model}Update \ - -D ${ctest_model}Configure \ - -D ${ctest_model}Build \ - -D ${ctest_model}Test \ - -E '/drd|/helgrind' \ - 2>&1 | tee -a $tracefile -ctest -j$njobs \ - -D ${ctest_model}MemCheck \ - -E '^ydb/.*\.bdb$|test1426.tdb|/drd|/helgrind' \ - 2>&1 | tee -a $tracefile -set -e - -cp $tracefile notes.txt -set +e -ctest -D ${ctest_model}Submit -A notes.txt \ - 2>&1 | tee -a $tracefile -set -e -rm notes.txt - -tag=$(head -n1 Testing/TAG) -cp -r Testing/$tag $resultsdir -if [[ $commit -eq 1 ]]; then - cf=$(my_mktemp ftresult) - cat "$resultsdir/trace" | awk ' -BEGIN { - errs=0; - look=0; - ORS=" "; -} -/[0-9]+% tests passed, [0-9]+ tests failed out of [0-9]+/ { - fail=$4; - total=$9; - pass=total-fail; -} -/^Memory checking results:/ { - look=1; - FS=" - "; -} -/Errors while running CTest/ { - look=0; - FS=" "; -} -{ - if (look) { - errs+=$2; - } -} -END { - print "ERRORS=" errs; - if (fail>0) { - print "FAIL=" fail - } - print "PASS=" pass -}' >"$cf" - get_latest_svn_revision $FULLTOKUDBDIR >>"$cf" - echo -n " " >>"$cf" - cat "$resultsdir/trace" | awk ' -BEGIN { - FS=": "; -} -/Build name/ { - print $2; - exit -}' >>"$cf" - (echo; echo) >>"$cf" - cat "$resultsdir/trace" | awk ' -BEGIN { - printit=0 -} -/[0-9]*\% tests passed, [0-9]* tests failed out of [0-9]*/ { printit=1 } -/Memory check project/ { printit=0 } -/^ Site:/ { printit=0 } -{ - if (printit) { - print $0 - } -}' >>"$cf" - svn add $resultsdir - svn commit -F "$cf" $resultsdir - rm $cf -fi - -################################################################################ -## run drd tests on debug build -resultsdir=$tracefilepfx-Debug -mkdir $resultsdir -tracefile=$tracefilepfx-Debug/trace - -getsysinfo $tracefile - -mkdir -p $FULLTOKUDBDIR/dbg >/dev/null 2>&1 -cd $FULLTOKUDBDIR/dbg -cmake \ - -D CMAKE_BUILD_TYPE=Debug \ - -D USE_VALGRIND=ON \ - -D USE_BDB=OFF \ - -D RUN_LONG_TESTS=$longtests \ - -D USE_CTAGS=OFF \ - -D USE_GTAGS=OFF \ - -D USE_ETAGS=OFF \ - -D USE_CSCOPE=OFF \ - -D CMAKE_C_FLAGS_DEBUG="-O1" \ - -D CMAKE_CXX_FLAGS_DEBUG="-O1" \ - -D TOKU_SVNROOT="$toku_svnroot" \ - -G "$generator" \ - .. 2>&1 | tee -a $tracefile -cmake --system-information $resultsdir/sysinfo -make clean -# update to yesterday exactly just before ctest does nightly update -svn up -q -r "{$yesterday}" .. 
-set +e -ctest -j$njobs \ - -D ${ctest_model}Start \ - -D ${ctest_model}Update \ - -D ${ctest_model}Configure \ - -D ${ctest_model}Build \ - -D ${ctest_model}Test \ - -R '/drd|/helgrind' \ - 2>&1 | tee -a $tracefile -set -e - -cp $tracefile notes.txt -set +e -ctest -D ${ctest_model}Submit -A notes.txt \ - 2>&1 | tee -a $tracefile -set -e -rm notes.txt - -tag=$(head -n1 Testing/TAG) -cp -r Testing/$tag $resultsdir -if [[ $commit -eq 1 ]]; then - cf=$(my_mktemp ftresult) - cat "$resultsdir/trace" | awk ' -BEGIN { - ORS=" "; -} -/[0-9]+% tests passed, [0-9]+ tests failed out of [0-9]+/ { - fail=$4; - total=$9; - pass=total-fail; -} -END { - if (fail>0) { - print "FAIL=" fail - } - print "PASS=" pass -}' >"$cf" - get_latest_svn_revision $FULLTOKUDBDIR >>"$cf" - echo -n " " >>"$cf" - cat "$resultsdir/trace" | awk ' -BEGIN { - FS=": "; -} -/Build name/ { - print $2; - exit -}' >>"$cf" - (echo; echo) >>"$cf" - cat "$resultsdir/trace" | awk ' -BEGIN { - printit=0 -} -/[0-9]*\% tests passed, [0-9]* tests failed out of [0-9]*/ { printit=1 } -/^ Site:/ { printit=0 } -{ - if (printit) { - print $0 - } -}' >>"$cf" - svn add $resultsdir - svn commit -F "$cf" $resultsdir - rm $cf -fi - -################################################################################ -## run gcov on debug build -resultsdir=$tracefilepfx-Coverage -mkdir $resultsdir -tracefile=$tracefilepfx-Coverage/trace - -getsysinfo $tracefile - -mkdir -p $FULLTOKUDBDIR/cov >/dev/null 2>&1 -cd $FULLTOKUDBDIR/cov -cmake \ - -D CMAKE_BUILD_TYPE=Debug \ - -D BUILD_TESTING=ON \ - -D USE_GCOV=ON \ - -D USE_BDB=OFF \ - -D RUN_LONG_TESTS=$longtests \ - -D USE_CTAGS=OFF \ - -D USE_GTAGS=OFF \ - -D USE_ETAGS=OFF \ - -D USE_CSCOPE=OFF \ - -D TOKU_SVNROOT="$toku_svnroot" \ - -G "$generator" \ - .. 2>&1 | tee -a $tracefile -cmake --system-information $resultsdir/sysinfo -make clean -# update to yesterday exactly just before ctest does nightly update -svn up -q -r "{$yesterday}" .. 
-set +e -ctest -j$njobs \ - -D ${ctest_model}Start \ - -D ${ctest_model}Update \ - -D ${ctest_model}Configure \ - -D ${ctest_model}Build \ - -D ${ctest_model}Test \ - -D ${ctest_model}Coverage \ - 2>&1 | tee -a $tracefile -set -e - -cp $tracefile notes.txt -set +e -ctest -D ${ctest_model}Submit -A notes.txt \ - 2>&1 | tee -a $tracefile -set -e -rm notes.txt - -tag=$(head -n1 Testing/TAG) -cp -r Testing/$tag $resultsdir -if [[ $commit -eq 1 ]]; then - cf=$(my_mktemp ftresult) - cat "$resultsdir/trace" | awk ' -BEGIN { - ORS=" "; -} -/Percentage Coverage:/ { - covpct=$3; -} -/[0-9]+% tests passed, [0-9]+ tests failed out of [0-9]+/ { - fail=$4; - total=$9; - pass=total-fail; -} -END { - print "COVERAGE=" covpct - if (fail>0) { - print "FAIL=" fail - } - print "PASS=" pass -}' >"$cf" - get_latest_svn_revision $FULLTOKUDBDIR >>"$cf" - echo -n " " >>"$cf" - cat "$resultsdir/trace" | awk ' -BEGIN { - FS=": "; -} -/Build name/ { - print $2; - exit -}' >>"$cf" - (echo; echo) >>"$cf" - cat "$resultsdir/trace" | awk ' -BEGIN { - printit=0 -} -/[0-9]*\% tests passed, [0-9]* tests failed out of [0-9]*/ { printit=1 } -/^ Site:/ { printit=0 } -{ - if (printit) { - print $0 - } -}' >>"$cf" - svn add $resultsdir - svn commit -F "$cf" $resultsdir - rm $cf -fi - -exit 0 diff --git a/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.cmake b/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.cmake index f695699255d..64d52a56735 100644 --- a/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.cmake +++ b/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.cmake @@ -78,26 +78,20 @@ list(APPEND CTEST_NOTES_FILES ) set(all_opts - -DBDBDIR=/usr/local/BerkeleyDB.5.3 -DBUILD_TESTING=ON -DUSE_CILK=OFF ) set(rel_opts ${all_opts} -DCMAKE_BUILD_TYPE=Release - -DINTEL_CC=ON - -DUSE_BDB=ON ) set(dbg_opts ${all_opts} -DCMAKE_BUILD_TYPE=Debug - -DINTEL_CC=ON - -DUSE_BDB=ON ) set(cov_opts ${all_opts} -DCMAKE_BUILD_TYPE=Debug - -DINTEL_CC=OFF -DUSE_GCOV=ON ) diff --git a/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.icc.bash b/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.icc.bash deleted file mode 100755 index 2c62504619e..00000000000 --- a/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.icc.bash +++ /dev/null @@ -1,2 +0,0 @@ -#!/usr/bin/env bash -run.fractal.tree.tests.bash --ftcc=icc $* diff --git a/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.now.bash b/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.now.bash deleted file mode 100755 index 661548f5ada..00000000000 --- a/storage/tokudb/ft-index/scripts/run.fractal.tree.tests.now.bash +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -pushd $(dirname $0) &>/dev/null -SCRIPTDIR=$PWD -popd &>/dev/null - -exec $SCRIPTDIR/run.fractal.tree.tests.bash --ctest_model=Experimental --commit=0 "$@" diff --git a/storage/tokudb/ft-index/scripts/run.loader.stress.bash b/storage/tokudb/ft-index/scripts/run.loader.stress.bash deleted file mode 100755 index 1d4232c1bb3..00000000000 --- a/storage/tokudb/ft-index/scripts/run.loader.stress.bash +++ /dev/null @@ -1,164 +0,0 @@ -#!/usr/bin/env bash - -function usage() { - echo "run the loader verify test" - echo "[--rows=$rows]" - echo "[--dictionaries=$dictionaries]" - echo "[--ft_loader=$ft_loader]" - echo "[--tokudb=$tokudb]" - echo "[--branch=$branch]" - echo "[--revision=$revision]" - echo "[--suffix=$suffix]" - echo "[--commit=$commit]" -} - -function retry() { - local cmd - local retries - local exitcode - cmd=$* - let retries=0 - while [ $retries -le 10 ] ; do - echo `date` $cmd - bash 
-c "$cmd" - exitcode=$? - echo `date` $cmd $exitcode $retries - let retries=retries+1 - if [ $exitcode -eq 0 ] ; then break; fi - sleep 10 - done - test $exitcode = 0 -} - -rows=100000000 -dictionaries=3 -ft_loader=cilk -tokudb=tokudb -branch=. -revision=0 -suffix=. -commit=0 -svnserver=https://svn.tokutek.com/tokudb -basedir=~/svn.build -builddir=$basedir/mysql.build -system=`uname -s | tr [:upper:] [:lower:]` -arch=`uname -m | tr [:upper:] [:lower:]` -myhost=`hostname` -instancetype="" -ftcc=gcc -have_cilk=0 - -# parse the command line -while [ $# -gt 0 ] ; do - arg=$1; shift - if [[ $arg =~ --(.*)=(.*) ]] ; then - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - usage; exit 1 - fi -done - -# require a revision -if [ $revision -eq 0 ] ; then - exit 1 -fi - -# build -if [ $ftcc = icc ] ; then - d=/opt/intel/bin - if [ -d $d ] ; then - export PATH=$d:$PATH - . $d/compilervars.sh intel64 - fi - d=/opt/intel/cilkutil/bin - if [ -d $d ] ; then - export PATH=$d:$PATH - fi -fi - -# setup the branchrevision string -if [ $branch = "." ] ; then - branchrevision=$revision -else - branchrevision=`basename $branch`-$revision -fi -if [ $suffix != "." ] ; then - branchrevision=$branchrevision-$suffix -fi - -ftccversion=$($ftcc --version|head -1|cut -f3 -d" ") - -# goto the base directory -if [ ! -d $basedir ] ; then mkdir $basedir; fi - -pushd $basedir - -# update the build directory -if [ ! -d $builddir ] ; then mkdir $builddir; fi - -date=`date +%Y%m%d` -testresultsdir=$builddir/$date -pushd $builddir - while [ ! -d $date ] ; do - svn mkdir $svnserver/mysql.build/$date -m "" - svn checkout $svnserver/mysql.build/$date - if [ $? -ne 0 ] ; then rm -rf $date; fi - done -popd - -testresult="PASS" -runfile=$testresultsdir/loader-stress-$rows-$dictionaries-$tokudb-$branchrevision-$ftcc-$ftccversion-$system-$arch-$myhost -if [ "$instancetype" != "" ] ; then runfilefile=$runfile-$instancetype; fi -rm -f $runfile - -# checkout the code -if [ -d loader-stress-$branchrevision ] ; then rm -rf loader-stress-$branchrevision; fi -mkdir loader-stress-$branchrevision - -if [ $branch = "." ] ; then branch=toku; fi - -retry svn export -r $revision -q $svnserver/$branch/$tokudb loader-stress-$branchrevision/$tokudb -exitcode=$? -if [ $exitcode != 0 ] ; then - testresult="FAIL" -fi - -if [ $testresult = "PASS" ] ; then - pushd loader-stress-$branchrevision/$tokudb - echo `date` make release -s CC=$ftcc HAVE_CILK=$have_cilk FTLOADER=$ft_loader >>$runfile - make -s release CC=$ftcc HAVE_CILK=$have_cilk FTLOADER=$ft_loader >>$runfile 2>&1 - exitcode=$? - echo `date` complete $exitcode >>$runfile - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - popd -fi -if [ $testresult = "PASS" ] ; then - pushd loader-stress-$branchrevision/$tokudb/src/tests - echo `date` make loader-stress-test.tdb CC=$ftcc HAVE_CILK=$have_cilk >>$runfile - make loader-stress-test.tdb -s CC=$ftcc HAVE_CILK=$have_cilk >>$runfile 2>&1 - exitcode=$? - echo `date` complete $exitcode >>$runfile - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - popd -fi - -# run -if [ $testresult = "PASS" ] ; then - pushd loader-stress-$branchrevision/$tokudb/src/tests - echo `date` ./loader-stress-test.tdb -v -r $rows -d $dictionaries -c >>$runfile - ./loader-stress-test.tdb -v -r $rows -d $dictionaries -c >>$runfile 2>&1 - exitcode=$? 
- echo `date` complete $exitcode >>$runfile - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - popd -fi - -if [ $commit != 0 ] ; then - svn add $runfile - retry svn commit -m \"$testresult loader stress $rows $dictionaries $tokudb $branchrevision $ftcc $ftccversion $system $arch $myhost\" $runfile -fi - -popd - -if [ $testresult = "PASS" ] ; then exitcode=0; else exitcode=1; fi -exit $exitcode diff --git a/storage/tokudb/ft-index/scripts/run.stress-tests.bash b/storage/tokudb/ft-index/scripts/run.stress-tests.bash deleted file mode 100755 index b3d0e197d7b..00000000000 --- a/storage/tokudb/ft-index/scripts/run.stress-tests.bash +++ /dev/null @@ -1,332 +0,0 @@ -#!/bin/bash -# $Id$ - -DOC=<<EOF - - PARAMETERS - - table size: small (2 000), medium (200 000), large (50 000 000) - - cachetable size: small (num_elements * 50), large (1 000 000 000) - - update threads: 1, random number <= 20 - - point query threads: 1, random number <= 20 - - recover-test_stress1, recover-test_stress2 - - DATA - - currently running tests - - log of success/failure ("./recover-test_stress1.tdb --num_elements blah blah blah PASS") - - if failed: - - parameters - - corefile - - stdout/stderr - - data directory - -EOF - -set -e - -. /opt/intel/bin/compilervars.sh intel64 - -scriptname=$(basename "$0") -toku_toplevel=$(dirname $(dirname $(readlink -f "$PWD/$0"))) -log=/tmp/run.stress-tests.log -savedir=/tmp/run.stress-tests.failures - -usage() { - echo "Usage: $scriptname" 1>&2 - echo " [--toku_toplevel=<dir>]" 1>&2 - echo " [--log=<file>]" 1>&2 - echo " [--savedir=<dir>]" 1>&2 -} - -# parse the command line -while [ $# -gt 0 ] ; do - arg=$1; shift - if [[ $arg =~ --(.*)=(.*) ]] ; then - ok=no - for opt in toku_toplevel log savedir - do - if [[ ${BASH_REMATCH[1]} = $opt ]] - then - ok=yes - fi - done - if [[ $ok = no ]] - then - usage; exit 1 - fi - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - usage; exit 1 - fi -done - -src_tests="${toku_toplevel}/src/tests" -testnames=(test_stress1.tdb \ - test_stress5.tdb \ - test_stress6.tdb) -recover_testnames=(recover-test_stress1.tdb \ - recover-test_stress2.tdb \ - recover-test_stress3.tdb) - -save_failure() { - dir="$1"; shift - out="$1"; shift - envdir="$1"; shift - rev=$1; shift - exec="$1"; shift - table_size=$1; shift - cachetable_size=$1; shift - num_ptquery=$1; shift - num_update=$1; shift - phase=$1; shift - dest="${dir}/${exec}-${table_size}-${cachetable_size}-${num_ptquery}-${num_update}-${phase}-${rev}-$$" - mkdir -p "$dest" - mv $out "${dest}/output.txt" - mv core* "${dest}/" - mv $envdir "${dest}/" -} - -running=no - -run_test() { - rev=$1; shift - exec="$1"; shift - table_size="$1"; shift - cachetable_size="$1"; shift - num_ptquery="$1"; shift - num_update="$1"; shift - mylog="$1"; shift - mysavedir="$1"; shift - - rundir=$(mktemp -d ./rundir.XXXXXXXX) - tmplog=$(mktemp) - - ulimit -c unlimited - t0="$(date)" - t1="" - t2="" - envdir="../${exec}-${table_size}-${cachetable_size}-${num_ptquery}-${num_update}-$$.dir" - cd $rundir - if LD_LIBRARY_PATH=../../../lib:$LD_LIBRARY_PATH \ - ../$exec -v --only_create --num_seconds 600 --envdir "$envdir" \ - --num_elements $table_size \ - --cachetable_size $cachetable_size &> $tmplog - then - rm -f $tmplog - t1="$(date)" - if LD_LIBRARY_PATH=../../../lib:$LD_LIBRARY_PATH \ - ../$exec -v --only_stress --num_seconds 600 --no-crash_on_update_failure --envdir "$envdir" \ - --num_elements $table_size \ - --cachetable_size $cachetable_size \ - --num_ptquery_threads $num_ptquery \ - --num_update_threads $num_update &> 
$tmplog - then - rm -f $tmplog - t2="$(date)" - echo "\"$exec\",$rev,$table_size,$cachetable_size,$num_ptquery,$num_update,$t0,$t1,$t2,PASS" | tee -a "$mylog" - else - save_failure "$mysavedir" $tmplog $envdir $rev $exec $table_size $cachetable_size $num_ptquery $num_update stress - echo "\"$exec\",$rev,$table_size,$cachetable_size,$num_ptquery,$num_update,$t0,$t1,$t2,FAIL" | tee -a "$mylog" - fi - else - save_failure "$mysavedir" $tmplog $envdir $rev $exec $table_size $cachetable_size $num_ptquery $num_update create - echo "\"$exec\",$rev,$table_size,$cachetable_size,$num_ptquery,$num_update,$t0,$t1,$t2,FAIL" | tee -a "$mylog" - fi - cd .. - rm -rf $rundir "$envdir" -} - -loop_test() { - rev=$1; shift - exec="$1"; shift - table_size="$1"; shift - cachetable_size="$1"; shift - mylog="$1"; shift - mysavedir="$1"; shift - - ptquery_rand=0 - update_rand=0 - while [[ $running = "yes" ]] - do - num_ptquery=1 - num_update=1 - if [[ $ptquery_rand -gt 1 ]] - then - (( num_ptquery = $RANDOM % 16 )) - fi - if [[ $update_rand -gt 0 ]] - then - (( num_update = $RANDOM % 16 )) - fi - (( ptquery_rand = (ptquery_rand + 1) % 4 )) - (( update_rand = (update_rand + 1) % 2 )) - run_test $rev $exec $table_size $cachetable_size $num_ptquery $num_update $mylog $mysavedir - done -} - -run_recover_test() { - rev=$1; shift - exec="$1"; shift - table_size="$1"; shift - cachetable_size="$1"; shift - num_ptquery="$1"; shift - num_update="$1"; shift - mylog="$1"; shift - mysavedir="$1"; shift - - rundir=$(mktemp -d ./rundir.XXXXXXXX) - tmplog=$(mktemp) - - ulimit -c unlimited - t0="$(date)" - t1="" - t2="" - envdir="../${exec}-${table_size}-${cachetable_size}-${num_ptquery}-${num_update}-$$.dir" - cd $rundir - if ! LD_LIBRARY_PATH=../../../lib:$LD_LIBRARY_PATH \ - ../$exec -v --test --num_seconds 600 --no-crash_on_update_failure --envdir "$envdir" \ - --num_elements $table_size \ - --cachetable_size $cachetable_size \ - --num_ptquery_threads $num_ptquery \ - --num_update_threads $num_update &> $tmplog - then - rm -f $tmplog - t1="$(date)" - if LD_LIBRARY_PATH=../../../lib:$LD_LIBRARY_PATH \ - ../$exec -v --recover --envdir "$envdir" \ - --num_elements $table_size \ - --cachetable_size $cachetable_size &> $tmplog - then - rm -f $tmplog - t2="$(date)" - echo "\"$exec\",$rev,$table_size,$cachetable_size,$num_ptquery,$num_update,$t0,$t1,$t2,PASS" | tee -a "$mylog" - else - save_failure "$mysavedir" $tmplog $envdir $rev $exec $table_size $cachetable_size $num_ptquery $num_update recover - echo "\"$exec\",$rev,$table_size,$cachetable_size,$num_ptquery,$num_update,$t0,$t1,$t2,FAIL" | tee -a "$mylog" - fi - else - save_failure "$mysavedir" $tmplog $envdir $rev $exec $table_size $cachetable_size $num_ptquery $num_update test - echo "\"$exec\",$rev,$table_size,$cachetable_size,$num_ptquery,$num_update,$t0,$t1,$t2,FAIL" | tee -a "$mylog" - fi - cd .. 
- rm -rf $rundir "$envdir" -} - -loop_recover_test() { - rev=$1; shift - exec="$1"; shift - table_size="$1"; shift - cachetable_size="$1"; shift - mylog="$1"; shift - mysavedir="$1"; shift - - ptquery_rand=0 - update_rand=0 - while [[ $running = "yes" ]] - do - num_ptquery=1 - num_update=1 - if [[ $ptquery_rand -gt 1 ]] - then - (( num_ptquery = $RANDOM % 16 )) - fi - if [[ $update_rand -gt 0 ]] - then - (( num_update = $RANDOM % 16 )) - fi - (( ptquery_rand = (ptquery_rand + 1) % 4 )) - (( update_rand = (update_rand + 1) % 2 )) - run_recover_test $rev $exec $table_size $cachetable_size $num_ptquery $num_update $mylog $mysavedir - done -} - -declare -a pids=(0) -i=0 - -savepid() { - pids[$i]=$1 - (( i = i + 1 )) -} - -killchildren() { - kill ${pids[@]} || true - for exec in ${testnames[@]} ${recover_testnames[@]} - do - pkill -f $exec || true - done -} - -trap killchildren INT TERM EXIT - -mkdir -p $log -mkdir -p $savedir - -while true -do - (cd $toku_toplevel; \ - svn update; \ - make CC=icc DEBUG=0 HAVE_CILK=0 clean fastbuild; \ - make CC=icc DEBUG=0 HAVE_CILK=0 -C src/tests ${testnames[@]} ${recover_testnames[@]}) - - cd $src_tests - - rev=$(svn info ../.. | awk '/Revision/ { print $2 }') - - running=yes - - for exec in ${testnames[@]} - do - for table_size in 2000 200000 50000000 - do - (( small_cachetable = table_size * 50 )) - suffix="${exec}-${table_size}-${small_cachetable}-$$" - touch "${log}/${suffix}" - loop_test $rev $exec $table_size $small_cachetable "${log}/${suffix}" "${savedir}/${suffix}" & savepid $! - - suffix="${exec}-${table_size}-1000000000-$$" - touch "${log}/${suffix}" - loop_test $rev $exec $table_size 1000000000 "${log}/${suffix}" "${savedir}/${suffix}" & savepid $! - done - done - - for exec in ${recover_testnames[@]} - do - for table_size in 2000 200000 50000000 - do - (( small_cachetable = table_size * 50 )) - suffix="${exec}-${table_size}-${small_cachetable}-$$" - touch "${log}/${suffix}" - loop_recover_test $rev $exec $table_size $small_cachetable "${log}/${suffix}" "${savedir}/${suffix}" & savepid $! - - suffix="${exec}-${table_size}-1000000000-$$" - touch "${log}/${suffix}" - loop_recover_test $rev $exec $table_size 1000000000 "${log}/${suffix}" "${savedir}/${suffix}" & savepid $! 
- done - done - - sleep 1d - - running=no - - killchildren - - wait ${pids[@]} || true - - idx=0 - for pid in ${pids[@]} - do - pids[$idx]=0 - (( idx = idx + 1 )) - done -done diff --git a/storage/tokudb/ft-index/scripts/run.stress-tests.py b/storage/tokudb/ft-index/scripts/run.stress-tests.py index fbbf5ee6472..62edbab8f3c 100755 --- a/storage/tokudb/ft-index/scripts/run.stress-tests.py +++ b/storage/tokudb/ft-index/scripts/run.stress-tests.py @@ -552,7 +552,6 @@ def rebuild(tokudb, builddir, tokudb_data, cc, cxx, tests): newenv['CXX'] = cxx r = call(['cmake', '-DCMAKE_BUILD_TYPE=Debug', - '-DUSE_BDB=OFF', '-DUSE_GTAGS=OFF', '-DUSE_CTAGS=OFF', '-DUSE_ETAGS=OFF', @@ -735,6 +734,7 @@ if __name__ == '__main__': 'test_stress6.tdb', 'test_stress7.tdb', 'test_stress_hot_indexing.tdb', + 'test_stress_with_verify.tdb', 'test_stress_openclose.tdb'] default_recover_testnames = ['recover-test_stress1.tdb', 'recover-test_stress2.tdb', @@ -766,8 +766,8 @@ if __name__ == '__main__': help="skip the tests that don't involve upgrade [default=False]") upgrade_group.add_option('--double_upgrade', action='store_true', dest='double_upgrade', default=False, help='run the upgrade tests twice in a row [default=False]') - upgrade_group.add_option('--add_old_version', action='append', type='choice', dest='old_versions', choices=['4.2.0', '5.0.8', '5.2.7', '6.0.0', '6.1.0', '6.5.1', '6.6.3'], - help='which old versions to use for running the stress tests in upgrade mode. can be specified multiple times [options=4.2.0, 5.0.8, 5.2.7, 6.0.0, 6.1.0, 6.5.1, 6.6.3]') + upgrade_group.add_option('--add_old_version', action='append', type='choice', dest='old_versions', choices=['4.2.0', '5.0.8', '5.2.7', '6.0.0', '6.1.0', '6.5.1', '6.6.3', '7.1.6'], + help='which old versions to use for running the stress tests in upgrade mode. can be specified multiple times [options=4.2.0, 5.0.8, 5.2.7, 6.0.0, 6.1.0, 6.5.1, 6.6.3, 7.1.6]') upgrade_group.add_option('--old_environments_dir', type='string', dest='old_environments_dir', default=('%s/old-stress-test-envs' % default_tokudb_data), help='directory containing old version environments (should contain 5.0.8/, 5.2.7/, etc, and the environments should be in those) [default=../../tokudb.data/stress_environments]') diff --git a/storage/tokudb/ft-index/scripts/tokucilkscreen b/storage/tokudb/ft-index/scripts/tokucilkscreen deleted file mode 100755 index 91a63ec66ef..00000000000 --- a/storage/tokudb/ft-index/scripts/tokucilkscreen +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -# exit 1 if cilkscreen finds errors - -function cleanup() { - if [ "$logfile" != "" ] ; then rm $logfile; logfile=; fi -} - -trap cleanup SIGINT -logfile=$(mktemp /tmp/toku_cilkscreen.XXXXXXXX) -cilkscreen $* 2>$logfile -exitcode=$? -if [ $exitcode = 0 ] ; then - cat $logfile >>/dev/fd/2 - grep "No errors found by Cilkscreen" $logfile >/dev/null 2>&1 - exitcode=$? -fi -rm $logfile -exit $exitcode
\ No newline at end of file diff --git a/storage/tokudb/ft-index/scripts/tokuvalgrind b/storage/tokudb/ft-index/scripts/tokuvalgrind new file mode 100755 index 00000000000..a099a1f2ff9 --- /dev/null +++ b/storage/tokudb/ft-index/scripts/tokuvalgrind @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +function usage() { + echo "check for valgrind error and set the exit code" +} + +function cleanup() { + if [ "$logfile" != "" ] ; then rm $logfile; fi + exit 1 +} + +args=$* + +logfile= +createlogfile=0 +errorexitcode=1 + +while [ $# -gt 0 ] ; do + arg=$1; shift + if [[ $arg =~ "--" ]] ; then + if [[ $arg =~ --log-file=(.*) ]] ; then + logfile=${BASH_REMATCH[1]} + elif [[ $arg =~ --error-exitcode=(.*) ]] ; then + errorexitcode=${BASH_REMATCH[1]} + fi + else + break + fi +done + +if [ "$logfile" = "" ] ; then + createlogfile=1 + trap cleanup SIGINT + logfile=`mktemp /tmp/$(whoami).tokugrind.XXXXXXXX` + args="--log-file=$logfile $args" +fi + +valgrind $args +exitcode=$? +if [ $exitcode = 0 ] ; then + lines=$(wc -l <$logfile) + if [ $lines -ne 0 ] ; then + exitcode=$errorexitcode + fi +fi + +if [ $createlogfile != 0 ] ; then + cat $logfile >>/dev/stderr + rm $logfile +fi + +exit $exitcode diff --git a/storage/tokudb/ft-index/src/errors.cc b/storage/tokudb/ft-index/src/errors.cc index 4101b372a38..fa1227b25cc 100644 --- a/storage/tokudb/ft-index/src/errors.cc +++ b/storage/tokudb/ft-index/src/errors.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/indexer-internal.h b/storage/tokudb/ft-index/src/indexer-internal.h index a3f1f96f096..fd648a88c8f 100644 --- a/storage/tokudb/ft-index/src/indexer-internal.h +++ b/storage/tokudb/ft-index/src/indexer-internal.h @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,10 +89,9 @@ PATENT RIGHTS GRANT: #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "$Id$" -#ifndef TOKU_INDEXER_INTERNAL_H -#define TOKU_INDEXER_INTERNAL_H +#pragma once -#include <ft/txn_state.h> +#include <ft/txn/txn_state.h> #include <toku_pthread.h> // the indexer_commit_keys is an ordered set of keys described by a DBT in the keys array. @@ -168,5 +167,3 @@ void indexer_undo_do_init(DB_INDEXER *indexer); void indexer_undo_do_destroy(DB_INDEXER *indexer); int indexer_undo_do(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info *prov_info, DBT_ARRAY *hot_keys, DBT_ARRAY *hot_vals); - -#endif diff --git a/storage/tokudb/ft-index/src/indexer-undo-do.cc b/storage/tokudb/ft-index/src/indexer-undo-do.cc index 559dfef7291..52489fb7825 100644 --- a/storage/tokudb/ft-index/src/indexer-undo-do.cc +++ b/storage/tokudb/ft-index/src/indexer-undo-do.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -96,13 +96,12 @@ PATENT RIGHTS GRANT: #include <string.h> #include <ft/le-cursor.h> -#include <ft/tokuconst.h> #include <ft/ft-ops.h> #include <ft/leafentry.h> #include <ft/ule.h> -#include <ft/xids.h> -#include <ft/txn_manager.h> -#include <ft/checkpoint.h> +#include <ft/txn/txn_manager.h> +#include <ft/txn/xids.h> +#include <ft/cachetable/checkpoint.h> #include "ydb-internal.h" #include "ydb_row_lock.h" @@ -199,7 +198,7 @@ indexer_undo_do_committed(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info * ULEHANDLE ule = prov_info->ule; // init the xids to the root xid - XIDS xids = xids_get_root_xids(); + XIDS xids = toku_xids_get_root_xids(); // scan the committed stack from bottom to top uint32_t num_committed = ule_get_num_committed(ule); @@ -280,7 +279,7 @@ indexer_undo_do_committed(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info * break; } - xids_destroy(&xids); + toku_xids_destroy(&xids); return result; } @@ -312,7 +311,7 @@ indexer_undo_do_provisional(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info ULEHANDLE ule = prov_info->ule; // init the xids to the root xid - XIDS xids = xids_get_root_xids(); + XIDS xids = toku_xids_get_root_xids(); uint32_t num_provisional = prov_info->num_provisional; uint32_t num_committed = prov_info->num_committed; @@ -472,7 +471,7 @@ indexer_undo_do_provisional(DB_INDEXER *indexer, DB *hotdb, struct ule_prov_info // then this will need to be handled below exit release_txns(ule, prov_states, prov_txns, indexer); exit: - xids_destroy(&xids); + toku_xids_destroy(&xids); return result; } @@ -496,16 +495,16 @@ static int indexer_set_xid(DB_INDEXER *UU(indexer), TXNID this_xid, XIDS *xids_result) { int result = 0; XIDS old_xids = *xids_result; - XIDS new_xids = xids_get_root_xids(); + XIDS new_xids = toku_xids_get_root_xids(); if (this_xid != TXNID_NONE) { XIDS child_xids; - result = xids_create_child(new_xids, &child_xids, this_xid); - xids_destroy(&new_xids); + result = toku_xids_create_child(new_xids, &child_xids, this_xid); + toku_xids_destroy(&new_xids); if (result == 0) new_xids = child_xids; } if (result == 0) { - xids_destroy(&old_xids); + toku_xids_destroy(&old_xids); *xids_result = new_xids; } @@ -517,9 +516,9 @@ static int indexer_append_xid(DB_INDEXER *UU(indexer), TXNID xid, XIDS *xids_result) { XIDS old_xids = *xids_result; XIDS new_xids; - int result = xids_create_child(old_xids, &new_xids, xid); + int result = toku_xids_create_child(old_xids, &new_xids, xid); if (result == 0) { - xids_destroy(&old_xids); + toku_xids_destroy(&old_xids); *xids_result = new_xids; } return result; @@ -682,7 +681,7 @@ indexer_ft_insert_committed(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, DBT *ho static int indexer_ft_commit(DB_INDEXER *indexer, DB *hotdb, DBT *hotkey, XIDS xids) { int result = 0; - if (xids_get_num_xids(xids) > 0) {// send commit only when not the root xid + if (toku_xids_get_num_xids(xids) > 0) {// send commit only when not the root xid // TEST if (indexer->i->test_commit_any) { result = indexer->i->test_commit_any(indexer, hotdb, hotkey, xids); diff --git a/storage/tokudb/ft-index/src/indexer.cc b/storage/tokudb/ft-index/src/indexer.cc index b91b738d4d4..aa821f67fba 100644 --- a/storage/tokudb/ft-index/src/indexer.cc +++ b/storage/tokudb/ft-index/src/indexer.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -99,13 +99,12 @@ PATENT RIGHTS GRANT: #include "ydb-internal.h" #include <ft/le-cursor.h> #include "indexer.h" -#include <ft/tokuconst.h> #include <ft/ft-ops.h> #include <ft/leafentry.h> #include <ft/ule.h> -#include <ft/xids.h> -#include <ft/log-internal.h> -#include <ft/checkpoint.h> +#include <ft/txn/xids.h> +#include <ft/logger/log-internal.h> +#include <ft/cachetable/checkpoint.h> #include <portability/toku_atomic.h> #include "loader.h" #include <util/status.h> @@ -118,7 +117,7 @@ PATENT RIGHTS GRANT: static INDEXER_STATUS_S indexer_status; -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(indexer_status, k, c, t, "indexer: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(indexer_status, k, c, t, "indexer: " l, inc) static void status_init(void) { @@ -233,32 +232,25 @@ toku_indexer_unlock(DB_INDEXER* indexer) { // after grabbing the indexer lock bool toku_indexer_may_insert(DB_INDEXER* indexer, const DBT* key) { - bool retval = false; + bool may_insert = false; toku_mutex_lock(&indexer->i->indexer_estimate_lock); + // if we have no position estimate, we can't tell, so return false - if (indexer->i->position_estimate.data == NULL) { - retval = false; - } - else { - FT_HANDLE ft_handle = indexer->i->src_db->i->ft_handle; - ft_compare_func keycompare = toku_ft_get_bt_compare(ft_handle); - int r = keycompare( - indexer->i->src_db, - &indexer->i->position_estimate, - key - ); + if (indexer->i->position_estimate.data == nullptr) { + may_insert = false; + } else { + DB *db = indexer->i->src_db; + const toku::comparator &cmp = toku_ft_get_comparator(db->i->ft_handle); + int c = cmp(&indexer->i->position_estimate, key); + // if key > position_estimate, then we know the indexer cursor // is past key, and we can safely say that associated values of // key must be inserted into the indexer's db - if (r < 0) { - retval = true; - } - else { - retval = false; - } + may_insert = c < 0; } + toku_mutex_unlock(&indexer->i->indexer_estimate_lock); - return retval; + return may_insert; } void @@ -546,7 +538,7 @@ struct le_cursor_extra { // cachetable pair locks. because no txn can commit on this db, read // the provisional info for the newly read ule. static int -le_cursor_callback(ITEMLEN keylen, bytevec key, ITEMLEN UU(vallen), bytevec val, void *extra, bool lock_only) { +le_cursor_callback(uint32_t keylen, const void *key, uint32_t UU(vallen), const void *val, void *extra, bool lock_only) { if (lock_only || val == NULL) { ; // do nothing if only locking. do nothing if val==NULL, means DB_NOTFOUND } else { @@ -696,7 +688,7 @@ abort_indexer(DB_INDEXER *indexer) { } -// derived from ha_tokudb::estimate_num_rows +// derived from the handlerton's estimate_num_rows() static int update_estimated_rows(DB_INDEXER *indexer) { int error; diff --git a/storage/tokudb/ft-index/src/indexer.h b/storage/tokudb/ft-index/src/indexer.h index 3a7842af989..12625fdc6ea 100644 --- a/storage/tokudb/ft-index/src/indexer.h +++ b/storage/tokudb/ft-index/src/indexer.h @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,9 +89,7 @@ PATENT RIGHTS GRANT: #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 
11/760379 and to the patents and/or patent applications resulting from it." #ident "$Id$" -#ifndef TOKU_INDEXER_H -#define TOKU_INDEXER_H - +#pragma once // locking and unlocking functions to synchronize cursor position with // XXX_multiple APIs @@ -178,6 +176,3 @@ typedef struct { } INDEXER_STATUS_S, *INDEXER_STATUS; void toku_indexer_get_status(INDEXER_STATUS s); - - -#endif // TOKU_INDEXER_H diff --git a/storage/tokudb/ft-index/src/loader.cc b/storage/tokudb/ft-index/src/loader.cc index 88db258e1ff..1a6bf718443 100644 --- a/storage/tokudb/ft-index/src/loader.cc +++ b/storage/tokudb/ft-index/src/loader.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -99,8 +99,8 @@ PATENT RIGHTS GRANT: #include <string.h> #include <ft/ft.h> -#include <ft/ftloader.h> -#include <ft/checkpoint.h> +#include <ft/loader/loader.h> +#include <ft/cachetable/checkpoint.h> #include "ydb-internal.h" #include "ydb_db.h" @@ -119,7 +119,7 @@ enum {MAX_FILE_SIZE=256}; static LOADER_STATUS_S loader_status; -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(loader_status, k, c, t, "loader: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(loader_status, k, c, t, "loader: " l, inc) static void status_init(void) { @@ -172,6 +172,13 @@ struct __toku_loader_internal { char **inames_in_env; /* [N] inames of new files to be created */ }; +static void free_inames(char **inames, int n) { + for (int i = 0; i < n; i++) { + toku_free(inames[i]); + } + toku_free(inames); +} + /* * free_loader_resources() frees all of the resources associated with * struct __toku_loader_internal @@ -185,16 +192,15 @@ static void free_loader_resources(DB_LOADER *loader) toku_destroy_dbt(&loader->i->err_val); if (loader->i->inames_in_env) { - for (int i=0; i<loader->i->N; i++) { - if (loader->i->inames_in_env[i]) toku_free(loader->i->inames_in_env[i]); - } - toku_free(loader->i->inames_in_env); + free_inames(loader->i->inames_in_env, loader->i->N); + loader->i->inames_in_env = nullptr; } - if (loader->i->temp_file_template) toku_free(loader->i->temp_file_template); + toku_free(loader->i->temp_file_template); + loader->i->temp_file_template = nullptr; // loader->i toku_free(loader->i); - loader->i = NULL; + loader->i = nullptr; } } @@ -245,6 +251,7 @@ toku_loader_create_loader(DB_ENV *env, bool check_empty) { int rval; HANDLE_READ_ONLY_TXN(txn); + DB_TXN *loader_txn = nullptr; *blp = NULL; // set later when created @@ -299,6 +306,13 @@ toku_loader_create_loader(DB_ENV *env, } { + if (env->i->open_flags & DB_INIT_TXN) { + rval = env->txn_begin(env, txn, &loader_txn, 0); + if (rval) { + goto create_exit; + } + } + ft_compare_func compare_functions[N]; for (int i=0; i<N; i++) { compare_functions[i] = env->i->bt_compare; @@ -306,18 +320,21 @@ toku_loader_create_loader(DB_ENV *env, // time to open the big kahuna char **XMALLOC_N(N, new_inames_in_env); + for (int i = 0; i < N; i++) { + new_inames_in_env[i] = nullptr; + } FT_HANDLE *XMALLOC_N(N, fts); for (int i=0; i<N; i++) { fts[i] = dbs[i]->i->ft_handle; } LSN load_lsn; - rval = locked_load_inames(env, txn, N, dbs, new_inames_in_env, &load_lsn, puts_allowed); + rval = locked_load_inames(env, loader_txn, N, dbs, new_inames_in_env, &load_lsn, puts_allowed); if ( rval!=0 ) { - toku_free(new_inames_in_env); + free_inames(new_inames_in_env, N); toku_free(fts); goto create_exit; } - TOKUTXN ttxn = txn ? 
db_txn_struct_i(txn)->tokutxn : NULL; + TOKUTXN ttxn = loader_txn ? db_txn_struct_i(loader_txn)->tokutxn : NULL; rval = toku_ft_loader_open(&loader->i->ft_loader, env->i->cachetable, env->i->generate_row_for_put, @@ -331,12 +348,14 @@ toku_loader_create_loader(DB_ENV *env, ttxn, puts_allowed, env->get_loader_memory_size(env), - compress_intermediates); + compress_intermediates, + puts_allowed); if ( rval!=0 ) { - toku_free(new_inames_in_env); + free_inames(new_inames_in_env, N); toku_free(fts); goto create_exit; } + loader->i->inames_in_env = new_inames_in_env; toku_free(fts); @@ -348,10 +367,19 @@ toku_loader_create_loader(DB_ENV *env, rval = 0; } + rval = loader_txn->commit(loader_txn, 0); + assert_zero(rval); + loader_txn = nullptr; + rval = 0; } *blp = loader; create_exit: + if (loader_txn) { + int r = loader_txn->abort(loader_txn); + assert_zero(r); + loader_txn = nullptr; + } if (rval == 0) { (void) toku_sync_fetch_and_add(&STATUS_VALUE(LOADER_CREATE), 1); (void) toku_sync_fetch_and_add(&STATUS_VALUE(LOADER_CURRENT), 1); @@ -441,7 +469,7 @@ static void redirect_loader_to_empty_dictionaries(DB_LOADER *loader) { loader->i->dbs, loader->i->db_flags, loader->i->dbt_flags, - 0, + LOADER_DISALLOW_PUTS, false ); lazy_assert_zero(r); diff --git a/storage/tokudb/ft-index/src/loader.h b/storage/tokudb/ft-index/src/loader.h index bd8e85aed93..c709eed3e35 100644 --- a/storage/tokudb/ft-index/src/loader.h +++ b/storage/tokudb/ft-index/src/loader.h @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,8 +89,7 @@ PATENT RIGHTS GRANT: #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "$Id$" -#ifndef TOKU_LOADER_H -#define TOKU_LOADER_H +#pragma once /* Create and set up a loader. @@ -208,6 +207,3 @@ typedef struct { void toku_loader_get_status(LOADER_STATUS s); - - -#endif diff --git a/storage/tokudb/ft-index/src/tests/CMakeLists.txt b/storage/tokudb/ft-index/src/tests/CMakeLists.txt index 366d392143a..06f7e523746 100644 --- a/storage/tokudb/ft-index/src/tests/CMakeLists.txt +++ b/storage/tokudb/ft-index/src/tests/CMakeLists.txt @@ -50,7 +50,7 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS) ## #5138 only reproduces when using the static library. 
list(REMOVE_ITEM tdb_bins test-5138.tdb) add_executable(test-5138.tdb test-5138.cc) - target_link_libraries(test-5138.tdb ${LIBTOKUDB}_static ${ZLIB_LIBRARY} ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS}) + target_link_libraries(test-5138.tdb ${LIBTOKUDB}_static z ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS}) add_space_separated_property(TARGET test-5138.tdb COMPILE_FLAGS -fvisibility=hidden) add_ydb_test(test-5138.tdb) @@ -160,10 +160,7 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS) endforeach(av) endforeach(ov) - if (NOT (CMAKE_SYSTEM_NAME MATCHES Darwin OR - (CMAKE_CXX_COMPILER_ID STREQUAL Intel AND - CMAKE_BUILD_TYPE STREQUAL Release) - OR USE_GCOV)) + if (NOT (CMAKE_SYSTEM_NAME MATCHES Darwin OR USE_GCOV)) declare_custom_tests(helgrind1.tdb) add_test(NAME ydb/helgrind_helgrind1.tdb COMMAND valgrind --quiet --tool=helgrind --error-exitcode=1 --log-file=helgrind1.tdb.deleteme $<TARGET_FILE:helgrind1.tdb>) diff --git a/storage/tokudb/ft-index/src/tests/big-nested-abort-abort.cc b/storage/tokudb/ft-index/src/tests/big-nested-abort-abort.cc index 882c3441a8f..7c6e444986a 100644 --- a/storage/tokudb/ft-index/src/tests/big-nested-abort-abort.cc +++ b/storage/tokudb/ft-index/src/tests/big-nested-abort-abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/big-nested-abort-commit.cc b/storage/tokudb/ft-index/src/tests/big-nested-abort-commit.cc index 98dde05cfde..9965a6f5725 100644 --- a/storage/tokudb/ft-index/src/tests/big-nested-abort-commit.cc +++ b/storage/tokudb/ft-index/src/tests/big-nested-abort-commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/big-nested-commit-abort.cc b/storage/tokudb/ft-index/src/tests/big-nested-commit-abort.cc index ec8707530fa..6e02e6e7799 100644 --- a/storage/tokudb/ft-index/src/tests/big-nested-commit-abort.cc +++ b/storage/tokudb/ft-index/src/tests/big-nested-commit-abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/big-nested-commit-commit.cc b/storage/tokudb/ft-index/src/tests/big-nested-commit-commit.cc index 189bc97769f..efd951a90d4 100644 --- a/storage/tokudb/ft-index/src/tests/big-nested-commit-commit.cc +++ b/storage/tokudb/ft-index/src/tests/big-nested-commit-commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/big-shutdown.cc b/storage/tokudb/ft-index/src/tests/big-shutdown.cc new file mode 100644 index 00000000000..0dc576e1117 --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/big-shutdown.cc @@ -0,0 +1,189 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuDB, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." + +// Create a lot of dirty nodes, kick off a checkpoint, and close the environment. +// Measure the time it takes to close the environment since we are speeding up that +// function. + +#include "test.h" +#include <toku_time.h> + +// Insert max_rows key/val pairs into the db +static void do_inserts(DB_ENV *env, DB *db, uint64_t max_rows, size_t val_size) { + char val_data[val_size]; memset(val_data, 0, val_size); + int r; + DB_TXN *txn = nullptr; + r = env->txn_begin(env, nullptr, &txn, 0); + CKERR(r); + + for (uint64_t i = 1; i <= max_rows; i++) { + // pick a sequential key but it does not matter for this test. + uint64_t k[2] = { + htonl(i), random64(), + }; + DBT key = { .data = k, .size = sizeof k }; + DBT val = { .data = val_data, .size = (uint32_t) val_size }; + r = db->put(db, txn, &key, &val, 0); + CKERR(r); + + if ((i % 1000) == 0) { + if (verbose) + fprintf(stderr, "put %" PRIu64 "\n", i); + r = txn->commit(txn, 0); + CKERR(r); + r = env->txn_begin(env, nullptr, &txn, 0); + CKERR(r); + } + } + + r = txn->commit(txn, 0); + CKERR(r); +} + +// Create a cache with a lot of dirty nodes, kick off a checkpoint, and measure the time to +// close the environment. 
+static void big_shutdown(void) { + int r; + + DB_ENV *env = nullptr; + r = db_env_create(&env, 0); + CKERR(r); + r = env->set_cachesize(env, 8, 0, 1); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + DB *db = nullptr; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + do_inserts(env, db, 1000000, 1024); + + // kick the checkpoint thread + if (verbose) + fprintf(stderr, "env->checkpointing_set_period\n"); + r = env->checkpointing_set_period(env, 2); + CKERR(r); + sleep(3); + + if (verbose) + fprintf(stderr, "db->close\n"); + r = db->close(db, 0); + CKERR(r); + + // measure the shutdown time + uint64_t tstart = toku_current_time_microsec(); + if (verbose) + fprintf(stderr, "env->close\n"); + r = env->close(env, 0); + CKERR(r); + uint64_t tend = toku_current_time_microsec(); + if (verbose) + fprintf(stderr, "env->close complete %" PRIu64 " sec\n", (tend - tstart)/1000000); +} + +int test_main (int argc, char *const argv[]) { + default_parse_args(argc, argv); + + // init the env directory + toku_os_recursive_delete(TOKU_TEST_FILENAME); + int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + // run the test + big_shutdown(); + + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/bigtxn27.cc b/storage/tokudb/ft-index/src/tests/bigtxn27.cc index baa1e4f7e6e..1eedb79543d 100644 --- a/storage/tokudb/ft-index/src/tests/bigtxn27.cc +++ b/storage/tokudb/ft-index/src/tests/bigtxn27.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blackhole.cc b/storage/tokudb/ft-index/src/tests/blackhole.cc index 267eb8c1ba3..34df107b153 100644 --- a/storage/tokudb/ft-index/src/tests/blackhole.cc +++ b/storage/tokudb/ft-index/src/tests/blackhole.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,7 +92,7 @@ PATENT RIGHTS GRANT: // Test that a db ignores insert messages in blackhole mode #include "test.h" -#include <ft/ybt.h> +#include <util/dbt.h> static DB *db; static DB *blackhole_db; diff --git a/storage/tokudb/ft-index/src/tests/blocking-first-empty.cc b/storage/tokudb/ft-index/src/tests/blocking-first-empty.cc index 6ccf879005d..3fb5cae46ff 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-first-empty.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-first-empty.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-first.cc b/storage/tokudb/ft-index/src/tests/blocking-first.cc index 6d255023274..b501f70d5bb 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-first.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-first.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-last.cc b/storage/tokudb/ft-index/src/tests/blocking-last.cc index 403f31bca61..e087d9623fc 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-last.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-last.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-next-prev-deadlock.cc b/storage/tokudb/ft-index/src/tests/blocking-next-prev-deadlock.cc index f71c89aa3e7..dac4aa1ad44 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-next-prev-deadlock.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-next-prev-deadlock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-next-prev.cc b/storage/tokudb/ft-index/src/tests/blocking-next-prev.cc index ec1ae8ff340..5fa2f781fb7 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-next-prev.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-next-prev.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-prelock-range.cc b/storage/tokudb/ft-index/src/tests/blocking-prelock-range.cc index 15065dcf244..78d2975f81b 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-prelock-range.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-prelock-range.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-put-timeout.cc b/storage/tokudb/ft-index/src/tests/blocking-put-timeout.cc index b91198a4438..13fddb8d05b 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-put-timeout.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-put-timeout.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-put-wakeup.cc b/storage/tokudb/ft-index/src/tests/blocking-put-wakeup.cc index 06c51b6fede..c5052fbf813 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-put-wakeup.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-put-wakeup.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-put.cc b/storage/tokudb/ft-index/src/tests/blocking-put.cc index 8100862881a..8b1cf71e359 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-put.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-put.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-set-range-0.cc b/storage/tokudb/ft-index/src/tests/blocking-set-range-0.cc index 896d4a82e12..8445493832f 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-set-range-0.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-set-range-0.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-set-range-n.cc b/storage/tokudb/ft-index/src/tests/blocking-set-range-n.cc index 841809fadbe..a37e5b2a0a7 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-set-range-n.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-set-range-n.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-set-range-reverse-0.cc b/storage/tokudb/ft-index/src/tests/blocking-set-range-reverse-0.cc index f63bb3c0e5b..dc79522d629 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-set-range-reverse-0.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-set-range-reverse-0.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-set.cc b/storage/tokudb/ft-index/src/tests/blocking-set.cc index dd0196ff276..4bb3c5dd1eb 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-set.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-set.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/blocking-table-lock.cc b/storage/tokudb/ft-index/src/tests/blocking-table-lock.cc index 42e824debbf..8258a698784 100644 --- a/storage/tokudb/ft-index/src/tests/blocking-table-lock.cc +++ b/storage/tokudb/ft-index/src/tests/blocking-table-lock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/bug1381.cc b/storage/tokudb/ft-index/src/tests/bug1381.cc index c603d5e3ab2..988538ef782 100644 --- a/storage/tokudb/ft-index/src/tests/bug1381.cc +++ b/storage/tokudb/ft-index/src/tests/bug1381.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/cachetable-race.cc b/storage/tokudb/ft-index/src/tests/cachetable-race.cc index 1e0ffaad40c..0ff1fc11b4e 100644 --- a/storage/tokudb/ft-index/src/tests/cachetable-race.cc +++ b/storage/tokudb/ft-index/src/tests/cachetable-race.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/checkpoint1.cc b/storage/tokudb/ft-index/src/tests/checkpoint1.cc index 9fe56cdbc36..68300dee6fb 100644 --- a/storage/tokudb/ft-index/src/tests/checkpoint1.cc +++ b/storage/tokudb/ft-index/src/tests/checkpoint1.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/checkpoint_fairness.cc b/storage/tokudb/ft-index/src/tests/checkpoint_fairness.cc index 03d5c47f4e5..3e76020d70b 100644 --- a/storage/tokudb/ft-index/src/tests/checkpoint_fairness.cc +++ b/storage/tokudb/ft-index/src/tests/checkpoint_fairness.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/checkpoint_stress.cc b/storage/tokudb/ft-index/src/tests/checkpoint_stress.cc index 976ba8e8555..1f39061bb1e 100644 --- a/storage/tokudb/ft-index/src/tests/checkpoint_stress.cc +++ b/storage/tokudb/ft-index/src/tests/checkpoint_stress.cc @@ -27,7 +27,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/checkpoint_test.h b/storage/tokudb/ft-index/src/tests/checkpoint_test.h index e9d4290a406..4ea74e09ede 100644 --- a/storage/tokudb/ft-index/src/tests/checkpoint_test.h +++ b/storage/tokudb/ft-index/src/tests/checkpoint_test.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,13 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2009-2013 Tokutek Inc. All rights reserved." #ident "$Id$" -#ifndef CHECKPOINT_TEST_H -#define CHECKPOINT_TEST_H - - DB_ENV *env; enum {MAX_NAME=128}; @@ -537,6 +535,3 @@ snapshot(DICTIONARY d, int do_checkpoint) { db_startup(d, NULL); } } - - -#endif diff --git a/storage/tokudb/ft-index/src/tests/create-datadir.cc b/storage/tokudb/ft-index/src/tests/create-datadir.cc index ecd0b032897..2cde781872b 100644 --- a/storage/tokudb/ft-index/src/tests/create-datadir.cc +++ b/storage/tokudb/ft-index/src/tests/create-datadir.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/cursor-isolation.cc b/storage/tokudb/ft-index/src/tests/cursor-isolation.cc index 1a1450f2f63..ec91f5b73d1 100644 --- a/storage/tokudb/ft-index/src/tests/cursor-isolation.cc +++ b/storage/tokudb/ft-index/src/tests/cursor-isolation.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/cursor-more-than-a-leaf-provdel.cc b/storage/tokudb/ft-index/src/tests/cursor-more-than-a-leaf-provdel.cc index 2927bb7c5ff..4587402c286 100644 --- a/storage/tokudb/ft-index/src/tests/cursor-more-than-a-leaf-provdel.cc +++ b/storage/tokudb/ft-index/src/tests/cursor-more-than-a-leaf-provdel.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/cursor-set-del-rmw.cc b/storage/tokudb/ft-index/src/tests/cursor-set-del-rmw.cc index 38ccf112697..79df796a6c1 100644 --- a/storage/tokudb/ft-index/src/tests/cursor-set-del-rmw.cc +++ b/storage/tokudb/ft-index/src/tests/cursor-set-del-rmw.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/cursor-set-range-rmw.cc b/storage/tokudb/ft-index/src/tests/cursor-set-range-rmw.cc index fb5dbca72b4..4f0dce02edd 100644 --- a/storage/tokudb/ft-index/src/tests/cursor-set-range-rmw.cc +++ b/storage/tokudb/ft-index/src/tests/cursor-set-range-rmw.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/cursor-step-over-delete.cc b/storage/tokudb/ft-index/src/tests/cursor-step-over-delete.cc index 748b5135899..7c57475da6a 100644 --- a/storage/tokudb/ft-index/src/tests/cursor-step-over-delete.cc +++ b/storage/tokudb/ft-index/src/tests/cursor-step-over-delete.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/db-put-simple-deadlock-threads.cc b/storage/tokudb/ft-index/src/tests/db-put-simple-deadlock-threads.cc index d60725c4966..6227b602df2 100644 --- a/storage/tokudb/ft-index/src/tests/db-put-simple-deadlock-threads.cc +++ b/storage/tokudb/ft-index/src/tests/db-put-simple-deadlock-threads.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/db-put-simple-deadlock.cc b/storage/tokudb/ft-index/src/tests/db-put-simple-deadlock.cc index acc841b8d78..46109e9592f 100644 --- a/storage/tokudb/ft-index/src/tests/db-put-simple-deadlock.cc +++ b/storage/tokudb/ft-index/src/tests/db-put-simple-deadlock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/db-put-simple-lockwait.cc b/storage/tokudb/ft-index/src/tests/db-put-simple-lockwait.cc index e459652101d..6466bd2e9ea 100644 --- a/storage/tokudb/ft-index/src/tests/db-put-simple-lockwait.cc +++ b/storage/tokudb/ft-index/src/tests/db-put-simple-lockwait.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/db-put-update-deadlock.cc b/storage/tokudb/ft-index/src/tests/db-put-update-deadlock.cc index ddd2893cae0..cfbf95cd599 100644 --- a/storage/tokudb/ft-index/src/tests/db-put-update-deadlock.cc +++ b/storage/tokudb/ft-index/src/tests/db-put-update-deadlock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/dbremove-nofile-limit.cc b/storage/tokudb/ft-index/src/tests/dbremove-nofile-limit.cc new file mode 100644 index 00000000000..cd8b50c1c5b --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/dbremove-nofile-limit.cc @@ -0,0 +1,177 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. 
+ + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2014 Tokutek Inc. All rights reserved." +#ident "$Id$" + +// This test verifies that the env->dbremove function returns an error rather than +// crash when the NOFILE resource limit is exceeded. 
+ +#include "test.h" +#include <db.h> +#include <sys/resource.h> + +static const char *envdir = TOKU_TEST_FILENAME; + +static void test_dbremove() { + int r; + + char rmcmd[32 + strlen(envdir)]; + snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir); + r = system(rmcmd); CKERR(r); + r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + + DB_ENV *env; + r = db_env_create(&env, 0); CKERR(r); + int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE; + r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + env->set_errfile(env, stderr); + + DB *db; + r = db_create(&db, env, 0); CKERR(r); + char fname[32]; + sprintf(fname, "db%d", 0); + r = db->open(db, nullptr, fname, nullptr, DB_BTREE, DB_CREATE, 0666); CKERR(r); + + r = db->close(db, 0); CKERR(r); + + DB_TXN *txn; + r = env->txn_begin(env, nullptr, &txn, 0); CKERR(r); + + struct rlimit current_limit; + r = getrlimit(RLIMIT_NOFILE, &current_limit); + assert(r == 0); + + struct rlimit new_limit = current_limit; + new_limit.rlim_cur = 0; + r = setrlimit(RLIMIT_NOFILE, &new_limit); + assert(r == 0); + + r = env->dbremove(env, txn, fname, nullptr, 0); + CKERR2(r, EMFILE); + + r = setrlimit(RLIMIT_NOFILE, &current_limit); + assert(r == 0); + + r = env->dbremove(env, txn, fname, nullptr, 0); + CKERR(r); + + r = txn->commit(txn, 0); CKERR(r); + + r = env->close(env, 0); CKERR(r); +} + +static void do_args(int argc, char * const argv[]) { + int resultcode; + char *cmd = argv[0]; + argc--; argv++; + while (argc>0) { + if (strcmp(argv[0], "-h")==0) { + resultcode=0; + do_usage: + fprintf(stderr, "Usage: %s -h -v -q\n", cmd); + exit(resultcode); + } else if (strcmp(argv[0], "-v")==0) { + verbose++; + } else if (strcmp(argv[0],"-q")==0) { + verbose--; + if (verbose<0) verbose=0; + } else { + fprintf(stderr, "Unknown arg: %s\n", argv[0]); + resultcode=1; + goto do_usage; + } + argc--; + argv++; + } +} + +int test_main(int argc, char * const *argv) { + do_args(argc, argv); + test_dbremove(); + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/del-multiple-huge-primary-row.cc b/storage/tokudb/ft-index/src/tests/del-multiple-huge-primary-row.cc index 9d2b2b6871b..f0ee57228ad 100644 --- a/storage/tokudb/ft-index/src/tests/del-multiple-huge-primary-row.cc +++ b/storage/tokudb/ft-index/src/tests/del-multiple-huge-primary-row.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/del-multiple-srcdb.cc b/storage/tokudb/ft-index/src/tests/del-multiple-srcdb.cc index 5230caf3a4e..f14ba646e59 100644 --- a/storage/tokudb/ft-index/src/tests/del-multiple-srcdb.cc +++ b/storage/tokudb/ft-index/src/tests/del-multiple-srcdb.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/del-multiple.cc b/storage/tokudb/ft-index/src/tests/del-multiple.cc index b54ff4fce72..7f3560fb459 100644 --- a/storage/tokudb/ft-index/src/tests/del-multiple.cc +++ b/storage/tokudb/ft-index/src/tests/del-multiple.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/del-simple.cc b/storage/tokudb/ft-index/src/tests/del-simple.cc index 34376637c9a..6ae08607f48 100644 --- a/storage/tokudb/ft-index/src/tests/del-simple.cc +++ b/storage/tokudb/ft-index/src/tests/del-simple.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/directory_lock.cc b/storage/tokudb/ft-index/src/tests/directory_lock.cc index ed89e004900..c67dfab20c8 100644 --- a/storage/tokudb/ft-index/src/tests/directory_lock.cc +++ b/storage/tokudb/ft-index/src/tests/directory_lock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/diskfull.cc b/storage/tokudb/ft-index/src/tests/diskfull.cc index fdce56aa251..d52f621a174 100644 --- a/storage/tokudb/ft-index/src/tests/diskfull.cc +++ b/storage/tokudb/ft-index/src/tests/diskfull.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/dump-env.cc b/storage/tokudb/ft-index/src/tests/dump-env.cc index 8348c25f2bd..7815aa5bd63 100644 --- a/storage/tokudb/ft-index/src/tests/dump-env.cc +++ b/storage/tokudb/ft-index/src/tests/dump-env.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,6 @@ static DB_ENV *env; static DB *db; DB_TXN *txn; -const int num_insert = 25000; static void setup (void) { diff --git a/storage/tokudb/ft-index/src/tests/env-put-multiple.cc b/storage/tokudb/ft-index/src/tests/env-put-multiple.cc index 0988f3d5ca2..75ccb0297b3 100644 --- a/storage/tokudb/ft-index/src/tests/env-put-multiple.cc +++ b/storage/tokudb/ft-index/src/tests/env-put-multiple.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/env_loader_memory.cc b/storage/tokudb/ft-index/src/tests/env_loader_memory.cc index 106bdefd3a9..ed19f05f944 100644 --- a/storage/tokudb/ft-index/src/tests/env_loader_memory.cc +++ b/storage/tokudb/ft-index/src/tests/env_loader_memory.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/env_nproc.cc b/storage/tokudb/ft-index/src/tests/env_nproc.cc index 29bc216f9c8..3ed60a18e69 100644 --- a/storage/tokudb/ft-index/src/tests/env_nproc.cc +++ b/storage/tokudb/ft-index/src/tests/env_nproc.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/env_startup.cc b/storage/tokudb/ft-index/src/tests/env_startup.cc index 0fe5a4abac8..5be8b9849a0 100644 --- a/storage/tokudb/ft-index/src/tests/env_startup.cc +++ b/storage/tokudb/ft-index/src/tests/env_startup.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/filesize.cc b/storage/tokudb/ft-index/src/tests/filesize.cc index b47be955efd..6b4c03a358c 100644 --- a/storage/tokudb/ft-index/src/tests/filesize.cc +++ b/storage/tokudb/ft-index/src/tests/filesize.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/get_key_after_bytes_unit.cc b/storage/tokudb/ft-index/src/tests/get_key_after_bytes_unit.cc index 7303ebac8c8..73a6e92b28f 100644 --- a/storage/tokudb/ft-index/src/tests/get_key_after_bytes_unit.cc +++ b/storage/tokudb/ft-index/src/tests/get_key_after_bytes_unit.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/get_last_key.cc b/storage/tokudb/ft-index/src/tests/get_last_key.cc index 36c7ab63259..241652928c5 100644 --- a/storage/tokudb/ft-index/src/tests/get_last_key.cc +++ b/storage/tokudb/ft-index/src/tests/get_last_key.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/helgrind1.cc b/storage/tokudb/ft-index/src/tests/helgrind1.cc index 5e451ab2a25..49572197fc1 100644 --- a/storage/tokudb/ft-index/src/tests/helgrind1.cc +++ b/storage/tokudb/ft-index/src/tests/helgrind1.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/helgrind2.cc b/storage/tokudb/ft-index/src/tests/helgrind2.cc index 608d635a54a..d70c4d256df 100644 --- a/storage/tokudb/ft-index/src/tests/helgrind2.cc +++ b/storage/tokudb/ft-index/src/tests/helgrind2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/helgrind3.cc b/storage/tokudb/ft-index/src/tests/helgrind3.cc index 2defde37ba5..85f909b11f0 100644 --- a/storage/tokudb/ft-index/src/tests/helgrind3.cc +++ b/storage/tokudb/ft-index/src/tests/helgrind3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hot-optimize-table-tests.cc b/storage/tokudb/ft-index/src/tests/hot-optimize-table-tests.cc index 6a00afa4a51..42f0ef86e82 100644 --- a/storage/tokudb/ft-index/src/tests/hot-optimize-table-tests.cc +++ b/storage/tokudb/ft-index/src/tests/hot-optimize-table-tests.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -103,7 +103,7 @@ const int envflags = DB_INIT_MPOOL | DB_ENV* env; unsigned int leaf_hits; -// Custom Update Function for our test BRT. +// Custom Update Function for our test FT. static int update_func(DB* UU(db), const DBT* key, @@ -148,7 +148,7 @@ hot_test_setup(void) // Remove any previous environment. toku_os_recursive_delete(TOKU_TEST_FILENAME); - // Set up a new TokuDB. + // Set up a new environment. { int chk_r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(chk_r); } { int chk_r = db_env_create(&env, 0); CKERR(chk_r); } env->set_errfile(env, stderr); @@ -266,7 +266,7 @@ test_main(int argc, char * const argv[]) default_parse_args(argc, argv); hot_test_setup(); - // Create and Open the Database/BRT + // Create and Open the Database/FT DB *db = NULL; const unsigned int BIG = 4000000; const unsigned int SMALL = 10; diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-bw.cc b/storage/tokudb/ft-index/src/tests/hotindexer-bw.cc index fa53a4062e8..eb6b9f1b11b 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-bw.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-bw.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -103,7 +103,6 @@ static int num_rows; static const int FORWARD = 0; static const int BACKWARD = 1; typedef int Direction; -static const int TXN_NONE = 0; static const int TXN_CREATE = 1; static const int TXN_END = 2; typedef int TxnWork; diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-error-callback.cc b/storage/tokudb/ft-index/src/tests/hotindexer-error-callback.cc index 0f0e889d525..18e5a0116e1 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-error-callback.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-error-callback.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-insert-committed-optimized.cc b/storage/tokudb/ft-index/src/tests/hotindexer-insert-committed-optimized.cc index 4acd2c57b72..9268a5d2370 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-insert-committed-optimized.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-insert-committed-optimized.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-insert-committed.cc b/storage/tokudb/ft-index/src/tests/hotindexer-insert-committed.cc index b6c409be315..81aa83ba9f7 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-insert-committed.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-insert-committed.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-insert-provisional.cc b/storage/tokudb/ft-index/src/tests/hotindexer-insert-provisional.cc index 911587ff3f3..509f74fbec9 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-insert-provisional.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-insert-provisional.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-lock-test.cc b/storage/tokudb/ft-index/src/tests/hotindexer-lock-test.cc index 615486a6496..16600c76574 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-lock-test.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-lock-test.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-multiclient.cc b/storage/tokudb/ft-index/src/tests/hotindexer-multiclient.cc index 89da7da3b3b..18dc6e5e030 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-multiclient.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-multiclient.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-nested-insert-committed.cc b/storage/tokudb/ft-index/src/tests/hotindexer-nested-insert-committed.cc index 446fae8e983..938ee151b69 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-nested-insert-committed.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-nested-insert-committed.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-put-abort.cc b/storage/tokudb/ft-index/src/tests/hotindexer-put-abort.cc index 35f3e317e3e..f81336cbee4 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-put-abort.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-put-abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-put-commit.cc b/storage/tokudb/ft-index/src/tests/hotindexer-put-commit.cc index b8177d52e13..2863ef4754c 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-put-commit.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-put-commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-put-multiple.cc b/storage/tokudb/ft-index/src/tests/hotindexer-put-multiple.cc index e5bb39f93ae..05e77137ca0 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-put-multiple.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-put-multiple.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-simple-abort-put.cc b/storage/tokudb/ft-index/src/tests/hotindexer-simple-abort-put.cc index 41a7cc5b817..0aabcdbdd4a 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-simple-abort-put.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-simple-abort-put.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-simple-abort.cc b/storage/tokudb/ft-index/src/tests/hotindexer-simple-abort.cc index f210e0079b5..3fddf1d319f 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-simple-abort.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-simple-abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-undo-do-test.cc b/storage/tokudb/ft-index/src/tests/hotindexer-undo-do-test.cc index 51f60652d14..5ef06f62155 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-undo-do-test.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-undo-do-test.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,13 +95,10 @@ PATENT RIGHTS GRANT: #include "test.h" -#include <ft/tokuconst.h> -#include <ft/fttypes.h> -#include <ft/leafentry.h> #include <ft/ule.h> #include <ft/ule-internal.h> #include <ft/le-cursor.h> -#include <ft/xids-internal.h> +#include <ft/txn/xids.h> #include "indexer-internal.h" diff --git a/storage/tokudb/ft-index/src/tests/hotindexer-with-queries.cc b/storage/tokudb/ft-index/src/tests/hotindexer-with-queries.cc index 7770b34ae07..c1be755b4d6 100644 --- a/storage/tokudb/ft-index/src/tests/hotindexer-with-queries.cc +++ b/storage/tokudb/ft-index/src/tests/hotindexer-with-queries.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/inflate.cc b/storage/tokudb/ft-index/src/tests/inflate.cc index 8311b591c86..30f8f2199ae 100644 --- a/storage/tokudb/ft-index/src/tests/inflate.cc +++ b/storage/tokudb/ft-index/src/tests/inflate.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/inflate2.cc b/storage/tokudb/ft-index/src/tests/inflate2.cc index e5b8b6f270f..ce594cf0834 100644 --- a/storage/tokudb/ft-index/src/tests/inflate2.cc +++ b/storage/tokudb/ft-index/src/tests/inflate2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/insert-dup-prelock.cc b/storage/tokudb/ft-index/src/tests/insert-dup-prelock.cc index 0771056b072..2ba99d0bc02 100644 --- a/storage/tokudb/ft-index/src/tests/insert-dup-prelock.cc +++ b/storage/tokudb/ft-index/src/tests/insert-dup-prelock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/isolation-read-committed.cc b/storage/tokudb/ft-index/src/tests/isolation-read-committed.cc index ce226508d79..c949482ca16 100644 --- a/storage/tokudb/ft-index/src/tests/isolation-read-committed.cc +++ b/storage/tokudb/ft-index/src/tests/isolation-read-committed.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/isolation.cc b/storage/tokudb/ft-index/src/tests/isolation.cc index 485986099e0..dbe4ce9cb4a 100644 --- a/storage/tokudb/ft-index/src/tests/isolation.cc +++ b/storage/tokudb/ft-index/src/tests/isolation.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/key-val.h b/storage/tokudb/ft-index/src/tests/key-val.h index d77b8b00e05..9a4512bfdac 100644 --- a/storage/tokudb/ft-index/src/tests/key-val.h +++ b/storage/tokudb/ft-index/src/tests/key-val.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,10 +86,10 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." -#ifndef KEY_VAL_H -#define KEY_VAL_H // // Functions to create unique key/value pairs, row generators, checkers, ... 
for each of NUM_DBS // @@ -295,8 +295,3 @@ static int UU() generate_initial_table(DB *db, DB_TXN *txn, uint32_t rows) return r; } - - - - -#endif // KEY_VAL_H diff --git a/storage/tokudb/ft-index/src/tests/keyrange-merge.cc b/storage/tokudb/ft-index/src/tests/keyrange-merge.cc index 0b5df76d731..b53016053ce 100644 --- a/storage/tokudb/ft-index/src/tests/keyrange-merge.cc +++ b/storage/tokudb/ft-index/src/tests/keyrange-merge.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/keyrange.cc b/storage/tokudb/ft-index/src/tests/keyrange.cc index ee63cd7cdf5..85ffcd23357 100644 --- a/storage/tokudb/ft-index/src/tests/keyrange.cc +++ b/storage/tokudb/ft-index/src/tests/keyrange.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/last-verify-time.cc b/storage/tokudb/ft-index/src/tests/last-verify-time.cc index d3b5cf456fd..057a711ffec 100644 --- a/storage/tokudb/ft-index/src/tests/last-verify-time.cc +++ b/storage/tokudb/ft-index/src/tests/last-verify-time.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/loader-cleanup-test.cc b/storage/tokudb/ft-index/src/tests/loader-cleanup-test.cc index eaed9c4170a..c0f92c448ef 100644 --- a/storage/tokudb/ft-index/src/tests/loader-cleanup-test.cc +++ b/storage/tokudb/ft-index/src/tests/loader-cleanup-test.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/loader-close-nproc-limit.cc b/storage/tokudb/ft-index/src/tests/loader-close-nproc-limit.cc new file mode 100644 index 00000000000..262a63294fd --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/loader-close-nproc-limit.cc @@ -0,0 +1,198 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +// Verify that loader->close works correctly (does not crash, does not leak memory, returns the right error code) +// when the NPROC limit is exceeded. + +#ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." 
+#ident "$Id$" + +#include "test.h" +#include <db.h> +#include <sys/resource.h> + +static int loader_flags = 0; +static const char *envdir = TOKU_TEST_FILENAME; + +static void run_test(int ndb) { + int r; + + char rmcmd[32 + strlen(envdir)]; + snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir); + r = system(rmcmd); CKERR(r); + r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + + DB_ENV *env; + r = db_env_create(&env, 0); CKERR(r); + int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE; + r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + env->set_errfile(env, stderr); + + DB *dbs[ndb]; + uint32_t db_flags[ndb]; + uint32_t dbt_flags[ndb]; + for (int i = 0; i < ndb; i++) { + db_flags[i] = DB_NOOVERWRITE; + dbt_flags[i] = 0; + r = db_create(&dbs[i], env, 0); CKERR(r); + char name[32]; + sprintf(name, "db%d", i); + r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r); + } + + DB_TXN *txn; + r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); + + DB_LOADER *loader; + r = env->create_loader(env, txn, &loader, ndb > 0 ? dbs[0] : NULL, ndb, dbs, db_flags, dbt_flags, loader_flags); CKERR(r); + + struct rlimit current_nproc_limit; + r = getrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + struct rlimit new_nproc_limit = current_nproc_limit; + new_nproc_limit.rlim_cur = 0; + r = setrlimit(RLIMIT_NPROC, &new_nproc_limit); + assert(r == 0); + + r = loader->close(loader); + + if (loader_flags & LOADER_DISALLOW_PUTS) + CKERR(r); + else + CKERR2(r, EAGAIN); + + r = setrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + r = txn->abort(txn); CKERR(r); + + for (int i = 0; i < ndb; i++) { + r = dbs[i]->close(dbs[i], 0); CKERR(r); + } + + r = env->close(env, 0); CKERR(r); +} + +static void do_args(int argc, char * const argv[]) { + int resultcode; + char *cmd = argv[0]; + argc--; argv++; + while (argc>0) { + if (strcmp(argv[0], "-h")==0) { + resultcode=0; + do_usage: + fprintf(stderr, "Usage: %s -h -v -q -p\n", cmd); + exit(resultcode); + } else if (strcmp(argv[0], "-v")==0) { + verbose++; + } else if (strcmp(argv[0],"-q")==0) { + verbose--; + if (verbose<0) verbose=0; + } else if (strcmp(argv[0], "-p") == 0) { + loader_flags |= LOADER_DISALLOW_PUTS; + } else if (strcmp(argv[0], "-z") == 0) { + loader_flags |= LOADER_COMPRESS_INTERMEDIATES; + } else if (strcmp(argv[0], "-e") == 0) { + argc--; argv++; + if (argc > 0) + envdir = argv[0]; + } else { + fprintf(stderr, "Unknown arg: %s\n", argv[0]); + resultcode=1; + goto do_usage; + } + argc--; + argv++; + } +} + +int test_main(int argc, char * const *argv) { + do_args(argc, argv); + run_test(1); + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/loader-create-abort.cc b/storage/tokudb/ft-index/src/tests/loader-create-abort.cc index 58568564699..3d2cf84cefe 100644 --- a/storage/tokudb/ft-index/src/tests/loader-create-abort.cc +++ b/storage/tokudb/ft-index/src/tests/loader-create-abort.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/loader-create-close.cc b/storage/tokudb/ft-index/src/tests/loader-create-close.cc index 6a04387152f..8a2d043e51f 100644 --- a/storage/tokudb/ft-index/src/tests/loader-create-close.cc +++ b/storage/tokudb/ft-index/src/tests/loader-create-close.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -97,11 +97,7 @@ PATENT RIGHTS GRANT: static int loader_flags = 0; static const char *envdir = TOKU_TEST_FILENAME; -static int put_multiple_generate(DB *UU(dest_db), DB *UU(src_db), DBT_ARRAY *UU(dest_keys), DBT_ARRAY *UU(dest_vals), const DBT *UU(src_key), const DBT *UU(src_val)) { - return ENOMEM; -} - -static void loader_open_abort(int ndb) { +static void test_loader_create_close(int ndb) { int r; char rmcmd[32 + strlen(envdir)]; @@ -111,8 +107,6 @@ static void loader_open_abort(int ndb) { DB_ENV *env; r = db_env_create(&env, 0); CKERR(r); - r = env->set_generate_row_callback_for_put(env, put_multiple_generate); - CKERR(r); int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE; r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); env->set_errfile(env, stderr); @@ -181,8 +175,8 @@ static void do_args(int argc, char * const argv[]) { int test_main(int argc, char * const *argv) { do_args(argc, argv); - loader_open_abort(0); - loader_open_abort(1); - loader_open_abort(2); + test_loader_create_close(0); + test_loader_create_close(1); + test_loader_create_close(2); return 0; } diff --git a/storage/tokudb/ft-index/src/tests/loader-create-commit-nproc-limit.cc b/storage/tokudb/ft-index/src/tests/loader-create-commit-nproc-limit.cc new file mode 100644 index 00000000000..62ba70fa4f8 --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/loader-create-commit-nproc-limit.cc @@ -0,0 +1,211 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." +#ident "$Id$" + +// This test crashes if a failed loader creation causes the db to be corrupted by unlinking +// the underlying fractal tree files. This unlinking occurs because the txn that logs the +// load log entries is committed rather than aborted. 
+ +#include "test.h" +#include <db.h> +#include <sys/resource.h> + +static int loader_flags = 0; +static const char *envdir = TOKU_TEST_FILENAME; + +static void run_test(int ndb) { + int r; + + char rmcmd[32 + strlen(envdir)]; + snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir); + r = system(rmcmd); CKERR(r); + r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + + DB_ENV *env; + r = db_env_create(&env, 0); CKERR(r); + int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE; + r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + env->set_errfile(env, stderr); + + DB *dbs[ndb]; + uint32_t db_flags[ndb]; + uint32_t dbt_flags[ndb]; + for (int i = 0; i < ndb; i++) { + db_flags[i] = DB_NOOVERWRITE; + dbt_flags[i] = 0; + r = db_create(&dbs[i], env, 0); CKERR(r); + char name[32]; + sprintf(name, "db%d", i); + r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r); + } + + DB_TXN *txn; + r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); + + struct rlimit current_nproc_limit; + r = getrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + struct rlimit new_nproc_limit = current_nproc_limit; + new_nproc_limit.rlim_cur = 0; + r = setrlimit(RLIMIT_NPROC, &new_nproc_limit); + assert(r == 0); + + DB_LOADER *loader; + int loader_r = env->create_loader(env, txn, &loader, ndb > 0 ? dbs[0] : NULL, ndb, dbs, db_flags, dbt_flags, loader_flags); + + r = setrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + if (loader_flags & LOADER_DISALLOW_PUTS) { + CKERR(loader_r); + loader_r = loader->close(loader); + CKERR(loader_r); + } else { + CKERR2(loader_r, EAGAIN); + } + + r = txn->commit(txn, 0); CKERR(r); + + for (int i = 0; i < ndb; i++) { + r = dbs[i]->close(dbs[i], 0); CKERR(r); + } + + for (int i = 0; i < ndb; i++) { + r = db_create(&dbs[i], env, 0); CKERR(r); + char name[32]; + sprintf(name, "db%d", i); + r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, 0, 0666); CKERR(r); + } + + for (int i = 0; i < ndb; i++) { + r = dbs[i]->close(dbs[i], 0); CKERR(r); + } + + r = env->close(env, 0); CKERR(r); +} + +static void do_args(int argc, char * const argv[]) { + int resultcode; + char *cmd = argv[0]; + argc--; argv++; + while (argc>0) { + if (strcmp(argv[0], "-h")==0) { + resultcode=0; + do_usage: + fprintf(stderr, "Usage: %s -h -v -q -p\n", cmd); + exit(resultcode); + } else if (strcmp(argv[0], "-v")==0) { + verbose++; + } else if (strcmp(argv[0],"-q")==0) { + verbose--; + if (verbose<0) verbose=0; + } else if (strcmp(argv[0], "-p") == 0) { + loader_flags |= LOADER_DISALLOW_PUTS; + } else if (strcmp(argv[0], "-z") == 0) { + loader_flags |= LOADER_COMPRESS_INTERMEDIATES; + } else if (strcmp(argv[0], "-e") == 0) { + argc--; argv++; + if (argc > 0) + envdir = argv[0]; + } else { + fprintf(stderr, "Unknown arg: %s\n", argv[0]); + resultcode=1; + goto do_usage; + } + argc--; + argv++; + } +} + +int test_main(int argc, char * const *argv) { + do_args(argc, argv); + run_test(1); + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/loader-create-nproc-limit.cc b/storage/tokudb/ft-index/src/tests/loader-create-nproc-limit.cc new file mode 100644 index 00000000000..844ca2043c7 --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/loader-create-nproc-limit.cc @@ -0,0 +1,199 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it 
under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. 
If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +// Verify that env->create_loader works correctly (does not crash, does not leak memory, returns the right error code) +// when the NPROC limit is exceeded. + +#ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." +#ident "$Id$" + +#include "test.h" +#include <db.h> +#include <sys/resource.h> + +static int loader_flags = 0; +static const char *envdir = TOKU_TEST_FILENAME; + +static void run_test(int ndb) { + int r; + + char rmcmd[32 + strlen(envdir)]; + snprintf(rmcmd, sizeof rmcmd, "rm -rf %s", envdir); + r = system(rmcmd); CKERR(r); + r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + + DB_ENV *env; + r = db_env_create(&env, 0); CKERR(r); + int envflags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_PRIVATE; + r = env->open(env, envdir, envflags, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + env->set_errfile(env, stderr); + + DB *dbs[ndb]; + uint32_t db_flags[ndb]; + uint32_t dbt_flags[ndb]; + for (int i = 0; i < ndb; i++) { + db_flags[i] = DB_NOOVERWRITE; + dbt_flags[i] = 0; + r = db_create(&dbs[i], env, 0); CKERR(r); + char name[32]; + sprintf(name, "db%d", i); + r = dbs[i]->open(dbs[i], NULL, name, NULL, DB_BTREE, DB_CREATE, 0666); CKERR(r); + } + + DB_TXN *txn; + r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); + + struct rlimit current_nproc_limit; + r = getrlimit(RLIMIT_NPROC, &current_nproc_limit); + assert(r == 0); + + struct rlimit new_nproc_limit = current_nproc_limit; + new_nproc_limit.rlim_cur = 0; + r = setrlimit(RLIMIT_NPROC, &new_nproc_limit); + assert(r == 0); + + DB_LOADER *loader; + int loader_r = env->create_loader(env, txn, &loader, ndb > 0 ?
dbs[0] : NULL, ndb, dbs, db_flags, dbt_flags, loader_flags); + + r = setrlimit(RLIMIT_NPROC, &current_nproc_limit); + assert(r == 0); + + if (loader_flags & LOADER_DISALLOW_PUTS) { + CKERR(loader_r); + loader_r = loader->close(loader); + CKERR(loader_r); + } else { + CKERR2(loader_r, EAGAIN); + } + + r = txn->abort(txn); CKERR(r); + + for (int i = 0; i < ndb; i++) { + r = dbs[i]->close(dbs[i], 0); CKERR(r); + } + + r = env->close(env, 0); CKERR(r); +} + +static void do_args(int argc, char * const argv[]) { + int resultcode; + char *cmd = argv[0]; + argc--; argv++; + while (argc>0) { + if (strcmp(argv[0], "-h")==0) { + resultcode=0; + do_usage: + fprintf(stderr, "Usage: %s -h -v -q -p\n", cmd); + exit(resultcode); + } else if (strcmp(argv[0], "-v")==0) { + verbose++; + } else if (strcmp(argv[0],"-q")==0) { + verbose--; + if (verbose<0) verbose=0; + } else if (strcmp(argv[0], "-p") == 0) { + loader_flags |= LOADER_DISALLOW_PUTS; + } else if (strcmp(argv[0], "-z") == 0) { + loader_flags |= LOADER_COMPRESS_INTERMEDIATES; + } else if (strcmp(argv[0], "-e") == 0) { + argc--; argv++; + if (argc > 0) + envdir = argv[0]; + } else { + fprintf(stderr, "Unknown arg: %s\n", argv[0]); + resultcode=1; + goto do_usage; + } + argc--; + argv++; + } +} + +int test_main(int argc, char * const *argv) { + do_args(argc, argv); + run_test(1); + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/loader-dup-test.cc b/storage/tokudb/ft-index/src/tests/loader-dup-test.cc index 5fa41809baa..d3bd2aabe57 100644 --- a/storage/tokudb/ft-index/src/tests/loader-dup-test.cc +++ b/storage/tokudb/ft-index/src/tests/loader-dup-test.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/loader-no-puts.cc b/storage/tokudb/ft-index/src/tests/loader-no-puts.cc index 6fc20c5c8a1..c2c11a639a0 100644 --- a/storage/tokudb/ft-index/src/tests/loader-no-puts.cc +++ b/storage/tokudb/ft-index/src/tests/loader-no-puts.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/loader-reference-test.cc b/storage/tokudb/ft-index/src/tests/loader-reference-test.cc index 7fadcf150d4..4bb9334a71f 100644 --- a/storage/tokudb/ft-index/src/tests/loader-reference-test.cc +++ b/storage/tokudb/ft-index/src/tests/loader-reference-test.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/loader-stress-del.cc b/storage/tokudb/ft-index/src/tests/loader-stress-del.cc index 9578df66003..c9a262222fb 100644 --- a/storage/tokudb/ft-index/src/tests/loader-stress-del.cc +++ b/storage/tokudb/ft-index/src/tests/loader-stress-del.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/loader-stress-test.cc b/storage/tokudb/ft-index/src/tests/loader-stress-test.cc index f58b839b314..b9e51436632 100644 --- a/storage/tokudb/ft-index/src/tests/loader-stress-test.cc +++ b/storage/tokudb/ft-index/src/tests/loader-stress-test.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/loader-tpch-load.cc b/storage/tokudb/ft-index/src/tests/loader-tpch-load.cc index cbe38275821..c89331a4200 100644 --- a/storage/tokudb/ft-index/src/tests/loader-tpch-load.cc +++ b/storage/tokudb/ft-index/src/tests/loader-tpch-load.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/locktree_escalation_stalls.cc b/storage/tokudb/ft-index/src/tests/locktree_escalation_stalls.cc index 037d2fc46a9..2a5581077d5 100644 --- a/storage/tokudb/ft-index/src/tests/locktree_escalation_stalls.cc +++ b/storage/tokudb/ft-index/src/tests/locktree_escalation_stalls.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/manyfiles.cc b/storage/tokudb/ft-index/src/tests/manyfiles.cc index 6445f1b7d38..4c68f8d86fe 100644 --- a/storage/tokudb/ft-index/src/tests/manyfiles.cc +++ b/storage/tokudb/ft-index/src/tests/manyfiles.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/maxsize-for-loader.cc b/storage/tokudb/ft-index/src/tests/maxsize-for-loader.cc index 02b21794abb..a95a42d4870 100644 --- a/storage/tokudb/ft-index/src/tests/maxsize-for-loader.cc +++ b/storage/tokudb/ft-index/src/tests/maxsize-for-loader.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/medium-nested-commit-commit.cc b/storage/tokudb/ft-index/src/tests/medium-nested-commit-commit.cc index 48d9102d523..aab33584391 100644 --- a/storage/tokudb/ft-index/src/tests/medium-nested-commit-commit.cc +++ b/storage/tokudb/ft-index/src/tests/medium-nested-commit-commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/multiprocess.cc b/storage/tokudb/ft-index/src/tests/multiprocess.cc index fab0d7d3896..93b20d3ad7f 100644 --- a/storage/tokudb/ft-index/src/tests/multiprocess.cc +++ b/storage/tokudb/ft-index/src/tests/multiprocess.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/mvcc-create-table.cc b/storage/tokudb/ft-index/src/tests/mvcc-create-table.cc index 84f8c75db7c..db1d1616732 100644 --- a/storage/tokudb/ft-index/src/tests/mvcc-create-table.cc +++ b/storage/tokudb/ft-index/src/tests/mvcc-create-table.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/mvcc-many-committed.cc b/storage/tokudb/ft-index/src/tests/mvcc-many-committed.cc index db261e6ae17..bbb7116b42d 100644 --- a/storage/tokudb/ft-index/src/tests/mvcc-many-committed.cc +++ b/storage/tokudb/ft-index/src/tests/mvcc-many-committed.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/mvcc-read-committed.cc b/storage/tokudb/ft-index/src/tests/mvcc-read-committed.cc index 38a598ec5be..6f8d3377c10 100644 --- a/storage/tokudb/ft-index/src/tests/mvcc-read-committed.cc +++ b/storage/tokudb/ft-index/src/tests/mvcc-read-committed.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/openlimit17-locktree.cc b/storage/tokudb/ft-index/src/tests/openlimit17-locktree.cc index c83ec2543f9..e9b62752af4 100644 --- a/storage/tokudb/ft-index/src/tests/openlimit17-locktree.cc +++ b/storage/tokudb/ft-index/src/tests/openlimit17-locktree.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/openlimit17-metafiles.cc b/storage/tokudb/ft-index/src/tests/openlimit17-metafiles.cc index 52c319af778..29dbeebef7c 100644 --- a/storage/tokudb/ft-index/src/tests/openlimit17-metafiles.cc +++ b/storage/tokudb/ft-index/src/tests/openlimit17-metafiles.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/openlimit17.cc b/storage/tokudb/ft-index/src/tests/openlimit17.cc index 0709d89a0c2..4f322a86f35 100644 --- a/storage/tokudb/ft-index/src/tests/openlimit17.cc +++ b/storage/tokudb/ft-index/src/tests/openlimit17.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_checkpoint_var.cc b/storage/tokudb/ft-index/src/tests/perf_checkpoint_var.cc index 0c9ad682eb9..d0d60641cb1 100644 --- a/storage/tokudb/ft-index/src/tests/perf_checkpoint_var.cc +++ b/storage/tokudb/ft-index/src/tests/perf_checkpoint_var.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_child_txn.cc b/storage/tokudb/ft-index/src/tests/perf_child_txn.cc index 121d9dc3735..f6d2e8018eb 100644 --- a/storage/tokudb/ft-index/src/tests/perf_child_txn.cc +++ b/storage/tokudb/ft-index/src/tests/perf_child_txn.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_cursor_nop.cc b/storage/tokudb/ft-index/src/tests/perf_cursor_nop.cc index 71c5b8d170e..4f890ab0ca2 100644 --- a/storage/tokudb/ft-index/src/tests/perf_cursor_nop.cc +++ b/storage/tokudb/ft-index/src/tests/perf_cursor_nop.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_iibench.cc b/storage/tokudb/ft-index/src/tests/perf_iibench.cc index b5e094d2c15..b9d142c65c9 100644 --- a/storage/tokudb/ft-index/src/tests/perf_iibench.cc +++ b/storage/tokudb/ft-index/src/tests/perf_iibench.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_insert.cc b/storage/tokudb/ft-index/src/tests/perf_insert.cc index 31210a6e343..9d621b93c0c 100644 --- a/storage/tokudb/ft-index/src/tests/perf_insert.cc +++ b/storage/tokudb/ft-index/src/tests/perf_insert.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_malloc_free.cc b/storage/tokudb/ft-index/src/tests/perf_malloc_free.cc index 451bc346897..ee6ca92edb4 100644 --- a/storage/tokudb/ft-index/src/tests/perf_malloc_free.cc +++ b/storage/tokudb/ft-index/src/tests/perf_malloc_free.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_nop.cc b/storage/tokudb/ft-index/src/tests/perf_nop.cc index 9f6b5e2ee95..e6d4d94640f 100644 --- a/storage/tokudb/ft-index/src/tests/perf_nop.cc +++ b/storage/tokudb/ft-index/src/tests/perf_nop.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_ptquery.cc b/storage/tokudb/ft-index/src/tests/perf_ptquery.cc index bc17d498a7b..71922782878 100644 --- a/storage/tokudb/ft-index/src/tests/perf_ptquery.cc +++ b/storage/tokudb/ft-index/src/tests/perf_ptquery.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_ptquery2.cc b/storage/tokudb/ft-index/src/tests/perf_ptquery2.cc index 9ae1bbab844..888081bdb81 100644 --- a/storage/tokudb/ft-index/src/tests/perf_ptquery2.cc +++ b/storage/tokudb/ft-index/src/tests/perf_ptquery2.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_rangequery.cc b/storage/tokudb/ft-index/src/tests/perf_rangequery.cc index 88d30049a29..3d78dd16ab7 100644 --- a/storage/tokudb/ft-index/src/tests/perf_rangequery.cc +++ b/storage/tokudb/ft-index/src/tests/perf_rangequery.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_read_txn.cc b/storage/tokudb/ft-index/src/tests/perf_read_txn.cc index 9e62314fa58..2825f6588ce 100644 --- a/storage/tokudb/ft-index/src/tests/perf_read_txn.cc +++ b/storage/tokudb/ft-index/src/tests/perf_read_txn.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_read_txn_single_thread.cc b/storage/tokudb/ft-index/src/tests/perf_read_txn_single_thread.cc index debb1296ae9..f36b748d853 100644 --- a/storage/tokudb/ft-index/src/tests/perf_read_txn_single_thread.cc +++ b/storage/tokudb/ft-index/src/tests/perf_read_txn_single_thread.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_read_write.cc b/storage/tokudb/ft-index/src/tests/perf_read_write.cc index ef95e9d3aa2..f5d75f57103 100644 --- a/storage/tokudb/ft-index/src/tests/perf_read_write.cc +++ b/storage/tokudb/ft-index/src/tests/perf_read_write.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/perf_txn_single_thread.cc b/storage/tokudb/ft-index/src/tests/perf_txn_single_thread.cc index 789024327cb..52e6d9d7cae 100644 --- a/storage/tokudb/ft-index/src/tests/perf_txn_single_thread.cc +++ b/storage/tokudb/ft-index/src/tests/perf_txn_single_thread.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/powerfail.cc b/storage/tokudb/ft-index/src/tests/powerfail.cc index 63ad313e145..601df047d09 100644 --- a/storage/tokudb/ft-index/src/tests/powerfail.cc +++ b/storage/tokudb/ft-index/src/tests/powerfail.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/preload-db-nested.cc b/storage/tokudb/ft-index/src/tests/preload-db-nested.cc index 0d1a3749193..9c0c8282456 100644 --- a/storage/tokudb/ft-index/src/tests/preload-db-nested.cc +++ b/storage/tokudb/ft-index/src/tests/preload-db-nested.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/preload-db.cc b/storage/tokudb/ft-index/src/tests/preload-db.cc index d486af941d2..584176bc997 100644 --- a/storage/tokudb/ft-index/src/tests/preload-db.cc +++ b/storage/tokudb/ft-index/src/tests/preload-db.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/prelock-read-read.cc b/storage/tokudb/ft-index/src/tests/prelock-read-read.cc index daa6ab108b7..b23c81dd119 100644 --- a/storage/tokudb/ft-index/src/tests/prelock-read-read.cc +++ b/storage/tokudb/ft-index/src/tests/prelock-read-read.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/prelock-read-write.cc b/storage/tokudb/ft-index/src/tests/prelock-read-write.cc index 140c9e79b1c..0a3a3fddf39 100644 --- a/storage/tokudb/ft-index/src/tests/prelock-read-write.cc +++ b/storage/tokudb/ft-index/src/tests/prelock-read-write.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/prelock-write-read.cc b/storage/tokudb/ft-index/src/tests/prelock-write-read.cc index 540d385b116..35c194c362d 100644 --- a/storage/tokudb/ft-index/src/tests/prelock-write-read.cc +++ b/storage/tokudb/ft-index/src/tests/prelock-write-read.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/prelock-write-write.cc b/storage/tokudb/ft-index/src/tests/prelock-write-write.cc index 8753f158648..d9f832cdde7 100644 --- a/storage/tokudb/ft-index/src/tests/prelock-write-write.cc +++ b/storage/tokudb/ft-index/src/tests/prelock-write-write.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/print_engine_status.cc b/storage/tokudb/ft-index/src/tests/print_engine_status.cc index 2f1b6b5b98d..34b62bd8fe6 100644 --- a/storage/tokudb/ft-index/src/tests/print_engine_status.cc +++ b/storage/tokudb/ft-index/src/tests/print_engine_status.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/progress.cc b/storage/tokudb/ft-index/src/tests/progress.cc index e1d57ec61b0..e6af8fb9763 100644 --- a/storage/tokudb/ft-index/src/tests/progress.cc +++ b/storage/tokudb/ft-index/src/tests/progress.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/put-del-multiple-array-indexing.cc b/storage/tokudb/ft-index/src/tests/put-del-multiple-array-indexing.cc index af0407063f8..0a29d87369f 100644 --- a/storage/tokudb/ft-index/src/tests/put-del-multiple-array-indexing.cc +++ b/storage/tokudb/ft-index/src/tests/put-del-multiple-array-indexing.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/queries_with_deletes.cc b/storage/tokudb/ft-index/src/tests/queries_with_deletes.cc index eebe61e2839..a619e5f0f58 100644 --- a/storage/tokudb/ft-index/src/tests/queries_with_deletes.cc +++ b/storage/tokudb/ft-index/src/tests/queries_with_deletes.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-2483.cc b/storage/tokudb/ft-index/src/tests/recover-2483.cc index 0950a304075..e31361839f8 100644 --- a/storage/tokudb/ft-index/src/tests/recover-2483.cc +++ b/storage/tokudb/ft-index/src/tests/recover-2483.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,8 +94,6 @@ PATENT RIGHTS GRANT: #include "test.h" -const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE; - DB_ENV *env; DB_TXN *tid; DB *db; diff --git a/storage/tokudb/ft-index/src/tests/recover-3113.cc b/storage/tokudb/ft-index/src/tests/recover-3113.cc index 67a4e1ff4d4..eeba9baf03c 100644 --- a/storage/tokudb/ft-index/src/tests/recover-3113.cc +++ b/storage/tokudb/ft-index/src/tests/recover-3113.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-5146.cc b/storage/tokudb/ft-index/src/tests/recover-5146.cc index 3ad54539aef..c05f9effa7d 100644 --- a/storage/tokudb/ft-index/src/tests/recover-5146.cc +++ b/storage/tokudb/ft-index/src/tests/recover-5146.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-checkpoint-fcreate-fdelete-fcreate.cc b/storage/tokudb/ft-index/src/tests/recover-checkpoint-fcreate-fdelete-fcreate.cc index 20fc67dd956..0d5d4ff20db 100644 --- a/storage/tokudb/ft-index/src/tests/recover-checkpoint-fcreate-fdelete-fcreate.cc +++ b/storage/tokudb/ft-index/src/tests/recover-checkpoint-fcreate-fdelete-fcreate.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-checkpoint-fopen-abort.cc b/storage/tokudb/ft-index/src/tests/recover-checkpoint-fopen-abort.cc index 3023cc1a1a7..bed20966845 100644 --- a/storage/tokudb/ft-index/src/tests/recover-checkpoint-fopen-abort.cc +++ b/storage/tokudb/ft-index/src/tests/recover-checkpoint-fopen-abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-checkpoint-fopen-commit.cc b/storage/tokudb/ft-index/src/tests/recover-checkpoint-fopen-commit.cc index 33546958a37..2dcdbf6b939 100644 --- a/storage/tokudb/ft-index/src/tests/recover-checkpoint-fopen-commit.cc +++ b/storage/tokudb/ft-index/src/tests/recover-checkpoint-fopen-commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-child-rollback.cc b/storage/tokudb/ft-index/src/tests/recover-child-rollback.cc index 00f036cc174..62fbfbda6ef 100644 --- a/storage/tokudb/ft-index/src/tests/recover-child-rollback.cc +++ b/storage/tokudb/ft-index/src/tests/recover-child-rollback.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-compare-db-descriptor.cc b/storage/tokudb/ft-index/src/tests/recover-compare-db-descriptor.cc index 58ae0b007e3..2cbc54efa17 100644 --- a/storage/tokudb/ft-index/src/tests/recover-compare-db-descriptor.cc +++ b/storage/tokudb/ft-index/src/tests/recover-compare-db-descriptor.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-compare-db.cc b/storage/tokudb/ft-index/src/tests/recover-compare-db.cc index 7e1de1ef3fe..6ce16bd479d 100644 --- a/storage/tokudb/ft-index/src/tests/recover-compare-db.cc +++ b/storage/tokudb/ft-index/src/tests/recover-compare-db.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-del-multiple-abort.cc b/storage/tokudb/ft-index/src/tests/recover-del-multiple-abort.cc index 4b8d8b44cf7..5a7e1710de1 100644 --- a/storage/tokudb/ft-index/src/tests/recover-del-multiple-abort.cc +++ b/storage/tokudb/ft-index/src/tests/recover-del-multiple-abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-del-multiple-srcdb-fdelete-all.cc b/storage/tokudb/ft-index/src/tests/recover-del-multiple-srcdb-fdelete-all.cc index 3224fa66057..632a4805835 100644 --- a/storage/tokudb/ft-index/src/tests/recover-del-multiple-srcdb-fdelete-all.cc +++ b/storage/tokudb/ft-index/src/tests/recover-del-multiple-srcdb-fdelete-all.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-del-multiple.cc b/storage/tokudb/ft-index/src/tests/recover-del-multiple.cc index be09e29a0b8..d4c7303162a 100644 --- a/storage/tokudb/ft-index/src/tests/recover-del-multiple.cc +++ b/storage/tokudb/ft-index/src/tests/recover-del-multiple.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-delboth-after-checkpoint.cc b/storage/tokudb/ft-index/src/tests/recover-delboth-after-checkpoint.cc index 4655b5b5065..323b5b64ef7 100644 --- a/storage/tokudb/ft-index/src/tests/recover-delboth-after-checkpoint.cc +++ b/storage/tokudb/ft-index/src/tests/recover-delboth-after-checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-delboth-checkpoint.cc b/storage/tokudb/ft-index/src/tests/recover-delboth-checkpoint.cc index 3e674644ed5..4ee3f5bba66 100644 --- a/storage/tokudb/ft-index/src/tests/recover-delboth-checkpoint.cc +++ b/storage/tokudb/ft-index/src/tests/recover-delboth-checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor.cc index f726d63fc0c..df96b8cbf45 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor10.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor10.cc index 9b747506fd3..db73549eb1f 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor10.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor10.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor11.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor11.cc index 8a2a1f34644..5d593af25bd 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor11.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor11.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor12.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor12.cc index b3bb25abaea..698fa5d2b63 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor12.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor12.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor2.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor2.cc index 7f09f4a7c54..62e685962e4 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor2.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor3.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor3.cc index 87d607359f3..58d219af9cf 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor3.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor4.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor4.cc index 192a9474b62..37d7ca80f1f 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor4.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor4.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor5.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor5.cc index 6ce30af5a70..757116afe19 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor5.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor5.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor6.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor6.cc index b092581c3c1..68f90b0f276 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor6.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor6.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor7.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor7.cc index 77d5c74cc97..9c3a44d01e5 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor7.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor7.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor8.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor8.cc index d9c993237d8..ac80a3e8a29 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor8.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor8.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-descriptor9.cc b/storage/tokudb/ft-index/src/tests/recover-descriptor9.cc index be5bf31e3d6..17da0907374 100644 --- a/storage/tokudb/ft-index/src/tests/recover-descriptor9.cc +++ b/storage/tokudb/ft-index/src/tests/recover-descriptor9.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-fassociate.cc b/storage/tokudb/ft-index/src/tests/recover-fassociate.cc index d97cfd7f849..81a19f18cd2 100644 --- a/storage/tokudb/ft-index/src/tests/recover-fassociate.cc +++ b/storage/tokudb/ft-index/src/tests/recover-fassociate.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-fclose-in-checkpoint.cc b/storage/tokudb/ft-index/src/tests/recover-fclose-in-checkpoint.cc index 5058c49dc6c..0519b9ba3c9 100644 --- a/storage/tokudb/ft-index/src/tests/recover-fclose-in-checkpoint.cc +++ b/storage/tokudb/ft-index/src/tests/recover-fclose-in-checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-fcreate-basementnodesize.cc b/storage/tokudb/ft-index/src/tests/recover-fcreate-basementnodesize.cc index 228528a5327..25350829505 100644 --- a/storage/tokudb/ft-index/src/tests/recover-fcreate-basementnodesize.cc +++ b/storage/tokudb/ft-index/src/tests/recover-fcreate-basementnodesize.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-fcreate-fclose.cc b/storage/tokudb/ft-index/src/tests/recover-fcreate-fclose.cc index 240969e5eca..1dfccc4c3f4 100644 --- a/storage/tokudb/ft-index/src/tests/recover-fcreate-fclose.cc +++ b/storage/tokudb/ft-index/src/tests/recover-fcreate-fclose.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-fcreate-fdelete.cc b/storage/tokudb/ft-index/src/tests/recover-fcreate-fdelete.cc index b0ef652e906..76605330a23 100644 --- a/storage/tokudb/ft-index/src/tests/recover-fcreate-fdelete.cc +++ b/storage/tokudb/ft-index/src/tests/recover-fcreate-fdelete.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-fcreate-nodesize.cc b/storage/tokudb/ft-index/src/tests/recover-fcreate-nodesize.cc index 619704efcf2..7526c20b474 100644 --- a/storage/tokudb/ft-index/src/tests/recover-fcreate-nodesize.cc +++ b/storage/tokudb/ft-index/src/tests/recover-fcreate-nodesize.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-fcreate-xabort.cc b/storage/tokudb/ft-index/src/tests/recover-fcreate-xabort.cc index 9473b52f240..c18db167449 100644 --- a/storage/tokudb/ft-index/src/tests/recover-fcreate-xabort.cc +++ b/storage/tokudb/ft-index/src/tests/recover-fcreate-xabort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-flt1.cc b/storage/tokudb/ft-index/src/tests/recover-flt1.cc index 2efea643f73..f395ed8171d 100644 --- a/storage/tokudb/ft-index/src/tests/recover-flt1.cc +++ b/storage/tokudb/ft-index/src/tests/recover-flt1.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-flt10.cc b/storage/tokudb/ft-index/src/tests/recover-flt10.cc index 82e774ea96b..ccee07680b6 100644 --- a/storage/tokudb/ft-index/src/tests/recover-flt10.cc +++ b/storage/tokudb/ft-index/src/tests/recover-flt10.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-flt2.cc b/storage/tokudb/ft-index/src/tests/recover-flt2.cc index e7151771bc8..bd6125e2dfe 100644 --- a/storage/tokudb/ft-index/src/tests/recover-flt2.cc +++ b/storage/tokudb/ft-index/src/tests/recover-flt2.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-flt3.cc b/storage/tokudb/ft-index/src/tests/recover-flt3.cc index 323eb2d5019..f4fa3344e68 100644 --- a/storage/tokudb/ft-index/src/tests/recover-flt3.cc +++ b/storage/tokudb/ft-index/src/tests/recover-flt3.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-flt4.cc b/storage/tokudb/ft-index/src/tests/recover-flt4.cc index a4b68f2b8df..d2efee438c9 100644 --- a/storage/tokudb/ft-index/src/tests/recover-flt4.cc +++ b/storage/tokudb/ft-index/src/tests/recover-flt4.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-flt5.cc b/storage/tokudb/ft-index/src/tests/recover-flt5.cc index 48a5a10b707..d4a4c0cec00 100644 --- a/storage/tokudb/ft-index/src/tests/recover-flt5.cc +++ b/storage/tokudb/ft-index/src/tests/recover-flt5.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-flt6.cc b/storage/tokudb/ft-index/src/tests/recover-flt6.cc index 5929ba5200a..184e3933f64 100644 --- a/storage/tokudb/ft-index/src/tests/recover-flt6.cc +++ b/storage/tokudb/ft-index/src/tests/recover-flt6.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-flt7.cc b/storage/tokudb/ft-index/src/tests/recover-flt7.cc index 40be856bba8..e8fce283b71 100644 --- a/storage/tokudb/ft-index/src/tests/recover-flt7.cc +++ b/storage/tokudb/ft-index/src/tests/recover-flt7.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-flt8.cc b/storage/tokudb/ft-index/src/tests/recover-flt8.cc index 44a7b0f4f44..2f1958b3025 100644 --- a/storage/tokudb/ft-index/src/tests/recover-flt8.cc +++ b/storage/tokudb/ft-index/src/tests/recover-flt8.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-flt9.cc b/storage/tokudb/ft-index/src/tests/recover-flt9.cc index a9c89a53ab1..28325fbd6c5 100644 --- a/storage/tokudb/ft-index/src/tests/recover-flt9.cc +++ b/storage/tokudb/ft-index/src/tests/recover-flt9.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-fopen-checkpoint-fclose.cc b/storage/tokudb/ft-index/src/tests/recover-fopen-checkpoint-fclose.cc index 65c63417065..b8019b1ec79 100644 --- a/storage/tokudb/ft-index/src/tests/recover-fopen-checkpoint-fclose.cc +++ b/storage/tokudb/ft-index/src/tests/recover-fopen-checkpoint-fclose.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-fopen-fclose-checkpoint.cc b/storage/tokudb/ft-index/src/tests/recover-fopen-fclose-checkpoint.cc index 6d17bb79998..bb750cd3c8d 100644 --- a/storage/tokudb/ft-index/src/tests/recover-fopen-fclose-checkpoint.cc +++ b/storage/tokudb/ft-index/src/tests/recover-fopen-fclose-checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-fopen-fdelete-checkpoint-fcreate.cc b/storage/tokudb/ft-index/src/tests/recover-fopen-fdelete-checkpoint-fcreate.cc index 608ebadd4a6..e745b666f86 100644 --- a/storage/tokudb/ft-index/src/tests/recover-fopen-fdelete-checkpoint-fcreate.cc +++ b/storage/tokudb/ft-index/src/tests/recover-fopen-fdelete-checkpoint-fcreate.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-hotindexer-simple-abort-put.cc b/storage/tokudb/ft-index/src/tests/recover-hotindexer-simple-abort-put.cc index 153b911a018..ae99abb1082 100644 --- a/storage/tokudb/ft-index/src/tests/recover-hotindexer-simple-abort-put.cc +++ b/storage/tokudb/ft-index/src/tests/recover-hotindexer-simple-abort-put.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-loader-test.cc b/storage/tokudb/ft-index/src/tests/recover-loader-test.cc index c9944ba0409..381a0c600ba 100644 --- a/storage/tokudb/ft-index/src/tests/recover-loader-test.cc +++ b/storage/tokudb/ft-index/src/tests/recover-loader-test.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-lsn-filter-multiple.cc b/storage/tokudb/ft-index/src/tests/recover-lsn-filter-multiple.cc index dc26721818d..465f8cffab7 100644 --- a/storage/tokudb/ft-index/src/tests/recover-lsn-filter-multiple.cc +++ b/storage/tokudb/ft-index/src/tests/recover-lsn-filter-multiple.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-lsn-filter.cc b/storage/tokudb/ft-index/src/tests/recover-lsn-filter.cc index 4cd79918a86..9877923c50c 100644 --- a/storage/tokudb/ft-index/src/tests/recover-lsn-filter.cc +++ b/storage/tokudb/ft-index/src/tests/recover-lsn-filter.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-missing-dbfile-2.cc b/storage/tokudb/ft-index/src/tests/recover-missing-dbfile-2.cc index 59f963ef503..691ffc36162 100644 --- a/storage/tokudb/ft-index/src/tests/recover-missing-dbfile-2.cc +++ b/storage/tokudb/ft-index/src/tests/recover-missing-dbfile-2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-missing-dbfile.cc b/storage/tokudb/ft-index/src/tests/recover-missing-dbfile.cc index a71f91d7417..5af1644176b 100644 --- a/storage/tokudb/ft-index/src/tests/recover-missing-dbfile.cc +++ b/storage/tokudb/ft-index/src/tests/recover-missing-dbfile.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-missing-logfile.cc b/storage/tokudb/ft-index/src/tests/recover-missing-logfile.cc index d7b6b75d4cc..51681ad0ea8 100644 --- a/storage/tokudb/ft-index/src/tests/recover-missing-logfile.cc +++ b/storage/tokudb/ft-index/src/tests/recover-missing-logfile.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-put-multiple-abort.cc b/storage/tokudb/ft-index/src/tests/recover-put-multiple-abort.cc index c2036f6f34b..abfa78a9283 100644 --- a/storage/tokudb/ft-index/src/tests/recover-put-multiple-abort.cc +++ b/storage/tokudb/ft-index/src/tests/recover-put-multiple-abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-put-multiple-fdelete-all.cc b/storage/tokudb/ft-index/src/tests/recover-put-multiple-fdelete-all.cc index a92db3a2a22..e65667a0e4f 100644 --- a/storage/tokudb/ft-index/src/tests/recover-put-multiple-fdelete-all.cc +++ b/storage/tokudb/ft-index/src/tests/recover-put-multiple-fdelete-all.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-put-multiple-fdelete-some.cc b/storage/tokudb/ft-index/src/tests/recover-put-multiple-fdelete-some.cc index 88014d208d2..4f37a9adf67 100644 --- a/storage/tokudb/ft-index/src/tests/recover-put-multiple-fdelete-some.cc +++ b/storage/tokudb/ft-index/src/tests/recover-put-multiple-fdelete-some.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-put-multiple-srcdb-fdelete-all.cc b/storage/tokudb/ft-index/src/tests/recover-put-multiple-srcdb-fdelete-all.cc index df56fa4f00b..e612e4d4c9a 100644 --- a/storage/tokudb/ft-index/src/tests/recover-put-multiple-srcdb-fdelete-all.cc +++ b/storage/tokudb/ft-index/src/tests/recover-put-multiple-srcdb-fdelete-all.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-put-multiple.cc b/storage/tokudb/ft-index/src/tests/recover-put-multiple.cc index 8e4c19141bf..21a68384860 100644 --- a/storage/tokudb/ft-index/src/tests/recover-put-multiple.cc +++ b/storage/tokudb/ft-index/src/tests/recover-put-multiple.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-rollback.cc b/storage/tokudb/ft-index/src/tests/recover-rollback.cc new file mode 100644 index 00000000000..2d976c05b5a --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/recover-rollback.cc @@ -0,0 +1,262 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuDB, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
+ +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." + +// Test dirty upgrade. +// Generate a rollback log that requires recovery. + +#include "test.h" + +// Insert max_rows key/val pairs into the db +static void do_inserts(DB_TXN *txn, DB *db, uint64_t max_rows, size_t val_size) { + char val_data[val_size]; memset(val_data, 0, val_size); + int r; + + for (uint64_t i = 0; i < max_rows; i++) { + // pick a sequential key but it does not matter for this test. 
+ uint64_t k[2] = { + htonl(i), random64(), + }; + + DBT key = { .data = k, .size = sizeof k }; + DBT val = { .data = val_data, .size = (uint32_t) val_size }; + r = db->put(db, txn, &key, &val, 0); + CKERR(r); + } +} + +static void run_test(uint64_t num_rows, size_t val_size, bool do_crash) { + int r; + + DB_ENV *env = nullptr; + r = db_env_create(&env, 0); + CKERR(r); + r = env->set_cachesize(env, 8, 0, 1); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + DB *db = nullptr; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + r = env->txn_checkpoint(env, 0, 0, 0); + CKERR(r); + + DB_TXN *txn = nullptr; + r = env->txn_begin(env, nullptr, &txn, 0); + CKERR(r); + + do_inserts(txn, db, num_rows, val_size); + + r = env->txn_checkpoint(env, 0, 0, 0); + CKERR(r); + + r = txn->commit(txn, 0); + CKERR(r); + + if (do_crash) + assert(0); // crash on purpose + + r = db->close(db, 0); + CKERR(r); + + r = env->close(env, 0); + CKERR(r); +} + +static void do_verify(DB_ENV *env, DB *db, uint64_t num_rows, size_t val_size UU()) { + int r; + DB_TXN *txn = nullptr; + r = env->txn_begin(env, nullptr, &txn, 0); + CKERR(r); + + DBC *c = nullptr; + r = db->cursor(db, txn, &c, 0); + CKERR(r); + + uint64_t i = 0; + while (1) { + DBT key = {}; + DBT val = {}; + r = c->c_get(c, &key, &val, DB_NEXT); + if (r == DB_NOTFOUND) + break; + CKERR(r); + assert(key.size == 16); + uint64_t k[2]; + memcpy(k, key.data, key.size); + assert(htonl(k[0]) == i); + assert(val.size == val_size); + i++; + } + assert(i == num_rows); + + r = c->c_close(c); + CKERR(r); + + r = txn->commit(txn, 0); + CKERR(r); +} + +static void run_recover(uint64_t num_rows, size_t val_size) { + int r; + + DB_ENV *env = nullptr; + r = db_env_create(&env, 0); + CKERR(r); + r = env->set_cachesize(env, 8, 0, 1); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + DB *db = nullptr; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + do_verify(env, db, num_rows, val_size); + + r = db->close(db, 0); + CKERR(r); + + r = env->close(env, 0); + CKERR(r); +} + +int test_main (int argc, char *const argv[]) { + bool do_test = false; + bool do_recover = false; + bool do_crash = true; + uint64_t num_rows = 1; + size_t val_size = 1; + + for (int i = 1; i < argc; i++) { + if (strcmp(argv[i], "-v") == 0) { + verbose++; + continue; + } + if (strcmp(argv[i], "-q") == 0) { + if (verbose > 0) verbose--; + continue; + } + if (strcmp(argv[i], "--test") == 0) { + do_test = true; + continue; + } + if (strcmp(argv[i], "--recover") == 0) { + do_recover = true; + continue; + } + if (strcmp(argv[i], "--crash") == 0 && i+1 < argc) { + do_crash = atoi(argv[++i]); + continue; + } + } + if (do_test) { + // init the env directory + toku_os_recursive_delete(TOKU_TEST_FILENAME); + int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + run_test(num_rows, val_size, do_crash); + } + if (do_recover) { + run_recover(num_rows, val_size); + } + + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/recover-rollinclude.cc b/storage/tokudb/ft-index/src/tests/recover-rollinclude.cc new file mode 100644 index 
00000000000..5a3a89a4052 --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/recover-rollinclude.cc @@ -0,0 +1,274 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuDB, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." + +// Create a rollback log with a rollinclude log entry, crash after the txn commits and before the last checkpoint. +// Recovery crashes 7.1.0, should succeed. + +#include "test.h" + +// Insert max_rows key/val pairs into the db + +// We want to force a rollinclude so we use a child transaction and insert enough rows so that it spills. +// It spills at about 144K and 289K rows. +static void do_inserts(DB_ENV *env, DB *db, uint64_t max_rows, size_t val_size) { + char val_data[val_size]; memset(val_data, 0, val_size); + int r; + DB_TXN *parent = nullptr; + r = env->txn_begin(env, nullptr, &parent, 0); + CKERR(r); + + DB_TXN *child = nullptr; + r = env->txn_begin(env, parent, &child, 0); + CKERR(r); + + for (uint64_t i = 0; i < max_rows; i++) { + // pick a sequential key but it does not matter for this test. 
+ uint64_t k[2] = { + htonl(i), random64(), + }; + + DBT key = { .data = k, .size = sizeof k }; + DBT val = { .data = val_data, .size = (uint32_t) val_size }; + r = db->put(db, child, &key, &val, 0); + CKERR(r); + + if (i == max_rows-1) { + r = child->commit(child, 0); + CKERR(r); + + r = env->txn_checkpoint(env, 0, 0, 0); + CKERR(r); + } + } + + r = parent->commit(parent, 0); + CKERR(r); +} + +static void run_test(uint64_t num_rows, size_t val_size, bool do_crash) { + int r; + + DB_ENV *env = nullptr; + r = db_env_create(&env, 0); + CKERR(r); + r = env->set_cachesize(env, 8, 0, 1); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + DB *db = nullptr; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + r = env->txn_checkpoint(env, 0, 0, 0); + CKERR(r); + + do_inserts(env, db, num_rows, val_size); + + if (do_crash) + assert(0); // crash on purpose + + r = db->close(db, 0); + CKERR(r); + + r = env->close(env, 0); + CKERR(r); +} + +static void do_verify(DB_ENV *env, DB *db, uint64_t num_rows, size_t val_size UU()) { + int r; + DB_TXN *txn = nullptr; + r = env->txn_begin(env, nullptr, &txn, 0); + CKERR(r); + + DBC *c = nullptr; + r = db->cursor(db, txn, &c, 0); + CKERR(r); + + uint64_t i = 0; + while (1) { + DBT key = {}; + DBT val = {}; + r = c->c_get(c, &key, &val, DB_NEXT); + if (r == DB_NOTFOUND) + break; + CKERR(r); + assert(key.size == 16); + uint64_t k[2]; + memcpy(k, key.data, key.size); + assert(htonl(k[0]) == i); + assert(val.size == val_size); + i++; + } + assert(i == num_rows); + + r = c->c_close(c); + CKERR(r); + + r = txn->commit(txn, 0); + CKERR(r); +} + +static void run_recover(uint64_t num_rows, size_t val_size) { + int r; + + DB_ENV *env = nullptr; + r = db_env_create(&env, 0); + CKERR(r); + r = env->set_cachesize(env, 8, 0, 1); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + DB *db = nullptr; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + do_verify(env, db, num_rows, val_size); + + r = db->close(db, 0); + CKERR(r); + + r = env->close(env, 0); + CKERR(r); +} + +int test_main (int argc, char *const argv[]) { + bool do_test = false; + bool do_recover = false; + bool do_crash = true; + for (int i = 1; i < argc; i++) { + if (strcmp(argv[i], "-v") == 0) { + verbose++; + continue; + } + if (strcmp(argv[i], "-q") == 0) { + if (verbose > 0) verbose--; + continue; + } + if (strcmp(argv[i], "--test") == 0) { + do_test = true; + continue; + } + if (strcmp(argv[i], "--recover") == 0) { + do_recover = true; + continue; + } + if (strcmp(argv[i], "--crash") == 0 && i+1 < argc) { + do_crash = atoi(argv[++i]); + continue; + } + } + + uint64_t num_rows = 300000; + size_t val_size = 1; + + if (do_test) { + // init the env directory + toku_os_recursive_delete(TOKU_TEST_FILENAME); + int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + run_test(num_rows, val_size, do_crash); + } + if (do_recover) { + run_recover(num_rows, val_size); + } + + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/recover-split-checkpoint.cc b/storage/tokudb/ft-index/src/tests/recover-split-checkpoint.cc index 
9df540aadde..4e6b3d16d98 100644 --- a/storage/tokudb/ft-index/src/tests/recover-split-checkpoint.cc +++ b/storage/tokudb/ft-index/src/tests/recover-split-checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-straddle-txn-nested.cc b/storage/tokudb/ft-index/src/tests/recover-straddle-txn-nested.cc index 32e22f9607d..03887ac19e5 100644 --- a/storage/tokudb/ft-index/src/tests/recover-straddle-txn-nested.cc +++ b/storage/tokudb/ft-index/src/tests/recover-straddle-txn-nested.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-straddle-txn.cc b/storage/tokudb/ft-index/src/tests/recover-straddle-txn.cc index a08e8940015..a728a7de17d 100644 --- a/storage/tokudb/ft-index/src/tests/recover-straddle-txn.cc +++ b/storage/tokudb/ft-index/src/tests/recover-straddle-txn.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-tablelock.cc b/storage/tokudb/ft-index/src/tests/recover-tablelock.cc index c75574e60b2..eb2a4318a20 100644 --- a/storage/tokudb/ft-index/src/tests/recover-tablelock.cc +++ b/storage/tokudb/ft-index/src/tests/recover-tablelock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-test-logsuppress-put.cc b/storage/tokudb/ft-index/src/tests/recover-test-logsuppress-put.cc index 14f659f72ad..c022fdf6243 100644 --- a/storage/tokudb/ft-index/src/tests/recover-test-logsuppress-put.cc +++ b/storage/tokudb/ft-index/src/tests/recover-test-logsuppress-put.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-test-logsuppress.cc b/storage/tokudb/ft-index/src/tests/recover-test-logsuppress.cc index 8272bbbcd51..020cfbd6122 100644 --- a/storage/tokudb/ft-index/src/tests/recover-test-logsuppress.cc +++ b/storage/tokudb/ft-index/src/tests/recover-test-logsuppress.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-test1.cc b/storage/tokudb/ft-index/src/tests/recover-test1.cc index d9b7cb6fa6c..6529d6ac968 100644 --- a/storage/tokudb/ft-index/src/tests/recover-test1.cc +++ b/storage/tokudb/ft-index/src/tests/recover-test1.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-test2.cc b/storage/tokudb/ft-index/src/tests/recover-test2.cc index 524c197c625..e6bf69b92fe 100644 --- a/storage/tokudb/ft-index/src/tests/recover-test2.cc +++ b/storage/tokudb/ft-index/src/tests/recover-test2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,7 +94,6 @@ PATENT RIGHTS GRANT: #include "test.h" -const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE; const char *namea="a.db"; DB_ENV *env; diff --git a/storage/tokudb/ft-index/src/tests/recover-test3.cc b/storage/tokudb/ft-index/src/tests/recover-test3.cc index a3de519172d..fa40c494a96 100644 --- a/storage/tokudb/ft-index/src/tests/recover-test3.cc +++ b/storage/tokudb/ft-index/src/tests/recover-test3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,7 +94,6 @@ PATENT RIGHTS GRANT: #include "test.h" -const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE; const char *namea="a.db"; DB_ENV *env; diff --git a/storage/tokudb/ft-index/src/tests/recover-test_crash_in_flusher_thread.h b/storage/tokudb/ft-index/src/tests/recover-test_crash_in_flusher_thread.h index 56087ba16fa..82d57b48867 100644 --- a/storage/tokudb/ft-index/src/tests/recover-test_crash_in_flusher_thread.h +++ b/storage/tokudb/ft-index/src/tests/recover-test_crash_in_flusher_thread.h @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -85,8 +85,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "$Id$" + #include "test.h" #include <stdio.h> diff --git a/storage/tokudb/ft-index/src/tests/recover-test_stress1.cc b/storage/tokudb/ft-index/src/tests/recover-test_stress1.cc index 7e7be8c26f7..a45667cd8a1 100644 --- a/storage/tokudb/ft-index/src/tests/recover-test_stress1.cc +++ b/storage/tokudb/ft-index/src/tests/recover-test_stress1.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-test_stress2.cc b/storage/tokudb/ft-index/src/tests/recover-test_stress2.cc index e07f36dca7c..e78f8a222b1 100644 --- a/storage/tokudb/ft-index/src/tests/recover-test_stress2.cc +++ b/storage/tokudb/ft-index/src/tests/recover-test_stress2.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-test_stress3.cc b/storage/tokudb/ft-index/src/tests/recover-test_stress3.cc index 2a3017c4cae..9794271ec6b 100644 --- a/storage/tokudb/ft-index/src/tests/recover-test_stress3.cc +++ b/storage/tokudb/ft-index/src/tests/recover-test_stress3.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-test_stress_openclose.cc b/storage/tokudb/ft-index/src/tests/recover-test_stress_openclose.cc index 52b59f96810..e84c9f2c9f6 100644 --- a/storage/tokudb/ft-index/src/tests/recover-test_stress_openclose.cc +++ b/storage/tokudb/ft-index/src/tests/recover-test_stress_openclose.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update-multiple-abort.cc b/storage/tokudb/ft-index/src/tests/recover-update-multiple-abort.cc index 1e6f57a0714..4d0e0164aa3 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update-multiple-abort.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update-multiple-abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update-multiple.cc b/storage/tokudb/ft-index/src/tests/recover-update-multiple.cc index 437f9615351..fe436c95a4d 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update-multiple.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update-multiple.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_aborts.cc b/storage/tokudb/ft-index/src/tests/recover-update_aborts.cc index 27e4a19cef1..82ffd511c2a 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_aborts.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_aborts.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_aborts_before_checkpoint.cc b/storage/tokudb/ft-index/src/tests/recover-update_aborts_before_checkpoint.cc index de3f0996d63..46723760c88 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_aborts_before_checkpoint.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_aborts_before_checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_aborts_before_close.cc b/storage/tokudb/ft-index/src/tests/recover-update_aborts_before_close.cc index e1c13d92f93..feac9dba77d 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_aborts_before_close.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_aborts_before_close.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts.cc b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts.cc index 9ac5bb5b186..05904b0ae7f 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts2.cc b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts2.cc index 2f05dc92c53..d88d483bd17 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts2.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts3.cc b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts3.cc index 3668d7a612e..c1f1baada13 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts3.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts_before_checkpoint.cc b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts_before_checkpoint.cc index a9bc84907ed..0768def9255 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts_before_checkpoint.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts_before_checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts_before_close.cc b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts_before_close.cc index 7dd3f647cbe..0d18ad7bacb 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts_before_close.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_aborts_before_close.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values.cc b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values.cc index f1c61f9d7f9..b8cd95c91b6 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values2.cc b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values2.cc index db0080598eb..f1a776bc7f5 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values2.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values3.cc b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values3.cc index 28d102579c9..9ec99677ad0 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values3.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values_before_checkpoint.cc b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values_before_checkpoint.cc index 93b40a14c27..8197f8ad2fe 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values_before_checkpoint.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values_before_checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values_before_close.cc b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values_before_close.cc index ce187c09303..8d462e82f57 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values_before_close.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_broadcast_changes_values_before_close.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_changes_values.cc b/storage/tokudb/ft-index/src/tests/recover-update_changes_values.cc index 94029e1c99e..65a55a12125 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_changes_values.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_changes_values.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_changes_values_before_checkpoint.cc b/storage/tokudb/ft-index/src/tests/recover-update_changes_values_before_checkpoint.cc index 0ff19f1801c..ab97b660724 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_changes_values_before_checkpoint.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_changes_values_before_checkpoint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-update_changes_values_before_close.cc b/storage/tokudb/ft-index/src/tests/recover-update_changes_values_before_close.cc index 7e075b00456..f17edbd1317 100644 --- a/storage/tokudb/ft-index/src/tests/recover-update_changes_values_before_close.cc +++ b/storage/tokudb/ft-index/src/tests/recover-update_changes_values_before_close.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-upgrade-db-descriptor-multihandle.cc b/storage/tokudb/ft-index/src/tests/recover-upgrade-db-descriptor-multihandle.cc index 3914badda0b..c2b8543dba8 100644 --- a/storage/tokudb/ft-index/src/tests/recover-upgrade-db-descriptor-multihandle.cc +++ b/storage/tokudb/ft-index/src/tests/recover-upgrade-db-descriptor-multihandle.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-upgrade-db-descriptor.cc b/storage/tokudb/ft-index/src/tests/recover-upgrade-db-descriptor.cc index 5bf6e1be049..9db973bc13b 100644 --- a/storage/tokudb/ft-index/src/tests/recover-upgrade-db-descriptor.cc +++ b/storage/tokudb/ft-index/src/tests/recover-upgrade-db-descriptor.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-x1-abort.cc b/storage/tokudb/ft-index/src/tests/recover-x1-abort.cc index 473d76874e3..c962f9c1c29 100644 --- a/storage/tokudb/ft-index/src/tests/recover-x1-abort.cc +++ b/storage/tokudb/ft-index/src/tests/recover-x1-abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-x1-commit.cc b/storage/tokudb/ft-index/src/tests/recover-x1-commit.cc index 780c4287f58..2c0883294e4 100644 --- a/storage/tokudb/ft-index/src/tests/recover-x1-commit.cc +++ b/storage/tokudb/ft-index/src/tests/recover-x1-commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-x1-nested-abort.cc b/storage/tokudb/ft-index/src/tests/recover-x1-nested-abort.cc index 415d184aa3d..b23235b2af8 100644 --- a/storage/tokudb/ft-index/src/tests/recover-x1-nested-abort.cc +++ b/storage/tokudb/ft-index/src/tests/recover-x1-nested-abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-x1-nested-commit.cc b/storage/tokudb/ft-index/src/tests/recover-x1-nested-commit.cc index a7fb13df76a..0426ac54cd7 100644 --- a/storage/tokudb/ft-index/src/tests/recover-x1-nested-commit.cc +++ b/storage/tokudb/ft-index/src/tests/recover-x1-nested-commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-x2-abort.cc b/storage/tokudb/ft-index/src/tests/recover-x2-abort.cc index c14fa98d0b0..9335aa5e7fb 100644 --- a/storage/tokudb/ft-index/src/tests/recover-x2-abort.cc +++ b/storage/tokudb/ft-index/src/tests/recover-x2-abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recover-x2-commit.cc b/storage/tokudb/ft-index/src/tests/recover-x2-commit.cc index 373e9cf546f..4a2dfa8013b 100644 --- a/storage/tokudb/ft-index/src/tests/recover-x2-commit.cc +++ b/storage/tokudb/ft-index/src/tests/recover-x2-commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recovery_fileops_stress.cc b/storage/tokudb/ft-index/src/tests/recovery_fileops_stress.cc index 5546ad53f5d..4ac3bccf0a2 100644 --- a/storage/tokudb/ft-index/src/tests/recovery_fileops_stress.cc +++ b/storage/tokudb/ft-index/src/tests/recovery_fileops_stress.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -104,7 +104,6 @@ DB** db_array; DB* states; static const int percent_do_op = 20; static const int percent_do_abort = 25; -static const int commit_abort_ratio = 3; static const int start_crashing_iter = 10; // iterations_per_crash_in_recovery should be an odd number; static const int iterations_per_crash_in_recovery = 7; diff --git a/storage/tokudb/ft-index/src/tests/recovery_fileops_unit.cc b/storage/tokudb/ft-index/src/tests/recovery_fileops_unit.cc index 0d7b33212ad..9c9681ae5e6 100644 --- a/storage/tokudb/ft-index/src/tests/recovery_fileops_unit.cc +++ b/storage/tokudb/ft-index/src/tests/recovery_fileops_unit.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
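In the recovery_fileops_stress.cc hunk above, only the line static const int commit_abort_ratio = 3; is removed; the neighbouring percent_do_op and percent_do_abort knobs stay, which suggests the ratio constant was simply unused. For orientation, a hedged illustration (not taken from the file) of how a percentage knob such as percent_do_abort is typically consumed in these stress tests; txn is a DB_TXN* and CKERR is the checking macro from test.h.

    // Illustration only: commit or abort a transaction based on a percent knob.
    if ((random() % 100) < percent_do_abort) {
        int chk_r = txn->abort(txn);
        CKERR(chk_r);
    } else {
        int chk_r = txn->commit(txn, 0);
        CKERR(chk_r);
    }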
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/recovery_stress.cc b/storage/tokudb/ft-index/src/tests/recovery_stress.cc index 9e6b3117031..8e704bbc3f6 100644 --- a/storage/tokudb/ft-index/src/tests/recovery_stress.cc +++ b/storage/tokudb/ft-index/src/tests/recovery_stress.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/redirect.cc b/storage/tokudb/ft-index/src/tests/redirect.cc index bcbe861adc0..9cf9d979f2f 100644 --- a/storage/tokudb/ft-index/src/tests/redirect.cc +++ b/storage/tokudb/ft-index/src/tests/redirect.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/replace-into-write-lock.cc b/storage/tokudb/ft-index/src/tests/replace-into-write-lock.cc index 7f9ec3768ff..77a03436407 100644 --- a/storage/tokudb/ft-index/src/tests/replace-into-write-lock.cc +++ b/storage/tokudb/ft-index/src/tests/replace-into-write-lock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/root_fifo_1.cc b/storage/tokudb/ft-index/src/tests/root_fifo_1.cc index fa88b3dfc4a..c83fe05c9fa 100644 --- a/storage/tokudb/ft-index/src/tests/root_fifo_1.cc +++ b/storage/tokudb/ft-index/src/tests/root_fifo_1.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/root_fifo_2.cc b/storage/tokudb/ft-index/src/tests/root_fifo_2.cc index b8d3ac63f27..1902774cd99 100644 --- a/storage/tokudb/ft-index/src/tests/root_fifo_2.cc +++ b/storage/tokudb/ft-index/src/tests/root_fifo_2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/root_fifo_31.cc b/storage/tokudb/ft-index/src/tests/root_fifo_31.cc index 09f57a67ac9..495073c8e82 100644 --- a/storage/tokudb/ft-index/src/tests/root_fifo_31.cc +++ b/storage/tokudb/ft-index/src/tests/root_fifo_31.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/root_fifo_32.cc b/storage/tokudb/ft-index/src/tests/root_fifo_32.cc index 874405ff68f..d75f81dc012 100644 --- a/storage/tokudb/ft-index/src/tests/root_fifo_32.cc +++ b/storage/tokudb/ft-index/src/tests/root_fifo_32.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/root_fifo_41.cc b/storage/tokudb/ft-index/src/tests/root_fifo_41.cc index d4f1e6554f1..91fb63985d8 100644 --- a/storage/tokudb/ft-index/src/tests/root_fifo_41.cc +++ b/storage/tokudb/ft-index/src/tests/root_fifo_41.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/rowsize.cc b/storage/tokudb/ft-index/src/tests/rowsize.cc index 0965231e621..7e84173006b 100644 --- a/storage/tokudb/ft-index/src/tests/rowsize.cc +++ b/storage/tokudb/ft-index/src/tests/rowsize.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/run_test1426.sh b/storage/tokudb/ft-index/src/tests/run_test1426.sh deleted file mode 100755 index 832dd9935c2..00000000000 --- a/storage/tokudb/ft-index/src/tests/run_test1426.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -set -e - -test $# -ge 4 - -tdbbin=$1; shift -bdbbin=$1; shift -tdbenv=$1; shift -bdbenv=$1; shift -tdbdump=$1; shift -bdbdump=$1; shift - -TOKU_TEST_FILENAME=$bdbenv $bdbbin -$bdbdump -p -h $bdbenv main > dump.bdb.1426 - -TOKU_TEST_FILENAME=$tdbenv $tdbbin -$tdbdump -x -p -h $tdbenv main > dump.tdb.1426 -diff -I db_pagesize=4096 dump.bdb.1426 dump.tdb.1426 diff --git a/storage/tokudb/ft-index/src/tests/seqinsert.cc b/storage/tokudb/ft-index/src/tests/seqinsert.cc index 8d402f2dcf5..85d20bf8941 100644 --- a/storage/tokudb/ft-index/src/tests/seqinsert.cc +++ b/storage/tokudb/ft-index/src/tests/seqinsert.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/shutdown-3344.cc b/storage/tokudb/ft-index/src/tests/shutdown-3344.cc index 6b586287909..94716b558ec 100644 --- a/storage/tokudb/ft-index/src/tests/shutdown-3344.cc +++ b/storage/tokudb/ft-index/src/tests/shutdown-3344.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/simple.cc b/storage/tokudb/ft-index/src/tests/simple.cc index d51cf446f4c..0733a005283 100644 --- a/storage/tokudb/ft-index/src/tests/simple.cc +++ b/storage/tokudb/ft-index/src/tests/simple.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/stat64-create-modify-times.cc b/storage/tokudb/ft-index/src/tests/stat64-create-modify-times.cc index 09c24546757..46c25dc7208 100644 --- a/storage/tokudb/ft-index/src/tests/stat64-create-modify-times.cc +++ b/storage/tokudb/ft-index/src/tests/stat64-create-modify-times.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/stat64-null-txn.cc b/storage/tokudb/ft-index/src/tests/stat64-null-txn.cc index eb799010835..3ca90823b86 100644 --- a/storage/tokudb/ft-index/src/tests/stat64-null-txn.cc +++ b/storage/tokudb/ft-index/src/tests/stat64-null-txn.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/stat64-root-changes.cc b/storage/tokudb/ft-index/src/tests/stat64-root-changes.cc index b0c7a0131b6..80cf022f946 100644 --- a/storage/tokudb/ft-index/src/tests/stat64-root-changes.cc +++ b/storage/tokudb/ft-index/src/tests/stat64-root-changes.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/stat64.cc b/storage/tokudb/ft-index/src/tests/stat64.cc index 23e6ee84a65..8e115fc20d6 100644 --- a/storage/tokudb/ft-index/src/tests/stat64.cc +++ b/storage/tokudb/ft-index/src/tests/stat64.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/stress-gc.cc b/storage/tokudb/ft-index/src/tests/stress-gc.cc index 97e2e7309c3..c67f9b8ed40 100644 --- a/storage/tokudb/ft-index/src/tests/stress-gc.cc +++ b/storage/tokudb/ft-index/src/tests/stress-gc.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/stress-gc2.cc b/storage/tokudb/ft-index/src/tests/stress-gc2.cc index adee9ad1b35..d0a63a874ec 100644 --- a/storage/tokudb/ft-index/src/tests/stress-gc2.cc +++ b/storage/tokudb/ft-index/src/tests/stress-gc2.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/stress-test.cc b/storage/tokudb/ft-index/src/tests/stress-test.cc index 0774358fee1..87238a62d18 100644 --- a/storage/tokudb/ft-index/src/tests/stress-test.cc +++ b/storage/tokudb/ft-index/src/tests/stress-test.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/stress_openclose.h b/storage/tokudb/ft-index/src/tests/stress_openclose.h index 4e61dcef356..ab15960a310 100644 --- a/storage/tokudb/ft-index/src/tests/stress_openclose.h +++ b/storage/tokudb/ft-index/src/tests/stress_openclose.h @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -85,6 +85,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." 
#ident "$Id$" diff --git a/storage/tokudb/ft-index/src/tests/test-5138.cc b/storage/tokudb/ft-index/src/tests/test-5138.cc index 1ec1d4646f9..546fe6d9368 100644 --- a/storage/tokudb/ft-index/src/tests/test-5138.cc +++ b/storage/tokudb/ft-index/src/tests/test-5138.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test-nested-xopen-eclose.cc b/storage/tokudb/ft-index/src/tests/test-nested-xopen-eclose.cc index 2c5f7fae569..e78d2130ea3 100644 --- a/storage/tokudb/ft-index/src/tests/test-nested-xopen-eclose.cc +++ b/storage/tokudb/ft-index/src/tests/test-nested-xopen-eclose.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test-prepare.cc b/storage/tokudb/ft-index/src/tests/test-prepare.cc index ea3949cf6fe..9033c633ea7 100644 --- a/storage/tokudb/ft-index/src/tests/test-prepare.cc +++ b/storage/tokudb/ft-index/src/tests/test-prepare.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test-prepare2.cc b/storage/tokudb/ft-index/src/tests/test-prepare2.cc index eb79a1e8e18..8952f14cf31 100644 --- a/storage/tokudb/ft-index/src/tests/test-prepare2.cc +++ b/storage/tokudb/ft-index/src/tests/test-prepare2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test-prepare3.cc b/storage/tokudb/ft-index/src/tests/test-prepare3.cc index 352518b8579..3643d73f41a 100644 --- a/storage/tokudb/ft-index/src/tests/test-prepare3.cc +++ b/storage/tokudb/ft-index/src/tests/test-prepare3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test-rollinclude.cc b/storage/tokudb/ft-index/src/tests/test-rollinclude.cc index 8a4af61bf59..6ece4beb671 100644 --- a/storage/tokudb/ft-index/src/tests/test-rollinclude.cc +++ b/storage/tokudb/ft-index/src/tests/test-rollinclude.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test-xa-prepare.cc b/storage/tokudb/ft-index/src/tests/test-xa-prepare.cc index d409eefb382..e08e7361555 100644 --- a/storage/tokudb/ft-index/src/tests/test-xa-prepare.cc +++ b/storage/tokudb/ft-index/src/tests/test-xa-prepare.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test-xopen-eclose.cc b/storage/tokudb/ft-index/src/tests/test-xopen-eclose.cc index f6359d8cf1a..82f2bc6d159 100644 --- a/storage/tokudb/ft-index/src/tests/test-xopen-eclose.cc +++ b/storage/tokudb/ft-index/src/tests/test-xopen-eclose.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test.h b/storage/tokudb/ft-index/src/tests/test.h index 4cbfcf426d6..c8e98862038 100644 --- a/storage/tokudb/ft-index/src/tests/test.h +++ b/storage/tokudb/ft-index/src/tests/test.h @@ -2,10 +2,6 @@ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: #ident "$Id$" -#ifndef __TEST_H -#define __TEST_H - - /* COPYING CONDITIONS NOTICE: @@ -34,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +87,10 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." + #include <toku_portability.h> #include <string.h> @@ -495,15 +494,8 @@ static int env_del_multiple_test_no_array( { int chk_r = (txn)->abort(txn); CKERR(chk_r); } \ }) - -int test_main (int argc, char * const argv[]); -int -#if defined(__cilkplusplus) -cilk_main(int argc, char *argv[]) -#else -main(int argc, char * const argv[]) -#endif -{ +int test_main(int argc, char *const argv[]); +int main(int argc, char *const argv[]) { int r; toku_os_initialize_settings(1); r = test_main(argc, argv); @@ -513,5 +505,3 @@ main(int argc, char * const argv[]) #ifndef DB_GID_SIZE #define DB_GID_SIZE DB_XIDDATASIZE #endif - -#endif // __TEST_H diff --git a/storage/tokudb/ft-index/src/tests/test1572.cc b/storage/tokudb/ft-index/src/tests/test1572.cc index 73d93d58761..0ea04a9c401 100644 --- a/storage/tokudb/ft-index/src/tests/test1572.cc +++ b/storage/tokudb/ft-index/src/tests/test1572.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,7 +92,7 @@ PATENT RIGHTS GRANT: /* Is it feasible to run 4 billion transactions in one test in the regression tests? */ #include <db.h> #include <sys/stat.h> -#include <ft/log.h> +#include <ft/logger/log.h> #include <src/ydb_txn.h> static void diff --git a/storage/tokudb/ft-index/src/tests/test1753.cc b/storage/tokudb/ft-index/src/tests/test1753.cc index d4d09ecaf79..e50b828c92c 100644 --- a/storage/tokudb/ft-index/src/tests/test1753.cc +++ b/storage/tokudb/ft-index/src/tests/test1753.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test1842.cc b/storage/tokudb/ft-index/src/tests/test1842.cc index 43702da5694..20b014d4a33 100644 --- a/storage/tokudb/ft-index/src/tests/test1842.cc +++ b/storage/tokudb/ft-index/src/tests/test1842.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
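The test.h hunk above makes three changes that are visible in the diff: the __TEST_H include guard is replaced by #pragma once, the Cilk++ (#if defined(__cilkplusplus)) entry-point variant is dropped, and a single main() remains that initializes the runtime settings and forwards to each test's test_main(). The hunk ends before the closing lines of main(), so the sketch below reproduces only what the added lines show and fills the tail with an assumed return, purely for illustration.

    #pragma once
    // ...license text and #ident lines as in the header...

    int test_main(int argc, char *const argv[]);

    // One entry point for every test binary; the old cilk_main branch is gone.
    int main(int argc, char *const argv[]) {
        int r;
        toku_os_initialize_settings(1);
        r = test_main(argc, argv);
        // The rest of main() lies outside the hunk shown above; returning r
        // here is an assumption made only to keep this sketch self-contained.
        return r;
    }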
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test3039.cc b/storage/tokudb/ft-index/src/tests/test3039.cc index aaaeebc7c36..35b22b374c0 100644 --- a/storage/tokudb/ft-index/src/tests/test3039.cc +++ b/storage/tokudb/ft-index/src/tests/test3039.cc @@ -36,7 +36,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test3219.cc b/storage/tokudb/ft-index/src/tests/test3219.cc index 95bff0f445d..d5de370fd60 100644 --- a/storage/tokudb/ft-index/src/tests/test3219.cc +++ b/storage/tokudb/ft-index/src/tests/test3219.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test3522.cc b/storage/tokudb/ft-index/src/tests/test3522.cc index fe67793e3af..7166c561f70 100644 --- a/storage/tokudb/ft-index/src/tests/test3522.cc +++ b/storage/tokudb/ft-index/src/tests/test3522.cc @@ -35,7 +35,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test3522b.cc b/storage/tokudb/ft-index/src/tests/test3522b.cc index 17b2df6b13a..09c9807dd2b 100644 --- a/storage/tokudb/ft-index/src/tests/test3522b.cc +++ b/storage/tokudb/ft-index/src/tests/test3522b.cc @@ -36,7 +36,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test3529.cc b/storage/tokudb/ft-index/src/tests/test3529.cc index 287729451a4..2c605c3028a 100644 --- a/storage/tokudb/ft-index/src/tests/test3529.cc +++ b/storage/tokudb/ft-index/src/tests/test3529.cc @@ -40,7 +40,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test4573-logtrim.cc b/storage/tokudb/ft-index/src/tests/test4573-logtrim.cc index a439f886103..9dba89f04f1 100644 --- a/storage/tokudb/ft-index/src/tests/test4573-logtrim.cc +++ b/storage/tokudb/ft-index/src/tests/test4573-logtrim.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test5092.cc b/storage/tokudb/ft-index/src/tests/test5092.cc index 6572c4df246..16652472b55 100644 --- a/storage/tokudb/ft-index/src/tests/test5092.cc +++ b/storage/tokudb/ft-index/src/tests/test5092.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test938.cc b/storage/tokudb/ft-index/src/tests/test938.cc index bb4b9464a2d..d6896894b54 100644 --- a/storage/tokudb/ft-index/src/tests/test938.cc +++ b/storage/tokudb/ft-index/src/tests/test938.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test938b.cc b/storage/tokudb/ft-index/src/tests/test938b.cc index d0d07120f4a..78830d8d4c8 100644 --- a/storage/tokudb/ft-index/src/tests/test938b.cc +++ b/storage/tokudb/ft-index/src/tests/test938b.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test938c.cc b/storage/tokudb/ft-index/src/tests/test938c.cc index f3914ccb302..154bf05862d 100644 --- a/storage/tokudb/ft-index/src/tests/test938c.cc +++ b/storage/tokudb/ft-index/src/tests/test938c.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -106,7 +106,7 @@ run (void) { // add (1,101) to the tree // In another concurrent txn // look up (1,102) and do DB_NEXT - // That should be fine in TokuDB. + // That should be fine in TokuFT. // It fails before #938 is fixed. // It also fails for BDB for other reasons (page-level locking vs. row-level locking) { diff --git a/storage/tokudb/ft-index/src/tests/test_3529_insert_2.cc b/storage/tokudb/ft-index/src/tests/test_3529_insert_2.cc index 542785e007f..d024143415d 100644 --- a/storage/tokudb/ft-index/src/tests/test_3529_insert_2.cc +++ b/storage/tokudb/ft-index/src/tests/test_3529_insert_2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_3529_table_lock.cc b/storage/tokudb/ft-index/src/tests/test_3529_table_lock.cc index 7a07ff4ac11..cb9137a10a0 100644 --- a/storage/tokudb/ft-index/src/tests/test_3529_table_lock.cc +++ b/storage/tokudb/ft-index/src/tests/test_3529_table_lock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_3645.cc b/storage/tokudb/ft-index/src/tests/test_3645.cc index dfd8544ef4b..1e7c3b5faf6 100644 --- a/storage/tokudb/ft-index/src/tests/test_3645.cc +++ b/storage/tokudb/ft-index/src/tests/test_3645.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_3755.cc b/storage/tokudb/ft-index/src/tests/test_3755.cc index fa6af3b46d8..a678352ba19 100644 --- a/storage/tokudb/ft-index/src/tests/test_3755.cc +++ b/storage/tokudb/ft-index/src/tests/test_3755.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_4015.cc b/storage/tokudb/ft-index/src/tests/test_4015.cc index a1b8f555155..c0538b7976c 100644 --- a/storage/tokudb/ft-index/src/tests/test_4015.cc +++ b/storage/tokudb/ft-index/src/tests/test_4015.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_4368.cc b/storage/tokudb/ft-index/src/tests/test_4368.cc index f000efa7813..ab55a6ee173 100644 --- a/storage/tokudb/ft-index/src/tests/test_4368.cc +++ b/storage/tokudb/ft-index/src/tests/test_4368.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_4657.cc b/storage/tokudb/ft-index/src/tests/test_4657.cc index c7a3f7473ce..6ab9ce56d73 100644 --- a/storage/tokudb/ft-index/src/tests/test_4657.cc +++ b/storage/tokudb/ft-index/src/tests/test_4657.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_5015.cc b/storage/tokudb/ft-index/src/tests/test_5015.cc index 4eb337eb88b..071b7f3660e 100644 --- a/storage/tokudb/ft-index/src/tests/test_5015.cc +++ b/storage/tokudb/ft-index/src/tests/test_5015.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_5469.cc b/storage/tokudb/ft-index/src/tests/test_5469.cc index c7e30b42c2f..cbbcb3721cb 100644 --- a/storage/tokudb/ft-index/src/tests/test_5469.cc +++ b/storage/tokudb/ft-index/src/tests/test_5469.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_789.cc b/storage/tokudb/ft-index/src/tests/test_789.cc index c99af5bd1e5..31cdd6ef777 100644 --- a/storage/tokudb/ft-index/src/tests/test_789.cc +++ b/storage/tokudb/ft-index/src/tests/test_789.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_935.cc b/storage/tokudb/ft-index/src/tests/test_935.cc index 971a1c1a85a..a676db32460 100644 --- a/storage/tokudb/ft-index/src/tests/test_935.cc +++ b/storage/tokudb/ft-index/src/tests/test_935.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_abort1.cc b/storage/tokudb/ft-index/src/tests/test_abort1.cc index 27f5d68348c..7a8b3384ce8 100644 --- a/storage/tokudb/ft-index/src/tests/test_abort1.cc +++ b/storage/tokudb/ft-index/src/tests/test_abort1.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,7 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include "test.h" -/* Simple test of logging. Can I start a TokuDB with logging enabled? */ +/* Simple test of logging. Can I start TokuFT with logging enabled? */ #include <errno.h> #include <stdlib.h> diff --git a/storage/tokudb/ft-index/src/tests/test_abort2.cc b/storage/tokudb/ft-index/src/tests/test_abort2.cc index e8beb73dcf2..881bc97ad1b 100644 --- a/storage/tokudb/ft-index/src/tests/test_abort2.cc +++ b/storage/tokudb/ft-index/src/tests/test_abort2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_abort3.cc b/storage/tokudb/ft-index/src/tests/test_abort3.cc index 705ae5cfce2..4542ad3b1c4 100644 --- a/storage/tokudb/ft-index/src/tests/test_abort3.cc +++ b/storage/tokudb/ft-index/src/tests/test_abort3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_abort4.cc b/storage/tokudb/ft-index/src/tests/test_abort4.cc index 0d575c78b12..29581dc6285 100644 --- a/storage/tokudb/ft-index/src/tests/test_abort4.cc +++ b/storage/tokudb/ft-index/src/tests/test_abort4.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_abort5.cc b/storage/tokudb/ft-index/src/tests/test_abort5.cc index 803e0c4fa22..34bf564d9ac 100644 --- a/storage/tokudb/ft-index/src/tests/test_abort5.cc +++ b/storage/tokudb/ft-index/src/tests/test_abort5.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_abort_delete_first.cc b/storage/tokudb/ft-index/src/tests/test_abort_delete_first.cc index ff55cb2e3ae..fb983474462 100644 --- a/storage/tokudb/ft-index/src/tests/test_abort_delete_first.cc +++ b/storage/tokudb/ft-index/src/tests/test_abort_delete_first.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_archive0.cc b/storage/tokudb/ft-index/src/tests/test_archive0.cc index 85b444e243c..8ffa87e2a6d 100644 --- a/storage/tokudb/ft-index/src/tests/test_archive0.cc +++ b/storage/tokudb/ft-index/src/tests/test_archive0.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_archive1.cc b/storage/tokudb/ft-index/src/tests/test_archive1.cc index 8aa045b061e..5208a5eb1b6 100644 --- a/storage/tokudb/ft-index/src/tests/test_archive1.cc +++ b/storage/tokudb/ft-index/src/tests/test_archive1.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_archive2.cc b/storage/tokudb/ft-index/src/tests/test_archive2.cc index ea67a743f92..faa73171f7e 100644 --- a/storage/tokudb/ft-index/src/tests/test_archive2.cc +++ b/storage/tokudb/ft-index/src/tests/test_archive2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_bad_implicit_promotion.cc b/storage/tokudb/ft-index/src/tests/test_bad_implicit_promotion.cc index 8fcff7c6132..c7555d2d3f6 100644 --- a/storage/tokudb/ft-index/src/tests/test_bad_implicit_promotion.cc +++ b/storage/tokudb/ft-index/src/tests/test_bad_implicit_promotion.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2014 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_blobs_leaf_split.cc b/storage/tokudb/ft-index/src/tests/test_blobs_leaf_split.cc index e567e4d58a8..eae30421f79 100644 --- a/storage/tokudb/ft-index/src/tests/test_blobs_leaf_split.cc +++ b/storage/tokudb/ft-index/src/tests/test_blobs_leaf_split.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_bulk_fetch.cc b/storage/tokudb/ft-index/src/tests/test_bulk_fetch.cc index 76706db6dba..800212a6751 100644 --- a/storage/tokudb/ft-index/src/tests/test_bulk_fetch.cc +++ b/storage/tokudb/ft-index/src/tests/test_bulk_fetch.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cachesize.cc b/storage/tokudb/ft-index/src/tests/test_cachesize.cc index 2af678ff53a..d161dd89033 100644 --- a/storage/tokudb/ft-index/src/tests/test_cachesize.cc +++ b/storage/tokudb/ft-index/src/tests/test_cachesize.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cmp_descriptor.cc b/storage/tokudb/ft-index/src/tests/test_cmp_descriptor.cc index 87b3da2b4cd..3d318ddd346 100644 --- a/storage/tokudb/ft-index/src/tests/test_cmp_descriptor.cc +++ b/storage/tokudb/ft-index/src/tests/test_cmp_descriptor.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_compression_methods.cc b/storage/tokudb/ft-index/src/tests/test_compression_methods.cc index ef73c593f56..272cf4f145d 100644 --- a/storage/tokudb/ft-index/src/tests/test_compression_methods.cc +++ b/storage/tokudb/ft-index/src/tests/test_compression_methods.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cursor_2.cc b/storage/tokudb/ft-index/src/tests/test_cursor_2.cc index de332e6bf75..d07eb95122e 100644 --- a/storage/tokudb/ft-index/src/tests/test_cursor_2.cc +++ b/storage/tokudb/ft-index/src/tests/test_cursor_2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cursor_3.cc b/storage/tokudb/ft-index/src/tests/test_cursor_3.cc index 45c0b0b4a8d..f9f256fc884 100644 --- a/storage/tokudb/ft-index/src/tests/test_cursor_3.cc +++ b/storage/tokudb/ft-index/src/tests/test_cursor_3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cursor_DB_NEXT_no_dup.cc b/storage/tokudb/ft-index/src/tests/test_cursor_DB_NEXT_no_dup.cc index d87ff04f25d..125bbee9d52 100644 --- a/storage/tokudb/ft-index/src/tests/test_cursor_DB_NEXT_no_dup.cc +++ b/storage/tokudb/ft-index/src/tests/test_cursor_DB_NEXT_no_dup.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cursor_db_current.cc b/storage/tokudb/ft-index/src/tests/test_cursor_db_current.cc index 8a2f5bcba93..2f8fbb9149f 100644 --- a/storage/tokudb/ft-index/src/tests/test_cursor_db_current.cc +++ b/storage/tokudb/ft-index/src/tests/test_cursor_db_current.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cursor_delete2.cc b/storage/tokudb/ft-index/src/tests/test_cursor_delete2.cc index 73a7182e2fd..1fcda002bc5 100644 --- a/storage/tokudb/ft-index/src/tests/test_cursor_delete2.cc +++ b/storage/tokudb/ft-index/src/tests/test_cursor_delete2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cursor_flags.cc b/storage/tokudb/ft-index/src/tests/test_cursor_flags.cc index 1bdb3daf81c..60ca37a0ac4 100644 --- a/storage/tokudb/ft-index/src/tests/test_cursor_flags.cc +++ b/storage/tokudb/ft-index/src/tests/test_cursor_flags.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cursor_interrupt.cc b/storage/tokudb/ft-index/src/tests/test_cursor_interrupt.cc index d82fc5131da..e992f86455b 100644 --- a/storage/tokudb/ft-index/src/tests/test_cursor_interrupt.cc +++ b/storage/tokudb/ft-index/src/tests/test_cursor_interrupt.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cursor_nonleaf_expand.cc b/storage/tokudb/ft-index/src/tests/test_cursor_nonleaf_expand.cc index 96b83d778fc..6464a2fda83 100644 --- a/storage/tokudb/ft-index/src/tests/test_cursor_nonleaf_expand.cc +++ b/storage/tokudb/ft-index/src/tests/test_cursor_nonleaf_expand.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cursor_null.cc b/storage/tokudb/ft-index/src/tests/test_cursor_null.cc index 68a65b97e6e..6c7bf382a8d 100644 --- a/storage/tokudb/ft-index/src/tests/test_cursor_null.cc +++ b/storage/tokudb/ft-index/src/tests/test_cursor_null.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cursor_stickyness.cc b/storage/tokudb/ft-index/src/tests/test_cursor_stickyness.cc index 6ed74265fff..62178e14137 100644 --- a/storage/tokudb/ft-index/src/tests/test_cursor_stickyness.cc +++ b/storage/tokudb/ft-index/src/tests/test_cursor_stickyness.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_cursor_with_read_txn.cc b/storage/tokudb/ft-index/src/tests/test_cursor_with_read_txn.cc index 8435b2e1a3e..d4e3148ec62 100644 --- a/storage/tokudb/ft-index/src/tests/test_cursor_with_read_txn.cc +++ b/storage/tokudb/ft-index/src/tests/test_cursor_with_read_txn.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_already_exists.cc b/storage/tokudb/ft-index/src/tests/test_db_already_exists.cc index cbb98d1b3e7..ce4008a06d8 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_already_exists.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_already_exists.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. 
Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_change_pagesize.cc b/storage/tokudb/ft-index/src/tests/test_db_change_pagesize.cc index 83e195093fd..d596782c919 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_change_pagesize.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_change_pagesize.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_change_xxx.cc b/storage/tokudb/ft-index/src/tests/test_db_change_xxx.cc index 35170e5f9ec..2033cc6cb04 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_change_xxx.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_change_xxx.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_close_no_open.cc b/storage/tokudb/ft-index/src/tests/test_db_close_no_open.cc index a9421b57451..7f433e09393 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_close_no_open.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_close_no_open.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_current_clobbers_db.cc b/storage/tokudb/ft-index/src/tests/test_db_current_clobbers_db.cc index d908e0c2e14..962d1ae5256 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_current_clobbers_db.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_current_clobbers_db.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_dbt_mem_behavior.cc b/storage/tokudb/ft-index/src/tests/test_db_dbt_mem_behavior.cc index eb5d7f87893..8a0a385f82e 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_dbt_mem_behavior.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_dbt_mem_behavior.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_delete.cc b/storage/tokudb/ft-index/src/tests/test_db_delete.cc index 66fc506d44b..4ee9b0fba83 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_delete.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_delete.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_descriptor.cc b/storage/tokudb/ft-index/src/tests/test_db_descriptor.cc index de6f6f5f608..a9403174818 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_descriptor.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_descriptor.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_env_open_close.cc b/storage/tokudb/ft-index/src/tests/test_db_env_open_close.cc index 13b1166fa0b..4bb22a026b2 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_env_open_close.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_env_open_close.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_env_open_nocreate.cc b/storage/tokudb/ft-index/src/tests/test_db_env_open_nocreate.cc index a690a4f33a9..a97ec7de733 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_env_open_nocreate.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_env_open_nocreate.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -132,7 +132,7 @@ test_main(int argc, char *const argv[]) { r = db_env_create(&dbenv, 0); CKERR(r); r = dbenv->open(dbenv, TOKU_TEST_FILENAME, private_flags|DB_INIT_MPOOL, 0); - // TokuDB has no trouble opening an environment if the directory exists. + // TokuFT has no trouble opening an environment if the directory exists. CKERR(r); assert(r==0); dbenv->close(dbenv,0); // free memory diff --git a/storage/tokudb/ft-index/src/tests/test_db_env_open_open_close.cc b/storage/tokudb/ft-index/src/tests/test_db_env_open_open_close.cc index d9336a8b48e..9a2d665edbf 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_env_open_open_close.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_env_open_open_close.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_env_set_errpfx.cc b/storage/tokudb/ft-index/src/tests/test_db_env_set_errpfx.cc index 7a717ea807b..ef7bf85b528 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_env_set_errpfx.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_env_set_errpfx.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_env_set_lg_dir.cc b/storage/tokudb/ft-index/src/tests/test_db_env_set_lg_dir.cc index ffdaf5cb833..0baa9185d15 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_env_set_lg_dir.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_env_set_lg_dir.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_env_set_tmp_dir.cc b/storage/tokudb/ft-index/src/tests/test_db_env_set_tmp_dir.cc index 150b0b29049..b1adbb30120 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_env_set_tmp_dir.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_env_set_tmp_dir.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_env_strdup_null.cc b/storage/tokudb/ft-index/src/tests/test_db_env_strdup_null.cc index 1f65f7a54ed..01ba0792e4a 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_env_strdup_null.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_env_strdup_null.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_get_put_flags.cc b/storage/tokudb/ft-index/src/tests/test_db_get_put_flags.cc index bb5403732b5..1c716cc747a 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_get_put_flags.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_get_put_flags.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_named_delete_last.cc b/storage/tokudb/ft-index/src/tests/test_db_named_delete_last.cc index db530ad1e18..ba63986af79 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_named_delete_last.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_named_delete_last.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_no_env.cc b/storage/tokudb/ft-index/src/tests/test_db_no_env.cc index 61952133119..8f7a336af5b 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_no_env.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_no_env.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_open_notexist_reopen.cc b/storage/tokudb/ft-index/src/tests/test_db_open_notexist_reopen.cc index ea5002a0131..70580b8f868 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_open_notexist_reopen.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_open_notexist_reopen.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,7 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include "test.h" -/* Simple test of logging. Can I start a TokuDB with logging enabled? */ +/* Simple test of logging. Can I start TokuFT with logging enabled? */ #include <stdlib.h> #include <sys/stat.h> diff --git a/storage/tokudb/ft-index/src/tests/test_db_remove.cc b/storage/tokudb/ft-index/src/tests/test_db_remove.cc index 7c5009a703d..e736784a7c4 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_remove.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_remove.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_remove_subdb.cc b/storage/tokudb/ft-index/src/tests/test_db_remove_subdb.cc index 90ee56278fb..8f3e50a649c 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_remove_subdb.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_remove_subdb.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_set_flags.cc b/storage/tokudb/ft-index/src/tests/test_db_set_flags.cc index bd63991da87..b106c70589f 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_set_flags.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_set_flags.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_subdb.cc b/storage/tokudb/ft-index/src/tests/test_db_subdb.cc index 4a65317d6c1..f29dd14a3fa 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_subdb.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_subdb.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_subdb_different_flags.cc b/storage/tokudb/ft-index/src/tests/test_db_subdb_different_flags.cc index 0c30b782665..c12e1bdfce9 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_subdb_different_flags.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_subdb_different_flags.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_txn_locks_nonheaviside.cc b/storage/tokudb/ft-index/src/tests/test_db_txn_locks_nonheaviside.cc index 9451b107e1a..381bce27596 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_txn_locks_nonheaviside.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_txn_locks_nonheaviside.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_txn_locks_read_uncommitted.cc b/storage/tokudb/ft-index/src/tests/test_db_txn_locks_read_uncommitted.cc index 277fbaee8be..0dd73590b84 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_txn_locks_read_uncommitted.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_txn_locks_read_uncommitted.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_db_version.cc b/storage/tokudb/ft-index/src/tests/test_db_version.cc index 1041db949d4..6ce4574ae57 100644 --- a/storage/tokudb/ft-index/src/tests/test_db_version.cc +++ b/storage/tokudb/ft-index/src/tests/test_db_version.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. 
+ TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_env_close_flags.cc b/storage/tokudb/ft-index/src/tests/test_env_close_flags.cc index 50fcd3fa50a..dd532627502 100644 --- a/storage/tokudb/ft-index/src/tests/test_env_close_flags.cc +++ b/storage/tokudb/ft-index/src/tests/test_env_close_flags.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_env_create_db_create.cc b/storage/tokudb/ft-index/src/tests/test_env_create_db_create.cc index 8e706ebf04e..d45bd7002ab 100644 --- a/storage/tokudb/ft-index/src/tests/test_env_create_db_create.cc +++ b/storage/tokudb/ft-index/src/tests/test_env_create_db_create.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_env_open_flags.cc b/storage/tokudb/ft-index/src/tests/test_env_open_flags.cc index 8d9b147e46d..1b621b8005d 100644 --- a/storage/tokudb/ft-index/src/tests/test_env_open_flags.cc +++ b/storage/tokudb/ft-index/src/tests/test_env_open_flags.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_equal_keys_with_different_bytes.cc b/storage/tokudb/ft-index/src/tests/test_equal_keys_with_different_bytes.cc index 6567822c929..d91f965ebc8 100644 --- a/storage/tokudb/ft-index/src/tests/test_equal_keys_with_different_bytes.cc +++ b/storage/tokudb/ft-index/src/tests/test_equal_keys_with_different_bytes.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2014 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_error.cc b/storage/tokudb/ft-index/src/tests/test_error.cc index 21084e70061..84c6289990e 100644 --- a/storage/tokudb/ft-index/src/tests/test_error.cc +++ b/storage/tokudb/ft-index/src/tests/test_error.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_forkjoin.cc b/storage/tokudb/ft-index/src/tests/test_forkjoin.cc index 8190a7e7745..1fb01b53712 100644 --- a/storage/tokudb/ft-index/src/tests/test_forkjoin.cc +++ b/storage/tokudb/ft-index/src/tests/test_forkjoin.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_get_max_row_size.cc b/storage/tokudb/ft-index/src/tests/test_get_max_row_size.cc index 12fc8c1e619..5ddddac9bd6 100644 --- a/storage/tokudb/ft-index/src/tests/test_get_max_row_size.cc +++ b/storage/tokudb/ft-index/src/tests/test_get_max_row_size.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_get_zeroed_dbt.cc b/storage/tokudb/ft-index/src/tests/test_get_zeroed_dbt.cc index bf7848088d2..384f4e91f46 100644 --- a/storage/tokudb/ft-index/src/tests/test_get_zeroed_dbt.cc +++ b/storage/tokudb/ft-index/src/tests/test_get_zeroed_dbt.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_groupcommit_count.cc b/storage/tokudb/ft-index/src/tests/test_groupcommit_count.cc index c24efb5562f..f5bb46c35db 100644 --- a/storage/tokudb/ft-index/src/tests/test_groupcommit_count.cc +++ b/storage/tokudb/ft-index/src/tests/test_groupcommit_count.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_groupcommit_perf.cc b/storage/tokudb/ft-index/src/tests/test_groupcommit_perf.cc index e7aa5071f61..ade56e24e4e 100644 --- a/storage/tokudb/ft-index/src/tests/test_groupcommit_perf.cc +++ b/storage/tokudb/ft-index/src/tests/test_groupcommit_perf.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_hsoc.cc b/storage/tokudb/ft-index/src/tests/test_hsoc.cc index ada02e5e522..28368456501 100644 --- a/storage/tokudb/ft-index/src/tests/test_hsoc.cc +++ b/storage/tokudb/ft-index/src/tests/test_hsoc.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_insert_cursor_delete_insert.cc b/storage/tokudb/ft-index/src/tests/test_insert_cursor_delete_insert.cc index 865736d14fe..8b09698fcee 100644 --- a/storage/tokudb/ft-index/src/tests/test_insert_cursor_delete_insert.cc +++ b/storage/tokudb/ft-index/src/tests/test_insert_cursor_delete_insert.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_insert_many_gc.cc b/storage/tokudb/ft-index/src/tests/test_insert_many_gc.cc index a1884d22c0c..be66e852021 100644 --- a/storage/tokudb/ft-index/src/tests/test_insert_many_gc.cc +++ b/storage/tokudb/ft-index/src/tests/test_insert_many_gc.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. 
Copyright (C) 2014 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_insert_memleak.cc b/storage/tokudb/ft-index/src/tests/test_insert_memleak.cc index 667221cdf04..8168ae477a1 100644 --- a/storage/tokudb/ft-index/src/tests/test_insert_memleak.cc +++ b/storage/tokudb/ft-index/src/tests/test_insert_memleak.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_insert_unique.cc b/storage/tokudb/ft-index/src/tests/test_insert_unique.cc new file mode 100644 index 00000000000..84d1ded6db5 --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/test_insert_unique.cc @@ -0,0 +1,202 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. 
+ + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." +/** + * Test that unique inserts work correctly. This exercises the rightmost leaf inject optimization. + */ + +#include <portability/toku_random.h> + +#include "test.h" + +static char random_buf[8]; +static struct random_data random_data; + +static void test_simple_unique_insert(DB_ENV *env) { + int r; + DB *db; + r = db_create(&db, env, 0); CKERR(r); + r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0644); CKERR(r); + + DBT key1, key2, key3; + dbt_init(&key1, "a", sizeof("a")); + dbt_init(&key2, "b", sizeof("b")); + dbt_init(&key3, "c", sizeof("c")); + r = db->put(db, NULL, &key1, &key1, DB_NOOVERWRITE); CKERR(r); + r = db->put(db, NULL, &key1, &key1, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + r = db->put(db, NULL, &key3, &key3, DB_NOOVERWRITE); CKERR(r); + r = db->put(db, NULL, &key3, &key3, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + r = db->put(db, NULL, &key2, &key2, DB_NOOVERWRITE); CKERR(r); + r = db->put(db, NULL, &key2, &key2, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + // sanity check + r = db->put(db, NULL, &key1, &key1, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + r = db->put(db, NULL, &key1, &key3, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + + r = db->close(db, 0); CKERR(r); + r = env->dbremove(env, NULL, "db", NULL, 0); CKERR(r); +} + +static void test_large_sequential_insert_unique(DB_ENV *env) { + int r; + DB *db; + r = db_create(&db, env, 0); CKERR(r); + + // very small nodes/basements to make a taller tree + r = db->set_pagesize(db, 8 * 1024); CKERR(r); + r = db->set_readpagesize(db, 2 * 1024); CKERR(r); + r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0644); CKERR(r); + + const int val_size = 1024; + char *XMALLOC_N(val_size, val_buf); + memset(val_buf, 'k', val_size); + DBT val; + dbt_init(&val, val_buf, val_size); + + // grow a tree to about depth 3, taking sanity checks along the way + const int start_num_rows = (64 * 1024 * 1024) / val_size; + for (int i = 0; i < start_num_rows; i++) { + DBT key; + int k = toku_htonl(i); + dbt_init(&key, &k, 
sizeof(k)); + r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR(r); + if (i % 50 == 0) { + // sanity check - should not be able to insert this key twice in a row + r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + + // .. but re-inserting is okay, if we provisionally deleted the row + DB_TXN *txn; + r = env->txn_begin(env, NULL, &txn, 0); CKERR(r); + r = db->del(db, NULL, &key, DB_DELETE_ANY); CKERR(r); + r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR(r); + r = txn->commit(txn, 0); CKERR(r); + } + if (i > 0 && i % 250 == 0) { + // sanity check - unique checks on random keys we already inserted should + // fail (exercises middle-of-the-tree checks) + for (int check_i = 0; check_i < 4; check_i++) { + DBT rand_key; + int rand_k = toku_htonl(myrandom_r(&random_data) % i); + dbt_init(&rand_key, &rand_k, sizeof(rand_k)); + r = db->put(db, NULL, &rand_key, &val, DB_NOOVERWRITE); CKERR2(r, DB_KEYEXIST); + } + } + } + + toku_free(val_buf); + r = db->close(db, 0); CKERR(r); + r = env->dbremove(env, NULL, "db", NULL, 0); CKERR(r); +} + + +int test_main(int argc, char * const argv[]) { + default_parse_args(argc, argv); + + int r; + const int envflags = DB_INIT_MPOOL | DB_CREATE | DB_THREAD | + DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE; + + // startup + DB_ENV *env; + toku_os_recursive_delete(TOKU_TEST_FILENAME); + r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); CKERR(r); + r = db_env_create(&env, 0); CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, envflags, 0755); + + r = myinitstate_r(random(), random_buf, 8, &random_data); CKERR(r); + + test_simple_unique_insert(env); + test_large_sequential_insert_unique(env); + + // cleanup + r = env->close(env, 0); CKERR(r); + + return 0; +} + diff --git a/storage/tokudb/ft-index/src/tests/test_iterate_live_transactions.cc b/storage/tokudb/ft-index/src/tests/test_iterate_live_transactions.cc index bfc90e71d07..dd00ddeeb9a 100644 --- a/storage/tokudb/ft-index/src/tests/test_iterate_live_transactions.cc +++ b/storage/tokudb/ft-index/src/tests/test_iterate_live_transactions.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_iterate_pending_lock_requests.cc b/storage/tokudb/ft-index/src/tests/test_iterate_pending_lock_requests.cc index 248c346c305..03dcce49ffd 100644 --- a/storage/tokudb/ft-index/src/tests/test_iterate_pending_lock_requests.cc +++ b/storage/tokudb/ft-index/src/tests/test_iterate_pending_lock_requests.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
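The new test_insert_unique.cc above leans entirely on the DB_NOOVERWRITE put flag: the first put of a key succeeds, any later put of the same key must return DB_KEYEXIST, and that uniqueness check is what drives the rightmost-leaf-inject optimization the test mentions. As a minimal standalone sketch of that contract (assuming the TokuFT-provided <db.h>; the test.h helpers such as CKERR and dbt_init are replaced with plain asserts and memset, and the environment directory name is made up):

/* unique_put_sketch.cc -- minimal sketch, not taken from the commit */
#include <db.h>
#include <assert.h>
#include <string.h>
#include <sys/stat.h>

int main(void) {
    DB_ENV *env;
    DB *db;
    int r;

    mkdir("./unique.env", 0755);   /* ok if it already exists */

    r = db_env_create(&env, 0);                                        assert(r == 0);
    r = env->open(env, "./unique.env",
                  DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL | DB_INIT_TXN, 0755);
    assert(r == 0);

    r = db_create(&db, env, 0);                                        assert(r == 0);
    r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0644);     assert(r == 0);

    DBT key, val;
    memset(&key, 0, sizeof(key));
    char k[] = "a";
    key.data = k;
    key.size = sizeof(k);          /* include the NUL, as the test does */
    val = key;

    /* first insert succeeds, re-insert of the same key is rejected */
    r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE);                 assert(r == 0);
    r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE);                 assert(r == DB_KEYEXIST);

    r = db->close(db, 0);                                              assert(r == 0);
    r = env->close(env, 0);                                            assert(r == 0);
    return 0;
}

test_large_sequential_insert_unique() builds on the same contract at scale: small page and basement sizes force a multi-level tree, so the uniqueness checks also exercise paths other than the rightmost leaf.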
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_keylen_diff.cc b/storage/tokudb/ft-index/src/tests/test_keylen_diff.cc new file mode 100644 index 00000000000..144ac5fce3e --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/test_keylen_diff.cc @@ -0,0 +1,284 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2014 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#include "test.h" + +// test a comparison function that treats certain different-lengthed keys as equal + +struct packed_key { + char type; + char k[8]; + static packed_key as_int(int v) { + packed_key k; + k.type = 0; + memcpy(k.k, &v, sizeof(int)); + return k; + } + static packed_key as_double(double v) { + packed_key k; + k.type = 1; + memcpy(k.k, &v, sizeof(double)); + return k; + } + size_t size() const { + assert(type == 0 || type == 1); + return type == 0 ? 5 : 9; + } +}; + +// the point is that keys can be packed as integers or doubles, but +// we'll treat them both as doubles for the sake of comparison. +// this means a 4 byte number could equal an 8 byte number. +static int packed_key_cmp(DB *UU(db), const DBT *a, const DBT *b) { + assert(a->size == 5 || a->size == 9); + assert(b->size == 5 || b->size == 9); + char *k1 = reinterpret_cast<char *>(a->data); + char *k2 = reinterpret_cast<char *>(b->data); + assert(*k1 == 0 || *k1 == 1); + assert(*k2 == 0 || *k2 == 1); + double v1 = *k1 == 0 ? static_cast<double>(*reinterpret_cast<int *>(k1 + 1)) : + *reinterpret_cast<double *>(k1 + 1); + double v2 = *k2 == 0 ? static_cast<double>(*reinterpret_cast<int *>(k2 + 1)) : + *reinterpret_cast<double *>(k2 + 1); + if (v1 > v2) { + return 1; + } else if (v1 < v2) { + return -1; + } else { + return 0; + } +} + +static int update_callback(DB *UU(db), const DBT *UU(key), const DBT *old_val, const DBT *extra, + void (*set_val)(const DBT *new_val, void *setval_extra), void *setval_extra) { + assert(extra != nullptr); + assert(old_val != nullptr); + assert(extra->size == 0); + assert(old_val->size == 0); + if (extra->data == nullptr) { + set_val(nullptr, setval_extra); + } else { + DBT new_val; + char empty_v; + dbt_init(&new_val, &empty_v, 0); + set_val(&new_val, setval_extra); + } + return 0; +} + +enum overwrite_method { + VIA_UPDATE_OVERWRITE_BROADCAST, + VIA_UPDATE_DELETE_BROADCAST, + VIA_UPDATE_OVERWRITE, + VIA_UPDATE_DELETE, + VIA_DELETE, + VIA_INSERT, + NUM_OVERWRITE_METHODS +}; + +static void test_keylen_diff(enum overwrite_method method, bool control_test) { + int r; + + DB_ENV *env; + r = db_env_create(&env, 0); CKERR(r); + r = env->set_default_bt_compare(env, packed_key_cmp); CKERR(r); + env->set_update(env, update_callback); CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_INIT_TXN, 0); CKERR(r); + + DB *db; + r = db_create(&db, env, 0); CKERR(r); + r = db->set_pagesize(db, 16 * 1024); // smaller pages so we get a more lush tree + r = db->set_readpagesize(db, 1 * 1024); // smaller basements so we get more per leaf + r = db->open(db, nullptr, "db", nullptr, DB_BTREE, DB_CREATE, 0666); CKERR(r); + + DBT null_dbt, empty_dbt; + char empty_v; + dbt_init(&empty_dbt, &empty_v, 0); + dbt_init(&null_dbt, nullptr, 0); + + const int num_keys = 256 * 1000; + + for (int i = 0; i < num_keys; i++) { + // insert it using a 4 byte key .. 
+ packed_key key = packed_key::as_int(i); + + DBT dbt; + dbt_init(&dbt, &key, key.size()); + r = db->put(db, nullptr, &dbt, &empty_dbt, 0); CKERR(r); + } + + // overwrite keys randomly, so we induce flushes and get better / realistic coverage + int *XMALLOC_N(num_keys, shuffled_keys); + for (int i = 0; i < num_keys; i++) { + shuffled_keys[i] = i; + } + for (int i = num_keys - 1; i >= 1; i--) { + long rnd = random64() % (i + 1); + int tmp = shuffled_keys[rnd]; + shuffled_keys[rnd] = shuffled_keys[i]; + shuffled_keys[i] = tmp; + } + + for (int i = 0; i < num_keys; i++) { + // for the control test, delete it using the same length key + // + // .. otherwise, delete it with an 8 byte key + packed_key key = control_test ? packed_key::as_int(shuffled_keys[i]) : + packed_key::as_double(shuffled_keys[i]); + + DBT dbt; + dbt_init(&dbt, &key, key.size()); + DB_TXN *txn; + env->txn_begin(env, nullptr, &txn, DB_TXN_NOSYNC); CKERR(r); + switch (method) { + case VIA_INSERT: { + r = db->put(db, txn, &dbt, &empty_dbt, 0); CKERR(r); + break; + } + case VIA_DELETE: { + // we purposefully do not pass DB_DELETE_ANY because the hidden query acts as + // a sanity check for the control test and, overall, gives better code coverage + r = db->del(db, txn, &dbt, 0); CKERR(r); + break; + } + case VIA_UPDATE_OVERWRITE: + case VIA_UPDATE_DELETE: { + r = db->update(db, txn, &dbt, method == VIA_UPDATE_DELETE ? &null_dbt : &empty_dbt, 0); CKERR(r); + break; + } + case VIA_UPDATE_OVERWRITE_BROADCAST: + case VIA_UPDATE_DELETE_BROADCAST: { + r = db->update_broadcast(db, txn, method == VIA_UPDATE_DELETE_BROADCAST ? &null_dbt : &empty_dbt, 0); CKERR(r); + if (i > 1 ) { // only need to test broadcast twice - one with abort, one without + txn->abort(txn); // we opened a txn so we should abort it before exiting + goto done; + } + break; + } + default: { + assert(false); + } + } + const bool abort = i % 2 == 0; + if (abort) { + txn->abort(txn); + } else { + txn->commit(txn, 0); + } + } + +done: + toku_free(shuffled_keys); + + // optimize before close to ensure that all messages are applied and any potential bugs are exposed + r = db->optimize(db); + r = db->close(db, 0); CKERR(r); + r = env->close(env, 0); CKERR(r); +} + +int +test_main(int argc, char *const argv[]) { + parse_args(argc, argv); + + toku_os_recursive_delete(TOKU_TEST_FILENAME); + int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + + for (int i = 0; i < NUM_OVERWRITE_METHODS; i++) { + enum overwrite_method method = static_cast<enum overwrite_method>(i); + + // control test - must pass for the 'real' test below to be interesting + printf("testing method %d (control)\n", i); + test_keylen_diff(method, true); + + // real test, actually mixes key lengths + printf("testing method %d (real)\n", i); + test_keylen_diff(method, false); + } + + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/test_kv_gen.h b/storage/tokudb/ft-index/src/tests/test_kv_gen.h index f17b6c18641..49bb3acdb42 100644 --- a/storage/tokudb/ft-index/src/tests/test_kv_gen.h +++ b/storage/tokudb/ft-index/src/tests/test_kv_gen.h @@ -2,10 +2,6 @@ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: #ident "$Id$" -#ifndef __TEST_KV_GEN_H -#define __TEST_KV_GEN_H - - /* COPYING CONDITIONS NOTICE: @@ -34,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,6 +87,8 @@ PATENT RIGHTS GRANT: under this License. 
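The comparator in the new test_keylen_diff.cc is the interesting part: keys are a one-byte type tag followed by either a 4-byte int or an 8-byte double, and packed_key_cmp widens both to double before comparing, so two keys with different physical lengths can be equal. The overwrite, delete and update paths in the test then have to locate an existing row through a key whose length differs from the stored one. A small self-contained sketch of just that comparison idea (editorial; the helper names below are made up, and memcpy replaces the reinterpret_casts in the test to sidestep alignment concerns):

/* keylen_cmp_sketch.cc -- minimal sketch of the tagged-key comparison */
#include <assert.h>
#include <string.h>
#include <stdio.h>

static double unpack_as_double(const char *k, size_t size) {
    assert(size == 5 || size == 9);
    assert(k[0] == 0 || k[0] == 1);
    if (k[0] == 0) {                 /* tag 0: 4-byte int payload */
        int v;
        memcpy(&v, k + 1, sizeof(v));
        return (double) v;
    } else {                         /* tag 1: 8-byte double payload */
        double v;
        memcpy(&v, k + 1, sizeof(v));
        return v;
    }
}

static int packed_cmp(const char *a, size_t asize, const char *b, size_t bsize) {
    double va = unpack_as_double(a, asize);
    double vb = unpack_as_double(b, bsize);
    return (va > vb) - (va < vb);
}

int main(void) {
    char int_key[5] = {0};           /* tag 0 + int 42  -> 5 bytes */
    char dbl_key[9] = {1};           /* tag 1 + 42.0    -> 9 bytes */
    int    iv = 42;   memcpy(int_key + 1, &iv, sizeof(iv));
    double dv = 42.0; memcpy(dbl_key + 1, &dv, sizeof(dv));

    /* different lengths, same logical key: the comparator treats them as equal */
    assert(packed_cmp(int_key, sizeof(int_key), dbl_key, sizeof(dbl_key)) == 0);
    printf("5-byte and 9-byte encodings of 42 compare equal\n");
    return 0;
}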
*/ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include "test.h" @@ -279,6 +277,3 @@ put_multiple_generate(DB *dest_db, DB *src_db, DBT *dest_key, DBT *dest_val, con } return 0; } - - -#endif // __TEST_KV_GEN_H diff --git a/storage/tokudb/ft-index/src/tests/test_kv_limits.cc b/storage/tokudb/ft-index/src/tests/test_kv_limits.cc index 9ce236bf0ad..70390bb2802 100644 --- a/storage/tokudb/ft-index/src/tests/test_kv_limits.cc +++ b/storage/tokudb/ft-index/src/tests/test_kv_limits.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_large_update_broadcast_small_cachetable.cc b/storage/tokudb/ft-index/src/tests/test_large_update_broadcast_small_cachetable.cc index ea164a8ea43..e5ccb3071b8 100644 --- a/storage/tokudb/ft-index/src/tests/test_large_update_broadcast_small_cachetable.cc +++ b/storage/tokudb/ft-index/src/tests/test_large_update_broadcast_small_cachetable.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_lock_timeout_callback.cc b/storage/tokudb/ft-index/src/tests/test_lock_timeout_callback.cc index d4aae0f95d4..74daae7897f 100644 --- a/storage/tokudb/ft-index/src/tests/test_lock_timeout_callback.cc +++ b/storage/tokudb/ft-index/src/tests/test_lock_timeout_callback.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_locking_with_read_txn.cc b/storage/tokudb/ft-index/src/tests/test_locking_with_read_txn.cc index 8f3349f3c4b..f3cb36d1df5 100644 --- a/storage/tokudb/ft-index/src/tests/test_locking_with_read_txn.cc +++ b/storage/tokudb/ft-index/src/tests/test_locking_with_read_txn.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_locktree_close.cc b/storage/tokudb/ft-index/src/tests/test_locktree_close.cc index 10efefd0b62..b5735fd1495 100644 --- a/storage/tokudb/ft-index/src/tests/test_locktree_close.cc +++ b/storage/tokudb/ft-index/src/tests/test_locktree_close.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_log0.cc b/storage/tokudb/ft-index/src/tests/test_log0.cc index 05c0820f153..c597affc562 100644 --- a/storage/tokudb/ft-index/src/tests/test_log0.cc +++ b/storage/tokudb/ft-index/src/tests/test_log0.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,7 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include "test.h" -/* Simple test of logging. 
Can I start a TokuDB with logging enabled? */ +/* Simple test of logging. Can I start TokuFT with logging enabled? */ #include <stdlib.h> #include <sys/stat.h> diff --git a/storage/tokudb/ft-index/src/tests/test_log1.cc b/storage/tokudb/ft-index/src/tests/test_log1.cc index 3c03249c845..8379d20ba45 100644 --- a/storage/tokudb/ft-index/src/tests/test_log1.cc +++ b/storage/tokudb/ft-index/src/tests/test_log1.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,7 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #include "test.h" -/* Simple test of logging. Can I start a TokuDB with logging enabled? */ +/* Simple test of logging. Can I start TokuFT with logging enabled? */ #include <stdlib.h> #include <sys/stat.h> diff --git a/storage/tokudb/ft-index/src/tests/test_log10.cc b/storage/tokudb/ft-index/src/tests/test_log10.cc index 599eb16d3ae..053efc0f07d 100644 --- a/storage/tokudb/ft-index/src/tests/test_log10.cc +++ b/storage/tokudb/ft-index/src/tests/test_log10.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: /* Test to see if we can do logging and recovery. */ -/* This is very specific to TokuDB. It won't work with Berkeley DB. */ +/* This is very specific to TokuFT. It won't work with Berkeley DB. */ /* This test_log10 inserts to a db, closes, reopens, and inserts more to db. We want to make sure that the recovery of the buffers works. */ /* Lots of stuff gets inserted. */ diff --git a/storage/tokudb/ft-index/src/tests/test_log1_abort.cc b/storage/tokudb/ft-index/src/tests/test_log1_abort.cc index f1f8269239e..6861698740f 100644 --- a/storage/tokudb/ft-index/src/tests/test_log1_abort.cc +++ b/storage/tokudb/ft-index/src/tests/test_log1_abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_log2.cc b/storage/tokudb/ft-index/src/tests/test_log2.cc index 85a3354629f..d2a2e6d5006 100644 --- a/storage/tokudb/ft-index/src/tests/test_log2.cc +++ b/storage/tokudb/ft-index/src/tests/test_log2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: /* Test to see if we can do logging and recovery. */ -/* This is very specific to TokuDB. It won't work with Berkeley DB. */ +/* This is very specific to TokuFT. It won't work with Berkeley DB. */ #include <db.h> diff --git a/storage/tokudb/ft-index/src/tests/test_log2_abort.cc b/storage/tokudb/ft-index/src/tests/test_log2_abort.cc index 7991f046643..9ed3f8a1a6f 100644 --- a/storage/tokudb/ft-index/src/tests/test_log2_abort.cc +++ b/storage/tokudb/ft-index/src/tests/test_log2_abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: /* Like test_log2 except abort. */ -/* This is very specific to TokuDB. It won't work with Berkeley DB. */ +/* This is very specific to TokuFT. It won't work with Berkeley DB. */ #include <db.h> diff --git a/storage/tokudb/ft-index/src/tests/test_log3.cc b/storage/tokudb/ft-index/src/tests/test_log3.cc index 9e4a531a899..a8b71fa90db 100644 --- a/storage/tokudb/ft-index/src/tests/test_log3.cc +++ b/storage/tokudb/ft-index/src/tests/test_log3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: /* Test to see if we can do logging and recovery. */ -/* This is very specific to TokuDB. It won't work with Berkeley DB. */ +/* This is very specific to TokuFT. It won't work with Berkeley DB. */ #include <db.h> diff --git a/storage/tokudb/ft-index/src/tests/test_log3_abort.cc b/storage/tokudb/ft-index/src/tests/test_log3_abort.cc index dc36d754f09..d08dab0c033 100644 --- a/storage/tokudb/ft-index/src/tests/test_log3_abort.cc +++ b/storage/tokudb/ft-index/src/tests/test_log3_abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_log4.cc b/storage/tokudb/ft-index/src/tests/test_log4.cc index b0da26e8454..2117907f1b0 100644 --- a/storage/tokudb/ft-index/src/tests/test_log4.cc +++ b/storage/tokudb/ft-index/src/tests/test_log4.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: /* Test to see if we can do logging and recovery. */ -/* This is very specific to TokuDB. It won't work with Berkeley DB. */ +/* This is very specific to TokuFT. It won't work with Berkeley DB. */ #include <db.h> diff --git a/storage/tokudb/ft-index/src/tests/test_log4_abort.cc b/storage/tokudb/ft-index/src/tests/test_log4_abort.cc index 4d73cda3903..37a00085812 100644 --- a/storage/tokudb/ft-index/src/tests/test_log4_abort.cc +++ b/storage/tokudb/ft-index/src/tests/test_log4_abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_log5.cc b/storage/tokudb/ft-index/src/tests/test_log5.cc index 6a40394668b..82a122c045a 100644 --- a/storage/tokudb/ft-index/src/tests/test_log5.cc +++ b/storage/tokudb/ft-index/src/tests/test_log5.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: /* Test to see if we can do logging and recovery. */ -/* This is very specific to TokuDB. It won't work with Berkeley DB. */ +/* This is very specific to TokuFT. It won't work with Berkeley DB. 
*/ #include <db.h> diff --git a/storage/tokudb/ft-index/src/tests/test_log5_abort.cc b/storage/tokudb/ft-index/src/tests/test_log5_abort.cc index be74c14b1d4..dcd512abd61 100644 --- a/storage/tokudb/ft-index/src/tests/test_log5_abort.cc +++ b/storage/tokudb/ft-index/src/tests/test_log5_abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_log6.cc b/storage/tokudb/ft-index/src/tests/test_log6.cc index 9e579d5f4e5..710519d70a7 100644 --- a/storage/tokudb/ft-index/src/tests/test_log6.cc +++ b/storage/tokudb/ft-index/src/tests/test_log6.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: /* Test to see if we can do logging and recovery. */ -/* This is very specific to TokuDB. It won't work with Berkeley DB. */ +/* This is very specific to TokuFT. It won't work with Berkeley DB. */ #include <db.h> diff --git a/storage/tokudb/ft-index/src/tests/test_log6_abort.cc b/storage/tokudb/ft-index/src/tests/test_log6_abort.cc index c02e61c82b0..09db439b22b 100644 --- a/storage/tokudb/ft-index/src/tests/test_log6_abort.cc +++ b/storage/tokudb/ft-index/src/tests/test_log6_abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_log6a_abort.cc b/storage/tokudb/ft-index/src/tests/test_log6a_abort.cc index ec4490c06fd..b5ddb6b4c3c 100644 --- a/storage/tokudb/ft-index/src/tests/test_log6a_abort.cc +++ b/storage/tokudb/ft-index/src/tests/test_log6a_abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_log7.cc b/storage/tokudb/ft-index/src/tests/test_log7.cc index ca5eb168028..afa9a5ab82c 100644 --- a/storage/tokudb/ft-index/src/tests/test_log7.cc +++ b/storage/tokudb/ft-index/src/tests/test_log7.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: /* Test to see if we can do logging and recovery. */ -/* This is very specific to TokuDB. It won't work with Berkeley DB. */ +/* This is very specific to TokuFT. It won't work with Berkeley DB. */ /* This test_log7 is like test_log5 except maxcount is larger. */ diff --git a/storage/tokudb/ft-index/src/tests/test_log8.cc b/storage/tokudb/ft-index/src/tests/test_log8.cc index bf6cad4c66b..39c607b3623 100644 --- a/storage/tokudb/ft-index/src/tests/test_log8.cc +++ b/storage/tokudb/ft-index/src/tests/test_log8.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: /* Test to see if we can do logging and recovery. 
*/ -/* This is very specific to TokuDB. It won't work with Berkeley DB. */ +/* This is very specific to TokuFT. It won't work with Berkeley DB. */ /* This test_log8 inserts to a db, closes, reopens, and inserts more to db. We want to make sure that the recovery of the buffers works. */ diff --git a/storage/tokudb/ft-index/src/tests/test_log9.cc b/storage/tokudb/ft-index/src/tests/test_log9.cc index 302eaefb976..6b7f1fddb44 100644 --- a/storage/tokudb/ft-index/src/tests/test_log9.cc +++ b/storage/tokudb/ft-index/src/tests/test_log9.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: /* Test to see if we can do logging and recovery. */ -/* This is very specific to TokuDB. It won't work with Berkeley DB. */ +/* This is very specific to TokuFT. It won't work with Berkeley DB. */ /* This test_log8 inserts to a db, closes, reopens, and inserts more to db. We want to make sure that the recovery of the buffers works. */ diff --git a/storage/tokudb/ft-index/src/tests/test_logflush.cc b/storage/tokudb/ft-index/src/tests/test_logflush.cc index 0c813d4b131..6ea09b83916 100644 --- a/storage/tokudb/ft-index/src/tests/test_logflush.cc +++ b/storage/tokudb/ft-index/src/tests/test_logflush.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_logmax.cc b/storage/tokudb/ft-index/src/tests/test_logmax.cc index 6c8b5a43ee2..89c9284ea6b 100644 --- a/storage/tokudb/ft-index/src/tests/test_logmax.cc +++ b/storage/tokudb/ft-index/src/tests/test_logmax.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_memcmp_magic.cc b/storage/tokudb/ft-index/src/tests/test_memcmp_magic.cc new file mode 100644 index 00000000000..5ddb473a6ed --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/test_memcmp_magic.cc @@ -0,0 +1,219 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. 
+ +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2014 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#include "test.h" + +#include "util/dbt.h" + +static void test_memcmp_magic(void) { + int r; + + DB_ENV *env; + r = db_env_create(&env, 0); CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_INIT_TXN, 0); CKERR(r); + + DB *db; + r = db_create(&db, env, 0); CKERR(r); + + // Can't set the memcmp magic to 0 (since it's used as a sentinel for `none') + r = db->set_memcmp_magic(db, 0); CKERR2(r, EINVAL); + + // Should be ok to set it more than once, even to different things, before opening. + r = db->set_memcmp_magic(db, 1); CKERR(r); + r = db->set_memcmp_magic(db, 2); CKERR(r); + r = db->open(db, NULL, "db", "db", DB_BTREE, DB_CREATE, 0666); CKERR(r); + + // Can't set the memcmp magic after opening. 
+ r = db->set_memcmp_magic(db, 0); CKERR2(r, EINVAL); + r = db->set_memcmp_magic(db, 1); CKERR2(r, EINVAL); + + DB *db2; + r = db_create(&db2, env, 0); CKERR(r); + r = db2->set_memcmp_magic(db2, 3); CKERR(r); // ..we can try setting it to something different + // ..but it should fail to open + r = db2->open(db2, NULL, "db", "db", DB_BTREE, DB_CREATE, 0666); CKERR2(r, EINVAL); + r = db2->set_memcmp_magic(db2, 2); CKERR(r); + r = db2->open(db2, NULL, "db", "db", DB_BTREE, DB_CREATE, 0666); CKERR(r); + + r = db2->close(db2, 0); + r = db->close(db, 0); CKERR(r); + + // dbremove opens its own handle internally. ensure that the open + // operation succeeds (and so does dbremove) despite the fact the + // internal open does not set the memcmp magic + r = env->dbremove(env, NULL, "db", "db", 0); CKERR(r); + r = env->close(env, 0); CKERR(r); +} + +static int comparison_function_unused(DB *UU(db), const DBT *UU(a), const DBT *UU(b)) { + // We're testing that the memcmp magic gets used so the real + // comparison function should never get called. + invariant(false); + return 0; +} + +static int getf_key_cb(const DBT *key, const DBT *UU(val), void *extra) { + DBT *dbt = reinterpret_cast<DBT *>(extra); + toku_clone_dbt(dbt, *key); + return 0; +} + +static void test_memcmp_magic_sort_order(void) { + int r; + + // Verify that randomly generated integer keys are sorted in memcmp + // order when packed as little endian, even with an environment-wide + // comparison function that sorts as though keys are big-endian ints. + + DB_ENV *env; + r = db_env_create(&env, 0); CKERR(r); + r = env->set_default_bt_compare(env, comparison_function_unused); CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, DB_CREATE+DB_PRIVATE+DB_INIT_MPOOL+DB_INIT_TXN, 0); CKERR(r); + + const int magic = 49; + + DB *db; + r = db_create(&db, env, 0); CKERR(r); + r = db->set_memcmp_magic(db, magic); CKERR(r); + r = db->open(db, NULL, "db", "db", DB_BTREE, DB_CREATE, 0666); CKERR(r); + + for (int i = 0; i < 10000; i++) { + char buf[1 + sizeof(int)]; + // Serialize key to first have the magic byte, then the little-endian key. + int k = toku_htonl(random()); + buf[0] = magic; + memcpy(&buf[1], &k, sizeof(int)); + + DBT key; + dbt_init(&key, buf, sizeof(buf)); + r = db->put(db, NULL, &key, &key, 0); CKERR(r); + } + + DB_TXN *txn; + env->txn_begin(env, NULL, &txn, 0); + DBC *dbc; + db->cursor(db, txn, &dbc, 0); + DBT prev_dbt, curr_dbt; + memset(&curr_dbt, 0, sizeof(DBT)); + memset(&prev_dbt, 0, sizeof(DBT)); + while (dbc->c_getf_next(dbc, 0, getf_key_cb, &curr_dbt)) { + invariant(curr_dbt.size == sizeof(int)); + if (prev_dbt.data != NULL) { + // Each key should be >= to the last using memcmp + int c = memcmp(prev_dbt.data, curr_dbt.data, sizeof(int)); + invariant(c <= 0); + } + toku_destroy_dbt(&prev_dbt); + prev_dbt = curr_dbt; + } + toku_destroy_dbt(&curr_dbt); + toku_destroy_dbt(&prev_dbt); + dbc->c_close(dbc); + txn->commit(txn, 0); + + r = db->close(db, 0); CKERR(r); + + // dbremove opens its own handle internally. 
ensure that the open + // operation succeeds (and so does dbremove) despite the fact the + // internal open does not set the memcmp magic + r = env->dbremove(env, NULL, "db", "db", 0); CKERR(r); + r = env->close(env, 0); CKERR(r); +} + +int +test_main(int argc, char *const argv[]) { + parse_args(argc, argv); + + toku_os_recursive_delete(TOKU_TEST_FILENAME); + int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); + + test_memcmp_magic(); + test_memcmp_magic_sort_order(); + + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/test_mostly_seq.cc b/storage/tokudb/ft-index/src/tests/test_mostly_seq.cc index 1094639e7e7..f4f8d16e312 100644 --- a/storage/tokudb/ft-index/src/tests/test_mostly_seq.cc +++ b/storage/tokudb/ft-index/src/tests/test_mostly_seq.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_multiple_checkpoints_block_commit.cc b/storage/tokudb/ft-index/src/tests/test_multiple_checkpoints_block_commit.cc index 6cb26372140..5accd55dadc 100644 --- a/storage/tokudb/ft-index/src/tests/test_multiple_checkpoints_block_commit.cc +++ b/storage/tokudb/ft-index/src/tests/test_multiple_checkpoints_block_commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_nested.cc b/storage/tokudb/ft-index/src/tests/test_nested.cc index 1f96101b940..9ce288435ce 100644 --- a/storage/tokudb/ft-index/src/tests/test_nested.cc +++ b/storage/tokudb/ft-index/src/tests/test_nested.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_nodup_set.cc b/storage/tokudb/ft-index/src/tests/test_nodup_set.cc index f6797c81ef6..81c0d258af3 100644 --- a/storage/tokudb/ft-index/src/tests/test_nodup_set.cc +++ b/storage/tokudb/ft-index/src/tests/test_nodup_set.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_query.cc b/storage/tokudb/ft-index/src/tests/test_query.cc index 1a3ee026b9a..db199ea2b80 100644 --- a/storage/tokudb/ft-index/src/tests/test_query.cc +++ b/storage/tokudb/ft-index/src/tests/test_query.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_rand_insert.cc b/storage/tokudb/ft-index/src/tests/test_rand_insert.cc index d87f34af28b..76c12a9d124 100644 --- a/storage/tokudb/ft-index/src/tests/test_rand_insert.cc +++ b/storage/tokudb/ft-index/src/tests/test_rand_insert.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
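The new test_memcmp_magic.cc above exercises the set_memcmp_magic() handle method: the magic byte must be non-zero, must be set before open(), and must agree across handles on the same dictionary; with it set and every key leading with that byte, the dictionary orders keys by raw memcmp and never calls the comparison callback. A minimal sketch of the pattern the test drives, assuming the test.h harness (env, CKERR, dbt_init) used by the surrounding tests:

// Hedged sketch of the usage pattern exercised by test_memcmp_magic.cc.
DB *db;
int r = db_create(&db, env, 0); CKERR(r);
r = db->set_memcmp_magic(db, 49); CKERR(r);   // non-zero, and only before open()
r = db->open(db, NULL, "db", "db", DB_BTREE, DB_CREATE, 0666); CKERR(r);

// Each key leads with the magic byte, followed by the integer in the byte
// order produced by toku_htonl() so that memcmp order matches numeric order.
char buf[1 + sizeof(int)];
buf[0] = 49;
int k = toku_htonl(42);
memcpy(&buf[1], &k, sizeof(int));

DBT key;
dbt_init(&key, buf, sizeof(buf));
r = db->put(db, NULL, &key, &key, 0); CKERR(r);
r = db->close(db, 0); CKERR(r);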
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_read_txn_invalid_ops.cc b/storage/tokudb/ft-index/src/tests/test_read_txn_invalid_ops.cc index f86c56637c6..93cab3cd0c0 100644 --- a/storage/tokudb/ft-index/src/tests/test_read_txn_invalid_ops.cc +++ b/storage/tokudb/ft-index/src/tests/test_read_txn_invalid_ops.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_redirect_func.cc b/storage/tokudb/ft-index/src/tests/test_redirect_func.cc index f5e4dacbfcd..2107fda3093 100644 --- a/storage/tokudb/ft-index/src/tests/test_redirect_func.cc +++ b/storage/tokudb/ft-index/src/tests/test_redirect_func.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_restrict.cc b/storage/tokudb/ft-index/src/tests/test_restrict.cc index cc1d573ca4f..fe71111a6bb 100644 --- a/storage/tokudb/ft-index/src/tests/test_restrict.cc +++ b/storage/tokudb/ft-index/src/tests/test_restrict.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_reverse_compare_fun.cc b/storage/tokudb/ft-index/src/tests/test_reverse_compare_fun.cc index 774a78d23b1..f50cc6fc18e 100644 --- a/storage/tokudb/ft-index/src/tests/test_reverse_compare_fun.cc +++ b/storage/tokudb/ft-index/src/tests/test_reverse_compare_fun.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_set_func_malloc.cc b/storage/tokudb/ft-index/src/tests/test_set_func_malloc.cc index 8efa786b7fd..0acea21e863 100644 --- a/storage/tokudb/ft-index/src/tests/test_set_func_malloc.cc +++ b/storage/tokudb/ft-index/src/tests/test_set_func_malloc.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_simple_read_txn.cc b/storage/tokudb/ft-index/src/tests/test_simple_read_txn.cc index 3538c71e476..4449a6287b8 100644 --- a/storage/tokudb/ft-index/src/tests/test_simple_read_txn.cc +++ b/storage/tokudb/ft-index/src/tests/test_simple_read_txn.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_stress0.cc b/storage/tokudb/ft-index/src/tests/test_stress0.cc index 6e3eb2e2e89..5dbca08db48 100644 --- a/storage/tokudb/ft-index/src/tests/test_stress0.cc +++ b/storage/tokudb/ft-index/src/tests/test_stress0.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_stress1.cc b/storage/tokudb/ft-index/src/tests/test_stress1.cc index 9aa5c29e89b..81095299265 100644 --- a/storage/tokudb/ft-index/src/tests/test_stress1.cc +++ b/storage/tokudb/ft-index/src/tests/test_stress1.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_stress2.cc b/storage/tokudb/ft-index/src/tests/test_stress2.cc index 255dc10317b..cbd798f318b 100644 --- a/storage/tokudb/ft-index/src/tests/test_stress2.cc +++ b/storage/tokudb/ft-index/src/tests/test_stress2.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_stress3.cc b/storage/tokudb/ft-index/src/tests/test_stress3.cc index 572576261af..b47e4f812ae 100644 --- a/storage/tokudb/ft-index/src/tests/test_stress3.cc +++ b/storage/tokudb/ft-index/src/tests/test_stress3.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_stress4.cc b/storage/tokudb/ft-index/src/tests/test_stress4.cc index 4404f1cecac..3d420561f5e 100644 --- a/storage/tokudb/ft-index/src/tests/test_stress4.cc +++ b/storage/tokudb/ft-index/src/tests/test_stress4.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_stress5.cc b/storage/tokudb/ft-index/src/tests/test_stress5.cc index 053da44d66c..a591b340025 100644 --- a/storage/tokudb/ft-index/src/tests/test_stress5.cc +++ b/storage/tokudb/ft-index/src/tests/test_stress5.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_stress6.cc b/storage/tokudb/ft-index/src/tests/test_stress6.cc index e3d47064d13..d616622353b 100644 --- a/storage/tokudb/ft-index/src/tests/test_stress6.cc +++ b/storage/tokudb/ft-index/src/tests/test_stress6.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_stress7.cc b/storage/tokudb/ft-index/src/tests/test_stress7.cc index 5db318521ee..cdf03ce8036 100644 --- a/storage/tokudb/ft-index/src/tests/test_stress7.cc +++ b/storage/tokudb/ft-index/src/tests/test_stress7.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -108,7 +108,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { // if (verbose) printf("starting creation of pthreads\n"); - const int num_threads = 4 + cli_args->num_update_threads + cli_args->num_ptquery_threads; + const int num_threads = 5 + cli_args->num_update_threads + cli_args->num_ptquery_threads; struct arg myargs[num_threads]; for (int i = 0; i < num_threads; i++) { arg_init(&myargs[i], dbp, env, cli_args); @@ -129,19 +129,21 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { myargs[1].operation_extra = &soe[1]; myargs[1].operation = scan_op; - // make the guy that runs HOT in the background + // make the guys that run hot optimize, keyrange, and frag stats in the background myargs[2].operation = hot_op; myargs[3].operation = keyrange_op; + myargs[4].operation = frag_op; + myargs[4].sleep_ms = 100; struct update_op_args uoe = get_update_op_args(cli_args, NULL); // make the guy that updates the db - for (int i = 4; i < 4 + cli_args->num_update_threads; ++i) { + for (int i = 5; i < 5 + cli_args->num_update_threads; ++i) { myargs[i].operation_extra = &uoe; myargs[i].operation = update_op; } // make the guy that does point queries - for (int i = 4 + cli_args->num_update_threads; i < num_threads; i++) { + for (int i = 5 + cli_args->num_update_threads; i < num_threads; i++) { myargs[i].operation = ptquery_op; } run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args); diff --git a/storage/tokudb/ft-index/src/tests/test_stress_hot_indexing.cc b/storage/tokudb/ft-index/src/tests/test_stress_hot_indexing.cc index 65e7230bba6..fe237f063ab 100644 --- a/storage/tokudb/ft-index/src/tests/test_stress_hot_indexing.cc +++ b/storage/tokudb/ft-index/src/tests/test_stress_hot_indexing.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_stress_openclose.cc b/storage/tokudb/ft-index/src/tests/test_stress_openclose.cc index 55d21770b0c..54c8e784b18 100644 --- a/storage/tokudb/ft-index/src/tests/test_stress_openclose.cc +++ b/storage/tokudb/ft-index/src/tests/test_stress_openclose.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_stress_with_verify.cc b/storage/tokudb/ft-index/src/tests/test_stress_with_verify.cc index 3c13da4f975..d259d09d25a 100644 --- a/storage/tokudb/ft-index/src/tests/test_stress_with_verify.cc +++ b/storage/tokudb/ft-index/src/tests/test_stress_with_verify.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_thread_flags.cc b/storage/tokudb/ft-index/src/tests/test_thread_flags.cc index 08429d1effc..2ff2dabab98 100644 --- a/storage/tokudb/ft-index/src/tests/test_thread_flags.cc +++ b/storage/tokudb/ft-index/src/tests/test_thread_flags.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
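The test_stress7.cc hunk above adds a fifth background thread: alongside the hot-optimize and keyrange threads it now runs frag_op (throttled by sleep_ms = 100), which polls fragmentation statistics. As defined later in this diff in threaded_stress_test_helpers.h, each iteration reduces to a get_fragmentation() call on a randomly chosen dictionary; a minimal sketch, assuming a DB *db already opened by the harness:

// Hedged sketch of what one frag_op iteration does.
TOKU_DB_FRAGMENTATION_S frag;
int r = db->get_fragmentation(db, &frag);
invariant_zero(r);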
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_thread_insert.cc b/storage/tokudb/ft-index/src/tests/test_thread_insert.cc index c8a84196d4a..a1044948e0e 100644 --- a/storage/tokudb/ft-index/src/tests/test_thread_insert.cc +++ b/storage/tokudb/ft-index/src/tests/test_thread_insert.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt.cc b/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt.cc index d1844ba3f9b..5d2196b13f8 100644 --- a/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt.cc +++ b/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt2.cc b/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt2.cc index dd545d33900..3a215a8b6f2 100644 --- a/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt2.cc +++ b/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt3.cc b/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt3.cc index 71a9358c332..6644cdabaa2 100644 --- a/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt3.cc +++ b/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt4.cc b/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt4.cc index dd545d33900..3a215a8b6f2 100644 --- a/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt4.cc +++ b/storage/tokudb/ft-index/src/tests/test_trans_desc_during_chkpt4.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_transactional_descriptor.cc b/storage/tokudb/ft-index/src/tests/test_transactional_descriptor.cc index 8c800784e5c..4f2e66a9381 100644 --- a/storage/tokudb/ft-index/src/tests/test_transactional_descriptor.cc +++ b/storage/tokudb/ft-index/src/tests/test_transactional_descriptor.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_abort5.cc b/storage/tokudb/ft-index/src/tests/test_txn_abort5.cc index 27b7f056cf2..fb3a522c995 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_abort5.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_abort5.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_abort5a.cc b/storage/tokudb/ft-index/src/tests/test_txn_abort5a.cc index 87840fc8958..6678a959805 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_abort5a.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_abort5a.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_abort6.cc b/storage/tokudb/ft-index/src/tests/test_txn_abort6.cc index ce481c342de..f61aea8e0bd 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_abort6.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_abort6.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_abort7.cc b/storage/tokudb/ft-index/src/tests/test_txn_abort7.cc index 8832e950310..f7f0840680f 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_abort7.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_abort7.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_begin_commit.cc b/storage/tokudb/ft-index/src/tests/test_txn_begin_commit.cc index 8b3906decb8..7e686e3e885 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_begin_commit.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_begin_commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_close_before_commit.cc b/storage/tokudb/ft-index/src/tests/test_txn_close_before_commit.cc index cbc9d856bd2..24ef8a0fb72 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_close_before_commit.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_close_before_commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_close_before_prepare_commit.cc b/storage/tokudb/ft-index/src/tests/test_txn_close_before_prepare_commit.cc index 6427bf8491e..e3b715c4ce7 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_close_before_prepare_commit.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_close_before_prepare_commit.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. 
Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_cursor_last.cc b/storage/tokudb/ft-index/src/tests/test_txn_cursor_last.cc index 62cb3984b79..82a15e0b515 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_cursor_last.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_cursor_last.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_nested1.cc b/storage/tokudb/ft-index/src/tests/test_txn_nested1.cc index d25e7c61ce2..7797d88e478 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_nested1.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_nested1.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,7 @@ PATENT RIGHTS GRANT: #include <memory.h> #include <sys/stat.h> #include <db.h> -#include <ft/tokuconst.h> +#include <ft/txn/xids.h> #define MAX_NEST MAX_NESTED_TRANSACTIONS diff --git a/storage/tokudb/ft-index/src/tests/test_txn_nested2.cc b/storage/tokudb/ft-index/src/tests/test_txn_nested2.cc index 542f2574c85..f5c0d2b4e51 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_nested2.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_nested2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -87,15 +87,18 @@ PATENT RIGHTS GRANT: */ #ident "Copyright (c) 2009-2013 Tokutek Inc. All rights reserved." -#include "test.h" -#include <stdio.h> +#include <db.h> +#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <memory.h> #include <sys/stat.h> -#include <db.h> -#include <ft/tokuconst.h> + +#include "src/tests/test.h" + +#include <ft/txn/xids.h> + #define MAX_NEST MAX_TRANSACTION_RECORDS #define MAX_SIZE MAX_TRANSACTION_RECORDS diff --git a/storage/tokudb/ft-index/src/tests/test_txn_nested3.cc b/storage/tokudb/ft-index/src/tests/test_txn_nested3.cc index 22e5d984a7a..16ede714422 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_nested3.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_nested3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,7 @@ PATENT RIGHTS GRANT: #include <memory.h> #include <sys/stat.h> #include <db.h> -#include <ft/tokuconst.h> +#include <ft/txn/xids.h> #define MAX_NEST MAX_TRANSACTION_RECORDS #define MAX_SIZE MAX_TRANSACTION_RECORDS diff --git a/storage/tokudb/ft-index/src/tests/test_txn_nested4.cc b/storage/tokudb/ft-index/src/tests/test_txn_nested4.cc index edc6430a8c7..0bca6309169 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_nested4.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_nested4.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -95,7 +95,7 @@ PATENT RIGHTS GRANT: #include <memory.h> #include <sys/stat.h> #include <db.h> -#include <ft/tokuconst.h> +#include <ft/txn/xids.h> #define MAX_NEST MAX_TRANSACTION_RECORDS #define MAX_SIZE MAX_TRANSACTION_RECORDS diff --git a/storage/tokudb/ft-index/src/tests/test_txn_nested5.cc b/storage/tokudb/ft-index/src/tests/test_txn_nested5.cc index df5ad696984..02692be3d08 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_nested5.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_nested5.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,7 +95,7 @@ PATENT RIGHTS GRANT: #include <memory.h> #include <sys/stat.h> #include <db.h> -#include <ft/tokuconst.h> +#include <ft/txn/xids.h> #define MAX_NEST MAX_TRANSACTION_RECORDS #define MAX_SIZE (MAX_TRANSACTION_RECORDS + 1) diff --git a/storage/tokudb/ft-index/src/tests/test_txn_nested_abort.cc b/storage/tokudb/ft-index/src/tests/test_txn_nested_abort.cc index 2c81c91681d..10be3fea79d 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_nested_abort.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_nested_abort.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_nested_abort2.cc b/storage/tokudb/ft-index/src/tests/test_txn_nested_abort2.cc index 2fbf3f6e2b2..6e1928b4891 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_nested_abort2.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_nested_abort2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_nested_abort3.cc b/storage/tokudb/ft-index/src/tests/test_txn_nested_abort3.cc index c53b1cc68ba..2fa58b86b2a 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_nested_abort3.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_nested_abort3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_nested_abort4.cc b/storage/tokudb/ft-index/src/tests/test_txn_nested_abort4.cc index 164f1c26d11..b412aeec884 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_nested_abort4.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_nested_abort4.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_txn_recover3.cc b/storage/tokudb/ft-index/src/tests/test_txn_recover3.cc index c701ed51257..55cf772f207 100644 --- a/storage/tokudb/ft-index/src/tests/test_txn_recover3.cc +++ b/storage/tokudb/ft-index/src/tests/test_txn_recover3.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_unused_memory_crash.cc b/storage/tokudb/ft-index/src/tests/test_unused_memory_crash.cc index 9bb65016ba0..9c13a08368f 100644 --- a/storage/tokudb/ft-index/src/tests/test_unused_memory_crash.cc +++ b/storage/tokudb/ft-index/src/tests/test_unused_memory_crash.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_abort_works.cc b/storage/tokudb/ft-index/src/tests/test_update_abort_works.cc index 595b955855f..4a0d815749b 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_abort_works.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_abort_works.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_broadcast_abort_works.cc b/storage/tokudb/ft-index/src/tests/test_update_broadcast_abort_works.cc index 5b9e105cc7c..c11fffe643f 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_broadcast_abort_works.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_broadcast_abort_works.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_broadcast_calls_back.cc b/storage/tokudb/ft-index/src/tests/test_update_broadcast_calls_back.cc index 22bb1193f3f..db12a74832b 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_broadcast_calls_back.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_broadcast_calls_back.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_broadcast_can_delete_elements.cc b/storage/tokudb/ft-index/src/tests/test_update_broadcast_can_delete_elements.cc index a54aa20da88..804161402ba 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_broadcast_can_delete_elements.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_broadcast_can_delete_elements.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_broadcast_changes_values.cc b/storage/tokudb/ft-index/src/tests/test_update_broadcast_changes_values.cc index c532d571375..304c799de07 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_broadcast_changes_values.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_broadcast_changes_values.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_broadcast_indexer.cc b/storage/tokudb/ft-index/src/tests/test_update_broadcast_indexer.cc index 839b42a5347..4a7fa176424 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_broadcast_indexer.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_broadcast_indexer.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_broadcast_loader.cc b/storage/tokudb/ft-index/src/tests/test_update_broadcast_loader.cc index 704e6e08070..4e3db6380ae 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_broadcast_loader.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_broadcast_loader.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_broadcast_nested_updates.cc b/storage/tokudb/ft-index/src/tests/test_update_broadcast_nested_updates.cc index 42b254b22ef..2f858beffcd 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_broadcast_nested_updates.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_broadcast_nested_updates.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_broadcast_previously_deleted.cc b/storage/tokudb/ft-index/src/tests/test_update_broadcast_previously_deleted.cc index 912b68f6a1e..348dd71f941 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_broadcast_previously_deleted.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_broadcast_previously_deleted.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_broadcast_stress.cc b/storage/tokudb/ft-index/src/tests/test_update_broadcast_stress.cc index 9da0fd8dfa8..fb294e40446 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_broadcast_stress.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_broadcast_stress.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_broadcast_update_fun_has_choices.cc b/storage/tokudb/ft-index/src/tests/test_update_broadcast_update_fun_has_choices.cc index 31c0dabc39c..6f6481d3175 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_broadcast_update_fun_has_choices.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_broadcast_update_fun_has_choices.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_broadcast_with_empty_table.cc b/storage/tokudb/ft-index/src/tests/test_update_broadcast_with_empty_table.cc index 82c69f95af8..5aa27c10b69 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_broadcast_with_empty_table.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_broadcast_with_empty_table.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_calls_back.cc b/storage/tokudb/ft-index/src/tests/test_update_calls_back.cc index ba64dea1463..4970cc6ad4c 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_calls_back.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_calls_back.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_can_delete_elements.cc b/storage/tokudb/ft-index/src/tests/test_update_can_delete_elements.cc index ca59008014f..328a569bf56 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_can_delete_elements.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_can_delete_elements.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_changes_values.cc b/storage/tokudb/ft-index/src/tests/test_update_changes_values.cc index 623ce1a1f38..ee346f54947 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_changes_values.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_changes_values.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_nested_updates.cc b/storage/tokudb/ft-index/src/tests/test_update_nested_updates.cc index 28ab01ae632..cb44a5bff29 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_nested_updates.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_nested_updates.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_nonexistent_keys.cc b/storage/tokudb/ft-index/src/tests/test_update_nonexistent_keys.cc index 24a1eaf3787..1d609aabb9a 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_nonexistent_keys.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_nonexistent_keys.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_previously_deleted.cc b/storage/tokudb/ft-index/src/tests/test_update_previously_deleted.cc index 27c01649851..7e9e4bcb5ba 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_previously_deleted.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_previously_deleted.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_stress.cc b/storage/tokudb/ft-index/src/tests/test_update_stress.cc index 97a6bb93d6c..0c0d2c9926a 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_stress.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_stress.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_txn_snapshot_works_concurrently.cc b/storage/tokudb/ft-index/src/tests/test_update_txn_snapshot_works_concurrently.cc index 337a90e8d88..99acf3f7f8e 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_txn_snapshot_works_concurrently.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_txn_snapshot_works_concurrently.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_txn_snapshot_works_correctly_with_deletes.cc b/storage/tokudb/ft-index/src/tests/test_update_txn_snapshot_works_correctly_with_deletes.cc index 641521e96bd..61a346c928d 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_txn_snapshot_works_correctly_with_deletes.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_txn_snapshot_works_correctly_with_deletes.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_update_with_empty_table.cc b/storage/tokudb/ft-index/src/tests/test_update_with_empty_table.cc index 6ed492b86e9..6d54ee93b19 100644 --- a/storage/tokudb/ft-index/src/tests/test_update_with_empty_table.cc +++ b/storage/tokudb/ft-index/src/tests/test_update_with_empty_table.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_updates_single_key.cc b/storage/tokudb/ft-index/src/tests/test_updates_single_key.cc index 455e82122dd..0b4dff69b9f 100644 --- a/storage/tokudb/ft-index/src/tests/test_updates_single_key.cc +++ b/storage/tokudb/ft-index/src/tests/test_updates_single_key.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_weakxaction.cc b/storage/tokudb/ft-index/src/tests/test_weakxaction.cc index 3e9e1f25234..e99f6510a0c 100644 --- a/storage/tokudb/ft-index/src/tests/test_weakxaction.cc +++ b/storage/tokudb/ft-index/src/tests/test_weakxaction.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/test_zero_length_keys.cc b/storage/tokudb/ft-index/src/tests/test_zero_length_keys.cc index 37180e9e952..c7b4dd1ac75 100644 --- a/storage/tokudb/ft-index/src/tests/test_zero_length_keys.cc +++ b/storage/tokudb/ft-index/src/tests/test_zero_length_keys.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/threaded_stress_test_helpers.h b/storage/tokudb/ft-index/src/tests/threaded_stress_test_helpers.h index c173d2d2d63..2c2525a3165 100644 --- a/storage/tokudb/ft-index/src/tests/threaded_stress_test_helpers.h +++ b/storage/tokudb/ft-index/src/tests/threaded_stress_test_helpers.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,6 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2009-2013 Tokutek Inc. All rights reserved." #ident "$Id$" @@ -102,9 +104,6 @@ PATENT RIGHTS GRANT: // with keys in the range [0, table_size - 1] unless disperse_keys is true, // then the keys are scrambled up in the integer key space. -#ifndef _THREADED_STRESS_TEST_HELPERS_H_ -#define _THREADED_STRESS_TEST_HELPERS_H_ - #include "toku_config.h" #include "test.h" @@ -123,7 +122,7 @@ PATENT RIGHTS GRANT: #include <src/ydb-internal.h> -#include <ft/ybt.h> +#include <util/dbt.h> #include <util/rwlock.h> #include <util/kibbutz.h> @@ -209,6 +208,7 @@ struct cli_args { bool nocrashstatus; // do not print engine status upon crash bool prelock_updates; // update threads perform serial updates on a prelocked range bool disperse_keys; // spread the keys out during a load (by reversing the bits in the loop index) to make a wide tree we can spread out random inserts into + bool memcmp_keys; // pack keys big endian and use the builtin key comparison function in the fractal tree bool direct_io; // use direct I/O const char *print_engine_status; // print engine status rows matching a simple regex "a|b|c", matching strings where a or b or c is a subtring. }; @@ -833,12 +833,13 @@ fill_key_buf(int64_t key, uint8_t *data, struct cli_args *args) { } invariant(key >= 0); if (args->key_size == sizeof(int)) { - const int key32 = key; + const int key32 = args->memcmp_keys ? toku_htonl(key) : key; memcpy(data, &key32, sizeof(key32)); } else { invariant(args->key_size >= sizeof(key)); - memcpy(data, &key, sizeof(key)); - memset(data + sizeof(key), 0, args->key_size - sizeof(key)); + const int64_t key64 = args->memcmp_keys ? 
toku_htonl(key) : key; + memcpy(data, &key64, sizeof(key64)); + memset(data + sizeof(key64), 0, args->key_size - sizeof(key64)); } } @@ -1076,6 +1077,16 @@ static int UU() keyrange_op(DB_TXN *txn, ARG arg, void* UU(operation_extra), voi return r; } +static int UU() frag_op(DB_TXN *UU(txn), ARG arg, void* UU(operation_extra), void *UU(stats_extra)) { + int db_index = myrandom_r(arg->random_data)%arg->cli->num_DBs; + DB *db = arg->dbp[db_index]; + + TOKU_DB_FRAGMENTATION_S frag; + int r = db->get_fragmentation(db, &frag); + invariant_zero(r); + return r; +} + static void UU() get_key_after_bytes_callback(const DBT *UU(end_key), uint64_t UU(skipped), void *UU(extra)) { // nothing } @@ -1966,7 +1977,9 @@ static int create_tables(DB_ENV **env_res, DB **db_res, int num_DBs, db_env_set_num_bucket_mutexes(env_args.num_bucket_mutexes); r = db_env_create(&env, 0); assert(r == 0); r = env->set_redzone(env, 0); CKERR(r); - r = env->set_default_bt_compare(env, bt_compare); CKERR(r); + if (!cli_args->memcmp_keys) { + r = env->set_default_bt_compare(env, bt_compare); CKERR(r); + } r = env->set_lk_max_memory(env, env_args.lk_max_memory); CKERR(r); r = env->set_cachesize(env, env_args.cachetable_size / (1 << 30), env_args.cachetable_size % (1 << 30), 1); CKERR(r); r = env->set_lg_bsize(env, env_args.rollback_node_size); CKERR(r); @@ -2164,7 +2177,9 @@ static int open_tables(DB_ENV **env_res, DB **db_res, int num_DBs, db_env_set_num_bucket_mutexes(env_args.num_bucket_mutexes); r = db_env_create(&env, 0); assert(r == 0); r = env->set_redzone(env, 0); CKERR(r); - r = env->set_default_bt_compare(env, bt_compare); CKERR(r); + if (!cli_args->memcmp_keys) { + r = env->set_default_bt_compare(env, bt_compare); CKERR(r); + } r = env->set_lk_max_memory(env, env_args.lk_max_memory); CKERR(r); env->set_update(env, env_args.update_function); r = env->set_cachesize(env, env_args.cachetable_size / (1 << 30), env_args.cachetable_size % (1 << 30), 1); CKERR(r); @@ -2282,6 +2297,7 @@ static struct cli_args UU() get_default_args(void) { .nocrashstatus = false, .prelock_updates = false, .disperse_keys = false, + .memcmp_keys = false, .direct_io = false, }; DEFAULT_ARGS.env_args.envdir = TOKU_TEST_FILENAME; @@ -2669,6 +2685,7 @@ static inline void parse_stress_test_args (int argc, char *const argv[], struct BOOL_ARG("nocrashstatus", nocrashstatus), BOOL_ARG("prelock_updates", prelock_updates), BOOL_ARG("disperse_keys", disperse_keys), + BOOL_ARG("memcmp_keys", memcmp_keys), BOOL_ARG("direct_io", direct_io), STRING_ARG("--envdir", env_args.envdir), @@ -2924,5 +2941,3 @@ UU() perf_test_main_with_cmp(struct cli_args *args, int (*cmp)(DB *, const DBT * // We want to control the row size and its compressibility. open_and_stress_tables(args, false, cmp); } - -#endif diff --git a/storage/tokudb/ft-index/src/tests/time_create_db.cc b/storage/tokudb/ft-index/src/tests/time_create_db.cc index 2cc2496f33a..2365df4701d 100644 --- a/storage/tokudb/ft-index/src/tests/time_create_db.cc +++ b/storage/tokudb/ft-index/src/tests/time_create_db.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
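The threaded_stress_test_helpers.h hunk above also adds a memcmp_keys option: when set, create_tables()/open_tables() skip env->set_default_bt_compare() and fill_key_buf() stores toku_htonl(key), so the engine's builtin memcmp ordering matches numeric key order. A minimal sketch of driving a stress run in that mode; stress_test_main() is the usual harness entry point and is assumed here, since it does not appear in this hunk:

// Hedged sketch: enable memcmp-ordered keys in a stress run.
struct cli_args args = get_default_args();
args.memcmp_keys = true;       // skip set_default_bt_compare(); pack keys via toku_htonl()
args.key_size = sizeof(int);   // 4-byte keys take the int path in fill_key_buf()
stress_test_main(&args);       // assumed harness entry point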
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/transactional_fileops.cc b/storage/tokudb/ft-index/src/tests/transactional_fileops.cc index c58e5d8e8a1..ea1f8af6e16 100644 --- a/storage/tokudb/ft-index/src/tests/transactional_fileops.cc +++ b/storage/tokudb/ft-index/src/tests/transactional_fileops.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/update-multiple-data-diagonal.cc b/storage/tokudb/ft-index/src/tests/update-multiple-data-diagonal.cc index f60f939dbc8..c39005d0f00 100644 --- a/storage/tokudb/ft-index/src/tests/update-multiple-data-diagonal.cc +++ b/storage/tokudb/ft-index/src/tests/update-multiple-data-diagonal.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/update-multiple-key0.cc b/storage/tokudb/ft-index/src/tests/update-multiple-key0.cc index 52d672ebc6f..51257fd7377 100644 --- a/storage/tokudb/ft-index/src/tests/update-multiple-key0.cc +++ b/storage/tokudb/ft-index/src/tests/update-multiple-key0.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/update-multiple-nochange.cc b/storage/tokudb/ft-index/src/tests/update-multiple-nochange.cc index e814bff7d2b..19a668a67c8 100644 --- a/storage/tokudb/ft-index/src/tests/update-multiple-nochange.cc +++ b/storage/tokudb/ft-index/src/tests/update-multiple-nochange.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/update-multiple-with-indexer-array.cc b/storage/tokudb/ft-index/src/tests/update-multiple-with-indexer-array.cc index 684925f9872..9101771c4d8 100644 --- a/storage/tokudb/ft-index/src/tests/update-multiple-with-indexer-array.cc +++ b/storage/tokudb/ft-index/src/tests/update-multiple-with-indexer-array.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/update-multiple-with-indexer.cc b/storage/tokudb/ft-index/src/tests/update-multiple-with-indexer.cc index 62f3c7b7e76..444bcf17106 100644 --- a/storage/tokudb/ft-index/src/tests/update-multiple-with-indexer.cc +++ b/storage/tokudb/ft-index/src/tests/update-multiple-with-indexer.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/update.cc b/storage/tokudb/ft-index/src/tests/update.cc index e89a0227082..aa0c4f0dd18 100644 --- a/storage/tokudb/ft-index/src/tests/update.cc +++ b/storage/tokudb/ft-index/src/tests/update.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/upgrade-test-1.cc b/storage/tokudb/ft-index/src/tests/upgrade-test-1.cc index ef638e3fa8c..1f30ab21cf2 100644 --- a/storage/tokudb/ft-index/src/tests/upgrade-test-1.cc +++ b/storage/tokudb/ft-index/src/tests/upgrade-test-1.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -213,7 +213,7 @@ static void setup(void) { } } else { - fprintf(stderr, "unsupported TokuDB version %d to upgrade\n", SRC_VERSION); + fprintf(stderr, "unsupported TokuFT version %d to upgrade\n", SRC_VERSION); assert(0); } diff --git a/storage/tokudb/ft-index/src/tests/upgrade-test-2.cc b/storage/tokudb/ft-index/src/tests/upgrade-test-2.cc index e7735ac1567..33003f6780b 100644 --- a/storage/tokudb/ft-index/src/tests/upgrade-test-2.cc +++ b/storage/tokudb/ft-index/src/tests/upgrade-test-2.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -193,7 +193,7 @@ static void setup(void) { src_db_dir = db_v5_dir; } else { - fprintf(stderr, "unsupported TokuDB version %d to upgrade\n", SRC_VERSION); + fprintf(stderr, "unsupported TokuFT version %d to upgrade\n", SRC_VERSION); assert(0); } diff --git a/storage/tokudb/ft-index/src/tests/upgrade-test-3.cc b/storage/tokudb/ft-index/src/tests/upgrade-test-3.cc index 276251d699a..61994a2de66 100644 --- a/storage/tokudb/ft-index/src/tests/upgrade-test-3.cc +++ b/storage/tokudb/ft-index/src/tests/upgrade-test-3.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -90,7 +90,7 @@ PATENT RIGHTS GRANT: // Purpose of this test is to verify that dictionaries created with 4.2.0 -// can be properly truncated with TokuDB version 5.x or later. +// can be properly truncated with TokuFT version 5.x or later. #include "test.h" @@ -216,7 +216,7 @@ static void setup(void) { src_db_dir = db_v5_dir; } else { - fprintf(stderr, "unsupported TokuDB version %d to upgrade\n", SRC_VERSION); + fprintf(stderr, "unsupported TokuFT version %d to upgrade\n", SRC_VERSION); assert(0); } diff --git a/storage/tokudb/ft-index/src/tests/upgrade-test-4.cc b/storage/tokudb/ft-index/src/tests/upgrade-test-4.cc index 67380e900e1..0d083d9d87a 100644 --- a/storage/tokudb/ft-index/src/tests/upgrade-test-4.cc +++ b/storage/tokudb/ft-index/src/tests/upgrade-test-4.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -306,7 +306,7 @@ static void setup(void) { src_db_dir = db_v5_dir; } else { - fprintf(stderr, "unsupported TokuDB version %d to upgrade\n", SRC_VERSION); + fprintf(stderr, "unsupported TokuFT version %d to upgrade\n", SRC_VERSION); assert(0); } diff --git a/storage/tokudb/ft-index/src/tests/upgrade-test-5.cc b/storage/tokudb/ft-index/src/tests/upgrade-test-5.cc index 564fe607d85..2f5d1863e51 100644 --- a/storage/tokudb/ft-index/src/tests/upgrade-test-5.cc +++ b/storage/tokudb/ft-index/src/tests/upgrade-test-5.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -210,7 +210,7 @@ static void setup(void) { src_db_dir = db_v5_dir; } else { - fprintf(stderr, "unsupported TokuDB version %d to upgrade\n", SRC_VERSION); + fprintf(stderr, "unsupported TokuFT version %d to upgrade\n", SRC_VERSION); assert(0); } diff --git a/storage/tokudb/ft-index/src/tests/upgrade-test-6.cc b/storage/tokudb/ft-index/src/tests/upgrade-test-6.cc index afe99ae68a3..a1e137c980f 100644 --- a/storage/tokudb/ft-index/src/tests/upgrade-test-6.cc +++ b/storage/tokudb/ft-index/src/tests/upgrade-test-6.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/upgrade-test-7.cc b/storage/tokudb/ft-index/src/tests/upgrade-test-7.cc index b1a17d0a079..429f4cddf2c 100644 --- a/storage/tokudb/ft-index/src/tests/upgrade-test-7.cc +++ b/storage/tokudb/ft-index/src/tests/upgrade-test-7.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,8 +89,8 @@ PATENT RIGHTS GRANT: #ident "$Id$" -// Purpose of this test is to verify that an environment created by TokuDB 3.1.0 -// is properly rejected by the upgrade logic of TokuDB 5.x and later. +// Purpose of this test is to verify that an environment created by TokuFT 3.1.0 +// is properly rejected by the upgrade logic of TokuFT 5.x and later. #include "test.h" #include "toku_pthread.h" diff --git a/storage/tokudb/ft-index/src/tests/upgrade_simple.cc b/storage/tokudb/ft-index/src/tests/upgrade_simple.cc index a9048460054..678953c4ff7 100644 --- a/storage/tokudb/ft-index/src/tests/upgrade_simple.cc +++ b/storage/tokudb/ft-index/src/tests/upgrade_simple.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/tests/xa-dirty-commit.cc b/storage/tokudb/ft-index/src/tests/xa-dirty-commit.cc new file mode 100644 index 00000000000..126a7c1453e --- /dev/null +++ b/storage/tokudb/ft-index/src/tests/xa-dirty-commit.cc @@ -0,0 +1,193 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." +#include "test.h" + +// Verify that a commit of a prepared txn in recovery retains a db that was created by it. +// The rollback file is dirty when the environment is closed. + +static void create_foo(DB_ENV *env, DB_TXN *txn) { + int r; + DB *db; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + r = db->close(db, 0); + CKERR(r); +} + +static void check_foo(DB_ENV *env) { + int r; + DB *db; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, 0); + CKERR(r); + r = db->close(db, 0); + CKERR(r); +} + +static void create_prepared_txn(void) { + int r; + + DB_ENV *env = nullptr; + r = db_env_create(&env, 0); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + DB_TXN *txn = nullptr; + r = env->txn_begin(env, nullptr, &txn, 0); + CKERR(r); + + create_foo(env, txn); + + TOKU_XA_XID xid = { 0x1234, 8, 9 }; + for (int i = 0; i < 8+9; i++) { + xid.data[i] = i; + } + r = txn->xa_prepare(txn, &xid); + CKERR(r); + + // discard the txn so that we can close the env and run xa recovery later + r = txn->discard(txn, 0); + CKERR(r); + + r = env->close(env, TOKUFT_DIRTY_SHUTDOWN); + CKERR(r); +} + +static void run_xa_recovery(void) { + int r; + + DB_ENV *env; + r = db_env_create(&env, 0); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + // get prepared xid + long count; + TOKU_XA_XID xid; + r = env->txn_xa_recover(env, &xid, 1, &count, DB_FIRST); + CKERR(r); + + // commit it + DB_TXN *txn = nullptr; + r = env->get_txn_from_xid(env, &xid, &txn); + CKERR(r); + r = txn->commit(txn, 0); + CKERR(r); + + check_foo(env); + + r = env->close(env, 0); + CKERR(r); +} + +int test_main (int argc, char *const argv[]) { + default_parse_args(argc, argv); + + // init the env directory + toku_os_recursive_delete(TOKU_TEST_FILENAME); + int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + // run the test + create_prepared_txn(); + run_xa_recovery(); + + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/xa-dirty-rollback.cc b/storage/tokudb/ft-index/src/tests/xa-dirty-rollback.cc new file mode 100644 index 00000000000..2d13e559050 --- /dev/null +++ 
b/storage/tokudb/ft-index/src/tests/xa-dirty-rollback.cc @@ -0,0 +1,193 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." +#include "test.h" + +// Verify that an abort of a prepared txn in recovery deletes a db created by it. +// The rollback file is dirty when the environment is closed. + +static void create_foo(DB_ENV *env, DB_TXN *txn) { + int r; + DB *db; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + r = db->close(db, 0); + CKERR(r); +} + +static void check_foo(DB_ENV *env) { + int r; + DB *db; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, 0); + CKERR2(r, ENOENT); + r = db->close(db, 0); + CKERR(r); +} + +static void create_prepared_txn(void) { + int r; + + DB_ENV *env = nullptr; + r = db_env_create(&env, 0); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + DB_TXN *txn = nullptr; + r = env->txn_begin(env, nullptr, &txn, 0); + CKERR(r); + + create_foo(env, txn); + + TOKU_XA_XID xid = { 0x1234, 8, 9 }; + for (int i = 0; i < 8+9; i++) { + xid.data[i] = i; + } + r = txn->xa_prepare(txn, &xid); + CKERR(r); + + // discard the txn so that we can close the env and run xa recovery later + r = txn->discard(txn, 0); + CKERR(r); + + r = env->close(env, TOKUFT_DIRTY_SHUTDOWN); + CKERR(r); +} + +static void run_xa_recovery(void) { + int r; + + DB_ENV *env; + r = db_env_create(&env, 0); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + // get prepared xid + long count; + TOKU_XA_XID xid; + r = env->txn_xa_recover(env, &xid, 1, &count, DB_FIRST); + CKERR(r); + + // abort it + DB_TXN *txn = nullptr; + r = env->get_txn_from_xid(env, &xid, &txn); + CKERR(r); + r = txn->abort(txn); + CKERR(r); + + check_foo(env); + + r = env->close(env, 0); + CKERR(r); +} + +int test_main (int argc, char *const argv[]) { + default_parse_args(argc, argv); + + // init the env directory + toku_os_recursive_delete(TOKU_TEST_FILENAME); + int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + // run the test + create_prepared_txn(); + run_xa_recovery(); + + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/xa-txn-discard-abort.cc b/storage/tokudb/ft-index/src/tests/xa-txn-discard-abort.cc new file mode 100644 index 00000000000..3365a1bb139 --- /dev/null +++ 
b/storage/tokudb/ft-index/src/tests/xa-txn-discard-abort.cc @@ -0,0 +1,195 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." +#include "test.h" + +// Verify that an abort of a prepared txn in recovery removes a db created by it. +// A checkpoint is taken between the db creation and the txn prepare. + +static void create_foo(DB_ENV *env, DB_TXN *txn) { + int r; + DB *db; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + r = db->close(db, 0); + CKERR(r); +} + +static void check_foo(DB_ENV *env) { + int r; + DB *db; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, 0); + CKERR2(r, ENOENT); + r = db->close(db, 0); + CKERR(r); +} + +static void create_prepared_txn(void) { + int r; + + DB_ENV *env = nullptr; + r = db_env_create(&env, 0); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + DB_TXN *txn = nullptr; + r = env->txn_begin(env, nullptr, &txn, 0); + CKERR(r); + + create_foo(env, txn); + r = env->txn_checkpoint(env, 0, 0, 0); + CKERR(r); + + TOKU_XA_XID xid = { 0x1234, 8, 9 }; + for (int i = 0; i < 8+9; i++) { + xid.data[i] = i; + } + r = txn->xa_prepare(txn, &xid); + CKERR(r); + + // discard the txn so that we can close the env and run xa recovery later + r = txn->discard(txn, 0); + CKERR(r); + + r = env->close(env, TOKUFT_DIRTY_SHUTDOWN); + CKERR(r); +} + +static void run_xa_recovery(void) { + int r; + + DB_ENV *env; + r = db_env_create(&env, 0); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + // get prepared xid + long count; + TOKU_XA_XID xid; + r = env->txn_xa_recover(env, &xid, 1, &count, DB_FIRST); + CKERR(r); + + // abort it + DB_TXN *txn = nullptr; + r = env->get_txn_from_xid(env, &xid, &txn); + CKERR(r); + r = txn->abort(txn); + CKERR(r); + + check_foo(env); + + r = env->close(env, 0); + CKERR(r); +} + +int test_main (int argc, char *const argv[]) { + default_parse_args(argc, argv); + + // init the env directory + toku_os_recursive_delete(TOKU_TEST_FILENAME); + int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + // run the test + create_prepared_txn(); + run_xa_recovery(); + + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/xa-txn-discard-commit.cc b/storage/tokudb/ft-index/src/tests/xa-txn-discard-commit.cc new file mode 100644 index 00000000000..c4d164017ae --- 
/dev/null +++ b/storage/tokudb/ft-index/src/tests/xa-txn-discard-commit.cc @@ -0,0 +1,196 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." +#include "test.h" + +// Verify that a commit of a prepared txn in recovery retains a db created by it. +// A checkpoint is taken between the db creation and the txn prepare. + +static void create_foo(DB_ENV *env, DB_TXN *txn) { + int r; + DB *db; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, txn, "foo.db", 0, DB_BTREE, DB_CREATE, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + r = db->close(db, 0); + CKERR(r); +} + +static void check_foo(DB_ENV *env) { + int r; + DB *db; + r = db_create(&db, env, 0); + CKERR(r); + r = db->open(db, nullptr, "foo.db", 0, DB_BTREE, 0, 0); + CKERR(r); + r = db->close(db, 0); + CKERR(r); +} + +static void create_prepared_txn(void) { + int r; + + DB_ENV *env = nullptr; + r = db_env_create(&env, 0); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + DB_TXN *txn = nullptr; + r = env->txn_begin(env, nullptr, &txn, 0); + CKERR(r); + + create_foo(env, txn); + + r = env->txn_checkpoint(env, 0, 0, 0); + CKERR(r); + + TOKU_XA_XID xid = { 0x1234, 8, 9 }; + for (int i = 0; i < 8+9; i++) { + xid.data[i] = i; + } + r = txn->xa_prepare(txn, &xid); + CKERR(r); + + // discard the txn so that we can close the env and run xa recovery later + r = txn->discard(txn, 0); + CKERR(r); + + r = env->close(env, TOKUFT_DIRTY_SHUTDOWN); + CKERR(r); +} + +static void run_xa_recovery(void) { + int r; + + DB_ENV *env; + r = db_env_create(&env, 0); + CKERR(r); + r = env->open(env, TOKU_TEST_FILENAME, + DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE | DB_RECOVER, + S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + // get prepared xid + long count; + TOKU_XA_XID xid; + r = env->txn_xa_recover(env, &xid, 1, &count, DB_FIRST); + CKERR(r); + + // commit it + DB_TXN *txn = nullptr; + r = env->get_txn_from_xid(env, &xid, &txn); + CKERR(r); + r = txn->commit(txn, 0); + CKERR(r); + + check_foo(env); + + r = env->close(env, 0); + CKERR(r); +} + +int test_main (int argc, char *const argv[]) { + default_parse_args(argc, argv); + + // init the env directory + toku_os_recursive_delete(TOKU_TEST_FILENAME); + int r = toku_os_mkdir(TOKU_TEST_FILENAME, S_IRWXU+S_IRWXG+S_IRWXO); + CKERR(r); + + // run the test + create_prepared_txn(); + run_xa_recovery(); + + return 0; +} diff --git a/storage/tokudb/ft-index/src/tests/zombie_db.cc b/storage/tokudb/ft-index/src/tests/zombie_db.cc index 16d6a933451..56ff71f13da 100644 --- 
a/storage/tokudb/ft-index/src/tests/zombie_db.cc +++ b/storage/tokudb/ft-index/src/tests/zombie_db.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/src/toku_patent.cc b/storage/tokudb/ft-index/src/toku_patent.cc index e7b0ebe2c56..5261b6f3a79 100644 --- a/storage/tokudb/ft-index/src/toku_patent.cc +++ b/storage/tokudb/ft-index/src/toku_patent.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -115,7 +115,7 @@ const char *toku_patent_string = "COPYING CONDITIONS NOTICE:\n\ \n\ COPYRIGHT NOTICE:\n\ \n\ - TokuDB, Tokutek Fractal Tree Indexing Library.\n\ + TokuFT, Tokutek Fractal Tree Indexing Library.\n\ Copyright (C) 2007-2013 Tokutek, Inc.\n\ \n\ DISCLAIMER:\n\ diff --git a/storage/tokudb/ft-index/src/ydb-internal.h b/storage/tokudb/ft-index/src/ydb-internal.h index 085a4dd0334..26cc8419f4a 100644 --- a/storage/tokudb/ft-index/src/ydb-internal.h +++ b/storage/tokudb/ft-index/src/ydb-internal.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef YDB_INTERNAL_H -#define YDB_INTERNAL_H /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,17 +86,22 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." 
#ident "$Id$" #include <db.h> #include <limits.h> -#include <ft/fttypes.h> -#include <ft/ft-ops.h> -#include <ft/minicron.h> +#include <ft/cachetable/cachetable.h> +#include <ft/cursor.h> +#include <ft/comparator.h> +#include <ft/logger/logger.h> +#include <ft/txn/txn.h> #include <util/growable_array.h> +#include <util/minicron.h> #include <util/omt.h> #include <locktree/locktree.h> @@ -276,7 +279,7 @@ struct __toku_db_txn_external { #define db_txn_struct_i(x) (&((struct __toku_db_txn_external *)x)->internal_part) struct __toku_dbc_internal { - struct ft_cursor *c; + struct ft_cursor ftcursor; DB_TXN *txn; TOKU_ISOLATION iso; struct simple_dbt skey_s,sval_s; @@ -287,12 +290,21 @@ struct __toku_dbc_internal { bool rmw; }; -struct __toku_dbc_external { - struct __toku_dbc external_part; - struct __toku_dbc_internal internal_part; -}; - -#define dbc_struct_i(x) (&((struct __toku_dbc_external *)x)->internal_part) +static_assert(sizeof(__toku_dbc_internal) <= sizeof(((DBC *) nullptr)->_internal), + "__toku_dbc_internal doesn't fit in the internal portion of a DBC"); + +static inline __toku_dbc_internal *dbc_struct_i(DBC *c) { + union dbc_union { + __toku_dbc_internal *dbc_internal; + char *buf; + } u; + u.buf = c->_internal; + return u.dbc_internal; +} + +static inline struct ft_cursor *dbc_ftcursor(DBC *c) { + return &dbc_struct_i(c)->ftcursor; +} static inline int env_opened(DB_ENV *env) { @@ -312,5 +324,3 @@ txn_is_read_only(DB_TXN* txn) { void env_panic(DB_ENV * env, int cause, const char * msg); void env_note_db_opened(DB_ENV *env, DB *db); void env_note_db_closed(DB_ENV *env, DB *db); - -#endif diff --git a/storage/tokudb/ft-index/src/ydb.cc b/storage/tokudb/ft-index/src/ydb.cc index a2bb221a40b..e61bf940175 100644 --- a/storage/tokudb/ft-index/src/ydb.cc +++ b/storage/tokudb/ft-index/src/ydb.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,51 +92,40 @@ PATENT RIGHTS GRANT: extern const char *toku_patent_string; const char *toku_copyright_string = "Copyright (c) 2007-2013 Tokutek Inc. 
All rights reserved."; -#include <toku_portability.h> -#include <toku_pthread.h> -#include <toku_assert.h> - #include <db.h> -#include <ctype.h> #include <errno.h> -#include <limits.h> -#include <stdio.h> -#include <stdlib.h> #include <string.h> -#include <fcntl.h> -#include <unistd.h> -#include <memory.h> - -#include <sys/stat.h> -#include <sys/types.h> - -#include <util/status.h> -#include <util/context.h> - -#include <ft/ft-flusher.h> -#include <ft/cachetable.h> -#include <ft/log.h> -#include <ft/checkpoint.h> -#include <ft/key.h> -#include <ft/ftloader.h> -#include <ft/log_header.h> -#include <ft/ft.h> -#include <ft/txn_manager.h> - -#include "ydb.h" -#include "ydb-internal.h" -#include "ydb_cursor.h" -#include "ydb_row_lock.h" -#include "ydb_env_func.h" -#include "ydb_db.h" -#include "ydb_write.h" -#include "ydb_txn.h" -#include "loader.h" -#include "indexer.h" + +#include "portability/memory.h" +#include "portability/toku_assert.h" +#include "portability/toku_portability.h" +#include "portability/toku_pthread.h" +#include "portability/toku_stdlib.h" + +#include "ft/ft-flusher.h" +#include "ft/cachetable/cachetable.h" +#include "ft/cachetable/checkpoint.h" +#include "ft/logger/log.h" +#include "ft/loader/loader.h" +#include "ft/log_header.h" +#include "ft/ft.h" +#include "ft/txn/txn_manager.h" +#include "src/ydb.h" +#include "src/ydb-internal.h" +#include "src/ydb_cursor.h" +#include "src/ydb_row_lock.h" +#include "src/ydb_env_func.h" +#include "src/ydb_db.h" +#include "src/ydb_write.h" +#include "src/ydb_txn.h" +#include "src/loader.h" +#include "src/indexer.h" +#include "util/status.h" +#include "util/context.h" // Include ydb_lib.cc here so that its constructor/destructor gets put into // ydb.o, to make sure they don't get erased at link time (when linking to -// a static libtokudb.a that was compiled with gcc). See #5094. +// a static libtokufractaltree.a that was compiled with gcc). See #5094. #include "ydb_lib.cc" #ifdef TOKUTRACE @@ -197,7 +186,7 @@ typedef struct { static YDB_LAYER_STATUS_S ydb_layer_status; #define STATUS_VALUE(x) ydb_layer_status.status[x].value.num -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ydb_layer_status, k, c, t, l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ydb_layer_status, k, c, t, l, inc) static void ydb_layer_status_init (void) { @@ -263,14 +252,14 @@ static void env_fs_report_in_yellow(DB_ENV *UU(env)) { char tbuf[26]; time_t tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb file system space is low\n", ctime_r(&tnow, tbuf)); fflush(stderr); + fprintf(stderr, "%.24s TokuFT file system space is low\n", ctime_r(&tnow, tbuf)); fflush(stderr); } static void env_fs_report_in_red(DB_ENV *UU(env)) { char tbuf[26]; time_t tnow = time(NULL); - fprintf(stderr, "%.24s Tokudb file system space is really low and access is restricted\n", ctime_r(&tnow, tbuf)); fflush(stderr); + fprintf(stderr, "%.24s TokuFT file system space is really low and access is restricted\n", ctime_r(&tnow, tbuf)); fflush(stderr); } static inline uint64_t @@ -279,7 +268,7 @@ env_fs_redzone(DB_ENV *env, uint64_t total) { } #define ZONEREPORTLIMIT 12 -// Check the available space in the file systems used by tokudb and erect barriers when available space gets low. +// Check the available space in the file systems used by tokuft and erect barriers when available space gets low. 
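The hunk above only renames the low-space warnings to TokuFT; the surrounding ydb.cc code polls the file system and erects barriers once free space falls inside a red zone derived from the total size (env_fs_redzone, env_fs_report_in_yellow/red). A rough sketch of that idea follows, assuming a percentage-based threshold; every name and the yellow/red cutoffs below are made up for illustration and are not the TokuFT implementation:

    #include <stdint.h>

    /* Illustrative only -- percentage-based red zone, names are hypothetical. */
    static uint64_t fs_redzone_bytes(uint64_t total_bytes, unsigned redzone_pct) {
        return (total_bytes / 100) * redzone_pct;
    }

    static void fs_poll_once(uint64_t total_bytes, uint64_t avail_bytes, unsigned redzone_pct,
                             void (*report_yellow)(void), void (*report_red)(void)) {
        uint64_t red = fs_redzone_bytes(total_bytes, redzone_pct);
        if (avail_bytes < red)
            report_red();        /* critically low: access should be restricted */
        else if (avail_bytes < 2 * red)
            report_yellow();     /* getting low: warn, but keep operating */
    }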
static int env_fs_poller(void *arg) { DB_ENV *env = (DB_ENV *) arg; @@ -456,7 +445,7 @@ static void keep_cachetable_callback (DB_ENV *env, CACHETABLE cachetable) static int ydb_do_recovery (DB_ENV *env) { assert(env->i->real_log_dir); - int r = tokudb_recover(env, + int r = tokuft_recover(env, toku_keep_prepared_txn_callback, keep_cachetable_callback, env->i->logger, @@ -470,33 +459,12 @@ ydb_do_recovery (DB_ENV *env) { static int needs_recovery (DB_ENV *env) { assert(env->i->real_log_dir); - int recovery_needed = tokudb_needs_recovery(env->i->real_log_dir, true); + int recovery_needed = tokuft_needs_recovery(env->i->real_log_dir, true); return recovery_needed ? DB_RUNRECOVERY : 0; } static int toku_env_txn_checkpoint(DB_ENV * env, uint32_t kbyte, uint32_t min, uint32_t flags); -// Instruct db to use the default (built-in) key comparison function -// by setting the flag bits in the db and ft structs -static int -db_use_builtin_key_cmp(DB *db) { - HANDLE_PANICKED_DB(db); - int r = 0; - if (db_opened(db)) - r = toku_ydb_do_error(db->dbenv, EINVAL, "Comparison functions cannot be set after DB open.\n"); - else if (db->i->key_compare_was_set) - r = toku_ydb_do_error(db->dbenv, EINVAL, "Key comparison function already set.\n"); - else { - uint32_t tflags; - toku_ft_get_flags(db->i->ft_handle, &tflags); - - tflags |= TOKU_DB_KEYCMP_BUILTIN; - toku_ft_set_flags(db->i->ft_handle, tflags); - db->i->key_compare_was_set = true; - } - return r; -} - // Keys used in persistent environment dictionary: // Following keys added in version 12 static const char * orig_env_ver_key = "original_version"; @@ -553,7 +521,7 @@ typedef struct { static PERSISTENT_UPGRADE_STATUS_S persistent_upgrade_status; -#define PERSISTENT_UPGRADE_STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(persistent_upgrade_status, k, c, t, "upgrade: " l, inc) +#define PERSISTENT_UPGRADE_STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(persistent_upgrade_status, k, c, t, "upgrade: " l, inc) static void persistent_upgrade_status_init (void) { @@ -703,7 +671,7 @@ capture_persistent_env_contents (DB_ENV * env, DB_TXN * txn) { // return 0 if log exists or ENOENT if log does not exist static int ydb_recover_log_exists(DB_ENV *env) { - int r = tokudb_recover_log_exists(env->i->real_log_dir); + int r = tokuft_recover_log_exists(env->i->real_log_dir); return r; } @@ -866,20 +834,20 @@ env_open(DB_ENV * env, const char *home, uint32_t flags, int mode) { HANDLE_EXTRA_FLAGS(env, flags, DB_CREATE|DB_PRIVATE|DB_INIT_LOG|DB_INIT_TXN|DB_RECOVER|DB_INIT_MPOOL|DB_INIT_LOCK|DB_THREAD); - // DB_CREATE means create if env does not exist, and Tokudb requires it because - // Tokudb requries DB_PRIVATE. + // DB_CREATE means create if env does not exist, and TokuFT requires it because + // TokuFT requries DB_PRIVATE. if ((flags & DB_PRIVATE) && !(flags & DB_CREATE)) { r = toku_ydb_do_error(env, ENOENT, "DB_PRIVATE requires DB_CREATE (seems gratuitous to us, but that's BDB's behavior\n"); goto cleanup; } if (!(flags & DB_PRIVATE)) { - r = toku_ydb_do_error(env, ENOENT, "TokuDB requires DB_PRIVATE\n"); + r = toku_ydb_do_error(env, ENOENT, "TokuFT requires DB_PRIVATE\n"); goto cleanup; } if ((flags & DB_INIT_LOG) && !(flags & DB_INIT_TXN)) { - r = toku_ydb_do_error(env, EINVAL, "TokuDB requires transactions for logging\n"); + r = toku_ydb_do_error(env, EINVAL, "TokuFT requires transactions for logging\n"); goto cleanup; } @@ -991,13 +959,13 @@ env_open(DB_ENV * env, const char *home, uint32_t flags, int mode) { // This is probably correct, but it will be pain... 
// if ((flags & DB_THREAD)==0) { -// r = toku_ydb_do_error(env, EINVAL, "TokuDB requires DB_THREAD"); +// r = toku_ydb_do_error(env, EINVAL, "TokuFT requires DB_THREAD"); // goto cleanup; // } unused_flags &= ~DB_THREAD; if (unused_flags!=0) { - r = toku_ydb_do_error(env, EINVAL, "Extra flags not understood by tokudb: %u\n", unused_flags); + r = toku_ydb_do_error(env, EINVAL, "Extra flags not understood by tokuft: %u\n", unused_flags); goto cleanup; } @@ -1036,7 +1004,7 @@ env_open(DB_ENV * env, const char *home, uint32_t flags, int mode) { { r = toku_db_create(&env->i->persistent_environment, env, 0); assert_zero(r); - r = db_use_builtin_key_cmp(env->i->persistent_environment); + r = toku_db_use_builtin_key_cmp(env->i->persistent_environment); assert_zero(r); r = toku_db_open_iname(env->i->persistent_environment, txn, toku_product_name_strings.environmentdictionary, DB_CREATE, mode); if (r != 0) { @@ -1074,7 +1042,7 @@ env_open(DB_ENV * env, const char *home, uint32_t flags, int mode) { { r = toku_db_create(&env->i->directory, env, 0); assert_zero(r); - r = db_use_builtin_key_cmp(env->i->directory); + r = toku_db_use_builtin_key_cmp(env->i->directory); assert_zero(r); r = toku_db_open_iname(env->i->directory, txn, toku_product_name_strings.fileopsdirectory, DB_CREATE, mode); if (r != 0) { @@ -1124,6 +1092,12 @@ static int env_close(DB_ENV * env, uint32_t flags) { int r = 0; const char * err_msg = NULL; + bool clean_shutdown = true; + + if (flags & TOKUFT_DIRTY_SHUTDOWN) { + clean_shutdown = false; + flags &= ~TOKUFT_DIRTY_SHUTDOWN; + } most_recent_env = NULL; // Set most_recent_env to NULL so that we don't have a dangling pointer (and if there's an error, the toku assert code would try to look at the env.) @@ -1160,25 +1134,32 @@ env_close(DB_ENV * env, uint32_t flags) { goto panic_and_quit_early; } } + env_fsync_log_cron_destroy(env); if (env->i->cachetable) { + toku_cachetable_prepare_close(env->i->cachetable); toku_cachetable_minicron_shutdown(env->i->cachetable); if (env->i->logger) { - CHECKPOINTER cp = toku_cachetable_get_checkpointer(env->i->cachetable); - r = toku_checkpoint(cp, env->i->logger, NULL, NULL, NULL, NULL, SHUTDOWN_CHECKPOINT); - if (r) { - err_msg = "Cannot close environment (error during checkpoint)\n"; - toku_ydb_do_error(env, r, "%s", err_msg); - goto panic_and_quit_early; + CHECKPOINTER cp = nullptr; + if (clean_shutdown) { + cp = toku_cachetable_get_checkpointer(env->i->cachetable); + r = toku_checkpoint(cp, env->i->logger, NULL, NULL, NULL, NULL, SHUTDOWN_CHECKPOINT); + if (r) { + err_msg = "Cannot close environment (error during checkpoint)\n"; + toku_ydb_do_error(env, r, "%s", err_msg); + goto panic_and_quit_early; + } } - toku_logger_close_rollback(env->i->logger); - //Do a second checkpoint now that the rollback cachefile is closed. - r = toku_checkpoint(cp, env->i->logger, NULL, NULL, NULL, NULL, SHUTDOWN_CHECKPOINT); - if (r) { - err_msg = "Cannot close environment (error during checkpoint)\n"; - toku_ydb_do_error(env, r, "%s", err_msg); - goto panic_and_quit_early; + toku_logger_close_rollback_check_empty(env->i->logger, clean_shutdown); + if (clean_shutdown) { + //Do a second checkpoint now that the rollback cachefile is closed. 
+ r = toku_checkpoint(cp, env->i->logger, NULL, NULL, NULL, NULL, SHUTDOWN_CHECKPOINT); + if (r) { + err_msg = "Cannot close environment (error during checkpoint)\n"; + toku_ydb_do_error(env, r, "%s", err_msg); + goto panic_and_quit_early; + } + toku_logger_shutdown(env->i->logger); } - toku_logger_shutdown(env->i->logger); } toku_cachetable_close(&env->i->cachetable); } @@ -1200,7 +1181,6 @@ env_close(DB_ENV * env, uint32_t flags) { } env_fs_destroy(env); - env_fsync_log_cron_destroy(env); env->i->ltm.destroy(); if (env->i->data_dir) toku_free(env->i->data_dir); @@ -1230,7 +1210,7 @@ env_close(DB_ENV * env, uint32_t flags) { unlock_single_process(env); toku_free(env->i); toku_free(env); - toku_sync_fetch_and_add(&tokudb_num_envs, -1); + toku_sync_fetch_and_add(&tokuft_num_envs, -1); if (flags != 0) { r = EINVAL; } @@ -1405,7 +1385,7 @@ env_set_flags(DB_ENV * env, uint32_t flags, int onoff) { flags &= ~DB_AUTO_COMMIT; } if (flags != 0 && onoff) { - return toku_ydb_do_error(env, EINVAL, "TokuDB does not (yet) support any nonzero ENV flags other than DB_AUTO_COMMIT\n"); + return toku_ydb_do_error(env, EINVAL, "TokuFT does not (yet) support any nonzero ENV flags other than DB_AUTO_COMMIT\n"); } if (onoff) env->i->open_flags |= change; else env->i->open_flags &= ~change; @@ -1451,7 +1431,7 @@ env_get_lg_max(DB_ENV * env, uint32_t *lg_maxp) { static int env_set_lk_detect(DB_ENV * env, uint32_t UU(detect)) { HANDLE_PANICKED_ENV(env); - return toku_ydb_do_error(env, EINVAL, "TokuDB does not (yet) support set_lk_detect\n"); + return toku_ydb_do_error(env, EINVAL, "TokuFT does not (yet) support set_lk_detect\n"); } static int @@ -1796,7 +1776,7 @@ typedef struct { static FS_STATUS_S fsstat; -#define FS_STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(fsstat, k, c, t, "filesystem: " l, inc) +#define FS_STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(fsstat, k, c, t, "filesystem: " l, inc) static void fs_status_init(void) { @@ -1867,7 +1847,7 @@ typedef struct { static MEMORY_STATUS_S memory_status; -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(memory_status, k, c, t, "memory: " l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(memory_status, k, c, t, "memory: " l, inc) static void memory_status_init(void) { @@ -2464,7 +2444,7 @@ struct iter_txn_row_locks_callback_extra { const int r = lt_map->fetch(which_lt, &ranges); invariant_zero(r); current_db = locked_get_db_by_dict_id(env, ranges.lt->get_dict_id()); - iter.create(ranges.buffer); + iter = toku::range_buffer::iterator(ranges.buffer); } DB_ENV *env; @@ -2694,7 +2674,7 @@ toku_env_create(DB_ENV ** envp, uint32_t flags) { *envp = result; r = 0; - toku_sync_fetch_and_add(&tokudb_num_envs, 1); + toku_sync_fetch_and_add(&tokuft_num_envs, 1); cleanup: if (r!=0) { if (result) { @@ -2901,7 +2881,13 @@ env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, u r = toku_db_create(&db, env, 0); lazy_assert_zero(r); r = toku_db_open_iname(db, txn, iname, 0, 0); - lazy_assert_zero(r); + if (txn && r) { + if (r == EMFILE || r == ENFILE) + r = toku_ydb_do_error(env, r, "toku dbremove failed because open file limit reached\n"); + else + r = toku_ydb_do_error(env, r, "toku dbremove failed\n"); + goto exit; + } if (txn) { // Now that we have a writelock on dname, verify that there are still no handles open. 
(to prevent race conditions) if (env_is_db_with_dname_open(env, dname)) { @@ -3073,15 +3059,15 @@ db_strerror(int error) { case TOKUDB_OUT_OF_LOCKS: return "Out of locks"; case TOKUDB_DICTIONARY_TOO_OLD: - return "Dictionary too old for this version of TokuDB"; + return "Dictionary too old for this version of TokuFT"; case TOKUDB_DICTIONARY_TOO_NEW: - return "Dictionary too new for this version of TokuDB"; + return "Dictionary too new for this version of TokuFT"; case TOKUDB_CANCELED: return "User cancelled operation"; case TOKUDB_NO_DATA: return "Ran out of data (not EOF)"; case TOKUDB_HUGE_PAGES_ENABLED: - return "Transparent huge pages are enabled but TokuDB's memory allocator will oversubscribe main memory with transparent huge pages. This check can be disabled by setting the environment variable TOKU_HUGE_PAGES_OK."; + return "Transparent huge pages are enabled but TokuFT's memory allocator will oversubscribe main memory with transparent huge pages. This check can be disabled by setting the environment variable TOKU_HUGE_PAGES_OK."; } static char unknown_result[100]; // Race condition if two threads call this at the same time. However even in a bad case, it should be some sort of null-terminated string. diff --git a/storage/tokudb/ft-index/src/ydb.h b/storage/tokudb/ft-index/src/ydb.h index e7de82b5db2..fad41f382f3 100644 --- a/storage/tokudb/ft-index/src/ydb.h +++ b/storage/tokudb/ft-index/src/ydb.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,11 +88,8 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -// This file defines the public interface to the ydb library - -#if !defined(TOKU_YDB_INTERFACE_H) -#define TOKU_YDB_INTERFACE_H +#pragma once // Initialize the ydb library globals. // Called when the ydb library is loaded. @@ -114,5 +111,3 @@ extern "C" uint64_t toku_test_get_latest_lsn(DB_ENV *env) __attribute__((__visib // test-only function extern "C" int toku_test_get_checkpointing_user_data_status(void) __attribute__((__visibility__("default"))); - -#endif diff --git a/storage/tokudb/ft-index/src/ydb_cursor.cc b/storage/tokudb/ft-index/src/ydb_cursor.cc index aa236ab0324..c42e2fb673e 100644 --- a/storage/tokudb/ft-index/src/ydb_cursor.cc +++ b/storage/tokudb/ft-index/src/ydb_cursor.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -97,6 +97,7 @@ PATENT RIGHTS GRANT: #include "ydb-internal.h" #include "ydb_cursor.h" #include "ydb_row_lock.h" +#include "ft/cursor.h" static YDB_C_LAYER_STATUS_S ydb_c_layer_status; #ifdef STATUS_VALUE @@ -104,7 +105,7 @@ static YDB_C_LAYER_STATUS_S ydb_c_layer_status; #endif #define STATUS_VALUE(x) ydb_c_layer_status.status[x].value.num -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ydb_c_layer_status, k, c, t, l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ydb_c_layer_status, k, c, t, l, inc) static void ydb_c_layer_status_init (void) { @@ -133,8 +134,8 @@ get_nonmain_cursor_flags(uint32_t flags) { } static inline bool -c_uninitialized(DBC* c) { - return toku_ft_cursor_uninitialized(dbc_struct_i(c)->c); +c_uninitialized(DBC *c) { + return toku_ft_cursor_uninitialized(dbc_ftcursor(c)); } typedef struct query_context_wrapped_t { @@ -200,7 +201,7 @@ typedef struct query_context_with_input_t { static void query_context_base_init(QUERY_CONTEXT_BASE context, DBC *c, uint32_t flag, bool is_write_op, YDB_CALLBACK_FUNCTION f, void *extra) { - context->c = dbc_struct_i(c)->c; + context->c = dbc_ftcursor(c); context->txn = dbc_struct_i(c)->txn; context->db = c->dbp; context->f = f; @@ -247,7 +248,7 @@ query_context_with_input_init(QUERY_CONTEXT_WITH_INPUT context, DBC *c, uint32_t context->input_val = val; } -static int c_getf_first_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool); +static int c_getf_first_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool); static void c_query_context_init(QUERY_CONTEXT context, DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { @@ -277,7 +278,7 @@ c_getf_first(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { c_query_context_init(&context, c, flag, f, extra); while (r == 0) { //toku_ft_cursor_first will call c_getf_first_callback(..., context) (if query is successful) - r = toku_ft_cursor_first(dbc_struct_i(c)->c, c_getf_first_callback, &context); + r = toku_ft_cursor_first(dbc_ftcursor(c), c_getf_first_callback, &context); if (r == DB_LOCK_NOTGRANTED) { r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request); } else { @@ -290,7 +291,7 @@ c_getf_first(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { //result is the result of the query (i.e. 0 means found, DB_NOTFOUND, etc..) 
static int -c_getf_first_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool lock_only) { +c_getf_first_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) { QUERY_CONTEXT super_context = (QUERY_CONTEXT) extra; QUERY_CONTEXT_BASE context = &super_context->base; @@ -317,7 +318,7 @@ c_getf_first_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, return r; } -static int c_getf_last_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool); +static int c_getf_last_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool); static int c_getf_last(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { @@ -328,7 +329,7 @@ c_getf_last(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { c_query_context_init(&context, c, flag, f, extra); while (r == 0) { //toku_ft_cursor_last will call c_getf_last_callback(..., context) (if query is successful) - r = toku_ft_cursor_last(dbc_struct_i(c)->c, c_getf_last_callback, &context); + r = toku_ft_cursor_last(dbc_ftcursor(c), c_getf_last_callback, &context); if (r == DB_LOCK_NOTGRANTED) { r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request); } else { @@ -341,7 +342,7 @@ c_getf_last(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { //result is the result of the query (i.e. 0 means found, DB_NOTFOUND, etc..) static int -c_getf_last_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool lock_only) { +c_getf_last_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) { QUERY_CONTEXT super_context = (QUERY_CONTEXT) extra; QUERY_CONTEXT_BASE context = &super_context->base; @@ -368,7 +369,7 @@ c_getf_last_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, v return r; } -static int c_getf_next_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool); +static int c_getf_next_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool); static int c_getf_next(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { @@ -383,7 +384,7 @@ c_getf_next(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { c_query_context_init(&context, c, flag, f, extra); while (r == 0) { //toku_ft_cursor_next will call c_getf_next_callback(..., context) (if query is successful) - r = toku_ft_cursor_next(dbc_struct_i(c)->c, c_getf_next_callback, &context); + r = toku_ft_cursor_next(dbc_ftcursor(c), c_getf_next_callback, &context); if (r == DB_LOCK_NOTGRANTED) { r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request); } else { @@ -397,7 +398,7 @@ c_getf_next(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { //result is the result of the query (i.e. 0 means found, DB_NOTFOUND, etc..) 
static int -c_getf_next_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool lock_only) { +c_getf_next_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) { QUERY_CONTEXT super_context = (QUERY_CONTEXT) extra; QUERY_CONTEXT_BASE context = &super_context->base; @@ -427,7 +428,7 @@ c_getf_next_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, v return r; } -static int c_getf_prev_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool); +static int c_getf_prev_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool); static int c_getf_prev(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { @@ -442,7 +443,7 @@ c_getf_prev(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { c_query_context_init(&context, c, flag, f, extra); while (r == 0) { //toku_ft_cursor_prev will call c_getf_prev_callback(..., context) (if query is successful) - r = toku_ft_cursor_prev(dbc_struct_i(c)->c, c_getf_prev_callback, &context); + r = toku_ft_cursor_prev(dbc_ftcursor(c), c_getf_prev_callback, &context); if (r == DB_LOCK_NOTGRANTED) { r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request); } else { @@ -456,7 +457,7 @@ c_getf_prev(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { //result is the result of the query (i.e. 0 means found, DB_NOTFOUND, etc..) static int -c_getf_prev_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool lock_only) { +c_getf_prev_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) { QUERY_CONTEXT super_context = (QUERY_CONTEXT) extra; QUERY_CONTEXT_BASE context = &super_context->base; @@ -485,7 +486,7 @@ c_getf_prev_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, v return r; } -static int c_getf_current_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool); +static int c_getf_current_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool); static int c_getf_current(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { @@ -495,14 +496,14 @@ c_getf_current(DBC *c, uint32_t flag, YDB_CALLBACK_FUNCTION f, void *extra) { QUERY_CONTEXT_S context; //Describes the context of this query. c_query_context_init(&context, c, flag, f, extra); //toku_ft_cursor_current will call c_getf_current_callback(..., context) (if query is successful) - int r = toku_ft_cursor_current(dbc_struct_i(c)->c, DB_CURRENT, c_getf_current_callback, &context); + int r = toku_ft_cursor_current(dbc_ftcursor(c), DB_CURRENT, c_getf_current_callback, &context); c_query_context_destroy(&context); return r; } //result is the result of the query (i.e. 0 means found, DB_NOTFOUND, etc..) 
static int -c_getf_current_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool lock_only) { +c_getf_current_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) { QUERY_CONTEXT super_context = (QUERY_CONTEXT) extra; QUERY_CONTEXT_BASE context = &super_context->base; @@ -522,7 +523,7 @@ c_getf_current_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val return r; } -static int c_getf_set_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool); +static int c_getf_set_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool); int toku_c_getf_set(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra) { @@ -534,7 +535,7 @@ toku_c_getf_set(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void * query_context_with_input_init(&context, c, flag, key, NULL, f, extra); while (r == 0) { //toku_ft_cursor_set will call c_getf_set_callback(..., context) (if query is successful) - r = toku_ft_cursor_set(dbc_struct_i(c)->c, key, c_getf_set_callback, &context); + r = toku_ft_cursor_set(dbc_ftcursor(c), key, c_getf_set_callback, &context); if (r == DB_LOCK_NOTGRANTED) { r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request); } else { @@ -547,7 +548,7 @@ toku_c_getf_set(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void * //result is the result of the query (i.e. 0 means found, DB_NOTFOUND, etc..) static int -c_getf_set_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool lock_only) { +c_getf_set_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) { QUERY_CONTEXT_WITH_INPUT super_context = (QUERY_CONTEXT_WITH_INPUT) extra; QUERY_CONTEXT_BASE context = &super_context->base; @@ -575,7 +576,7 @@ c_getf_set_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, vo return r; } -static int c_getf_set_range_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool); +static int c_getf_set_range_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool); static int c_getf_set_range(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra) { @@ -587,7 +588,7 @@ c_getf_set_range(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void query_context_with_input_init(&context, c, flag, key, NULL, f, extra); while (r == 0) { //toku_ft_cursor_set_range will call c_getf_set_range_callback(..., context) (if query is successful) - r = toku_ft_cursor_set_range(dbc_struct_i(c)->c, key, nullptr, c_getf_set_range_callback, &context); + r = toku_ft_cursor_set_range(dbc_ftcursor(c), key, nullptr, c_getf_set_range_callback, &context); if (r == DB_LOCK_NOTGRANTED) { r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request); } else { @@ -600,7 +601,7 @@ c_getf_set_range(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void //result is the result of the query (i.e. 0 means found, DB_NOTFOUND, etc..) 
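// A minimal sketch, not taken from this patch, of a reader callback written
// against the new signature used throughout this file: the ITEMLEN/bytevec
// typedefs give way to plain uint32_t lengths and const void * byte pointers.
// The context struct and function below are hypothetical illustrations only.
struct copy_key_extra {
    DBT *dest;  // caller-owned destination for the key bytes
};
static int copy_key_callback(uint32_t keylen, const void *key,
                             uint32_t vallen, const void *val,
                             void *extra, bool lock_only) {
    (void) vallen;
    (void) val;
    if (!lock_only) {
        copy_key_extra *e = static_cast<copy_key_extra *>(extra);
        // borrow the callee-owned key bytes rather than copying them,
        // the same way db_get_last_key_callback fills its DBT
        toku_fill_dbt(e->dest, key, keylen);
    }
    return 0;
}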
static int -c_getf_set_range_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool lock_only) { +c_getf_set_range_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) { QUERY_CONTEXT_WITH_INPUT super_context = (QUERY_CONTEXT_WITH_INPUT) extra; QUERY_CONTEXT_BASE context = &super_context->base; @@ -641,7 +642,7 @@ c_getf_set_range_with_bound(DBC *c, uint32_t flag, DBT *key, DBT *key_bound, YDB query_context_with_input_init(&context, c, flag, key, NULL, f, extra); while (r == 0) { //toku_ft_cursor_set_range will call c_getf_set_range_callback(..., context) (if query is successful) - r = toku_ft_cursor_set_range(dbc_struct_i(c)->c, key, key_bound, c_getf_set_range_callback, &context); + r = toku_ft_cursor_set_range(dbc_ftcursor(c), key, key_bound, c_getf_set_range_callback, &context); if (r == DB_LOCK_NOTGRANTED) { r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request); } else { @@ -652,7 +653,7 @@ c_getf_set_range_with_bound(DBC *c, uint32_t flag, DBT *key, DBT *key_bound, YDB return r; } -static int c_getf_set_range_reverse_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool); +static int c_getf_set_range_reverse_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool); static int c_getf_set_range_reverse(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra) { @@ -664,7 +665,7 @@ c_getf_set_range_reverse(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION query_context_with_input_init(&context, c, flag, key, NULL, f, extra); while (r == 0) { //toku_ft_cursor_set_range_reverse will call c_getf_set_range_reverse_callback(..., context) (if query is successful) - r = toku_ft_cursor_set_range_reverse(dbc_struct_i(c)->c, key, c_getf_set_range_reverse_callback, &context); + r = toku_ft_cursor_set_range_reverse(dbc_ftcursor(c), key, c_getf_set_range_reverse_callback, &context); if (r == DB_LOCK_NOTGRANTED) { r = toku_db_wait_range_lock(context.base.db, context.base.txn, &context.base.request); } else { @@ -677,7 +678,7 @@ c_getf_set_range_reverse(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION //result is the result of the query (i.e. 0 means found, DB_NOTFOUND, etc..) static int -c_getf_set_range_reverse_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, bytevec val, void *extra, bool lock_only) { +c_getf_set_range_reverse_callback(uint32_t keylen, const void *key, uint32_t vallen, const void *val, void *extra, bool lock_only) { QUERY_CONTEXT_WITH_INPUT super_context = (QUERY_CONTEXT_WITH_INPUT) extra; QUERY_CONTEXT_BASE context = &super_context->base; @@ -708,14 +709,19 @@ c_getf_set_range_reverse_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen, b return r; } -// Close a cursor. -int -toku_c_close(DBC * c) { + +int toku_c_close_internal(DBC *c) { HANDLE_PANICKED_DB(c->dbp); HANDLE_CURSOR_ILLEGAL_WORKING_PARENT_TXN(c); - toku_ft_cursor_close(dbc_struct_i(c)->c); + toku_ft_cursor_destroy(dbc_ftcursor(c)); toku_sdbt_cleanup(&dbc_struct_i(c)->skey_s); toku_sdbt_cleanup(&dbc_struct_i(c)->sval_s); + return 0; +} + +// Close a cursor. 
+int toku_c_close(DBC *c) { + toku_c_close_internal(c); toku_free(c); return 0; } @@ -739,7 +745,7 @@ c_set_bounds(DBC *dbc, const DBT *left_key, const DBT *right_key, bool pre_acqui DB *db = dbc->dbp; DB_TXN *txn = dbc_struct_i(dbc)->txn; HANDLE_PANICKED_DB(db); - toku_ft_cursor_set_range_lock(dbc_struct_i(dbc)->c, left_key, right_key, + toku_ft_cursor_set_range_lock(dbc_ftcursor(dbc), left_key, right_key, (left_key == toku_dbt_negative_infinity()), (right_key == toku_dbt_positive_infinity()), out_of_range_error); @@ -757,12 +763,12 @@ c_set_bounds(DBC *dbc, const DBT *left_key, const DBT *right_key, bool pre_acqui static void c_remove_restriction(DBC *dbc) { - toku_ft_cursor_remove_restriction(dbc_struct_i(dbc)->c); + toku_ft_cursor_remove_restriction(dbc_ftcursor(dbc)); } static void c_set_check_interrupt_callback(DBC* dbc, bool (*interrupt_callback)(void*), void *extra) { - toku_ft_cursor_set_check_interrupt_cb(dbc_struct_i(dbc)->c, interrupt_callback, extra); + toku_ft_cursor_set_check_interrupt_cb(dbc_ftcursor(dbc), interrupt_callback, extra); } int @@ -828,7 +834,7 @@ toku_c_get(DBC* c, DBT* key, DBT* val, uint32_t flag) { } int -toku_db_cursor_internal(DB * db, DB_TXN * txn, DBC ** c, uint32_t flags, int is_temporary_cursor) { +toku_db_cursor_internal(DB * db, DB_TXN * txn, DBC *c, uint32_t flags, int is_temporary_cursor) { HANDLE_PANICKED_DB(db); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); DB_ENV* env = db->dbenv; @@ -841,13 +847,7 @@ toku_db_cursor_internal(DB * db, DB_TXN * txn, DBC ** c, uint32_t flags, int is_ ); } - int r = 0; - - struct __toku_dbc_external *XMALLOC(eresult); // so the internal stuff is stuck on the end - memset(eresult, 0, sizeof(*eresult)); - DBC *result = &eresult->external_part; - -#define SCRS(name) result->name = name +#define SCRS(name) c->name = name SCRS(c_getf_first); SCRS(c_getf_last); SCRS(c_getf_next); @@ -861,59 +861,49 @@ toku_db_cursor_internal(DB * db, DB_TXN * txn, DBC ** c, uint32_t flags, int is_ SCRS(c_set_check_interrupt_callback); #undef SCRS - result->c_get = toku_c_get; - result->c_getf_set = toku_c_getf_set; - result->c_close = toku_c_close; + c->c_get = toku_c_get; + c->c_getf_set = toku_c_getf_set; + c->c_close = toku_c_close; - result->dbp = db; + c->dbp = db; - dbc_struct_i(result)->txn = txn; - dbc_struct_i(result)->skey_s = (struct simple_dbt){0,0}; - dbc_struct_i(result)->sval_s = (struct simple_dbt){0,0}; + dbc_struct_i(c)->txn = txn; + dbc_struct_i(c)->skey_s = (struct simple_dbt){0,0}; + dbc_struct_i(c)->sval_s = (struct simple_dbt){0,0}; if (is_temporary_cursor) { - dbc_struct_i(result)->skey = &db->i->skey; - dbc_struct_i(result)->sval = &db->i->sval; + dbc_struct_i(c)->skey = &db->i->skey; + dbc_struct_i(c)->sval = &db->i->sval; } else { - dbc_struct_i(result)->skey = &dbc_struct_i(result)->skey_s; - dbc_struct_i(result)->sval = &dbc_struct_i(result)->sval_s; + dbc_struct_i(c)->skey = &dbc_struct_i(c)->skey_s; + dbc_struct_i(c)->sval = &dbc_struct_i(c)->sval_s; } if (flags & DB_SERIALIZABLE) { - dbc_struct_i(result)->iso = TOKU_ISO_SERIALIZABLE; + dbc_struct_i(c)->iso = TOKU_ISO_SERIALIZABLE; } else { - dbc_struct_i(result)->iso = txn ? db_txn_struct_i(txn)->iso : TOKU_ISO_SERIALIZABLE; + dbc_struct_i(c)->iso = txn ? 
db_txn_struct_i(txn)->iso : TOKU_ISO_SERIALIZABLE; } - dbc_struct_i(result)->rmw = (flags & DB_RMW) != 0; + dbc_struct_i(c)->rmw = (flags & DB_RMW) != 0; bool is_snapshot_read = false; if (txn) { - is_snapshot_read = (dbc_struct_i(result)->iso == TOKU_ISO_READ_COMMITTED || - dbc_struct_i(result)->iso == TOKU_ISO_SNAPSHOT); + is_snapshot_read = (dbc_struct_i(c)->iso == TOKU_ISO_READ_COMMITTED || + dbc_struct_i(c)->iso == TOKU_ISO_SNAPSHOT); } - r = toku_ft_cursor( + int r = toku_ft_cursor_create( db->i->ft_handle, - &dbc_struct_i(result)->c, + dbc_ftcursor(c), txn ? db_txn_struct_i(txn)->tokutxn : NULL, is_snapshot_read, - ((flags & DBC_DISABLE_PREFETCHING) != 0) + ((flags & DBC_DISABLE_PREFETCHING) != 0), + is_temporary_cursor != 0 ); - assert(r == 0 || r == TOKUDB_MVCC_DICTIONARY_TOO_NEW); - if (r == 0) { - // Set the is_temporary_cursor boolean inside the ftnode so - // that a query only needing one cursor will not perform - // unecessary malloc calls. - if (is_temporary_cursor) { - toku_ft_cursor_set_temporary(dbc_struct_i(result)->c); - } - - *c = result; - } - else { - toku_free(result); + if (r != 0) { + invariant(r == TOKUDB_MVCC_DICTIONARY_TOO_NEW); } return r; } static inline int -autotxn_db_cursor(DB *db, DB_TXN *txn, DBC **c, uint32_t flags) { +autotxn_db_cursor(DB *db, DB_TXN *txn, DBC *c, uint32_t flags) { if (!txn && (db->dbenv->i->open_flags & DB_INIT_TXN)) { return toku_ydb_do_error(db->dbenv, EINVAL, "Cursors in a transaction environment must have transactions.\n"); @@ -922,9 +912,14 @@ autotxn_db_cursor(DB *db, DB_TXN *txn, DBC **c, uint32_t flags) { } // Create a cursor on a db. -int -toku_db_cursor(DB *db, DB_TXN *txn, DBC **c, uint32_t flags) { - int r = autotxn_db_cursor(db, txn, c, flags); +int toku_db_cursor(DB *db, DB_TXN *txn, DBC **c, uint32_t flags) { + DBC *XMALLOC(cursor); + int r = autotxn_db_cursor(db, txn, cursor, flags); + if (r == 0) { + *c = cursor; + } else { + toku_free(cursor); + } return r; } diff --git a/storage/tokudb/ft-index/src/ydb_cursor.h b/storage/tokudb/ft-index/src/ydb_cursor.h index 9666cc4e61e..a10e32f3002 100644 --- a/storage/tokudb/ft-index/src/ydb_cursor.h +++ b/storage/tokudb/ft-index/src/ydb_cursor.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,14 +86,12 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
// This file defines the public interface to the ydb library -#if !defined(TOKU_YDB_CURSOR_H) -#define TOKU_YDB_CURSOR_H - - typedef enum { YDB_C_LAYER_STATUS_NUM_ROWS = 0 /* number of rows in this status array */ } ydb_c_lock_layer_status_entry; @@ -107,10 +105,9 @@ void ydb_c_layer_get_status(YDB_C_LAYER_STATUS statp); int toku_c_get(DBC * c, DBT * key, DBT * data, uint32_t flag); int toku_c_getf_set(DBC *c, uint32_t flag, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra); -int toku_c_close(DBC * c); -int toku_db_cursor_internal(DB *db, DB_TXN * txn, DBC **c, uint32_t flags, int is_temporary_cursor); -int toku_db_cursor(DB *db, DB_TXN *txn, DBC **c, uint32_t flags); - +int toku_db_cursor(DB *db, DB_TXN *txn, DBC **c, uint32_t flags); +int toku_db_cursor_internal(DB *db, DB_TXN * txn, DBC *c, uint32_t flags, int is_temporary_cursor); -#endif +int toku_c_close(DBC *c); +int toku_c_close_internal(DBC *c); diff --git a/storage/tokudb/ft-index/src/ydb_db.cc b/storage/tokudb/ft-index/src/ydb_db.cc index 78e08705ac6..2c54a3bd4dc 100644 --- a/storage/tokudb/ft-index/src/ydb_db.cc +++ b/storage/tokudb/ft-index/src/ydb_db.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -95,8 +95,7 @@ PATENT RIGHTS GRANT: #include <locktree/locktree.h> #include <ft/ft.h> #include <ft/ft-flusher.h> -#include <ft/checkpoint.h> -#include <ft/log_header.h> +#include <ft/cachetable/checkpoint.h> #include "ydb_cursor.h" #include "ydb_row_lock.h" @@ -115,7 +114,7 @@ static YDB_DB_LAYER_STATUS_S ydb_db_layer_status; #endif #define STATUS_VALUE(x) ydb_db_layer_status.status[x].value.num -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ydb_db_layer_status, k, c, t, l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ydb_db_layer_status, k, c, t, l, inc) static void ydb_db_layer_status_init (void) { @@ -225,13 +224,13 @@ int db_getf_set(DB *db, DB_TXN *txn, uint32_t flags, DBT *key, YDB_CALLBACK_FUNCTION f, void *extra) { HANDLE_PANICKED_DB(db); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); - DBC *c; + DBC c; uint32_t create_flags = flags & (DB_ISOLATION_FLAGS | DB_RMW); flags &= ~DB_ISOLATION_FLAGS; int r = toku_db_cursor_internal(db, txn, &c, create_flags | DBC_DISABLE_PREFETCHING, 1); if (r==0) { - r = toku_c_getf_set(c, flags, key, f, extra); - int r2 = toku_c_close(c); + r = toku_c_getf_set(&c, flags, key, f, extra); + int r2 = toku_c_close_internal(&c); if (r==0) r = r2; } return r; @@ -258,12 +257,12 @@ toku_db_get (DB * db, DB_TXN * txn, DBT * key, DBT * data, uint32_t flags) { // And DB_GET_BOTH is no longer supported. #2862. if (flags != 0) return EINVAL; - DBC *dbc; + DBC dbc; r = toku_db_cursor_internal(db, txn, &dbc, iso_flags | DBC_DISABLE_PREFETCHING, 1); if (r!=0) return r; uint32_t c_get_flags = DB_SET; - r = toku_c_get(dbc, key, data, c_get_flags | lock_flags); - int r2 = toku_c_close(dbc); + r = toku_c_get(&dbc, key, data, c_get_flags | lock_flags); + int r2 = toku_c_close_internal(&dbc); return r ? 
r : r2; } @@ -390,10 +389,12 @@ toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYP // locktree's descriptor pointer if necessary static void db_set_descriptors(DB *db, FT_HANDLE ft_handle) { + const toku::comparator &cmp = toku_ft_get_comparator(ft_handle); db->descriptor = toku_ft_get_descriptor(ft_handle); db->cmp_descriptor = toku_ft_get_cmp_descriptor(ft_handle); + invariant(db->cmp_descriptor == cmp.get_descriptor()); if (db->i->lt) { - db->i->lt->set_descriptor(db->cmp_descriptor); + db->i->lt->set_comparator(cmp); } } @@ -430,8 +431,27 @@ void toku_db_lt_on_destroy_callback(toku::locktree *lt) { toku_ft_handle_close(ft_handle); } -int -toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, uint32_t flags, int mode) { +// Instruct db to use the default (built-in) key comparison function +// by setting the flag bits in the db and ft structs +int toku_db_use_builtin_key_cmp(DB *db) { + HANDLE_PANICKED_DB(db); + int r = 0; + if (db_opened(db)) { + r = toku_ydb_do_error(db->dbenv, EINVAL, "Comparison functions cannot be set after DB open.\n"); + } else if (db->i->key_compare_was_set) { + r = toku_ydb_do_error(db->dbenv, EINVAL, "Key comparison function already set.\n"); + } else { + uint32_t tflags; + toku_ft_get_flags(db->i->ft_handle, &tflags); + + tflags |= TOKU_DB_KEYCMP_BUILTIN; + toku_ft_set_flags(db->i->ft_handle, tflags); + db->i->key_compare_was_set = true; + } + return r; +} + +int toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, uint32_t flags, int mode) { //Set comparison functions if not yet set. HANDLE_READ_ONLY_TXN(txn); if (!db->i->key_compare_was_set && db->dbenv->i->bt_compare) { @@ -474,9 +494,9 @@ toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, uint32_t fla int r = toku_ft_handle_open(ft_handle, iname_in_env, is_db_create, is_db_excl, db->dbenv->i->cachetable, - txn ? db_txn_struct_i(txn)->tokutxn : NULL_TXN); + txn ? db_txn_struct_i(txn)->tokutxn : nullptr); if (r != 0) { - goto error_cleanup; + goto out; } // if the dictionary was opened as a blackhole, mark the @@ -497,26 +517,27 @@ toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, uint32_t fla .txn = txn, .ft_handle = db->i->ft_handle, }; - db->i->lt = db->dbenv->i->ltm.get_lt( - db->i->dict_id, - db->cmp_descriptor, - toku_ft_get_bt_compare(db->i->ft_handle), - &on_create_extra); + db->i->lt = db->dbenv->i->ltm.get_lt(db->i->dict_id, + toku_ft_get_comparator(db->i->ft_handle), + &on_create_extra); if (db->i->lt == nullptr) { r = errno; - if (r == 0) + if (r == 0) { r = EINVAL; - goto error_cleanup; + } + goto out; } } - return 0; + r = 0; -error_cleanup: - db->i->dict_id = DICTIONARY_ID_NONE; - db->i->opened = 0; - if (db->i->lt) { - db->dbenv->i->ltm.release_lt(db->i->lt); - db->i->lt = NULL; +out: + if (r != 0) { + db->i->dict_id = DICTIONARY_ID_NONE; + db->i->opened = 0; + if (db->i->lt) { + db->dbenv->i->ltm.release_lt(db->i->lt); + db->i->lt = nullptr; + } } return r; } @@ -565,11 +586,12 @@ toku_db_change_descriptor(DB *db, DB_TXN* txn, const DBT* descriptor, uint32_t f HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); int r = 0; TOKUTXN ttxn = txn ? 
db_txn_struct_i(txn)->tokutxn : NULL; - DBT old_descriptor; bool is_db_hot_index = ((flags & DB_IS_HOT_INDEX) != 0); bool update_cmp_descriptor = ((flags & DB_UPDATE_CMP_DESCRIPTOR) != 0); - toku_init_dbt(&old_descriptor); + DBT old_descriptor_dbt; + toku_init_dbt(&old_descriptor_dbt); + if (!db_opened(db) || !descriptor || (descriptor->size>0 && !descriptor->data)){ r = EINVAL; goto cleanup; @@ -582,23 +604,12 @@ toku_db_change_descriptor(DB *db, DB_TXN* txn, const DBT* descriptor, uint32_t f if (r != 0) { goto cleanup; } } - // TODO: use toku_clone_dbt(&old-descriptor, db->descriptor); - old_descriptor.size = db->descriptor->dbt.size; - old_descriptor.data = toku_memdup(db->descriptor->dbt.data, db->descriptor->dbt.size); - - toku_ft_change_descriptor( - db->i->ft_handle, - &old_descriptor, - descriptor, - true, - ttxn, - update_cmp_descriptor - ); + toku_clone_dbt(&old_descriptor_dbt, db->descriptor->dbt); + toku_ft_change_descriptor(db->i->ft_handle, &old_descriptor_dbt, descriptor, + true, ttxn, update_cmp_descriptor); cleanup: - if (old_descriptor.data) { - toku_free(old_descriptor.data); - } + toku_destroy_dbt(&old_descriptor_dbt); return r; } @@ -713,6 +724,15 @@ toku_db_get_fanout(DB *db, unsigned int *fanout) { } static int +toku_db_set_memcmp_magic(DB *db, uint8_t magic) { + HANDLE_PANICKED_DB(db); + if (db_opened(db)) { + return EINVAL; + } + return toku_ft_handle_set_memcmp_magic(db->i->ft_handle, magic); +} + +static int toku_db_get_fractal_tree_info64(DB *db, uint64_t *num_blocks_allocated, uint64_t *num_blocks_in_use, uint64_t *size_allocated, uint64_t *size_in_use) { HANDLE_PANICKED_DB(db); struct ftinfo64 ftinfo; @@ -950,7 +970,7 @@ struct last_key_extra { }; static int -db_get_last_key_callback(ITEMLEN keylen, bytevec key, ITEMLEN vallen UU(), bytevec val UU(), void *extra, bool lock_only) { +db_get_last_key_callback(uint32_t keylen, const void *key, uint32_t vallen UU(), const void *val UU(), void *extra, bool lock_only) { if (!lock_only) { DBT keydbt; toku_fill_dbt(&keydbt, key, keylen); @@ -1109,6 +1129,7 @@ toku_db_create(DB ** db, DB_ENV * env, uint32_t flags) { USDB(change_compression_method); USDB(set_fanout); USDB(get_fanout); + USDB(set_memcmp_magic); USDB(change_fanout); USDB(set_flags); USDB(get_flags); @@ -1221,36 +1242,14 @@ load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new int locked_load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], char * new_inames_in_env[/*N*/], LSN *load_lsn, bool mark_as_loader) { - int ret, r; + int r; HANDLE_READ_ONLY_TXN(txn); - DB_TXN *child_txn = NULL; - int using_txns = env->i->open_flags & DB_INIT_TXN; - if (using_txns) { - ret = toku_txn_begin(env, txn, &child_txn, 0); - invariant_zero(ret); - } - // cannot begin a checkpoint toku_multi_operation_client_lock(); - r = load_inames(env, child_txn, N, dbs, (const char **) new_inames_in_env, load_lsn, mark_as_loader); + r = load_inames(env, txn, N, dbs, (const char **) new_inames_in_env, load_lsn, mark_as_loader); toku_multi_operation_client_unlock(); - if (using_txns) { - if (r == 0) { - ret = locked_txn_commit(child_txn, DB_TXN_NOSYNC); - invariant_zero(ret); - } else { - ret = locked_txn_abort(child_txn); - invariant_zero(ret); - for (int i = 0; i < N; i++) { - if (new_inames_in_env[i]) { - toku_free(new_inames_in_env[i]); - new_inames_in_env[i] = NULL; - } - } - } - } return r; } diff --git a/storage/tokudb/ft-index/src/ydb_db.h b/storage/tokudb/ft-index/src/ydb_db.h index 54f0d178731..edbc72cb0d3 100644 --- 
a/storage/tokudb/ft-index/src/ydb_db.h +++ b/storage/tokudb/ft-index/src/ydb_db.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,8 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef TOKU_YDB_DB_H -#define TOKU_YDB_DB_H +#pragma once #include <ft/ft.h> @@ -128,11 +127,11 @@ static inline int db_opened(DB *db) { return db->i->opened != 0; } -static inline ft_compare_func -toku_db_get_compare_fun(DB* db) { - return toku_ft_get_bt_compare(db->i->ft_handle); +static inline const toku::comparator &toku_db_get_comparator(DB *db) { + return toku_ft_get_comparator(db->i->ft_handle); } +int toku_db_use_builtin_key_cmp(DB *db); int toku_db_pre_acquire_fileops_lock(DB *db, DB_TXN *txn); int toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname, uint32_t flags, int mode); int toku_db_pre_acquire_table_lock(DB *db, DB_TXN *txn); @@ -173,5 +172,3 @@ toku_db_destruct_autotxn(DB_TXN *txn, int r, bool changed) { } return r; } - -#endif /* TOKU_YDB_DB_H */ diff --git a/storage/tokudb/ft-index/src/ydb_env_func.cc b/storage/tokudb/ft-index/src/ydb_env_func.cc index 5247e699a23..714fad74ec5 100644 --- a/storage/tokudb/ft-index/src/ydb_env_func.cc +++ b/storage/tokudb/ft-index/src/ydb_env_func.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,12 +94,12 @@ PATENT RIGHTS GRANT: #include <memory.h> #include <db.h> +#include <ft/cachetable/checkpoint.h> #include <ft/ft.h> #include <ft/ft-ops.h> #include <ft/ft-flusher.h> -#include <ft/checkpoint.h> -#include <ft/recover.h> -#include <ft/ftloader.h> +#include <ft/logger/recover.h> +#include <ft/loader/loader.h> #include "ydb_env_func.h" diff --git a/storage/tokudb/ft-index/src/ydb_env_func.h b/storage/tokudb/ft-index/src/ydb_env_func.h index cf193b64216..2fb0c202f90 100644 --- a/storage/tokudb/ft-index/src/ydb_env_func.h +++ b/storage/tokudb/ft-index/src/ydb_env_func.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,10 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-// This file defines the public interface to the ydb library - -#if !defined(TOKU_YDB_ENV_FUNC_H) -#define TOKU_YDB_ENV_FUNC_H extern void (*checkpoint_callback_f)(void*); extern void * checkpoint_callback_extra; @@ -105,5 +103,3 @@ void setup_dlmalloc(void) __attribute__((__visibility__("default"))); // Test-only function void toku_env_increase_last_xid(DB_ENV *env, uint64_t increment); - -#endif diff --git a/storage/tokudb/ft-index/src/ydb_lib.cc b/storage/tokudb/ft-index/src/ydb_lib.cc index 11847788661..12742cad5c2 100644 --- a/storage/tokudb/ft-index/src/ydb_lib.cc +++ b/storage/tokudb/ft-index/src/ydb_lib.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -98,14 +98,12 @@ PATENT RIGHTS GRANT: #if defined(__GNUC__) -static void __attribute__((constructor)) libtokudb_init(void) { - // printf("%s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__); +static void __attribute__((constructor)) libtokuft_init(void) { int r = toku_ydb_init(); assert(r==0); } -static void __attribute__((destructor)) libtokudb_destroy(void) { - // printf("%s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__); +static void __attribute__((destructor)) libtokuft_destroy(void) { toku_ydb_destroy(); } diff --git a/storage/tokudb/ft-index/src/ydb_load.h b/storage/tokudb/ft-index/src/ydb_load.h index 6496a92eeec..c815969a97c 100644 --- a/storage/tokudb/ft-index/src/ydb_load.h +++ b/storage/tokudb/ft-index/src/ydb_load.h @@ -1,8 +1,6 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: #ident "$Id$" -#ifndef YDB_LOAD_H -#define YDB_LOAD_H /* COPYING CONDITIONS NOTICE: @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,7 +87,7 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2010-2013 Tokutek Inc. All rights reserved." +#pragma once /* ydb functions used by loader */ @@ -113,5 +111,3 @@ int locked_load_inames(DB_ENV * env, char * new_inames_in_env[/*N*/], /* out */ LSN *load_lsn, bool mark_as_loader); - -#endif diff --git a/storage/tokudb/ft-index/src/ydb_row_lock.cc b/storage/tokudb/ft-index/src/ydb_row_lock.cc index db5548a00c3..40cafd0e331 100644 --- a/storage/tokudb/ft-index/src/ydb_row_lock.cc +++ b/storage/tokudb/ft-index/src/ydb_row_lock.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -144,11 +144,11 @@ static void db_txn_note_row_lock(DB *db, DB_TXN *txn, const DBT *left_key, const } // add a new lock range to this txn's row lock buffer - size_t old_num_bytes = ranges.buffer->get_num_bytes(); + size_t old_mem_size = ranges.buffer->total_memory_size(); ranges.buffer->append(left_key, right_key); - size_t new_num_bytes = ranges.buffer->get_num_bytes(); - invariant(new_num_bytes > old_num_bytes); - lt->get_manager()->note_mem_used(new_num_bytes - old_num_bytes); + size_t new_mem_size = ranges.buffer->total_memory_size(); + invariant(new_mem_size > old_mem_size); + lt->get_manager()->note_mem_used(new_mem_size - old_mem_size); toku_mutex_unlock(&db_txn_struct_i(txn)->txn_mutex); } @@ -201,17 +201,16 @@ void toku_db_txn_escalate_callback(TXNID txnid, const toku::locktree *lt, const // // We could theoretically steal the memory from the caller instead of copying // it, but it's simpler to have a callback API that doesn't transfer memory ownership. - lt->get_manager()->note_mem_released(ranges.buffer->get_num_bytes()); + lt->get_manager()->note_mem_released(ranges.buffer->total_memory_size()); ranges.buffer->destroy(); ranges.buffer->create(); - toku::range_buffer::iterator iter; + toku::range_buffer::iterator iter(&buffer); toku::range_buffer::iterator::record rec; - iter.create(&buffer); while (iter.current(&rec)) { ranges.buffer->append(rec.get_left_key(), rec.get_right_key()); iter.next(); } - lt->get_manager()->note_mem_used(ranges.buffer->get_num_bytes()); + lt->get_manager()->note_mem_used(ranges.buffer->total_memory_size()); } else { // In rare cases, we may not find the associated locktree, because we are // racing with the transaction trying to add this locktree to the lt map @@ -315,7 +314,7 @@ void toku_db_release_lt_key_ranges(DB_TXN *txn, txn_lt_key_ranges *ranges) { // release all of the locks this txn has ever successfully // acquired and stored in the range buffer for this locktree lt->release_locks(txnid, ranges->buffer); - lt->get_manager()->note_mem_released(ranges->buffer->get_num_bytes()); + lt->get_manager()->note_mem_released(ranges->buffer->total_memory_size()); ranges->buffer->destroy(); toku_free(ranges->buffer); diff --git a/storage/tokudb/ft-index/src/ydb_row_lock.h b/storage/tokudb/ft-index/src/ydb_row_lock.h index 2a1a4ffb5c8..2c3a10d92d6 100644 --- a/storage/tokudb/ft-index/src/ydb_row_lock.h +++ b/storage/tokudb/ft-index/src/ydb_row_lock.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,8 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#ifndef TOKU_YDB_ROW_LOCK_H -#define TOKU_YDB_ROW_LOCK_H +#pragma once #include <ydb-internal.h> @@ -113,5 +112,3 @@ int toku_db_get_point_write_lock(DB *db, DB_TXN *txn, const DBT *key); void toku_db_grab_write_lock(DB *db, DBT *key, TOKUTXN tokutxn); void toku_db_release_lt_key_ranges(DB_TXN *txn, txn_lt_key_ranges *ranges); - -#endif /* TOKU_YDB_ROW_LOCK_H */ diff --git a/storage/tokudb/ft-index/src/ydb_txn.cc b/storage/tokudb/ft-index/src/ydb_txn.cc index 6f1105412d7..b6b8e154c6f 100644 --- a/storage/tokudb/ft-index/src/ydb_txn.cc +++ b/storage/tokudb/ft-index/src/ydb_txn.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,15 +89,17 @@ PATENT RIGHTS GRANT: #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "$Id$" -#include <toku_race_tools.h> #include <db.h> -#include <ft/txn_manager.h> -#include <ft/log_header.h> -#include <ft/checkpoint.h> +#include <portability/toku_race_tools.h> #include <portability/toku_atomic.h> +#include <ft/cachetable/checkpoint.h> +#include <ft/log_header.h> +#include <ft/txn/txn_manager.h> + + #include "ydb-internal.h" #include "ydb_txn.h" #include "ydb_row_lock.h" @@ -205,12 +207,6 @@ cleanup: return r; } -static uint32_t toku_txn_id(DB_TXN * txn) { - HANDLE_PANICKED_ENV(txn->mgrp); - abort(); - return (uint32_t) -1; -} - static int toku_txn_abort(DB_TXN * txn, TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra) { HANDLE_PANICKED_ENV(txn->mgrp); @@ -387,6 +383,44 @@ static uint64_t locked_txn_get_client_id(DB_TXN *txn) { return toku_txn_get_client_id(db_txn_struct_i(txn)->tokutxn); } +static int toku_txn_discard(DB_TXN *txn, uint32_t flags) { + // check parameters + if (flags != 0) + return EINVAL; + TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn; + if (toku_txn_get_state(ttxn) != TOKUTXN_PREPARING) + return EINVAL; + + bool low_priority; + if (toku_is_big_tokutxn(ttxn)) { + low_priority = true; + toku_low_priority_multi_operation_client_lock(); + } else { + low_priority = false; + toku_multi_operation_client_lock(); + } + + // discard + toku_txn_discard_txn(ttxn); + + // complete + toku_txn_complete_txn(ttxn); + + // release locks + toku_txn_release_locks(txn); + + if (low_priority) { + toku_low_priority_multi_operation_client_unlock(); + } else { + toku_multi_operation_client_unlock(); + } + + // destroy + toku_txn_destroy(txn); + + return 0; +} + static inline void txn_func_init(DB_TXN *txn) { #define STXN(name) txn->name = locked_txn_ ## name STXN(abort); @@ -400,8 +434,8 @@ static inline void txn_func_init(DB_TXN *txn) { #define SUTXN(name) txn->name = toku_txn_ ## name SUTXN(prepare); SUTXN(xa_prepare); + SUTXN(discard); #undef SUTXN - txn->id = toku_txn_id; txn->id64 = toku_txn_id64; } diff --git a/storage/tokudb/ft-index/src/ydb_txn.h b/storage/tokudb/ft-index/src/ydb_txn.h index 454b6578e9f..a2e5a3b09e2 100644 --- a/storage/tokudb/ft-index/src/ydb_txn.h +++ b/storage/tokudb/ft-index/src/ydb_txn.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -86,12 +86,10 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -// This file defines the public interface to the ydb library - -#if !defined(TOKU_YDB_TXN_H) -#define TOKU_YDB_TXN_H // begin, commit, and abort use the multi operation lock // internally to synchronize with begin checkpoint. callers @@ -112,5 +110,3 @@ bool toku_is_big_tokutxn(TOKUTXN tokutxn); // Test-only function extern "C" void toku_increase_last_xid(DB_ENV *env, uint64_t increment) __attribute__((__visibility__("default"))); - -#endif diff --git a/storage/tokudb/ft-index/src/ydb_write.cc b/storage/tokudb/ft-index/src/ydb_write.cc index 4826e418ab5..77daf4e6793 100644 --- a/storage/tokudb/ft-index/src/ydb_write.cc +++ b/storage/tokudb/ft-index/src/ydb_write.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -93,7 +93,7 @@ PATENT RIGHTS GRANT: #include "ydb-internal.h" #include "indexer.h" #include <ft/log_header.h> -#include <ft/checkpoint.h> +#include <ft/cachetable/checkpoint.h> #include "ydb_row_lock.h" #include "ydb_write.h" #include "ydb_db.h" @@ -106,7 +106,7 @@ static YDB_WRITE_LAYER_STATUS_S ydb_write_layer_status; #endif #define STATUS_VALUE(x) ydb_write_layer_status.status[x].value.num -#define STATUS_INIT(k,c,t,l,inc) TOKUDB_STATUS_INIT(ydb_write_layer_status, k, c, t, l, inc) +#define STATUS_INIT(k,c,t,l,inc) TOKUFT_STATUS_INIT(ydb_write_layer_status, k, c, t, l, inc) static void ydb_write_layer_status_init (void) { @@ -253,6 +253,30 @@ toku_db_del(DB *db, DB_TXN *txn, DBT *key, uint32_t flags, bool holds_mo_lock) { return r; } +static int +db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, int flags, bool do_log) { + int r = 0; + bool unique = false; + enum ft_msg_type type = FT_INSERT; + if (flags == DB_NOOVERWRITE) { + unique = true; + } else if (flags == DB_NOOVERWRITE_NO_ERROR) { + type = FT_INSERT_NO_OVERWRITE; + } else if (flags != 0) { + // All other non-zero flags are unsupported + r = EINVAL; + } + if (r == 0) { + TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : nullptr; + if (unique) { + r = toku_ft_insert_unique(db->i->ft_handle, key, val, ttxn, do_log); + } else { + toku_ft_maybe_insert(db->i->ft_handle, key, val, ttxn, false, ZERO_LSN, do_log, type); + } + invariant(r == DB_KEYEXIST || r == 0); + } + return r; +} int toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_mo_lock) { @@ -265,25 +289,16 @@ toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_ flags &= ~lock_flags; r = db_put_check_size_constraints(db, key, val); - if (r == 0) { - //Do any checking required by the flags. - r = db_put_check_overwrite_constraint(db, txn, key, lock_flags, flags); - } - //Do locking if necessary. Do not grab the lock again if this DB had a unique - //check performed because the lock was already grabbed by its cursor callback. + + //Do locking if necessary. 
bool do_locking = (bool)(db->i->lt && !(lock_flags&DB_PRELOCKED_WRITE)); - if (r == 0 && do_locking && !(flags & DB_NOOVERWRITE)) { + if (r == 0 && do_locking) { r = toku_db_get_point_write_lock(db, txn, key); } if (r == 0) { //Insert into the ft. - TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL; - enum ft_msg_type type = FT_INSERT; - if (flags==DB_NOOVERWRITE_NO_ERROR) { - type = FT_INSERT_NO_OVERWRITE; - } if (!holds_mo_lock) toku_multi_operation_client_lock(); - toku_ft_maybe_insert(db->i->ft_handle, key, val, ttxn, false, ZERO_LSN, true, type); + r = db_put(db, txn, key, val, flags, true); if (!holds_mo_lock) toku_multi_operation_client_unlock(); } @@ -635,9 +650,11 @@ log_put_multiple(DB_TXN *txn, DB *src_db, const DBT *src_key, const DBT *src_val } } +// Requires: If remaining_flags is non-null, this function performs any required uniqueness checks +// Otherwise, the caller is responsible. static int -do_put_multiple(DB_TXN *txn, uint32_t num_dbs, DB *db_array[], DBT_ARRAY keys[], DBT_ARRAY vals[], DB *src_db, const DBT *src_key, bool indexer_shortcut) { - TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn; +do_put_multiple(DB_TXN *txn, uint32_t num_dbs, DB *db_array[], DBT_ARRAY keys[], DBT_ARRAY vals[], uint32_t *remaining_flags, DB *src_db, const DBT *src_key, bool indexer_shortcut) { + int r = 0; for (uint32_t which_db = 0; which_db < num_dbs; which_db++) { DB *db = db_array[which_db]; @@ -666,16 +683,21 @@ do_put_multiple(DB_TXN *txn, uint32_t num_dbs, DB *db_array[], DBT_ARRAY keys[], } if (do_put) { for (uint32_t i = 0; i < keys[which_db].size; i++) { - // if db is being indexed by an indexer, then put into that db if the src key is to the left or equal to the - // indexers cursor. we have to get the src_db from the indexer and find it in the db_array. - toku_ft_maybe_insert(db->i->ft_handle, - &keys[which_db].dbts[i], &vals[which_db].dbts[i], - ttxn, false, ZERO_LSN, false, FT_INSERT); + int flags = 0; + if (remaining_flags != nullptr) { + flags = remaining_flags[which_db]; + invariant(!(flags & DB_NOOVERWRITE_NO_ERROR)); + } + r = db_put(db, txn, &keys[which_db].dbts[i], &vals[which_db].dbts[i], flags, false); + if (r != 0) { + goto done; + } } } } } - return 0; +done: + return r; } static int @@ -754,20 +776,14 @@ env_put_multiple_internal( r = db_put_check_size_constraints(db, &put_key, &put_val); if (r != 0) goto cleanup; - //Check overwrite constraints - r = db_put_check_overwrite_constraint(db, txn, - &put_key, - lock_flags[which_db], remaining_flags[which_db]); - if (r != 0) goto cleanup; if (remaining_flags[which_db] == DB_NOOVERWRITE_NO_ERROR) { //put_multiple does not support delaying the no error, since we would //have to log the flag in the put_multiple. r = EINVAL; goto cleanup; } - //Do locking if necessary. Do not grab the lock again if this DB had a unique - //check performed because the lock was already grabbed by its cursor callback. - if (db->i->lt && !(lock_flags[which_db] & DB_PRELOCKED_WRITE) && !(remaining_flags[which_db] & DB_NOOVERWRITE)) { + //Do locking if necessary. 
+ if (db->i->lt && !(lock_flags[which_db] & DB_PRELOCKED_WRITE)) { //Needs locking r = toku_db_get_point_write_lock(db, txn, &put_key); if (r != 0) goto cleanup; @@ -790,8 +806,10 @@ env_put_multiple_internal( } } toku_multi_operation_client_lock(); - log_put_multiple(txn, src_db, src_key, src_val, num_dbs, fts); - r = do_put_multiple(txn, num_dbs, db_array, put_keys, put_vals, src_db, src_key, indexer_shortcut); + r = do_put_multiple(txn, num_dbs, db_array, put_keys, put_vals, remaining_flags, src_db, src_key, indexer_shortcut); + if (r == 0) { + log_put_multiple(txn, src_db, src_key, src_val, num_dbs, fts); + } toku_multi_operation_client_unlock(); if (indexer_lock_taken) { toku_indexer_unlock(indexer); @@ -933,8 +951,8 @@ env_update_multiple(DB_ENV *env, DB *src_db, DB_TXN *txn, } else if (idx_old == old_keys.size) { cmp = +1; } else { - ft_compare_func cmpfun = toku_db_get_compare_fun(db); - cmp = cmpfun(db, curr_old_key, curr_new_key); + const toku::comparator &cmpfn = toku_db_get_comparator(db); + cmp = cmpfn(curr_old_key, curr_new_key); } bool do_del = false; @@ -1075,7 +1093,7 @@ env_update_multiple(DB_ENV *env, DB *src_db, DB_TXN *txn, // recovery so we don't end up losing data. // So unlike env->put_multiple, we ONLY log a 'put_multiple' log entry. log_put_multiple(txn, src_db, new_src_key, new_src_data, n_put_dbs, put_fts); - r = do_put_multiple(txn, n_put_dbs, put_dbs, put_key_arrays, put_val_arrays, src_db, new_src_key, indexer_shortcut); + r = do_put_multiple(txn, n_put_dbs, put_dbs, put_key_arrays, put_val_arrays, nullptr, src_db, new_src_key, indexer_shortcut); } toku_multi_operation_client_unlock(); if (indexer_lock_taken) { diff --git a/storage/tokudb/ft-index/src/ydb_write.h b/storage/tokudb/ft-index/src/ydb_write.h index a890089d895..00c4ab4da5e 100644 --- a/storage/tokudb/ft-index/src/ydb_write.h +++ b/storage/tokudb/ft-index/src/ydb_write.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,11 +88,8 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-// This file defines the public interface to the ydb library - -#if !defined(TOKU_YDB_WRITE_H) -#define TOKU_YDB_WRITE_H +#pragma once typedef enum { YDB_LAYER_NUM_INSERTS = 0, @@ -119,7 +116,6 @@ typedef struct { void ydb_write_layer_get_status(YDB_WRITE_LAYER_STATUS statp); - int toku_db_del(DB *db, DB_TXN *txn, DBT *key, uint32_t flags, bool holds_mo_lock); int toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_mo_lock); int autotxn_db_del(DB* db, DB_TXN* txn, DBT* key, uint32_t flags); @@ -159,8 +155,3 @@ int env_update_multiple( uint32_t num_keys, DBT_ARRAY keys[], uint32_t num_vals, DBT_ARRAY vals[] ); - - - - -#endif diff --git a/storage/tokudb/ft-index/tools/CMakeLists.txt b/storage/tokudb/ft-index/tools/CMakeLists.txt index 67763535920..4ed0cb4cbdc 100644 --- a/storage/tokudb/ft-index/tools/CMakeLists.txt +++ b/storage/tokudb/ft-index/tools/CMakeLists.txt @@ -1,11 +1,20 @@ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE DONT_DEPRECATE_ERRNO) -set(utils tokudb_gen tokudb_load tokudb_dump) -foreach(util ${utils}) - add_executable(${util} ${util}.cc) - set_target_properties(${util} PROPERTIES - COMPILE_DEFINITIONS "IS_TDB=1;USE_TDB=1;TDB_IS_STATIC=1") - target_link_libraries(${util} ${LIBTOKUDB}_static ft_static z lzma ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS}) - - add_space_separated_property(TARGET ${util} COMPILE_FLAGS -fvisibility=hidden) -endforeach(util) +set(tools tokudb_dump tokuftdump tdb_logprint tdb-recover ftverify ba_replay) +foreach(tool ${tools}) + add_executable(${tool} ${tool}.cc) + add_dependencies(${tool} install_tdb_h) + target_link_libraries(${tool} ${LIBTOKUDB}_static ft_static z lzma ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS}) + + add_space_separated_property(TARGET ${tool} COMPILE_FLAGS -fvisibility=hidden) +endforeach(tool) + +# link in math.h library just for this tool. +target_link_libraries(ftverify m) + +install( + TARGETS tokuftdump + DESTINATION bin + COMPONENT Server + ) + diff --git a/storage/tokudb/ft-index/tools/ba_replay.cc b/storage/tokudb/ft-index/tools/ba_replay.cc new file mode 100644 index 00000000000..e274ac0a1e8 --- /dev/null +++ b/storage/tokudb/ft-index/tools/ba_replay.cc @@ -0,0 +1,679 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. 
+ +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. 
+*/ + +// Replay a block allocator trace against different strategies and compare +// the results + +#include <db.h> + +#include <getopt.h> +#include <math.h> +#include <stdio.h> +#include <string.h> + +#include <map> +#include <set> +#include <string> +#include <sstream> +#include <vector> + +#include <portability/memory.h> +#include <portability/toku_assert.h> +#include <portability/toku_stdlib.h> + +#include "ft/serialize/block_allocator.h" + +using std::map; +using std::set; +using std::string; +using std::vector; + +static int verbose = false; + +static void ba_replay_assert(bool pred, const char *msg, const char *line, int line_num) { + if (!pred) { + fprintf(stderr, "%s, line (#%d): %s\n", msg, line_num, line); + abort(); + } +} + +static char *trim_whitespace(char *line) { + // skip leading whitespace + while (isspace(*line)) { + line++; + } + return line; +} + +static int64_t parse_number(char **ptr, int line_num, int base) { + *ptr = trim_whitespace(*ptr); + char *line = *ptr; + + char *new_ptr; + int64_t n = strtoll(line, &new_ptr, base); + ba_replay_assert(n >= 0, "malformed trace (bad numeric token)", line, line_num); + ba_replay_assert(new_ptr > *ptr, "malformed trace (missing numeric token)", line, line_num); + *ptr = new_ptr; + return n; +} + +static uint64_t parse_uint64(char **ptr, int line_num) { + int64_t n = parse_number(ptr, line_num, 10); + // we happen to know that the uint64's we deal with will + // take less than 63 bits (they come from pointers) + return static_cast<uint64_t>(n); +} + +static string parse_token(char **ptr, int line_num) { + *ptr = trim_whitespace(*ptr); + char *line = *ptr; + + // parse the first token, which represents the traced function + char token[64]; + int r = sscanf(*ptr, "%64s", token); + ba_replay_assert(r == 1, "malformed trace (missing string token)", line, line_num); + *ptr += strlen(token); + return string(token); +} + +static block_allocator::blockpair parse_blockpair(char **ptr, int line_num) { + *ptr = trim_whitespace(*ptr); + char *line = *ptr; + + uint64_t offset, size; + int bytes_read; + int r = sscanf(line, "[%" PRIu64 " %" PRIu64 "]%n", &offset, &size, &bytes_read); + ba_replay_assert(r == 2, "malformed trace (bad offset/size pair)", line, line_num); + *ptr += bytes_read; + return block_allocator::blockpair(offset, size); +} + +static char *strip_newline(char *line, bool *found) { + char *ptr = strchr(line, '\n'); + if (ptr != nullptr) { + if (found != nullptr) { + *found = true; + } + *ptr = '\0'; + } + return line; +} + +static char *read_trace_line(FILE *file) { + const int buf_size = 4096; + char buf[buf_size]; + std::stringstream ss; + while (true) { + if (fgets(buf, buf_size, file) == nullptr) { + break; + } + bool has_newline = false; + ss << strip_newline(buf, &has_newline); + if (has_newline) { + // end of the line, we're done out + break; + } + } + std::string s = ss.str(); + return s.size() ? toku_strdup(s.c_str()) : nullptr; +} + +static vector<string> canonicalize_trace_from(FILE *file) { + // new trace, canonicalized from a raw trace + vector<string> canonicalized_trace; + + // raw allocator id -> canonical allocator id + // + // keeps track of allocators that were created as part of the trace, + // and therefore will be part of the canonicalized trace. 
+ uint64_t allocator_id_seq_num = 0; + map<uint64_t, uint64_t> allocator_ids; + + // allocated offset -> allocation seq num + // + uint64_t allocation_seq_num = 0; + static const uint64_t ASN_NONE = (uint64_t) -1; + typedef map<uint64_t, uint64_t> offset_seq_map; + + // raw allocator id -> offset_seq_map that tracks its allocations + map<uint64_t, offset_seq_map> offset_to_seq_num_maps; + + int line_num = 0; + char *line; + while ((line = read_trace_line(file)) != nullptr) { + line_num++; + char *ptr = line; + + string fn = parse_token(&ptr, line_num); + int64_t allocator_id = parse_number(&ptr, line_num, 16); + + std::stringstream ss; + if (fn.find("ba_trace_create") != string::npos) { + ba_replay_assert(allocator_ids.count(allocator_id) == 0, "corrupted trace: double create", line, line_num); + ba_replay_assert(fn == "ba_trace_create" || fn == "ba_trace_create_from_blockpairs", + "corrupted trace: bad fn", line, line_num); + + // we only convert the allocator_id to an allocator_id_seq_num + // in the canonical trace and leave the rest of the line as-is. + allocator_ids[allocator_id] = allocator_id_seq_num; + ss << fn << ' ' << allocator_id_seq_num << ' ' << trim_whitespace(ptr) << std::endl; + allocator_id_seq_num++; + + // First, read passed the reserve / alignment values. + (void) parse_uint64(&ptr, line_num); + (void) parse_uint64(&ptr, line_num); + if (fn == "ba_trace_create_from_blockpairs") { + // For each blockpair created by this traceline, add its offset to the offset seq map + // with asn ASN_NONE so that later canonicalizations of `free' know whether to write + // down the asn or the raw offset. + offset_seq_map *map = &offset_to_seq_num_maps[allocator_id]; + while (*trim_whitespace(ptr) != '\0') { + const block_allocator::blockpair bp = parse_blockpair(&ptr, line_num); + (*map)[bp.offset] = ASN_NONE; + } + } + } else { + ba_replay_assert(allocator_ids.count(allocator_id) > 0, "corrupted trace: unknown allocator", line, line_num); + uint64_t canonical_allocator_id = allocator_ids[allocator_id]; + + // this is the map that tracks allocations for this allocator + offset_seq_map *map = &offset_to_seq_num_maps[allocator_id]; + + if (fn == "ba_trace_alloc") { + const uint64_t size = parse_uint64(&ptr, line_num); + const uint64_t heat = parse_uint64(&ptr, line_num); + const uint64_t offset = parse_uint64(&ptr, line_num); + ba_replay_assert(map->count(offset) == 0, "corrupted trace: double alloc", line, line_num); + + // remember that an allocation at `offset' has the current alloc seq num + (*map)[offset] = allocation_seq_num; + + // translate `offset = alloc(size)' to `asn = alloc(size)' + ss << fn << ' ' << canonical_allocator_id << ' ' << size << ' ' << heat << ' ' << allocation_seq_num << std::endl; + allocation_seq_num++; + } else if (fn == "ba_trace_free") { + const uint64_t offset = parse_uint64(&ptr, line_num); + ba_replay_assert(map->count(offset) != 0, "corrupted trace: invalid free", line, line_num); + + // get the alloc seq num for an allcation that occurred at `offset' + const uint64_t asn = (*map)[offset]; + map->erase(offset); + + // if there's an asn, then a corresponding ba_trace_alloc occurred and we should + // write `free(asn)'. otherwise, the blockpair was initialized from create_from_blockpairs + // and we write the original offset. 
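                // For example (hypothetical values): if the raw trace allocated
                // 4096 bytes at offset 1048576 and that allocation was assigned
                // asn 7, a later "ba_trace_free <raw_id> 1048576" line becomes
                // "ba_trace_free_asn <canonical_id> 7"; an offset seeded only by
                // ba_trace_create_from_blockpairs has no asn (ASN_NONE) and is
                // emitted as "ba_trace_free_offset <canonical_id> 1048576".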
+ if (asn != ASN_NONE) { + ss << "ba_trace_free_asn" << ' ' << canonical_allocator_id << ' ' << asn << std::endl; + } else { + ss << "ba_trace_free_offset" << ' ' << canonical_allocator_id << ' ' << offset << std::endl; + } + } else if (fn == "ba_trace_destroy") { + // Remove this allocator from both maps + allocator_ids.erase(allocator_id); + offset_to_seq_num_maps.erase(allocator_id); + + // translate `destroy(ptr_id) to destroy(canonical_id)' + ss << fn << ' ' << canonical_allocator_id << ' ' << std::endl; + } else { + ba_replay_assert(false, "corrupted trace: bad fn", line, line_num); + } + } + canonicalized_trace.push_back(ss.str()); + + toku_free(line); + } + + if (allocator_ids.size() != 0) { + fprintf(stderr, "warning: leaked allocators. this might be ok if the tracing process is still running"); + } + + return canonicalized_trace; +} + +struct streaming_variance_calculator { + int64_t n_samples; + int64_t mean; + int64_t variance; + + // math credit: AoCP, Donald Knuth, '62 + void add_sample(int64_t x) { + n_samples++; + if (n_samples == 1) { + mean = x; + variance = 0; + } else { + int64_t old_mean = mean; + mean = old_mean + ((x - old_mean) / n_samples); + variance = (((n_samples - 1) * variance) + + ((x - old_mean) * (x - mean))) / n_samples; + } + } +}; + +struct canonical_trace_stats { + uint64_t n_lines_replayed; + + uint64_t n_create; + uint64_t n_create_from_blockpairs; + uint64_t n_alloc_hot; + uint64_t n_alloc_cold; + uint64_t n_free; + uint64_t n_destroy; + + struct streaming_variance_calculator alloc_hot_bytes; + struct streaming_variance_calculator alloc_cold_bytes; + + canonical_trace_stats() { + memset(this, 0, sizeof(*this)); + } +}; + +struct fragmentation_report { + TOKU_DB_FRAGMENTATION_S beginning; + TOKU_DB_FRAGMENTATION_S end; + fragmentation_report() { + memset(this, 0, sizeof(*this)); + } + void merge(const struct fragmentation_report &src_report) { + for (int i = 0; i < 2; i++) { + TOKU_DB_FRAGMENTATION_S *dst = i == 0 ? &beginning : &end; + const TOKU_DB_FRAGMENTATION_S *src = i == 0 ? 
&src_report.beginning : &src_report.end; + dst->file_size_bytes += src->file_size_bytes; + dst->data_bytes += src->data_bytes; + dst->data_blocks += src->data_blocks; + dst->checkpoint_bytes_additional += src->checkpoint_bytes_additional; + dst->checkpoint_blocks_additional += src->checkpoint_blocks_additional; + dst->unused_bytes += src->unused_bytes; + dst->unused_blocks += src->unused_blocks; + dst->largest_unused_block += src->largest_unused_block; + } + } +}; + +static void replay_canonicalized_trace(const vector<string> &canonicalized_trace, + block_allocator::allocation_strategy strategy, + map<uint64_t, struct fragmentation_report> *reports, + struct canonical_trace_stats *stats) { + // maps an allocator id to its block allocator + map<uint64_t, block_allocator *> allocator_map; + + // maps allocation seq num to allocated offset + map<uint64_t, uint64_t> seq_num_to_offset; + + for (vector<string>::const_iterator it = canonicalized_trace.begin(); + it != canonicalized_trace.end(); it++) { + const int line_num = stats->n_lines_replayed++; + + char *line = toku_strdup(it->c_str()); + line = strip_newline(line, nullptr); + + char *ptr = trim_whitespace(line); + + // canonical allocator id is in base 10, not 16 + string fn = parse_token(&ptr, line_num); + int64_t allocator_id = parse_number(&ptr, line_num, 10); + + if (fn.find("ba_trace_create") != string::npos) { + const uint64_t reserve_at_beginning = parse_uint64(&ptr, line_num); + const uint64_t alignment = parse_uint64(&ptr, line_num); + ba_replay_assert(allocator_map.count(allocator_id) == 0, + "corrupted canonical trace: double create", line, line_num); + + block_allocator *ba = new block_allocator(); + if (fn == "ba_trace_create") { + ba->create(reserve_at_beginning, alignment); + stats->n_create++; + } else { + ba_replay_assert(fn == "ba_trace_create_from_blockpairs", + "corrupted canonical trace: bad create fn", line, line_num); + vector<block_allocator::blockpair> pairs; + while (*trim_whitespace(ptr) != '\0') { + const block_allocator::blockpair bp = parse_blockpair(&ptr, line_num); + pairs.push_back(bp); + } + ba->create_from_blockpairs(reserve_at_beginning, alignment, &pairs[0], pairs.size()); + stats->n_create_from_blockpairs++; + } + ba->set_strategy(strategy); + + TOKU_DB_FRAGMENTATION_S report; + ba->get_statistics(&report); + (*reports)[allocator_id].beginning = report; + allocator_map[allocator_id] = ba; + } else { + ba_replay_assert(allocator_map.count(allocator_id) > 0, + "corrupted canonical trace: no such allocator", line, line_num); + + block_allocator *ba = allocator_map[allocator_id]; + if (fn == "ba_trace_alloc") { + // replay an `alloc' whose result will be associated with a certain asn + const uint64_t size = parse_uint64(&ptr, line_num); + const uint64_t heat = parse_uint64(&ptr, line_num); + const uint64_t asn = parse_uint64(&ptr, line_num); + ba_replay_assert(seq_num_to_offset.count(asn) == 0, + "corrupted canonical trace: double alloc (asn in use)", line, line_num); + + uint64_t offset; + ba->alloc_block(size, heat, &offset); + seq_num_to_offset[asn] = offset; + heat ? stats->n_alloc_hot++ : stats->n_alloc_cold++; + heat ? 
stats->alloc_hot_bytes.add_sample(size) : stats->alloc_cold_bytes.add_sample(size); + } else if (fn == "ba_trace_free_asn") { + // replay a `free' on a block whose offset is the result of an alloc with an asn + const uint64_t asn = parse_uint64(&ptr, line_num); + ba_replay_assert(seq_num_to_offset.count(asn) == 1, + "corrupted canonical trace: double free (asn unused)", line, line_num); + + const uint64_t offset = seq_num_to_offset[asn]; + ba->free_block(offset); + seq_num_to_offset.erase(asn); + stats->n_free++; + } else if (fn == "ba_trace_free_offset") { + // replay a `free' on a block whose offset was explicitly set during a create_from_blockpairs + const uint64_t offset = parse_uint64(&ptr, line_num); + ba->free_block(offset); + stats->n_free++; + } else if (fn == "ba_trace_destroy") { + TOKU_DB_FRAGMENTATION_S report; + ba->get_statistics(&report); + ba->destroy(); + (*reports)[allocator_id].end = report; + allocator_map.erase(allocator_id); + stats->n_destroy++; + } else { + ba_replay_assert(false, "corrupted canonical trace: bad fn", line, line_num); + } + } + + toku_free(line); + } +} + +static const char *strategy_to_cstring(block_allocator::allocation_strategy strategy) { + switch (strategy) { + case block_allocator::allocation_strategy::BA_STRATEGY_FIRST_FIT: + return "first-fit"; + case block_allocator::allocation_strategy::BA_STRATEGY_BEST_FIT: + return "best-fit"; + case block_allocator::allocation_strategy::BA_STRATEGY_HEAT_ZONE: + return "heat-zone"; + case block_allocator::allocation_strategy::BA_STRATEGY_PADDED_FIT: + return "padded-fit"; + default: + abort(); + } +} + +static block_allocator::allocation_strategy cstring_to_strategy(const char *str) { + if (strcmp(str, "first-fit") == 0) { + return block_allocator::allocation_strategy::BA_STRATEGY_FIRST_FIT; + } + if (strcmp(str, "best-fit") == 0) { + return block_allocator::allocation_strategy::BA_STRATEGY_BEST_FIT; + } + if (strcmp(str, "heat-zone") == 0) { + return block_allocator::allocation_strategy::BA_STRATEGY_HEAT_ZONE; + } + if (strcmp(str, "padded-fit") != 0) { + fprintf(stderr, "bad strategy string: %s\n", str); + abort(); + } + return block_allocator::allocation_strategy::BA_STRATEGY_PADDED_FIT; +} + +static void print_result_verbose(uint64_t allocator_id, + block_allocator::allocation_strategy strategy, + const struct fragmentation_report &report) { + if (report.end.data_bytes + report.end.unused_bytes + + report.beginning.data_bytes + report.beginning.unused_bytes + < 32UL * 1024 * 1024) { + printf(" ...skipping allocator_id %" PRId64 " (total bytes < 32mb)\n", allocator_id); + return; + } + + printf(" allocator_id: %20" PRId64 "\n", allocator_id); + printf(" strategy: %20s\n", strategy_to_cstring(strategy)); + + for (int i = 0; i < 2; i++) { + const TOKU_DB_FRAGMENTATION_S *r = i == 0 ? &report.beginning : &report.end; + printf("%s\n", i == 0 ? 
"BEFORE" : "AFTER"); + + uint64_t total_bytes = r->data_bytes + r->unused_bytes; + uint64_t total_blocks = r->data_blocks + r->unused_blocks; + + // byte statistics + printf(" total bytes: %20" PRId64 "\n", total_bytes); + printf(" used bytes: %20" PRId64 " (%.3lf)\n", r->data_bytes, + static_cast<double>(r->data_bytes) / total_bytes); + printf(" unused bytes: %20" PRId64 " (%.3lf)\n", r->unused_bytes, + static_cast<double>(r->unused_bytes) / total_bytes); + + // block statistics + printf(" total blocks: %20" PRId64 "\n", total_blocks); + printf(" used blocks: %20" PRId64 " (%.3lf)\n", r->data_blocks, + static_cast<double>(r->data_blocks) / total_blocks); + printf(" unused blocks: %20" PRId64 " (%.3lf)\n", r->unused_blocks, + static_cast<double>(r->unused_blocks) / total_blocks); + + // misc + printf(" largest unused: %20" PRId64 "\n", r->largest_unused_block); + } +} + +static void print_result(uint64_t allocator_id, + block_allocator::allocation_strategy strategy, + const struct fragmentation_report &report) { + const TOKU_DB_FRAGMENTATION_S *beginning = &report.beginning; + const TOKU_DB_FRAGMENTATION_S *end = &report.end; + + uint64_t total_beginning_bytes = beginning->data_bytes + beginning->unused_bytes; + uint64_t total_end_bytes = end->data_bytes + end->unused_bytes; + if (total_end_bytes + total_beginning_bytes < 32UL * 1024 * 1024) { + if (verbose) { + printf("\n"); + printf(" ...skipping allocator_id %" PRId64 " (total bytes < 32mb)\n", allocator_id); + } + return; + } + printf("\n"); + if (verbose) { + print_result_verbose(allocator_id, strategy, report); + } else { + printf(" %-15s: allocator %" PRId64 ", %.3lf used bytes (%.3lf before)\n", + strategy_to_cstring(strategy), allocator_id, + static_cast<double>(report.end.data_bytes) / total_end_bytes, + static_cast<double>(report.beginning.data_bytes) / total_beginning_bytes); + } +} + +static int only_aggregate_reports; + +static struct option getopt_options[] = { + { "verbose", no_argument, &verbose, 1 }, + { "only-aggregate-reports", no_argument, &only_aggregate_reports, 1 }, + { "include-strategy", required_argument, nullptr, 'i' }, + { "exclude-strategy", required_argument, nullptr, 'x' }, + { nullptr, 0, nullptr, 0 }, +}; + +int main(int argc, char *argv[]) { + int opt; + set<block_allocator::allocation_strategy> candidate_strategies, excluded_strategies; + while ((opt = getopt_long(argc, argv, "", getopt_options, nullptr)) != -1) { + switch (opt) { + case 0: + break; + case 'i': + candidate_strategies.insert(cstring_to_strategy(optarg)); + break; + case 'x': + excluded_strategies.insert(cstring_to_strategy(optarg)); + break; + case '?': + default: + abort(); + }; + } + // Default to everything if nothing was explicitly included. 
+ if (candidate_strategies.empty()) { + candidate_strategies.insert(block_allocator::allocation_strategy::BA_STRATEGY_FIRST_FIT); + candidate_strategies.insert(block_allocator::allocation_strategy::BA_STRATEGY_BEST_FIT); + candidate_strategies.insert(block_allocator::allocation_strategy::BA_STRATEGY_PADDED_FIT); + candidate_strategies.insert(block_allocator::allocation_strategy::BA_STRATEGY_HEAT_ZONE); + } + // ..but remove anything that was explicitly excluded + for (set<block_allocator::allocation_strategy>::const_iterator it = excluded_strategies.begin(); + it != excluded_strategies.end(); it++) { + candidate_strategies.erase(*it); + } + + // Run the real trace + // + // First, read the raw trace from stdin + vector<string> canonicalized_trace = canonicalize_trace_from(stdin); + + if (!only_aggregate_reports) { + printf("\n"); + printf("Individual reports, by allocator:\n"); + } + + struct canonical_trace_stats stats; + map<block_allocator::allocation_strategy, struct fragmentation_report> reports_by_strategy; + for (set<block_allocator::allocation_strategy>::const_iterator it = candidate_strategies.begin(); + it != candidate_strategies.end(); it++) { + const block_allocator::allocation_strategy strategy(*it); + + // replay the canonicalized trace against the current strategy. + // + // we provided the allocator map so we can gather statistics later + struct canonical_trace_stats dummy_stats; + map<uint64_t, struct fragmentation_report> reports; + replay_canonicalized_trace(canonicalized_trace, strategy, &reports, + // Only need to gather canonical trace stats once + it == candidate_strategies.begin() ? &stats : &dummy_stats); + + struct fragmentation_report aggregate_report; + memset(&aggregate_report, 0, sizeof(aggregate_report)); + for (map<uint64_t, struct fragmentation_report>::iterator rp = reports.begin(); + rp != reports.end(); rp++) { + const struct fragmentation_report &report = rp->second; + aggregate_report.merge(report); + if (!only_aggregate_reports) { + print_result(rp->first, strategy, report); + } + } + reports_by_strategy[strategy] = aggregate_report; + } + + printf("\n"); + printf("Aggregate reports, by strategy:\n"); + + for (map<block_allocator::allocation_strategy, struct fragmentation_report>::iterator it = reports_by_strategy.begin(); + it != reports_by_strategy.end(); it++) { + print_result(0, it->first, it->second); + } + + printf("\n"); + printf("Overall trace stats:\n"); + printf("\n"); + printf(" n_lines_played: %15" PRIu64 "\n", stats.n_lines_replayed); + printf(" n_create: %15" PRIu64 "\n", stats.n_create); + printf(" n_create_from_blockpairs: %15" PRIu64 "\n", stats.n_create_from_blockpairs); + printf(" n_alloc_hot: %15" PRIu64 "\n", stats.n_alloc_hot); + printf(" n_alloc_cold: %15" PRIu64 "\n", stats.n_alloc_cold); + printf(" n_free: %15" PRIu64 "\n", stats.n_free); + printf(" n_destroy: %15" PRIu64 "\n", stats.n_destroy); + printf("\n"); + printf(" avg_alloc_hot: %15" PRIu64 "\n", stats.alloc_hot_bytes.mean); + printf(" stddev_alloc_hot: %15" PRIu64 "\n", (uint64_t) sqrt(stats.alloc_hot_bytes.variance)); + printf(" avg_alloc_cold: %15" PRIu64 "\n", stats.alloc_cold_bytes.mean); + printf(" stddev_alloc_cold: %15" PRIu64 "\n", (uint64_t) sqrt(stats.alloc_cold_bytes.variance)); + printf("\n"); + + return 0; +} diff --git a/storage/tokudb/ft-index/ft/ftverify.cc b/storage/tokudb/ft-index/tools/ftverify.cc index 1b103abd55a..120658b2cb1 100644 --- a/storage/tokudb/ft-index/ft/ftverify.cc +++ b/storage/tokudb/ft-index/tools/ftverify.cc @@ -29,7 +29,7 @@ 
COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,17 +94,19 @@ PATENT RIGHTS GRANT: // fractal tree file, one block at a time. //////////////////////////////////////////////////////////////////// -#include "fttypes.h" -#include "ft-internal.h" -#include "ft_layout_version.h" -#include "block_table.h" -#include "rbuf.h" -#include "sub_block.h" +#include "portability/toku_assert.h" +#include "portability/toku_list.h" +#include "portability/toku_portability.h" -#include <toku_assert.h> -#include <toku_list.h> -#include <toku_portability.h> -#include <util/threadpool.h> +#include "ft/serialize/block_allocator.h" +#include "ft/ft-internal.h" +#include "ft/serialize/ft-serialize.h" +#include "ft/serialize/ft_layout_version.h" +#include "ft/serialize/ft_node-serialize.h" +#include "ft/node.h" +#include "ft/serialize/rbuf.h" +#include "ft/serialize/sub_block.h" +#include "util/threadpool.h" #include <fcntl.h> #include <math.h> @@ -199,7 +201,7 @@ deserialize_headers(int fd, struct ft **h1p, struct ft **h2p) } } { - toku_off_t header_1_off = BLOCK_ALLOCATOR_HEADER_RESERVE; + toku_off_t header_1_off = block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE; r1 = deserialize_ft_from_fd_into_rbuf( fd, header_1_off, @@ -215,7 +217,7 @@ deserialize_headers(int fd, struct ft **h1p, struct ft **h2p) // If either header is too new, the dictionary is unreadable if (r0 == TOKUDB_DICTIONARY_TOO_NEW || r1 == TOKUDB_DICTIONARY_TOO_NEW) { - fprintf(stderr, "This dictionary was created with too new a version of TokuDB. Aborting.\n"); + fprintf(stderr, "This dictionary was created with a version of TokuFT that is too new. Aborting.\n"); abort(); } if (h0_acceptable) { @@ -410,10 +412,8 @@ cleanup: // Passes our check_block() function to be called as we iterate over // the block table. This will print any interesting failures and // update us on our progress. -static void -check_block_table(int fd, BLOCK_TABLE bt, struct ft *h) -{ - int64_t num_blocks = toku_block_get_blocks_in_use_unlocked(bt); +static void check_block_table(int fd, block_table *bt, struct ft *h) { + int64_t num_blocks = bt->get_blocks_in_use_unlocked(); printf("Starting verification of checkpoint containing"); printf(" %" PRId64 " blocks.\n", num_blocks); fflush(stdout); @@ -423,13 +423,11 @@ check_block_table(int fd, BLOCK_TABLE bt, struct ft *h) .blocks_failed = 0, .total_blocks = num_blocks, .h = h }; - int r = 0; - r = toku_blocktable_iterate(bt, - TRANSLATION_CURRENT, - check_block, - &extra, - true, - true); + int r = bt->iterate(block_table::TRANSLATION_CURRENT, + check_block, + &extra, + true, + true); if (r != 0) { // We can print more information here if necessary. 
} @@ -491,11 +489,11 @@ main(int argc, char const * const argv[]) // walk over the block table and check blocks if (h1) { printf("Checking dictionary from header 1.\n"); - check_block_table(dictfd, h1->blocktable, h1); + check_block_table(dictfd, &h1->blocktable, h1); } if (h2) { printf("Checking dictionary from header 2.\n"); - check_block_table(dictfd, h2->blocktable, h2); + check_block_table(dictfd, &h2->blocktable, h2); } if (h1 == NULL && h2 == NULL) { printf("Both headers have a corruption and could not be used.\n"); diff --git a/storage/tokudb/ft-index/tools/parseTraceFiles.py b/storage/tokudb/ft-index/tools/parseTraceFiles.py deleted file mode 100755 index f53ef620111..00000000000 --- a/storage/tokudb/ft-index/tools/parseTraceFiles.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python - -import sys -try: - data = open(sys.argv[1]) -except: - print "Could not open '%s'" % (sys.argv[1][0]) - exit(0) - -ts_factor = 1. -ts_prev = 0. - -threadlist = [] - -for line in data: - line = line.rstrip("\n") - vals = line.split() - [n, tid, ts, funcline] = vals[0:4] - # 'note' is all text following funcline - note = '' - for v in vals[4:-1]: - note += v+' ' - note += vals[-1] - - if ( note == 'calibrate done' ): - ts_factor = float(ts) - ts_prev - print "Factor = ", ts_factor, "("+str(ts_factor/1000000000)[0:4]+"GHz)" - - time = (float(ts)-ts_prev)/ts_factor - - # create a list of threads - # - each thread has a list of <note,time> pairs, where time is the accumulated time for that note - # - search threadlist for thread_id (tid) - # - if found, search corresponding list of <note,time> pairs for the current note - # - if found, update (+=) the time - # - if not found, create a new <note,time> pair - # - if not found, create a new thread,<note,time> entry - found_thread = 0 - for thread in threadlist: - if tid == thread[0]: - found_thread = 1 - notetimelist = thread[1] - found_note = 0 - for notetime in notetimelist: - if note == notetime[0]: - found_note = 1 - notetime[1] += time - break - if found_note == 0: - thread[1].append([note, time]) - break - if found_thread == 0: - notetime = [] - notetime.append([note, time]) - threadlist.append([tid, notetime]) - - ts_prev = float(ts) - -# trim out unneeded -for thread in threadlist: - trimlist = [] - for notetime in thread[1]: - if notetime[0][0:9] == 'calibrate': - trimlist.append(notetime) - for notetime in trimlist: - thread[1].remove(notetime) -print '' - -# sum times to calculate percent (of 100) -total_time = 0 -for thread in threadlist: - for [note, time] in thread[1]: - total_time += time - -print ' thread operation time(sec) percent' -for thread in threadlist: - print 'tid : %5s' % thread[0] - for [note, time] in thread[1]: - print ' %20s %f %5d' % (note, time, 100. * time/total_time) - - - diff --git a/storage/tokudb/ft-index/ft/tdb-recover.cc b/storage/tokudb/ft-index/tools/tdb-recover.cc index 0d3fe0c75be..8f185bedb04 100644 --- a/storage/tokudb/ft-index/ft/tdb-recover.cc +++ b/storage/tokudb/ft-index/tools/tdb-recover.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -95,18 +95,15 @@ PATENT RIGHTS GRANT: // cd ../src/tests/tmpdir // ../../../ft/recover ../dir.test_log2.c.tdb -#include "ft-ops.h" -#include "recover.h" +#include "ft/ft-ops.h" +#include "ft/logger/recover.h" static int recovery_main(int argc, const char *const argv[]); -int -main(int argc, const char *const argv[]) { - { - int rr = toku_ft_layer_init(); - assert(rr==0); - } - int r = recovery_main(argc, argv); +int main(int argc, const char *const argv[]) { + int r = toku_ft_layer_init(); + assert(r == 0); + r = recovery_main(argc, argv); toku_ft_layer_destroy(); return r; } @@ -123,11 +120,11 @@ int recovery_main (int argc, const char *const argv[]) { return(1); } - int r = tokudb_recover(NULL, - NULL_prepared_txn_callback, - NULL_keep_cachetable_callback, - NULL_logger, - data_dir, log_dir, NULL, NULL, NULL, NULL, 0); + int r = tokuft_recover(nullptr, + nullptr, + nullptr, + nullptr, + data_dir, log_dir, nullptr, nullptr, nullptr, nullptr, 0); if (r!=0) { fprintf(stderr, "Recovery failed\n"); return(1); diff --git a/storage/tokudb/ft-index/ft/tdb_logprint.cc b/storage/tokudb/ft-index/tools/tdb_logprint.cc index c221a88e36c..1dd7581b9f5 100644 --- a/storage/tokudb/ft-index/ft/tdb_logprint.cc +++ b/storage/tokudb/ft-index/tools/tdb_logprint.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -91,7 +91,8 @@ PATENT RIGHTS GRANT: /* Dump the log from stdin to stdout. */ -#include <ft/log_header.h> +#include "ft/log_header.h" +#include "ft/logger/logger.h" static void newmain (int count) { int i; diff --git a/storage/tokudb/ft-index/tools/tokudb_common_funcs.h b/storage/tokudb/ft-index/tools/tokudb_common_funcs.h deleted file mode 100644 index c2737025acc..00000000000 --- a/storage/tokudb/ft-index/tools/tokudb_common_funcs.h +++ /dev/null @@ -1,337 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" -#if !defined(TOKUDB_COMMON_FUNCS_H) -#define TOKUDB_COMMON_FUNCS_H - -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. 
- -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." - -#include "tokudb_common.h" - -//DB_ENV->err disabled since it does not use db_strerror -#define PRINT_ERROR(retval, ...) \ -do { \ -if (0) g.dbenv->err(g.dbenv, retval, __VA_ARGS__); \ -else { \ - fprintf(stderr, "\tIn %s:%d %s()\n", __FILE__, __LINE__, __FUNCTION__); \ - fprintf(stderr, "%s: %s:", g.progname, db_strerror(retval)); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - fflush(stderr); \ -} \ -} while (0) - -//DB_ENV->err disabled since it does not use db_strerror, errx does not exist. -#define PRINT_ERRORX(...) 
\ -do { \ -if (0) g.dbenv->err(g.dbenv, 0, __VA_ARGS__); \ -else { \ - fprintf(stderr, "\tIn %s:%d %s()\n", __FILE__, __LINE__, __FUNCTION__); \ - fprintf(stderr, "%s: ", g.progname); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - fflush(stderr); \ -} \ -} while (0) - -int strtoint32 (char* str, int32_t* num, int32_t min, int32_t max, int base); -int strtouint32 (char* str, uint32_t* num, uint32_t min, uint32_t max, int base); -int strtoint64 (char* str, int64_t* num, int64_t min, int64_t max, int base); -int strtouint64 (char* str, uint64_t* num, uint64_t min, uint64_t max, int base); - -/* - * Convert a string to an integer of type "type". - * - * - * Sets errno and returns: - * EINVAL: str == NULL, num == NULL, or string not of the form [ \t]*[+-]?[0-9]+ - * ERANGE: value out of range specified. (Range of [min, max]) - * - * *num is unchanged on error. - * Returns: - * - */ -#define DEF_STR_TO(name, type, bigtype, strtofunc, frmt) \ -int name(char* str, type* num, type min, type max, int base) \ -{ \ - char* test; \ - bigtype value; \ - \ - assert(str); \ - assert(num); \ - assert(min <= max); \ - assert(g.dbenv || g.progname); \ - assert(base == 0 || (base >= 2 && base <= 36)); \ - \ - errno = 0; \ - while (isspace(*str)) str++; \ - value = strtofunc(str, &test, base); \ - if ((*test != '\0' && *test != '\n') || test == str) { \ - PRINT_ERRORX("%s: Invalid numeric argument\n", str); \ - errno = EINVAL; \ - goto error; \ - } \ - if (errno != 0) { \ - PRINT_ERROR(errno, "%s\n", str); \ - } \ - if (value < min) { \ - PRINT_ERRORX("%s: Less than minimum value (%" frmt ")\n", str, min); \ - goto error; \ - } \ - if (value > max) { \ - PRINT_ERRORX("%s: Greater than maximum value (%" frmt ")\n", str, max); \ - goto error; \ - } \ - *num = value; \ - return EXIT_SUCCESS; \ -error: \ - return errno; \ -} - -DEF_STR_TO(strtoint32, int32_t, int64_t, strtoll, PRId32) -DEF_STR_TO(strtouint32, uint32_t, uint64_t, strtoull, PRIu32) -DEF_STR_TO(strtoint64, int64_t, int64_t, strtoll, PRId64) -DEF_STR_TO(strtouint64, uint64_t, uint64_t, strtoull, PRIu64) - -static inline void -outputbyte(uint8_t ch) -{ - if (g.plaintext) { - if (ch == '\\') printf("\\\\"); - else if (isprint(ch)) printf("%c", ch); - else printf("\\%02x", ch); - } - else printf("%02x", ch); -} - -static inline void -outputstring(char* str) -{ - char* p; - - for (p = str; *p != '\0'; p++) { - outputbyte((uint8_t)*p); - } -} - -static inline void -outputplaintextstring(char* str) -{ - bool old_plaintext = g.plaintext; - g.plaintext = true; - outputstring(str); - g.plaintext = old_plaintext; -} - -static inline int -hextoint(int ch) -{ - if (ch >= '0' && ch <= '9') { - return ch - '0'; - } - if (ch >= 'a' && ch <= 'z') { - return ch - 'a' + 10; - } - if (ch >= 'A' && ch <= 'Z') { - return ch - 'A' + 10; - } - return EOF; -} - -static inline int -printabletocstring(char* inputstr, char** poutputstr) -{ - char highch; - char lowch; - char nextch; - char* cstring; - - assert(inputstr); - assert(poutputstr); - assert(*poutputstr == NULL); - - cstring = (char*)toku_malloc((strlen(inputstr) + 1) * sizeof(char)); - if (cstring == NULL) { - PRINT_ERROR(errno, "printabletocstring"); - goto error; - } - - for (*poutputstr = cstring; *inputstr != '\0'; inputstr++) { - if (*inputstr == '\\') { - if ((highch = *++inputstr) == '\\') { - *cstring++ = '\\'; - continue; - } - if (highch == '\0' || (lowch = *++inputstr) == '\0') { - PRINT_ERROR(0, "unexpected end of input data or key/data pair"); - goto error; - } - if (!isxdigit(highch)) 
{ - PRINT_ERROR(0, "Unexpected '%c' (non-hex) input.\n", highch); - goto error; - } - if (!isxdigit(lowch)) { - PRINT_ERROR(0, "Unexpected '%c' (non-hex) input.\n", lowch); - goto error; - } - nextch = (char)((hextoint(highch) << 4) | hextoint(lowch)); - if (nextch == '\0') { - /* Database names are c strings, and cannot have extra NULL terminators. */ - PRINT_ERROR(0, "Unexpected '\\00' in input.\n"); - goto error; - } - *cstring++ = nextch; - } - else *cstring++ = *inputstr; - } - /* Terminate the string. */ - *cstring = '\0'; - return EXIT_SUCCESS; - -error: - PRINT_ERROR(0, "Quitting out due to errors.\n"); - return EXIT_FAILURE; -} - -static inline int -verify_library_version(void) -{ - int major; - int minor; - - db_version(&major, &minor, NULL); - if (major != DB_VERSION_MAJOR || minor != DB_VERSION_MINOR) { - PRINT_ERRORX("version %d.%d doesn't match library version %d.%d\n", - DB_VERSION_MAJOR, DB_VERSION_MINOR, major, minor); - return EXIT_FAILURE; - } - return EXIT_SUCCESS; -} - -static int last_caught = 0; - -static void catch_signal(int which_signal) { - last_caught = which_signal; - if (last_caught == 0) last_caught = SIGINT; -} - -static inline void -init_catch_signals(void) { - signal(SIGINT, catch_signal); - signal(SIGTERM, catch_signal); -#ifdef SIGHUP - signal(SIGHUP, catch_signal); -#endif -#ifdef SIGPIPE - signal(SIGPIPE, catch_signal); -#endif -} - -static inline int -caught_any_signals(void) { - return last_caught != 0; -} - -static inline void -resend_signals(void) { - if (last_caught) { - signal(last_caught, SIG_DFL); - raise(last_caught); - } -} - -#include <memory.h> -static int test_main (int argc, char *const argv[]); -int -main(int argc, char *const argv[]) { - int r; - r = test_main(argc, argv); - return r; -} - -#endif /* #if !defined(TOKUDB_COMMON_H) */ diff --git a/storage/tokudb/ft-index/tools/tokudb_dump.cc b/storage/tokudb/ft-index/tools/tokudb_dump.cc index 1020afb70e0..2da50bb793a 100644 --- a/storage/tokudb/ft-index/tools/tokudb_dump.cc +++ b/storage/tokudb/ft-index/tools/tokudb_dump.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,6 +88,10 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007, 2008 Tokutek Inc. All rights reserved." +#include <db.h> + +#include <toku_stdlib.h> +#include <toku_stdint.h> #include <toku_portability.h> #include <toku_assert.h> #include <stdio.h> @@ -97,8 +101,8 @@ PATENT RIGHTS GRANT: #include <ctype.h> #include <errno.h> #include <getopt.h> -#include <db.h> -#include "tokudb_common.h" +#include <signal.h> +#include <memory.h> typedef struct { bool leadingspace; @@ -120,7 +124,245 @@ typedef struct { } dump_globals; dump_globals g; -#include "tokudb_common_funcs.h" + +#define SET_BITS(bitvector, bits) ((bitvector) |= (bits)) +#define REMOVE_BITS(bitvector, bits) ((bitvector) &= ~(bits)) +#define IS_SET_ANY(bitvector, bits) ((bitvector) & (bits)) +#define IS_SET_ALL(bitvector, bits) (((bitvector) & (bits)) == (bits)) + +#define IS_POWER_OF_2(num) ((num) > 0 && ((num) & ((num) - 1)) == 0) + +//DB_ENV->err disabled since it does not use db_strerror +#define PRINT_ERROR(retval, ...) 
\ +do { \ +if (0) g.dbenv->err(g.dbenv, retval, __VA_ARGS__); \ +else { \ + fprintf(stderr, "\tIn %s:%d %s()\n", __FILE__, __LINE__, __FUNCTION__); \ + fprintf(stderr, "%s: %s:", g.progname, db_strerror(retval)); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + fflush(stderr); \ +} \ +} while (0) + +//DB_ENV->err disabled since it does not use db_strerror, errx does not exist. +#define PRINT_ERRORX(...) \ +do { \ +if (0) g.dbenv->err(g.dbenv, 0, __VA_ARGS__); \ +else { \ + fprintf(stderr, "\tIn %s:%d %s()\n", __FILE__, __LINE__, __FUNCTION__); \ + fprintf(stderr, "%s: ", g.progname); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + fflush(stderr); \ +} \ +} while (0) + +int strtoint32 (char* str, int32_t* num, int32_t min, int32_t max, int base); +int strtouint32 (char* str, uint32_t* num, uint32_t min, uint32_t max, int base); +int strtoint64 (char* str, int64_t* num, int64_t min, int64_t max, int base); +int strtouint64 (char* str, uint64_t* num, uint64_t min, uint64_t max, int base); + +/* + * Convert a string to an integer of type "type". + * + * + * Sets errno and returns: + * EINVAL: str == NULL, num == NULL, or string not of the form [ \t]*[+-]?[0-9]+ + * ERANGE: value out of range specified. (Range of [min, max]) + * + * *num is unchanged on error. + * Returns: + * + */ +#define DEF_STR_TO(name, type, bigtype, strtofunc, frmt) \ +int name(char* str, type* num, type min, type max, int base) \ +{ \ + char* test; \ + bigtype value; \ + \ + assert(str); \ + assert(num); \ + assert(min <= max); \ + assert(g.dbenv || g.progname); \ + assert(base == 0 || (base >= 2 && base <= 36)); \ + \ + errno = 0; \ + while (isspace(*str)) str++; \ + value = strtofunc(str, &test, base); \ + if ((*test != '\0' && *test != '\n') || test == str) { \ + PRINT_ERRORX("%s: Invalid numeric argument\n", str); \ + errno = EINVAL; \ + goto error; \ + } \ + if (errno != 0) { \ + PRINT_ERROR(errno, "%s\n", str); \ + } \ + if (value < min) { \ + PRINT_ERRORX("%s: Less than minimum value (%" frmt ")\n", str, min); \ + goto error; \ + } \ + if (value > max) { \ + PRINT_ERRORX("%s: Greater than maximum value (%" frmt ")\n", str, max); \ + goto error; \ + } \ + *num = value; \ + return EXIT_SUCCESS; \ +error: \ + return errno; \ +} + +DEF_STR_TO(strtoint32, int32_t, int64_t, strtoll, PRId32) +DEF_STR_TO(strtouint32, uint32_t, uint64_t, strtoull, PRIu32) +DEF_STR_TO(strtoint64, int64_t, int64_t, strtoll, PRId64) +DEF_STR_TO(strtouint64, uint64_t, uint64_t, strtoull, PRIu64) + +static inline void +outputbyte(uint8_t ch) +{ + if (g.plaintext) { + if (ch == '\\') printf("\\\\"); + else if (isprint(ch)) printf("%c", ch); + else printf("\\%02x", ch); + } + else printf("%02x", ch); +} + +static inline void +outputstring(char* str) +{ + char* p; + + for (p = str; *p != '\0'; p++) { + outputbyte((uint8_t)*p); + } +} + +static inline void +outputplaintextstring(char* str) +{ + bool old_plaintext = g.plaintext; + g.plaintext = true; + outputstring(str); + g.plaintext = old_plaintext; +} + +static inline int +hextoint(int ch) +{ + if (ch >= '0' && ch <= '9') { + return ch - '0'; + } + if (ch >= 'a' && ch <= 'z') { + return ch - 'a' + 10; + } + if (ch >= 'A' && ch <= 'Z') { + return ch - 'A' + 10; + } + return EOF; +} + +static inline int +printabletocstring(char* inputstr, char** poutputstr) +{ + char highch; + char lowch; + char nextch; + char* cstring; + + assert(inputstr); + assert(poutputstr); + assert(*poutputstr == NULL); + + cstring = (char*)toku_malloc((strlen(inputstr) + 1) * sizeof(char)); 
+ if (cstring == NULL) { + PRINT_ERROR(errno, "printabletocstring"); + goto error; + } + + for (*poutputstr = cstring; *inputstr != '\0'; inputstr++) { + if (*inputstr == '\\') { + if ((highch = *++inputstr) == '\\') { + *cstring++ = '\\'; + continue; + } + if (highch == '\0' || (lowch = *++inputstr) == '\0') { + PRINT_ERROR(0, "unexpected end of input data or key/data pair"); + goto error; + } + if (!isxdigit(highch)) { + PRINT_ERROR(0, "Unexpected '%c' (non-hex) input.\n", highch); + goto error; + } + if (!isxdigit(lowch)) { + PRINT_ERROR(0, "Unexpected '%c' (non-hex) input.\n", lowch); + goto error; + } + nextch = (char)((hextoint(highch) << 4) | hextoint(lowch)); + if (nextch == '\0') { + /* Database names are c strings, and cannot have extra NULL terminators. */ + PRINT_ERROR(0, "Unexpected '\\00' in input.\n"); + goto error; + } + *cstring++ = nextch; + } + else *cstring++ = *inputstr; + } + /* Terminate the string. */ + *cstring = '\0'; + return EXIT_SUCCESS; + +error: + PRINT_ERROR(0, "Quitting out due to errors.\n"); + return EXIT_FAILURE; +} + +static inline int +verify_library_version(void) +{ + int major; + int minor; + + db_version(&major, &minor, NULL); + if (major != DB_VERSION_MAJOR || minor != DB_VERSION_MINOR) { + PRINT_ERRORX("version %d.%d doesn't match library version %d.%d\n", + DB_VERSION_MAJOR, DB_VERSION_MINOR, major, minor); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; +} + +static int last_caught = 0; + +static void catch_signal(int which_signal) { + last_caught = which_signal; + if (last_caught == 0) last_caught = SIGINT; +} + +static inline void +init_catch_signals(void) { + signal(SIGINT, catch_signal); + signal(SIGTERM, catch_signal); +#ifdef SIGHUP + signal(SIGHUP, catch_signal); +#endif +#ifdef SIGPIPE + signal(SIGPIPE, catch_signal); +#endif +} + +static inline int +caught_any_signals(void) { + return last_caught != 0; +} + +static inline void +resend_signals(void) { + if (last_caught) { + signal(last_caught, SIG_DFL); + raise(last_caught); + } +} static int usage (void); static int create_init_env(void); @@ -131,7 +373,7 @@ static int dump_footer (void); static int dump_header (void); static int close_database (void); -int test_main(int argc, char *const argv[]) { +int main(int argc, char *const argv[]) { int ch; int retval; diff --git a/storage/tokudb/ft-index/tools/tokudb_gen.cc b/storage/tokudb/ft-index/tools/tokudb_gen.cc deleted file mode 100644 index c23567116a8..00000000000 --- a/storage/tokudb/ft-index/tools/tokudb_gen.cc +++ /dev/null @@ -1,471 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007, 2008 Tokutek Inc. All rights reserved." 
- -#include <toku_portability.h> -#include <toku_assert.h> -#include <stdio.h> -#include <sys/types.h> -#include <db.h> -#include <unistd.h> -#include <string.h> -#include <ctype.h> -#include <errno.h> -#include <getopt.h> -#include <src/ydb.h> - -#include "tokudb_common.h" - -typedef struct { - DB_ENV* dbenv; - bool plaintext; - char* progname; -} gen_globals; - -gen_globals g; -#include "tokudb_common_funcs.h" - -static int usage(void); -static void generate_keys(void); -static int get_delimiter(char* str); - - - -char dbt_delimiter = '\n'; -char sort_delimiter[3]; -uint32_t lengthmin = 0; -bool set_lengthmin = false; -uint32_t lengthlimit = 0; -bool set_lengthlimit= false; -uint64_t numkeys = 0; -bool set_numkeys = false; -bool header = true; -bool footer = true; -bool justheader = false; -bool justfooter = false; -bool outputkeys = true; -uint32_t seed = 1; -bool set_seed = false; -bool printableonly = false; -bool leadingspace = true; -bool force_unique = true; -bool dupsort = false; - -static int test_main (int argc, char *const argv[]) { - int ch; - - /* Set up the globals. */ - memset(&g, 0, sizeof(g)); - - g.progname = argv[0]; - - if (verify_library_version() != 0) goto error; - - strcpy(sort_delimiter, ""); - - while ((ch = getopt(argc, argv, "PpTo:r:m:M:n:uVhHfFd:s:DS")) != EOF) { - switch (ch) { - case ('P'): { - printableonly = true; - break; - } - case ('p'): { - g.plaintext = true; - leadingspace = true; - break; - } - case ('T'): { - g.plaintext = true; - leadingspace = false; - header = false; - footer = false; - break; - } - case ('o'): { - if (freopen(optarg, "w", stdout) == NULL) { - PRINT_ERROR(errno, "%s: reopen\n", optarg); - goto error; - } - break; - } - case ('r'): { - if (strtouint32(optarg, &seed, 0, UINT32_MAX, 10)) { - PRINT_ERRORX("%s: (-r) Random seed invalid.", optarg); - goto error; - } - set_seed = true; - break; - } - case ('m'): { - if (strtouint32(optarg, &lengthmin, 0, UINT32_MAX, 10)) { - PRINT_ERRORX("%s: (-m) Min length of keys/values invalid.", optarg); - goto error; - } - set_lengthmin = true; - break; - } - case ('M'): { - if (strtouint32(optarg, &lengthlimit, 1, UINT32_MAX, 10)) { - PRINT_ERRORX("%s: (-M) Limit of key/value length invalid.", optarg); - goto error; - } - set_lengthlimit = true; - break; - } - case ('n'): { - if (strtouint64(optarg, &numkeys, 0, UINT64_MAX, 10)) { - PRINT_ERRORX("%s: (-n) Number of keys to generate invalid.", optarg); - goto error; - } - set_numkeys = true; - break; - } - case ('u'): { - force_unique = false; - break; - } - case ('h'): { - header = false; - break; - } - case ('H'): { - justheader = true; - break; - } - case ('f'): { - footer = false; - break; - } - case ('F'): { - justfooter = true; - break; - } - case ('d'): { - int temp = get_delimiter(optarg); - if (temp == EOF) { - PRINT_ERRORX("%s: (-d) Key (or value) delimiter must be one character.", - optarg); - goto error; - } - if (isxdigit(temp)) { - PRINT_ERRORX("%c: (-d) Key (or value) delimiter cannot be a hex digit.", - temp); - goto error; - } - dbt_delimiter = (char)temp; - break; - } - case ('s'): { - int temp = get_delimiter(optarg); - if (temp == EOF) { - PRINT_ERRORX("%s: (-s) Sorting (Between key/value pairs) delimiter must be one character.", - optarg); - goto error; - } - if (isxdigit(temp)) { - PRINT_ERRORX("%c: (-s) Sorting (Between key/value pairs) delimiter cannot be a hex digit.", - temp); - goto error; - } - sort_delimiter[0] = (char)temp; - sort_delimiter[1] = '\0'; - break; - } - case ('V'): { - printf("%s\n", db_version(NULL, 
NULL, NULL)); - return EXIT_SUCCESS; - } - case 'D': { - fprintf(stderr, "Duplicates no longer supported by tokudb\n"); - return EXIT_FAILURE; - } - case 'S': { - fprintf(stderr, "Dupsort no longer supported by tokudb\n"); - return EXIT_FAILURE; - } - case ('?'): - default: { - return (usage()); - } - } - } - argc -= optind; - argv += optind; - - if (justheader && !header) { - PRINT_ERRORX("The -h and -H options may not both be specified.\n"); - goto error; - } - if (justfooter && !footer) { - PRINT_ERRORX("The -f and -F options may not both be specified.\n"); - goto error; - } - if (justfooter && justheader) { - PRINT_ERRORX("The -H and -F options may not both be specified.\n"); - goto error; - } - if (justfooter && header) { - PRINT_ERRORX("-F implies -h\n"); - header = false; - } - if (justheader && footer) { - PRINT_ERRORX("-H implies -f\n"); - footer = false; - } - if (!leadingspace) { - if (footer) { - PRINT_ERRORX("-p implies -f\n"); - footer = false; - } - if (header) { - PRINT_ERRORX("-p implies -h\n"); - header = false; - } - } - if (justfooter || justheader) outputkeys = false; - else if (!set_numkeys) - { - PRINT_ERRORX("Using default number of keys. (-n 1024).\n"); - numkeys = 1024; - } - if (outputkeys && !set_seed) { - PRINT_ERRORX("Using default seed. (-r 1).\n"); - seed = 1; - } - if (outputkeys && !set_lengthmin) { - PRINT_ERRORX("Using default lengthmin. (-m 0).\n"); - lengthmin = 0; - } - if (outputkeys && !set_lengthlimit) { - PRINT_ERRORX("Using default lengthlimit. (-M 1024).\n"); - lengthlimit = 1024; - } - if (outputkeys && lengthmin >= lengthlimit) { - PRINT_ERRORX("Max key size must be greater than min key size.\n"); - goto error; - } - - if (argc != 0) { - return usage(); - } - if (header) { - printf("VERSION=3\n"); - printf("format=%s\n", g.plaintext ? "print" : "bytevalue"); - printf("type=btree\n"); - // printf("db_pagesize=%d\n", 4096); //Don't write pagesize which would be useless. - if (dupsort) - printf("dupsort=%d\n", dupsort); - printf("HEADER=END\n"); - } - if (outputkeys) generate_keys(); - if (footer) printf("DATA=END\n"); - return EXIT_SUCCESS; - -error: - fprintf(stderr, "Quitting out due to errors.\n"); - return EXIT_FAILURE; -} - -static int usage() -{ - fprintf(stderr, - "usage: %s [-PpTuVhHfFDS] [-o output] [-r seed] [-m minsize] [-M limitsize]\n" - " %*s[-n numpairs] [-d delimiter] [-s delimiter]\n", - g.progname, (int)strlen(g.progname) + 1, ""); - return EXIT_FAILURE; -} - -static uint8_t randbyte(void) -{ - static uint32_t numsavedbits = 0; - static uint64_t savedbits = 0; - uint8_t retval; - - if (numsavedbits < 8) { - savedbits |= ((uint64_t)random()) << numsavedbits; - numsavedbits += 31; /* Random generates 31 random bits. */ - } - retval = savedbits & 0xff; - numsavedbits -= 8; - savedbits >>= 8; - return retval; -} - -/* Almost-uniformly random int from [0,limit) */ -static int32_t random_below(int32_t limit) -{ - assert(limit > 0); - return random() % limit; -} - -static void generate_keys() -{ - bool usedemptykey = false; - uint64_t numgenerated = 0; - uint64_t totalsize = 0; - char identifier[24]; /* 8 bytes * 2 = 16; 16+1=17; 17+null terminator = 18. Extra padding. */ - int length; - int i; - uint8_t ch; - - srandom(seed); - while (numgenerated < numkeys) { - numgenerated++; - - /* Each key is preceded by a space (unless using -T). */ - if (leadingspace) printf(" "); - - /* Generate a key. */ - { - /* Pick a key length. */ - length = random_below(lengthlimit - lengthmin) + lengthmin; - - /* Output 'length' random bytes. 
*/ - for (i = 0; i < length; i++) { - do {ch = randbyte();} - while (printableonly && !isprint(ch)); - outputbyte(ch); - } - totalsize += length; - if (force_unique) { - if (length == 0 && !usedemptykey) usedemptykey = true; - else { - /* Append identifier to ensure uniqueness. */ - sprintf(identifier, "x%" PRIx64, numgenerated); - outputstring(identifier); - totalsize += strlen(identifier); - } - } - } - printf("%c", dbt_delimiter); - - /* Each value is preceded by a space (unless using -T). */ - if (leadingspace) printf(" "); - - /* Generate a value. */ - { - /* Pick a key length. */ - length = random_below(lengthlimit - lengthmin) + lengthmin; - - /* Output 'length' random bytes. */ - for (i = 0; i < length; i++) { - do {ch = randbyte();} - while (printableonly && !isprint(ch)); - outputbyte(ch); - } - totalsize += length; - } - printf("%c", dbt_delimiter); - - printf("%s", sort_delimiter); - } -} - -int get_delimiter(char* str) -{ - if (strlen(str) == 2 && str[0] == '\\') { - switch (str[1]) { - case ('a'): return '\a'; - case ('b'): return '\b'; -#ifndef __ICL - case ('e'): return '\e'; -#endif - case ('f'): return '\f'; - case ('n'): return '\n'; - case ('r'): return '\r'; - case ('t'): return '\t'; - case ('v'): return '\v'; - case ('0'): return '\0'; - case ('\\'): return '\\'; - default: return EOF; - } - } - if (strlen(str) == 1) return str[0]; - return EOF; -} diff --git a/storage/tokudb/ft-index/tools/tokudb_load.cc b/storage/tokudb/ft-index/tools/tokudb_load.cc deleted file mode 100644 index 2072b2f7f8d..00000000000 --- a/storage/tokudb/ft-index/tools/tokudb_load.cc +++ /dev/null @@ -1,977 +0,0 @@ -/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ -// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" -/* -COPYING CONDITIONS NOTICE: - - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation, and provided that the - following conditions are met: - - * Redistributions of source code must retain this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below). - - * Redistributions in binary form must reproduce this COPYING - CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the - DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the - PATENT MARKING NOTICE (below), and the PATENT RIGHTS - GRANT (below) in the documentation and/or other materials - provided with the distribution. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - 02110-1301, USA. - -COPYRIGHT NOTICE: - - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. - -DISCLAIMER: - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - -UNIVERSITY PATENT NOTICE: - - The technology is licensed by the Massachusetts Institute of - Technology, Rutgers State University of New Jersey, and the Research - Foundation of State University of New York at Stony Brook under - United States of America Serial No. 
11/760379 and to the patents - and/or patent applications resulting from it. - -PATENT MARKING NOTICE: - - This software is covered by US Patent No. 8,185,551. - This software is covered by US Patent No. 8,489,638. - -PATENT RIGHTS GRANT: - - "THIS IMPLEMENTATION" means the copyrightable works distributed by - Tokutek as part of the Fractal Tree project. - - "PATENT CLAIMS" means the claims of patents that are owned or - licensable by Tokutek, both currently or in the future; and that in - the absence of this license would be infringed by THIS - IMPLEMENTATION or by using or running THIS IMPLEMENTATION. - - "PATENT CHALLENGE" shall mean a challenge to the validity, - patentability, enforceability and/or non-infringement of any of the - PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. - - Tokutek hereby grants to you, for the term and geographical scope of - the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, - irrevocable (except as stated in this section) patent license to - make, have made, use, offer to sell, sell, import, transfer, and - otherwise run, modify, and propagate the contents of THIS - IMPLEMENTATION, where such license applies only to the PATENT - CLAIMS. This grant does not include claims that would be infringed - only as a consequence of further modifications of THIS - IMPLEMENTATION. If you or your agent or licensee institute or order - or agree to the institution of patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging that - THIS IMPLEMENTATION constitutes direct or contributory patent - infringement, or inducement of patent infringement, then any rights - granted to you under this License shall terminate as of the date - such litigation is filed. If you or your agent or exclusive - licensee institute or order or agree to the institution of a PATENT - CHALLENGE, then Tokutek may terminate any rights granted to you - under this License. -*/ - -#ident "Copyright (c) 2007, 2008 Tokutek Inc. All rights reserved." - -#include <toku_portability.h> -#include <toku_assert.h> -#include <stdio.h> -#include <sys/types.h> -#include <unistd.h> -#include <string.h> -#include <ctype.h> -#include <errno.h> -#include <getopt.h> -#include <db.h> -#include "tokudb_common.h" - -typedef struct { - bool leadingspace; - bool plaintext; - bool overwritekeys; - bool header; - bool eof; - bool keys; - bool is_private; - char* progname; - char* homedir; - char* database; - char* subdatabase; - char** config_options; - int32_t version; - int exitcode; - uint64_t linenumber; - DBTYPE dbtype; - DB* db; - DB_ENV* dbenv; - struct { - char* data[2]; - } get_dbt; - struct { - char* data; - } read_header; -} load_globals; - -load_globals g; -#include "tokudb_common_funcs.h" - -static int usage (void); -static int load_database (void); -static int create_init_env(void); -static int read_header (void); -static int open_database (void); -static int read_keys (void); -static int apply_commandline_options(void); -static int close_database (void); -static int doublechararray(char** pmem, uint64_t* size); - -int test_main(int argc, char *const argv[]) { - int ch; - int retval; - char** next_config_option; - - /* Set up the globals. 
*/ - memset(&g, 0, sizeof(g)); - g.leadingspace = true; - g.overwritekeys = true; - g.dbtype = DB_UNKNOWN; - //g.dbtype = DB_BTREE; - g.progname = argv[0]; - g.header = true; - - if (verify_library_version() != 0) goto error; - - next_config_option = g.config_options = (char**) calloc(argc, sizeof(char*)); - if (next_config_option == NULL) { - PRINT_ERROR(errno, "main: calloc\n"); - goto error; - } - while ((ch = getopt(argc, argv, "c:f:h:nP:r:Tt:V")) != EOF) { - switch (ch) { - case ('c'): { - *next_config_option++ = optarg; - break; - } - case ('f'): { - if (freopen(optarg, "r", stdin) == NULL) { - fprintf(stderr, - "%s: %s: reopen: %s\n", - g.progname, optarg, strerror(errno)); - goto error; - } - break; - } - case ('h'): { - g.homedir = optarg; - break; - } - case ('n'): { - /* g.overwritekeys = false; */ - PRINT_ERRORX("-%c option not supported.\n", ch); - goto error; - } - case ('P'): { - /* Clear password. */ - memset(optarg, 0, strlen(optarg)); - PRINT_ERRORX("-%c option not supported.\n", ch); - goto error; - } - case ('r'): { - PRINT_ERRORX("-%c option not supported.\n", ch); - goto error; - } - case ('T'): { - g.plaintext = true; - g.leadingspace = false; - g.header = false; - break; - } - case ('t'): { - if (!strcmp(optarg, "btree")) { - g.dbtype = DB_BTREE; - break; - } - if (!strcmp(optarg, "hash") || !strcmp(optarg, "recno") || !strcmp(optarg, "queue")) { - fprintf(stderr, "%s: db type %s not supported.\n", g.progname, optarg); - goto error; - } - fprintf(stderr, "%s: Unrecognized db type %s.\n", g.progname, optarg); - goto error; - } - case ('V'): { - printf("%s\n", db_version(NULL, NULL, NULL)); - goto cleanup; - } - case ('?'): - default: { - g.exitcode = usage(); - goto cleanup; - } - } - } - argc -= optind; - argv += optind; - - if (argc != 1) { - g.exitcode = usage(); - goto cleanup; - } - init_catch_signals(); - - g.database = argv[0]; - if (create_init_env() != 0) goto error; - if (caught_any_signals()) goto cleanup; - while (!g.eof) { - if (load_database() != 0) goto error; - if (caught_any_signals()) goto cleanup; - } - if (false) { -error: - g.exitcode = EXIT_FAILURE; - fprintf(stderr, "%s: Quitting out due to errors.\n", g.progname); - } -cleanup: - if (g.dbenv && (retval = g.dbenv->close(g.dbenv, 0)) != 0) { - g.exitcode = EXIT_FAILURE; - fprintf(stderr, "%s: dbenv->close: %s\n", g.progname, db_strerror(retval)); - } - if (g.config_options) toku_free(g.config_options); - if (g.subdatabase) toku_free(g.subdatabase); - if (g.read_header.data) toku_free(g.read_header.data); - if (g.get_dbt.data[0]) toku_free(g.get_dbt.data[0]); - if (g.get_dbt.data[1]) toku_free(g.get_dbt.data[1]); - resend_signals(); - - return g.exitcode; -} - -int load_database() -{ - int retval; - - /* Create a database handle. 
*/ - retval = db_create(&g.db, g.dbenv, 0); - if (retval != 0) { - PRINT_ERROR(retval, "db_create"); - return EXIT_FAILURE; - } - - if (g.header && read_header() != 0) goto error; - if (g.eof) goto cleanup; - if (caught_any_signals()) goto cleanup; - if (apply_commandline_options() != 0) goto error; - if (g.eof) goto cleanup; - if (caught_any_signals()) goto cleanup; - - /* - TODO: If/when supporting encryption - if (g.password && (retval = db->set_flags(db, DB_ENCRYPT))) { - PRINT_ERROR(ret, "DB->set_flags: DB_ENCRYPT"); - goto error; - } - */ - if (open_database() != 0) goto error; - if (g.eof) goto cleanup; - if (caught_any_signals()) goto cleanup; - if (read_keys() != 0) goto error; - if (g.eof) goto cleanup; - if (caught_any_signals()) goto cleanup; - - if (false) { -error: - g.exitcode = EXIT_FAILURE; - } -cleanup: - - if (close_database() != 0) g.exitcode = EXIT_FAILURE; - - return g.exitcode; -} - -int usage() -{ - fprintf(stderr, - "usage: %s [-TV] [-c name=value] [-f file] [-h home] [-t btree] db_file\n", - g.progname); - return EXIT_FAILURE; -} - -int create_init_env() -{ - int retval; - DB_ENV* dbenv; - int flags; - //TODO: Experiments to determine right cache size for tokudb, or maybe command line argument. - //int cache = 1 << 20; /* 1 megabyte */ - - retval = db_env_create(&dbenv, 0); - if (retval) { - fprintf(stderr, "%s: db_dbenv_create: %s\n", g.progname, db_strerror(retval)); - goto error; - } - ///TODO: UNCOMMENT/IMPLEMENT dbenv->set_errfile(dbenv, stderr); - dbenv->set_errpfx(dbenv, g.progname); - /* - TODO: If/when supporting encryption - if (g.password && (retval = dbenv->set_encrypt(dbenv, g.password, DB_ENCRYPT_AES))) { - PRINT_ERROR(retval, "set_passwd"); - goto error; - } - */ - - /* Open the dbenvironment. */ - g.is_private = false; - flags = DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOG; ///TODO: UNCOMMENT/IMPLEMENT | DB_USE_ENVIRON; - //TODO: Transactions.. SET_BITS(flags, DB_INIT_TXN); - - /* - ///TODO: UNCOMMENT/IMPLEMENT Notes: We require DB_PRIVATE - if (!dbenv->open(dbenv, g.homedir, flags, 0)) goto success; - */ - - /* - ///TODO: UNCOMMENT/IMPLEMENT - retval = dbenv->set_cachesize(dbenv, 0, cache, 1); - if (retval) { - PRINT_ERROR(retval, "DB_ENV->set_cachesize"); - goto error; - } - */ - g.is_private = true; - //TODO: Do we want to support transactions/logging even in single-process mode? - //Maybe if the db already exists. - //If db does not exist.. makes sense not to log or have transactions - //REMOVE_BITS(flags, DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN); - SET_BITS(flags, DB_CREATE | DB_PRIVATE); - - retval = dbenv->open(dbenv, g.homedir ? 
g.homedir : ".", flags, 0); - if (retval) { - PRINT_ERROR(retval, "DB_ENV->open"); - goto error; - } - g.dbenv = dbenv; - return EXIT_SUCCESS; - -error: - return EXIT_FAILURE; -} - -#define PARSE_NUMBER(match, dbfunction) \ -if (!strcmp(field, match)) { \ - if (strtoint32(value, &num, 1, INT32_MAX, 10)) goto error; \ - if ((retval = dbfunction(db, num)) != 0) goto printerror; \ - continue; \ -} -#define PARSE_UNSUPPORTEDNUMBER(match, dbfunction) \ -if (!strcmp(field, match)) { \ - if (strtoint32(value, &num, 1, INT32_MAX, 10)) goto error; \ - PRINT_ERRORX("%s option not supported.\n", field); \ - goto error; \ -} -#define PARSE_IGNOREDNUMBER(match, dbfunction) \ -if (!strcmp(field, match)) { \ - if (strtoint32(value, &num, 1, INT32_MAX, 10)) goto error; \ - PRINT_ERRORX("%s option not supported yet (ignored).\n", field); \ - continue; \ -} - -#define PARSE_FLAG(match, flag) \ -if (!strcmp(field, match)) { \ - if (strtoint32(value, &num, 0, 1, 10)) { \ - PRINT_ERRORX("%s: boolean name=value pairs require a value of 0 or 1", \ - field); \ - goto error; \ - } \ - if ((retval = db->set_flags(db, flag)) != 0) { \ - PRINT_ERROR(retval, "set_flags: %s", field); \ - goto error; \ - } \ - continue; \ -} - -#define PARSE_UNSUPPORTEDFLAG(match, flag) \ -if (!strcmp(field, match)) { \ - if (strtoint32(value, &num, 0, 1, 10)) { \ - PRINT_ERRORX("%s: boolean name=value pairs require a value of 0 or 1", \ - field); \ - goto error; \ - } \ - PRINT_ERRORX("%s option not supported.\n", field); \ - goto error; \ -} - -#define PARSE_IGNOREDFLAG(match, flag) \ -if (!strcmp(field, match)) { \ - if (strtoint32(value, &num, 0, 1, 10)) { \ - PRINT_ERRORX("%s: boolean name=value pairs require a value of 0 or 1", \ - field); \ - goto error; \ - } \ - PRINT_ERRORX("%s option not supported yet (ignored).\n", field); \ - continue; \ -} - -#define PARSE_CHAR(match, dbfunction) \ -if (!strcmp(field, match)) { \ - if (strlen(value) != 1) { \ - PRINT_ERRORX("%s=%s: Expected 1-byte value", \ - field, value); \ - goto error; \ - } \ - if ((retval = dbfunction(db, value[0])) != 0) { \ - goto printerror; \ - } \ - continue; \ -} - -#define PARSE_UNSUPPORTEDCHAR(match, dbfunction) \ -if (!strcmp(field, match)) { \ - if (strlen(value) != 1) { \ - PRINT_ERRORX("%s=%s: Expected 1-byte value", \ - field, value); \ - goto error; \ - } \ - PRINT_ERRORX("%s option not supported.\n", field); \ - goto error; \ -} - -#define PARSE_COMMON_CONFIGURATIONS() \ - PARSE_IGNOREDNUMBER( "bt_minkey", db->set_bt_minkey); \ - PARSE_IGNOREDFLAG( "chksum", DB_CHKSUM); \ - PARSE_IGNOREDNUMBER( "db_lorder", db->set_lorder); \ - PARSE_IGNOREDNUMBER( "db_pagesize", db->set_pagesize); \ - PARSE_UNSUPPORTEDNUMBER("extentsize", db->set_q_extentsize); \ - PARSE_UNSUPPORTEDNUMBER("h_ffactor", db->set_h_ffactor); \ - PARSE_UNSUPPORTEDNUMBER("h_nelem", db->set_h_nelem); \ - PARSE_UNSUPPORTEDNUMBER("re_len", db->set_re_len); \ - PARSE_UNSUPPORTEDCHAR( "re_pad", db->set_re_pad); \ - PARSE_UNSUPPORTEDFLAG( "recnum", DB_RECNUM); \ - PARSE_UNSUPPORTEDFLAG( "renumber", DB_RENUMBER); - - - -int read_header() -{ - static uint64_t datasize = 1 << 10; - uint64_t idx = 0; - char* field; - char* value; - int ch; - int32_t num; - int retval; - int r; - - assert(g.header); - - if (g.read_header.data == NULL && (g.read_header.data = (char*)toku_malloc(datasize * sizeof(char))) == NULL) { - PRINT_ERROR(errno, "read_header: malloc"); - goto error; - } - while (!g.eof) { - if (caught_any_signals()) goto success; - g.linenumber++; - idx = 0; - /* Read a line. 
*/ - while (true) { - if ((ch = getchar()) == EOF) { - g.eof = true; - if (ferror(stdin)) goto formaterror; - break; - } - if (ch == '\n') break; - - g.read_header.data[idx] = (char)ch; - idx++; - - /* Ensure room exists for next character/null terminator. */ - if (idx == datasize && doublechararray(&g.read_header.data, &datasize)) goto error; - } - if (idx == 0 && g.eof) goto success; - g.read_header.data[idx] = '\0'; - - field = g.read_header.data; - if ((value = strchr(g.read_header.data, '=')) == NULL) goto formaterror; - value[0] = '\0'; - value++; - - if (field[0] == '\0' || value[0] == '\0') goto formaterror; - - if (!strcmp(field, "HEADER")) break; - if (!strcmp(field, "VERSION")) { - if (strtoint32(value, &g.version, 1, INT32_MAX, 10)) goto error; - if (g.version != 3) { - PRINT_ERRORX("line %" PRIu64 ": VERSION %d is unsupported", g.linenumber, g.version); - goto error; - } - continue; - } - if (!strcmp(field, "format")) { - if (!strcmp(value, "bytevalue")) { - g.plaintext = false; - continue; - } - if (!strcmp(value, "print")) { - g.plaintext = true; - continue; - } - goto formaterror; - } - if (!strcmp(field, "type")) { - if (!strcmp(value, "btree")) { - g.dbtype = DB_BTREE; - continue; - } - if (!strcmp(value, "hash") || strcmp(value, "recno") || strcmp(value, "queue")) { - PRINT_ERRORX("db type %s not supported.\n", value); - goto error; - } - PRINT_ERRORX("line %" PRIu64 ": unknown type %s", g.linenumber, value); - goto error; - } - if (!strcmp(field, "database") || !strcmp(field, "subdatabase")) { - if (g.subdatabase != NULL) { - toku_free(g.subdatabase); - g.subdatabase = NULL; - } - if ((retval = printabletocstring(value, &g.subdatabase))) { - PRINT_ERROR(retval, "error reading db name"); - goto error; - } - continue; - } - if (!strcmp(field, "keys")) { - int32_t temp; - if (strtoint32(value, &temp, 0, 1, 10)) { - PRINT_ERROR(0, - "%s: boolean name=value pairs require a value of 0 or 1", - field); - goto error; - } - g.keys = (bool)temp; - if (!g.keys) { - PRINT_ERRORX("keys=0 not supported"); - goto error; - } - continue; - } - PARSE_COMMON_CONFIGURATIONS(); - - PRINT_ERRORX("unknown input-file header configuration keyword \"%s\"", field); - goto error; - } -success: - r = 0; - - if (false) { -formaterror: - r = EXIT_FAILURE; - PRINT_ERRORX("line %" PRIu64 ": unexpected format", g.linenumber); - } - if (false) { -error: - r = EXIT_FAILURE; - } - return r; -} - -int apply_commandline_options() -{ - int r = -1; - unsigned idx; - char* field; - char* value = NULL; - int32_t num; - int retval; - - for (idx = 0; g.config_options[idx]; idx++) { - if (value) { - /* Restore the field=value format. 
*/ - value[-1] = '='; - value = NULL; - } - field = g.config_options[idx]; - - if ((value = strchr(field, '=')) == NULL) { - PRINT_ERRORX("command-line configuration uses name=value format"); - goto error; - } - value[0] = '\0'; - value++; - - if (field[0] == '\0' || value[0] == '\0') { - PRINT_ERRORX("command-line configuration uses name=value format"); - goto error; - } - - if (!strcmp(field, "database") || !strcmp(field, "subdatabase")) { - if (g.subdatabase != NULL) { - toku_free(g.subdatabase); - g.subdatabase = NULL; - } - if ((retval = printabletocstring(value, &g.subdatabase))) { - PRINT_ERROR(retval, "error reading db name"); - goto error; - } - continue; - } - if (!strcmp(field, "keys")) { - int32_t temp; - if (strtoint32(value, &temp, 0, 1, 10)) { - PRINT_ERROR(0, - "%s: boolean name=value pairs require a value of 0 or 1", - field); - goto error; - } - g.keys = (bool)temp; - if (!g.keys) { - PRINT_ERRORX("keys=0 not supported"); - goto error; - } - continue; - } - PARSE_COMMON_CONFIGURATIONS(); - - PRINT_ERRORX("unknown input-file header configuration keyword \"%s\"", field); - goto error; - } - if (value) { - /* Restore the field=value format. */ - value[-1] = '='; - value = NULL; - } - r = 0; - -error: - return r; -} - -int open_database() -{ - DB* db = g.db; - int retval; - - int open_flags = 0; - //TODO: Transaction auto commit stuff - //if (TXN_ON(dbenv)) SET_BITS(open_flags, DB_AUTO_COMMIT); - - //Try to see if it exists first. - retval = db->open(db, NULL, g.database, g.subdatabase, g.dbtype, open_flags, 0666); - if (retval == ENOENT) { - //Does not exist and we did not specify a type. - //TODO: Uncomment when DB_UNKNOWN + db->get_type are implemented. - /* - if (g.dbtype == DB_UNKNOWN) { - PRINT_ERRORX("no database type specified"); - goto error; - }*/ - SET_BITS(open_flags, DB_CREATE); - //Try creating it. - retval = db->open(db, NULL, g.database, g.subdatabase, g.dbtype, open_flags, 0666); - } - if (retval != 0) { - PRINT_ERROR(retval, "DB->open: %s", g.database); - goto error; - } - //TODO: Uncomment when DB_UNKNOWN + db->get_type are implemented. - /* - if ((retval = db->get_type(db, &opened_type)) != 0) { - PRINT_ERROR(retval, "DB->get_type"); - goto error; - } - if (opened_type != DB_BTREE) { - PRINT_ERRORX("Unsupported db type %d\n", opened_type); - goto error; - } - if (g.dbtype != DB_UNKNOWN && opened_type != g.dbtype) { - PRINT_ERRORX("DBTYPE %d does not match opened DBTYPE %d.\n", g.dbtype, opened_type); - goto error; - }*/ - return EXIT_SUCCESS; -error: - fprintf(stderr, "Quitting out due to errors.\n"); - return EXIT_FAILURE; -} - -int doublechararray(char** pmem, uint64_t* size) -{ - assert(pmem); - assert(size); - assert(IS_POWER_OF_2(*size)); - - *size <<= 1; - if (*size == 0) { - /* Overflowed uint64_t. */ - PRINT_ERRORX("Line %" PRIu64 ": Line too long.\n", g.linenumber); - goto error; - } - if ((*pmem = (char*)toku_realloc(*pmem, *size)) == NULL) { - PRINT_ERROR(errno, "doublechararray: realloc"); - goto error; - } - return EXIT_SUCCESS; - -error: - return EXIT_FAILURE; -} - -static int get_dbt(DBT* pdbt) -{ - /* Need to store a key and value. */ - static uint64_t datasize[2] = {1 << 10, 1 << 10}; - static int which = 0; - char* datum; - uint64_t idx = 0; - int highch; - int lowch; - - /* *pdbt should have been memset to 0 before being called. 
*/ - which = 1 - which; - if (g.get_dbt.data[which] == NULL && - (g.get_dbt.data[which] = (char*)toku_malloc(datasize[which] * sizeof(char))) == NULL) { - PRINT_ERROR(errno, "get_dbt: malloc"); - goto error; - } - - datum = g.get_dbt.data[which]; - - if (g.plaintext) { - int firstch; - int nextch = EOF; - - for (firstch = getchar(); firstch != EOF; firstch = getchar()) { - switch (firstch) { - case ('\n'): { - /* Done reading this key/value. */ - nextch = EOF; - break; - } - case ('\\'): { - /* Escaped \ or two hex digits. */ - highch = getchar(); - if (highch == '\\') { - nextch = '\\'; - break; - } - else if (highch == EOF) { - g.eof = true; - PRINT_ERRORX("Line %" PRIu64 ": Unexpected end of file (2 hex digits per byte).\n", g.linenumber); - goto error; - } - else if (!isxdigit(highch)) { - PRINT_ERRORX("Line %" PRIu64 ": Unexpected '%c' (non-hex) input.\n", g.linenumber, highch); - goto error; - } - - lowch = getchar(); - if (lowch == EOF) { - g.eof = true; - PRINT_ERRORX("Line %" PRIu64 ": Unexpected end of file (2 hex digits per byte).\n", g.linenumber); - goto error; - } - else if (!isxdigit(lowch)) { - PRINT_ERRORX("Line %" PRIu64 ": Unexpected '%c' (non-hex) input.\n", g.linenumber, lowch); - goto error; - } - - nextch = (hextoint(highch) << 4) | hextoint(lowch); - break; - } - default: { - if (isprint(firstch)) { - nextch = firstch; - break; - } - PRINT_ERRORX("Line %" PRIu64 ": Nonprintable character found.", g.linenumber); - goto error; - } - } - if (nextch == EOF) { - break; - } - if (idx == datasize[which]) { - /* Overflow, double the memory. */ - if (doublechararray(&g.get_dbt.data[which], &datasize[which])) goto error; - datum = g.get_dbt.data[which]; - } - datum[idx] = (char)nextch; - idx++; - } - if (firstch == EOF) g.eof = true; - } - else { - for (highch = getchar(); highch != EOF; highch = getchar()) { - if (highch == '\n') { - /* Done reading this key/value. */ - break; - } - - lowch = getchar(); - if (lowch == EOF) { - g.eof = true; - PRINT_ERRORX("Line %" PRIu64 ": Unexpected end of file (2 hex digits per byte).\n", g.linenumber); - goto error; - } - if (!isxdigit(highch)) { - PRINT_ERRORX("Line %" PRIu64 ": Unexpected '%c' (non-hex) input.\n", g.linenumber, highch); - goto error; - } - if (!isxdigit(lowch)) { - PRINT_ERRORX("Line %" PRIu64 ": Unexpected '%c' (non-hex) input.\n", g.linenumber, lowch); - goto error; - } - if (idx == datasize[which]) { - /* Overflow, double the memory. */ - if (doublechararray(&g.get_dbt.data[which], &datasize[which])) goto error; - datum = g.get_dbt.data[which]; - } - datum[idx] = (char)((hextoint(highch) << 4) | hextoint(lowch)); - idx++; - } - if (highch == EOF) g.eof = true; - } - - /* Done reading. */ - pdbt->size = idx; - pdbt->data = (void*)datum; - return EXIT_SUCCESS; -error: - return EXIT_FAILURE; -} - -static int insert_pair(DBT* key, DBT* data) -{ - DB* db = g.db; - - int retval = db->put(db, NULL, key, data, g.overwritekeys ? 0 : DB_NOOVERWRITE); - if (retval != 0) { - //TODO: Check for transaction failures/etc.. retry if necessary. 
- PRINT_ERROR(retval, "DB->put"); - if (!(retval == DB_KEYEXIST && g.overwritekeys)) goto error; - } - return EXIT_SUCCESS; -error: - return EXIT_FAILURE; -} - -int read_keys() -{ - DBT key; - DBT data; - int spacech; - - char footer[sizeof("ATA=END\n")]; - - memset(&key, 0, sizeof(key)); - memset(&data, 0, sizeof(data)); - - - //TODO: Start transaction/end transaction/abort/retry/etc - - if (!g.leadingspace) { - assert(g.plaintext); - while (!g.eof) { - if (caught_any_signals()) goto success; - g.linenumber++; - if (get_dbt(&key) != 0) goto error; - if (g.eof) { - if (key.size == 0) { - //Last entry had no newline. Done. - break; - } - PRINT_ERRORX("Line %" PRIu64 ": Key exists but value missing.", g.linenumber); - goto error; - } - g.linenumber++; - if (get_dbt(&data) != 0) goto error; - if (insert_pair(&key, &data) != 0) goto error; - } - } - else while (!g.eof) { - if (caught_any_signals()) goto success; - g.linenumber++; - spacech = getchar(); - switch (spacech) { - case (EOF): { - /* Done. */ - g.eof = true; - goto success; - } - case (' '): { - /* Time to read a key. */ - if (get_dbt(&key) != 0) goto error; - break; - } - case ('D'): { - if (fgets(footer, sizeof("ATA=END\n"), stdin) != NULL && - (!strcmp(footer, "ATA=END") || !strcmp(footer, "ATA=END\n"))) - { - goto success; - } - goto unexpectedinput; - } - default: { -unexpectedinput: - PRINT_ERRORX("Line %" PRIu64 ": Unexpected input while reading key.\n", g.linenumber); - goto error; - } - } - - if (g.eof) { - PRINT_ERRORX("Line %" PRIu64 ": Key exists but value missing.", g.linenumber); - goto error; - } - g.linenumber++; - spacech = getchar(); - switch (spacech) { - case (EOF): { - g.eof = true; - PRINT_ERRORX("Line %" PRIu64 ": Unexpected end of file while reading value.\n", g.linenumber); - goto error; - } - case (' '): { - /* Time to read a key. */ - if (get_dbt(&data) != 0) goto error; - break; - } - default: { - PRINT_ERRORX("Line %" PRIu64 ": Unexpected input while reading value.\n", g.linenumber); - goto error; - } - } - if (insert_pair(&key, &data) != 0) goto error; - } -success: - return EXIT_SUCCESS; -error: - return EXIT_FAILURE; -} - -int close_database() -{ - DB* db = g.db; - int retval; - - assert(db); - if ((retval = db->close(db, 0)) != 0) { - PRINT_ERROR(retval, "DB->close"); - goto error; - } - return EXIT_SUCCESS; -error: - return EXIT_FAILURE; -} diff --git a/storage/tokudb/ft-index/ft/tokuftdump.cc b/storage/tokudb/ft-index/tools/tokuftdump.cc index f2d4fce83cb..3aab5401cd3 100644 --- a/storage/tokudb/ft-index/ft/tokuftdump.cc +++ b/storage/tokudb/ft-index/tools/tokuftdump.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,12 +89,8 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -/* Tell me the diff between two FT files. 
*/ +// Dump a fractal tree file -#include "cachetable.h" -#include "ft.h" -#include "fttypes.h" -#include "ft-internal.h" #include <ctype.h> #include <stdint.h> #include <stdio.h> @@ -102,22 +98,38 @@ PATENT RIGHTS GRANT: #include <inttypes.h> #include <limits.h> -static void -format_time(const uint64_t time_int, char *buf) { +#include "ft/serialize/block_table.h" +#include "ft/cachetable/cachetable.h" +#include "ft/ft.h" +#include "ft/ft-internal.h" +#include "ft/serialize/ft-serialize.h" +#include "ft/serialize/ft_node-serialize.h" +#include "ft/node.h" + +static int do_dump_data = 1; +static int do_interactive = 0; +static int do_header = 0; +static int do_fragmentation = 0; +static int do_garbage = 0; +static int do_translation_table = 0; +static int do_rootnode = 0; +static int do_node = 0; +static BLOCKNUM do_node_num; +static int do_tsv = 0; + +static const char *arg0; +static const char *fname; + +static void format_time(const uint64_t time_int, char *buf) { time_t timer = (time_t) time_int; ctime_r(&timer, buf); assert(buf[24] == '\n'); buf[24] = 0; } -static int dump_data = 1; - -static CACHETABLE ct; - -static void -print_item (bytevec val, ITEMLEN len) { +static void print_item(const void *val, uint32_t len) { printf("\""); - ITEMLEN i; + uint32_t i; for (i=0; i<len; i++) { unsigned char ch = ((unsigned char*)val)[i]; if (isprint(ch) && ch!='\\' && ch!='"') { @@ -129,16 +141,14 @@ print_item (bytevec val, ITEMLEN len) { printf("\""); } -static void -simple_hex_dump(unsigned char *vp, uint64_t size) { +static void simple_hex_dump(unsigned char *vp, uint64_t size) { for (uint64_t i = 0; i < size; i++) { unsigned char c = vp[i]; printf("%2.2X", c); } } -static void -hex_dump(unsigned char *vp, uint64_t offset, uint64_t size) { +static void hex_dump(unsigned char *vp, uint64_t offset, uint64_t size) { uint64_t n = size / 32; for (uint64_t i = 0; i < n; i++) { printf("%" PRIu64 ": ", offset); @@ -169,25 +179,26 @@ hex_dump(unsigned char *vp, uint64_t offset, uint64_t size) { printf("\n"); } -static void -dump_descriptor(DESCRIPTOR d) { +static void dump_descriptor(DESCRIPTOR d) { printf(" descriptor size %u ", d->dbt.size); simple_hex_dump((unsigned char*) d->dbt.data, d->dbt.size); printf("\n"); } -static void -open_header (int f, FT *header, CACHEFILE cf) { +static void open_header(int fd, FT *header, CACHEFILE cf) { FT ft = NULL; int r; - r = toku_deserialize_ft_from (f, MAX_LSN, &ft); - assert(r==0); + r = toku_deserialize_ft_from (fd, MAX_LSN, &ft); + if (r != 0) { + fprintf(stderr, "%s: can not deserialize from %s error %d\n", arg0, fname, r); + exit(1); + } + assert_zero(r); ft->cf = cf; *header = ft; } -static void -dump_header(FT ft) { +static void dump_header(FT ft) { char timestr[26]; printf("ft:\n"); printf(" layout_version=%d\n", ft->h->layout_version); @@ -212,38 +223,28 @@ dump_header(FT ft) { printf(" estimated numbytes=%" PRId64 "\n", ft->in_memory_stats.numbytes); } -static int -print_le( - const void* key, - const uint32_t keylen, - const LEAFENTRY &le, - const uint32_t idx UU(), - void *const ai UU() - ) -{ +static int print_le(const void* key, const uint32_t keylen, const LEAFENTRY &le, const uint32_t idx UU(), void *const ai UU()) { print_klpair(stdout, key, keylen, le); printf("\n"); return 0; } - -static void -dump_node (int f, BLOCKNUM blocknum, FT h) { +static void dump_node(int fd, BLOCKNUM blocknum, FT ft) { FTNODE n; - struct ftnode_fetch_extra bfe; - FTNODE_DISK_DATA ndd = NULL; - fill_bfe_for_full_read(&bfe, h); - int r = toku_deserialize_ftnode_from 
(f, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe); - assert(r==0); + FTNODE_DISK_DATA ndd = nullptr; + ftnode_fetch_extra bfe; + bfe.create_for_full_read(ft); + int r = toku_deserialize_ftnode_from (fd, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe); + assert_zero(r); assert(n!=0); printf("ftnode\n"); DISKOFF disksize, diskoffset; - toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &diskoffset, &disksize); + ft->blocktable.translate_blocknum_to_offset_size(blocknum, &diskoffset, &disksize); printf(" diskoffset =%" PRId64 "\n", diskoffset); printf(" disksize =%" PRId64 "\n", disksize); printf(" serialize_size =%u\n", toku_serialize_ftnode_size(n)); printf(" flags =%u\n", n->flags); - printf(" thisnodename=%" PRId64 "\n", n->thisnodename.b); + printf(" blocknum=%" PRId64 "\n", n->blocknum.b); //printf(" log_lsn =%lld\n", n->log_lsn.lsn); // The log_lsn is a memory-only value. printf(" height =%d\n", n->height); printf(" layout_version=%d\n", n->layout_version); @@ -251,72 +252,81 @@ dump_node (int f, BLOCKNUM blocknum, FT h) { printf(" layout_version_read_from_disk=%d\n", n->layout_version_read_from_disk); printf(" build_id=%d\n", n->build_id); printf(" max_msn_applied_to_node_on_disk=%" PRId64 " (0x%" PRIx64 ")\n", n->max_msn_applied_to_node_on_disk.msn, n->max_msn_applied_to_node_on_disk.msn); - printf("io time %lf decompress time %lf deserialize time %lf\n", - tokutime_to_seconds(bfe.io_time), - tokutime_to_seconds(bfe.decompress_time), - tokutime_to_seconds(bfe.deserialize_time) - ); + printf(" io time %lf decompress time %lf deserialize time %lf\n", + tokutime_to_seconds(bfe.io_time), + tokutime_to_seconds(bfe.decompress_time), + tokutime_to_seconds(bfe.deserialize_time)); printf(" n_children=%d\n", n->n_children); - printf(" total_childkeylens=%u\n", n->totalchildkeylens); + printf(" pivotkeys.total_size()=%u\n", (unsigned) n->pivotkeys.total_size()); printf(" pivots:\n"); for (int i=0; i<n->n_children-1; i++) { - const DBT *piv = &n->childkeys[i]; + const DBT piv = n->pivotkeys.get_pivot(i); printf(" pivot %2d:", i); if (n->flags) printf(" flags=%x ", n->flags); - print_item(piv->data, piv->size); + print_item(piv.data, piv.size); printf("\n"); } printf(" children:\n"); for (int i=0; i<n->n_children; i++) { + printf(" child %d: ", i); if (n->height > 0) { - printf(" child %d: %" PRId64 "\n", i, BP_BLOCKNUM(n, i).b); + printf("%" PRId64 "\n", BP_BLOCKNUM(n, i).b); NONLEAF_CHILDINFO bnc = BNC(n, i); unsigned int n_bytes = toku_bnc_nbytesinbuf(bnc); int n_entries = toku_bnc_n_entries(bnc); if (n_bytes > 0 || n_entries > 0) { printf(" buffer contains %u bytes (%d items)\n", n_bytes, n_entries); } - if (dump_data) { - FIFO_ITERATE(bnc->buffer, key, keylen, data, datalen, typ, msn, xids, UU(is_fresh), - { - printf(" msn=%" PRIu64 " (0x%" PRIx64 ") ", msn.msn, msn.msn); - printf(" TYPE="); - switch ((enum ft_msg_type)typ) { - case FT_NONE: printf("NONE"); goto ok; - case FT_INSERT: printf("INSERT"); goto ok; - case FT_INSERT_NO_OVERWRITE: printf("INSERT_NO_OVERWRITE"); goto ok; - case FT_DELETE_ANY: printf("DELETE_ANY"); goto ok; - case FT_ABORT_ANY: printf("ABORT_ANY"); goto ok; - case FT_COMMIT_ANY: printf("COMMIT_ANY"); goto ok; - case FT_COMMIT_BROADCAST_ALL: printf("COMMIT_BROADCAST_ALL"); goto ok; - case FT_COMMIT_BROADCAST_TXN: printf("COMMIT_BROADCAST_TXN"); goto ok; - case FT_ABORT_BROADCAST_TXN: printf("ABORT_BROADCAST_TXN"); goto ok; - case FT_OPTIMIZE: printf("OPTIMIZE"); goto ok; - case FT_OPTIMIZE_FOR_UPGRADE: 
printf("OPTIMIZE_FOR_UPGRADE"); goto ok; - case FT_UPDATE: printf("UPDATE"); goto ok; - case FT_UPDATE_BROADCAST_ALL: printf("UPDATE_BROADCAST_ALL"); goto ok; - } - printf("HUH?"); - ok: - printf(" xid="); - xids_fprintf(stdout, xids); - printf(" "); - print_item(key, keylen); - if (datalen>0) { - printf(" "); - print_item(data, datalen); - } - printf("\n"); - } - ); + if (do_dump_data) { + struct dump_data_fn { + int operator()(const ft_msg &msg, bool UU(is_fresh)) { + enum ft_msg_type type = (enum ft_msg_type) msg.type(); + MSN msn = msg.msn(); + XIDS xids = msg.xids(); + const void *key = msg.kdbt()->data; + const void *data = msg.vdbt()->data; + uint32_t keylen = msg.kdbt()->size; + uint32_t datalen = msg.vdbt()->size; + printf(" msn=%" PRIu64 " (0x%" PRIx64 ") ", msn.msn, msn.msn); + printf(" TYPE="); + switch (type) { + case FT_NONE: printf("NONE"); goto ok; + case FT_INSERT: printf("INSERT"); goto ok; + case FT_INSERT_NO_OVERWRITE: printf("INSERT_NO_OVERWRITE"); goto ok; + case FT_DELETE_ANY: printf("DELETE_ANY"); goto ok; + case FT_ABORT_ANY: printf("ABORT_ANY"); goto ok; + case FT_COMMIT_ANY: printf("COMMIT_ANY"); goto ok; + case FT_COMMIT_BROADCAST_ALL: printf("COMMIT_BROADCAST_ALL"); goto ok; + case FT_COMMIT_BROADCAST_TXN: printf("COMMIT_BROADCAST_TXN"); goto ok; + case FT_ABORT_BROADCAST_TXN: printf("ABORT_BROADCAST_TXN"); goto ok; + case FT_OPTIMIZE: printf("OPTIMIZE"); goto ok; + case FT_OPTIMIZE_FOR_UPGRADE: printf("OPTIMIZE_FOR_UPGRADE"); goto ok; + case FT_UPDATE: printf("UPDATE"); goto ok; + case FT_UPDATE_BROADCAST_ALL: printf("UPDATE_BROADCAST_ALL"); goto ok; + } + printf("HUH?"); +ok: + printf(" xid="); + toku_xids_fprintf(stdout, xids); + printf(" "); + print_item(key, keylen); + if (datalen>0) { + printf(" "); + print_item(data, datalen); + } + printf("\n"); + return 0; + } + } dump_fn; + bnc->msg_buffer.iterate(dump_fn); } } else { printf(" n_bytes_in_buffer= %" PRIu64 "", BLB_DATA(n, i)->get_disk_size()); printf(" items_in_buffer=%u\n", BLB_DATA(n, i)->num_klpairs()); - if (dump_data) { + if (do_dump_data) { BLB_DATA(n, i)->iterate<void, print_le>(NULL); } } @@ -325,16 +335,14 @@ dump_node (int f, BLOCKNUM blocknum, FT h) { toku_free(ndd); } -static void -dump_block_translation(FT h, uint64_t offset) { - toku_blocknum_dump_translation(h->blocktable, make_blocknum(offset)); +static void dump_block_translation(FT ft, uint64_t offset) { + ft->blocktable.blocknum_dump_translation(make_blocknum(offset)); } -static void -dump_fragmentation(int UU(f), FT h, int tsv) { +static void dump_fragmentation(int UU(f), FT ft, int tsv) { int64_t used_space; int64_t total_space; - toku_blocktable_internal_fragmentation(h->blocktable, &total_space, &used_space); + ft->blocktable.internal_fragmentation(&total_space, &used_space); int64_t fragsizes = total_space - used_space; if (tsv) { @@ -349,21 +357,20 @@ dump_fragmentation(int UU(f), FT h, int tsv) { } typedef struct { - int f; - FT h; + int fd; + FT ft; uint64_t blocksizes; uint64_t leafsizes; uint64_t leafblocks; } frag_help_extra; -static int -nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void *extra) { +static int nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void *extra) { frag_help_extra *CAST_FROM_VOIDP(info, extra); FTNODE n; FTNODE_DISK_DATA ndd = NULL; - struct ftnode_fetch_extra bfe; - fill_bfe_for_full_read(&bfe, info->h); - int r = toku_deserialize_ftnode_from(info->f, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe); + ftnode_fetch_extra bfe; + 
bfe.create_for_full_read(info->ft); + int r = toku_deserialize_ftnode_from(info->fd, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe); if (r==0) { info->blocksizes += size; if (n->height == 0) { @@ -376,49 +383,57 @@ nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void *extra) { return 0; } -static void -dump_nodesizes(int f, FT h) { +static void dump_nodesizes(int fd, FT ft) { frag_help_extra info; memset(&info, 0, sizeof(info)); - info.f = f; - info.h = h; - toku_blocktable_iterate(h->blocktable, TRANSLATION_CHECKPOINTED, - nodesizes_helper, &info, true, true); + info.fd = fd; + info.ft = ft; + ft->blocktable.iterate(block_table::TRANSLATION_CHECKPOINTED, + nodesizes_helper, &info, true, true); printf("leafblocks\t%" PRIu64 "\n", info.leafblocks); printf("blocksizes\t%" PRIu64 "\n", info.blocksizes); printf("leafsizes\t%" PRIu64 "\n", info.leafsizes); } -static void -dump_garbage_stats(int f, FT ft) { - invariant(f == toku_cachefile_get_fd(ft->cf)); +static void dump_garbage_stats(int fd, FT ft) { + assert(fd == toku_cachefile_get_fd(ft->cf)); uint64_t total_space = 0; uint64_t used_space = 0; toku_ft_get_garbage(ft, &total_space, &used_space); - printf("total_size\t%" PRIu64 "\n", total_space); - printf("used_size\t%" PRIu64 "\n", used_space); + printf("garbage total size\t%" PRIu64 "\n", total_space); + printf("garbage used size\t%" PRIu64 "\n", used_space); } -static uint32_t -get_unaligned_uint32(unsigned char *p) { - return *(uint32_t *)p; +typedef struct __dump_node_extra { + int fd; + FT ft; +} dump_node_extra; + +static int dump_node_wrapper(BLOCKNUM b, int64_t UU(size), int64_t UU(address), void *extra) { + dump_node_extra *CAST_FROM_VOIDP(info, extra); + dump_node(info->fd, b, info->ft); + return 0; +} + +static uint32_t get_unaligned_uint32(unsigned char *p) { + uint32_t n; + memcpy(&n, p, sizeof n); + return n; } struct dump_sub_block { - uint32_t compressed_size; - uint32_t uncompressed_size; - uint32_t xsum; + uint32_t compressed_size; + uint32_t uncompressed_size; + uint32_t xsum; }; -static void -sub_block_deserialize(struct dump_sub_block *sb, unsigned char *sub_block_header) { +static void sub_block_deserialize(struct dump_sub_block *sb, unsigned char *sub_block_header) { sb->compressed_size = toku_dtoh32(get_unaligned_uint32(sub_block_header+0)); sb->uncompressed_size = toku_dtoh32(get_unaligned_uint32(sub_block_header+4)); sb->xsum = toku_dtoh32(get_unaligned_uint32(sub_block_header+8)); } -static void -verify_block(unsigned char *cp, uint64_t file_offset, uint64_t size) { +static void verify_block(unsigned char *cp, uint64_t file_offset, uint64_t size) { // verify the header checksum const size_t node_header = 8 + sizeof (uint32_t) + sizeof (uint32_t) + sizeof (uint32_t); @@ -461,24 +476,22 @@ verify_block(unsigned char *cp, uint64_t file_offset, uint64_t size) { printf("offset %u expected %" PRIu64 "\n", offset, size); } -static void -dump_block(int f, BLOCKNUM blocknum, FT h) { +static void dump_block(int fd, BLOCKNUM blocknum, FT ft) { DISKOFF offset, size; - toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size); + ft->blocktable.translate_blocknum_to_offset_size(blocknum, &offset, &size); printf("%" PRId64 " at %" PRId64 " size %" PRId64 "\n", blocknum.b, offset, size); unsigned char *CAST_FROM_VOIDP(vp, toku_malloc(size)); - uint64_t r = pread(f, vp, size, offset); + uint64_t r = pread(fd, vp, size, offset); if (r == (uint64_t)size) { verify_block(vp, offset, size); } toku_free(vp); } -static void 
-dump_file(int f, uint64_t offset, uint64_t size, FILE *outfp) { +static void dump_file(int fd, uint64_t offset, uint64_t size, FILE *outfp) { unsigned char *XMALLOC_N(size, vp); - uint64_t r = pread(f, vp, size, offset); + uint64_t r = pread(fd, vp, size, offset); if (r == size) { if (outfp == stdout) { hex_dump(vp, offset, size); @@ -490,13 +503,11 @@ dump_file(int f, uint64_t offset, uint64_t size, FILE *outfp) { toku_free(vp); } -static void -set_file(int f, uint64_t offset, unsigned char newc) { - toku_os_pwrite(f, &newc, sizeof newc, offset); +static void set_file(int fd, uint64_t offset, unsigned char newc) { + toku_os_pwrite(fd, &newc, sizeof newc, offset); } -static int -readline (char *line, int maxline) { +static int readline(char *line, int maxline) { int i = 0; int c; while ((c = getchar()) != EOF && c != '\n' && i < maxline) { @@ -506,8 +517,7 @@ readline (char *line, int maxline) { return c == EOF ? EOF : i; } -static int -split_fields (char *line, char *fields[], int maxfields) { +static int split_fields(char *line, char *fields[], int maxfields) { int i; for (i=0; i<maxfields; i++) fields[i] = NULL; @@ -520,26 +530,16 @@ split_fields (char *line, char *fields[], int maxfields) { return i; } -static int -usage(const char *arg0) { - printf("Usage: %s [--nodata] [--i[nteractive]|--fragmentation [--tsv]|--translation-table|--rootnode] ftfilename\n", arg0); - return 1; -} - -typedef struct __dump_node_extra { - int f; - FT h; -} dump_node_extra; - -static int -dump_node_wrapper(BLOCKNUM b, int64_t UU(size), int64_t UU(address), void *extra) { - dump_node_extra *CAST_FROM_VOIDP(info, extra); - dump_node(info->f, b, info->h); - return 0; +static uint64_t getuint64(const char *f) { + if (strncmp(f, "0x", 2) == 0 || strncmp(f, "0X", 2) == 0) + return strtoull(f, 0, 16); + else if (strncmp(f, "0", 1) == 0) + return strtoull(f, 0, 8); + else + return strtoull(f, 0, 10); } -static void -interactive_help(void) { +static void interactive_help(void) { fprintf(stderr, "help\n"); fprintf(stderr, "header\n"); fprintf(stderr, "node NUMBER\n"); @@ -552,133 +552,169 @@ interactive_help(void) { fprintf(stderr, "quit\n"); } -static uint64_t -getuint64(const char *f) { - if (strncmp(f, "0x", 2) == 0 || strncmp(f, "0X", 2) == 0) - return strtoull(f, 0, 16); - else if (strncmp(f, "0", 1) == 0) - return strtoull(f, 0, 8); - else - return strtoull(f, 0, 10); +static void run_iteractive_loop(int fd, FT ft, CACHEFILE cf) { + while (1) { + printf("ftdump>"); fflush(stdout); + enum { maxline = 64}; + char line[maxline+1]; + int r = readline(line, maxline); + if (r == EOF) + break; + const int maxfields = 4; + char *fields[maxfields]; + int nfields = split_fields(line, fields, maxfields); + if (nfields == 0) + continue; + if (strcmp(fields[0], "help") == 0) { + interactive_help(); + } else if (strcmp(fields[0], "header") == 0) { + toku_ft_free(ft); + open_header(fd, &ft, cf); + dump_header(ft); + } else if (strcmp(fields[0], "block") == 0 && nfields == 2) { + BLOCKNUM blocknum = make_blocknum(getuint64(fields[1])); + dump_block(fd, blocknum, ft); + } else if (strcmp(fields[0], "node") == 0 && nfields == 2) { + BLOCKNUM off = make_blocknum(getuint64(fields[1])); + dump_node(fd, off, ft); + } else if (strcmp(fields[0], "dumpdata") == 0 && nfields == 2) { + do_dump_data = strtol(fields[1], NULL, 10); + } else if (strcmp(fields[0], "block_translation") == 0 || strcmp(fields[0], "bx") == 0) { + uint64_t offset = 0; + if (nfields == 2) + offset = getuint64(fields[1]); + dump_block_translation(ft, offset); + } 
else if (strcmp(fields[0], "fragmentation") == 0) { + dump_fragmentation(fd, ft, do_tsv); + } else if (strcmp(fields[0], "nodesizes") == 0) { + dump_nodesizes(fd, ft); + } else if (strcmp(fields[0], "garbage") == 0) { + dump_garbage_stats(fd, ft); + } else if (strcmp(fields[0], "file") == 0 && nfields >= 3) { + uint64_t offset = getuint64(fields[1]); + uint64_t size = getuint64(fields[2]); + FILE *outfp = stdout; + if (nfields >= 4) + outfp = fopen(fields[3], "w"); + dump_file(fd, offset, size, outfp); + } else if (strcmp(fields[0], "setfile") == 0 && nfields == 3) { + uint64_t offset = getuint64(fields[1]); + unsigned char newc = getuint64(fields[2]); + set_file(fd, offset, newc); + } else if (strcmp(fields[0], "quit") == 0 || strcmp(fields[0], "q") == 0) { + break; + } + } } -int -main (int argc, const char *const argv[]) { - int interactive = 0; - int fragmentation = 0; - int translation_table = 0; - int rootnode = 0; - int tsv = 0; +static int usage(void) { + fprintf(stderr, "Usage: %s ", arg0); + fprintf(stderr, "--interactive "); + fprintf(stderr, "--nodata "); + fprintf(stderr, "--dumpdata 0|1 "); + fprintf(stderr, "--header "); + fprintf(stderr, "--rootnode "); + fprintf(stderr, "--node N "); + fprintf(stderr, "--fragmentation "); + fprintf(stderr, "--garbage "); + fprintf(stderr, "--tsv "); + fprintf(stderr, "--translation-table "); + fprintf(stderr, "--tsv "); + fprintf(stderr, "filename \n"); + return 1; +} - const char *arg0 = argv[0]; +int main (int argc, const char *const argv[]) { + arg0 = argv[0]; argc--; argv++; while (argc>0) { - if (strcmp(argv[0], "--nodata") == 0) { - dump_data = 0; - } else if (strcmp(argv[0], "--interactive") == 0 || strcmp(argv[0], "--i") == 0) { - interactive = 1; + if (strcmp(argv[0], "--interactive") == 0 || strcmp(argv[0], "--i") == 0) { + do_interactive = 1; + } else if (strcmp(argv[0], "--nodata") == 0) { + do_dump_data = 0; + } else if (strcmp(argv[0], "--dumpdata") == 0 && argc > 1) { + argc--; argv++; + do_dump_data = atoi(argv[0]); + } else if (strcmp(argv[0], "--header") == 0) { + do_header = 1; + } else if (strcmp(argv[0], "--rootnode") == 0) { + do_rootnode = 1; + } else if (strcmp(argv[0], "--node") == 0 && argc > 1) { + argc--; argv++; + do_node = 1; + do_node_num = make_blocknum(getuint64(argv[0])); } else if (strcmp(argv[0], "--fragmentation") == 0) { - fragmentation = 1; + do_fragmentation = 1; + } else if (strcmp(argv[0], "--garbage") == 0) { + do_garbage = 1; } else if (strcmp(argv[0], "--tsv") == 0) { - tsv = 1; + do_tsv = 1; } else if (strcmp(argv[0], "--translation-table") == 0) { - translation_table = 1; - } else if (strcmp(argv[0], "--rootnode") == 0) { - rootnode = 1; - } else if (strcmp(argv[0], "--help") == 0) { - return usage(arg0); + do_translation_table = 1; + } else if (strcmp(argv[0], "--help") == 0 || strcmp(argv[0], "-?") == 0 || strcmp(argv[0], "-h") == 0) { + return usage(); } else { break; } argc--; argv++; } - if (argc != 1) return usage(arg0); + if (argc != 1) + return usage(); int r = toku_ft_layer_init(); - invariant_zero(r); + assert_zero(r); + + fname = argv[0]; + int fd = open(fname, O_RDWR + O_BINARY); + if (fd < 0) { + fprintf(stderr, "%s: can not open %s errno %d\n", arg0, fname, errno); + return 1; + } - const char *n = argv[0]; - int f = open(n, O_RDWR + O_BINARY); assert(f>=0); - FT ft; // create a cachefile for the header + CACHETABLE ct = NULL; toku_cachetable_create(&ct, 1<<25, (LSN){0}, 0); + CACHEFILE cf = NULL; - r = toku_cachetable_openfd (&cf, ct, f, n); - assert(r==0); - open_header(f, &ft, 
cf); - if (!fragmentation && !translation_table) { - // quick fix for now, we want those two to have clean output - dump_header(ft); - } - if (interactive) { - while (1) { - printf("ftdump>"); fflush(stdout); - enum { maxline = 64}; - char line[maxline+1]; - r = readline(line, maxline); - if (r == EOF) - break; - const int maxfields = 4; - char *fields[maxfields]; - int nfields = split_fields(line, fields, maxfields); - if (nfields == 0) - continue; - if (strcmp(fields[0], "help") == 0) { - interactive_help(); - } else if (strcmp(fields[0], "header") == 0) { - toku_ft_free(ft); - open_header(f, &ft, cf); - dump_header(ft); - } else if (strcmp(fields[0], "block") == 0 && nfields == 2) { - BLOCKNUM blocknum = make_blocknum(getuint64(fields[1])); - dump_block(f, blocknum, ft); - } else if (strcmp(fields[0], "node") == 0 && nfields == 2) { - BLOCKNUM off = make_blocknum(getuint64(fields[1])); - dump_node(f, off, ft); - } else if (strcmp(fields[0], "dumpdata") == 0 && nfields == 2) { - dump_data = strtol(fields[1], NULL, 10); - } else if (strcmp(fields[0], "block_translation") == 0 || strcmp(fields[0], "bx") == 0) { - uint64_t offset = 0; - if (nfields == 2) - offset = getuint64(fields[1]); - dump_block_translation(ft, offset); - } else if (strcmp(fields[0], "fragmentation") == 0) { - dump_fragmentation(f, ft, tsv); - } else if (strcmp(fields[0], "nodesizes") == 0) { - dump_nodesizes(f, ft); - } else if (strcmp(fields[0], "garbage") == 0) { - dump_garbage_stats(f, ft); - } else if (strcmp(fields[0], "file") == 0 && nfields >= 3) { - uint64_t offset = getuint64(fields[1]); - uint64_t size = getuint64(fields[2]); - FILE *outfp = stdout; - if (nfields >= 4) - outfp = fopen(fields[3], "w"); - dump_file(f, offset, size, outfp); - } else if (strcmp(fields[0], "setfile") == 0 && nfields == 3) { - uint64_t offset = getuint64(fields[1]); - unsigned char newc = getuint64(fields[2]); - set_file(f, offset, newc); - } else if (strcmp(fields[0], "quit") == 0 || strcmp(fields[0], "q") == 0) { - break; - } - } - } else if (rootnode) { - dump_node(f, ft->h->root_blocknum, ft); - } else if (fragmentation) { - dump_fragmentation(f, ft, tsv); - } else if (translation_table) { - toku_dump_translation_table_pretty(stdout, ft->blocktable); - } else { - printf("Block translation:"); + r = toku_cachetable_openfd (&cf, ct, fd, fname); + assert_zero(r); - toku_dump_translation_table(stdout, ft->blocktable); + FT ft = NULL; + open_header(fd, &ft, cf); - struct __dump_node_extra info; - info.f = f; - info.h = ft; - toku_blocktable_iterate(ft->blocktable, TRANSLATION_CHECKPOINTED, - dump_node_wrapper, &info, true, true); + if (do_interactive) { + run_iteractive_loop(fd, ft, cf); + } else { + if (do_header) { + dump_header(ft); + } + if (do_rootnode) { + dump_node(fd, ft->h->root_blocknum, ft); + } + if (do_node) { + dump_node(fd, do_node_num, ft); + } + if (do_fragmentation) { + dump_fragmentation(fd, ft, do_tsv); + } + if (do_translation_table) { + ft->blocktable.dump_translation_table_pretty(stdout); + } + if (do_garbage) { + dump_garbage_stats(fd, ft); + } + if (!do_header && !do_rootnode && !do_fragmentation && !do_translation_table && !do_garbage) { + printf("Block translation:"); + ft->blocktable.dump_translation_table(stdout); + + dump_header(ft); + + struct __dump_node_extra info; + info.fd = fd; + info.ft = ft; + ft->blocktable.iterate(block_table::TRANSLATION_CHECKPOINTED, + dump_node_wrapper, &info, true, true); + } } toku_cachefile_close(&cf, false, ZERO_LSN); toku_cachetable_close(&ct); diff --git 
a/storage/tokudb/ft-index/util/CMakeLists.txt b/storage/tokudb/ft-index/util/CMakeLists.txt index 6a0bb6208a5..6f6b899e5b7 100644 --- a/storage/tokudb/ft-index/util/CMakeLists.txt +++ b/storage/tokudb/ft-index/util/CMakeLists.txt @@ -1,10 +1,13 @@ set(util_srcs context + dbt frwlock kibbutz memarena mempool + minicron partitioned_counter + queue threadpool scoped_malloc x1764 diff --git a/storage/tokudb/ft-index/tools/tokudb_common.h b/storage/tokudb/ft-index/util/bytestring.h index aeda0ae5027..1fea03ecfd1 100644 --- a/storage/tokudb/ft-index/tools/tokudb_common.h +++ b/storage/tokudb/ft-index/util/bytestring.h @@ -1,8 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ident "$Id$" -#if !defined(TOKUDB_COMMON_H) -#define TOKUDB_COMMON_H /* COPYING CONDITIONS NOTICE: @@ -32,8 +29,8 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. - Copyright (C) 2007-2013 Tokutek, Inc. + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2014 Tokutek, Inc. DISCLAIMER: @@ -89,21 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ -#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." +#pragma once -#include <stdlib.h> -#include <stdint.h> -#include <limits.h> -#include <db.h> -#include <inttypes.h> -#include <signal.h> -#include <memory.h> +#include "portability/toku_stdint.h" -#define SET_BITS(bitvector, bits) ((bitvector) |= (bits)) -#define REMOVE_BITS(bitvector, bits) ((bitvector) &= ~(bits)) -#define IS_SET_ANY(bitvector, bits) ((bitvector) & (bits)) -#define IS_SET_ALL(bitvector, bits) (((bitvector) & (bits)) == (bits)) - -#define IS_POWER_OF_2(num) ((num) > 0 && ((num) & ((num) - 1)) == 0) - -#endif /* #if !defined(TOKUDB_COMMON_H) */ +struct BYTESTRING { + uint32_t len; + char *data; +}; diff --git a/storage/tokudb/ft-index/util/circular_buffer.cc b/storage/tokudb/ft-index/util/circular_buffer.cc index a453c5b71c7..92d9af521f7 100644 --- a/storage/tokudb/ft-index/util/circular_buffer.cc +++ b/storage/tokudb/ft-index/util/circular_buffer.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/circular_buffer.h b/storage/tokudb/ft-index/util/circular_buffer.h index 6f40cf3046f..904dfed7c49 100644 --- a/storage/tokudb/ft-index/util/circular_buffer.h +++ b/storage/tokudb/ft-index/util/circular_buffer.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
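The header moves in this stretch share one mechanical change: the per-file include guards (TOKUDB_COMMON_H in the hunk above, UTIL_CIRCULAR_BUFFER_H immediately below) are dropped in favor of #pragma once. A minimal before/after sketch of that pattern, written against a hypothetical header example_util.h that is not a file in this tree:

// Before: a classic include guard (hypothetical header, illustration only).
#ifndef EXAMPLE_UTIL_H
#define EXAMPLE_UTIL_H

#include <stdint.h>

struct example_entry {
    uint32_t len;
    char *data;
};

#endif // EXAMPLE_UTIL_H

// After: the shape these headers take once the guard is replaced.
#pragma once

#include <stdint.h>

struct example_entry {
    uint32_t len;
    char *data;
};

#pragma once is not part of the C or C++ standards, but it is widely supported, and it removes the possibility of two headers accidentally colliding on the same guard macro.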
-#ifndef UTIL_CIRCULAR_BUFFER_H -#define UTIL_CIRCULAR_BUFFER_H - #include <stdbool.h> #include <stddef.h> #include <portability/toku_pthread.h> @@ -210,5 +209,3 @@ private: } #include "circular_buffer.cc" - -#endif // UTIL_CIRCULAR_BUFFER_H diff --git a/storage/tokudb/ft-index/util/constexpr.h b/storage/tokudb/ft-index/util/constexpr.h index cfea0b46924..ed71daaf3fe 100644 --- a/storage/tokudb/ft-index/util/constexpr.h +++ b/storage/tokudb/ft-index/util/constexpr.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,11 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#pragma once - constexpr char UU() static_tolower(const char a) { return a >= 'A' && a <= 'Z' ? a - 'A' + 'a' : a; } diff --git a/storage/tokudb/ft-index/util/context.cc b/storage/tokudb/ft-index/util/context.cc index 350cac07960..6166be41294 100644 --- a/storage/tokudb/ft-index/util/context.cc +++ b/storage/tokudb/ft-index/util/context.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2014 Tokutek, Inc. DISCLAIMER: @@ -121,7 +121,7 @@ const toku::context *toku_thread_get_context() { // engine status static struct context_status context_status; -#define CONTEXT_STATUS_INIT(key, legend) TOKUDB_STATUS_INIT(context_status, key, nullptr, PARCOUNT, "context: " legend, TOKU_ENGINE_STATUS) +#define CONTEXT_STATUS_INIT(key, legend) TOKUFT_STATUS_INIT(context_status, key, nullptr, PARCOUNT, "context: " legend, TOKU_ENGINE_STATUS) static void context_status_init(void) { diff --git a/storage/tokudb/ft-index/util/context.h b/storage/tokudb/ft-index/util/context.h index 15f7d732635..04aef5c5e3b 100644 --- a/storage/tokudb/ft-index/util/context.h +++ b/storage/tokudb/ft-index/util/context.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2014 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/ybt.cc b/storage/tokudb/ft-index/util/dbt.cc index 68fd3c178ed..aa26a9b0dd9 100644 --- a/storage/tokudb/ft-index/ft/ybt.cc +++ b/storage/tokudb/ft-index/util/dbt.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -90,27 +90,29 @@ PATENT RIGHTS GRANT: #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
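The small helper shown in the util/constexpr.h hunk above is a constexpr function, so it can be exercised entirely at compile time. As a rough standalone illustration (re-declared here without the UU() attribute macro, and not taken verbatim from the tree), the same expression can be verified with static_assert:

// Compile-time lowercase folding, mirroring the expression in static_tolower().
constexpr char example_tolower(const char a) {
    return a >= 'A' && a <= 'Z' ? a - 'A' + 'a' : a;
}
static_assert(example_tolower('A') == 'a', "uppercase letters fold at compile time");
static_assert(example_tolower('z') == 'z', "lowercase letters pass through");
static_assert(example_tolower('#') == '#', "non-letters pass through");

Because the checks run at build time, they cost nothing at run time and catch regressions in the folding logic as compile errors.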
#include <db.h> -#include <memory.h> #include <string.h> -#include <fttypes.h> -#include "ybt.h" +#include "portability/memory.h" -DBT * -toku_init_dbt(DBT *ybt) { - memset(ybt, 0, sizeof(*ybt)); - return ybt; +#include "util/dbt.h" + +DBT *toku_init_dbt(DBT *dbt) { + memset(dbt, 0, sizeof(*dbt)); + return dbt; } -DBT * -toku_init_dbt_flags(DBT *ybt, uint32_t flags) { - toku_init_dbt(ybt); - ybt->flags = flags; - return ybt; +DBT toku_empty_dbt(void) { + static const DBT empty_dbt = { .data = 0, .size = 0, .ulen = 0, .flags = 0 }; + return empty_dbt; +} + +DBT *toku_init_dbt_flags(DBT *dbt, uint32_t flags) { + toku_init_dbt(dbt); + dbt->flags = flags; + return dbt; } -DBT_ARRAY * -toku_dbt_array_init(DBT_ARRAY *dbts, uint32_t size) { +DBT_ARRAY *toku_dbt_array_init(DBT_ARRAY *dbts, uint32_t size) { uint32_t capacity = 1; while (capacity < size) { capacity *= 2; } @@ -123,8 +125,7 @@ toku_dbt_array_init(DBT_ARRAY *dbts, uint32_t size) { return dbts; } -void -toku_dbt_array_resize(DBT_ARRAY *dbts, uint32_t size) { +void toku_dbt_array_resize(DBT_ARRAY *dbts, uint32_t size) { if (size != dbts->size) { if (size > dbts->capacity) { const uint32_t old_capacity = dbts->capacity; @@ -152,14 +153,12 @@ toku_dbt_array_resize(DBT_ARRAY *dbts, uint32_t size) { } } -void -toku_dbt_array_destroy_shallow(DBT_ARRAY *dbts) { +void toku_dbt_array_destroy_shallow(DBT_ARRAY *dbts) { toku_free(dbts->dbts); ZERO_STRUCT(*dbts); } -void -toku_dbt_array_destroy(DBT_ARRAY *dbts) { +void toku_dbt_array_destroy(DBT_ARRAY *dbts) { for (uint32_t i = 0; i < dbts->capacity; i++) { toku_destroy_dbt(&dbts->dbts[i]); } @@ -168,8 +167,7 @@ toku_dbt_array_destroy(DBT_ARRAY *dbts) { -void -toku_destroy_dbt(DBT *dbt) { +void toku_destroy_dbt(DBT *dbt) { switch (dbt->flags) { case DB_DBT_MALLOC: case DB_DBT_REALLOC: @@ -179,8 +177,7 @@ toku_destroy_dbt(DBT *dbt) { } } -DBT * -toku_fill_dbt(DBT *dbt, bytevec k, ITEMLEN len) { +DBT *toku_fill_dbt(DBT *dbt, const void *k, uint32_t len) { toku_init_dbt(dbt); dbt->size=len; dbt->data=(char*)k; @@ -202,14 +199,6 @@ DBT *toku_copyref_dbt(DBT *dst, const DBT src) { return dst; } -DBT *toku_copy_dbt(DBT *dst, const DBT &src) { - dst->flags = src.flags; - dst->ulen = src.ulen; - dst->size = src.size; - dst->data = src.data; - return dst; -} - DBT *toku_clone_dbt(DBT *dst, const DBT &src) { return toku_memdup_dbt(dst, src.data, src.size); } @@ -220,8 +209,7 @@ toku_sdbt_cleanup(struct simple_dbt *sdbt) { memset(sdbt, 0, sizeof(*sdbt)); } -static inline int -sdbt_realloc(struct simple_dbt *sdbt) { +static inline int sdbt_realloc(struct simple_dbt *sdbt) { void *new_data = toku_realloc(sdbt->data, sdbt->len); int r; if (new_data == NULL) { @@ -233,8 +221,7 @@ sdbt_realloc(struct simple_dbt *sdbt) { return r; } -static inline int -dbt_realloc(DBT *dbt) { +static inline int dbt_realloc(DBT *dbt) { void *new_data = toku_realloc(dbt->data, dbt->ulen); int r; if (new_data == NULL) { @@ -246,13 +233,13 @@ dbt_realloc(DBT *dbt) { return r; } -int -toku_dbt_set (ITEMLEN len, bytevec val, DBT *d, struct simple_dbt *sdbt) { // sdbt is the static value used when flags==0 // Otherwise malloc or use the user-supplied memory, as according to the flags in d->flags. 
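The comment above states the contract the reworked toku_dbt_set() below keeps: the caller's d->flags decide who owns the memory handed back. A caller-side sketch of the three usual modes, using only standard DBT fields and DB_DBT_* flags from db.h (illustration only, not code from the diffed files; the helper names are hypothetical):

#include <db.h>
#include <stdint.h>
#include <string.h>

// DB_DBT_USERMEM: the caller supplies data/ulen and keeps ownership.
static void prepare_usermem(DBT *d, void *buf, uint32_t buflen) {
    memset(d, 0, sizeof(*d));
    d->data = buf;
    d->ulen = buflen;
    d->flags = DB_DBT_USERMEM;
}

// DB_DBT_MALLOC: the setter allocates fresh memory; the caller must free d->data.
static void prepare_malloc(DBT *d) {
    memset(d, 0, sizeof(*d));
    d->flags = DB_DBT_MALLOC;
}

// flags == 0: the setter returns memory it manages itself (the simple_dbt
// "static value" mentioned above); the caller must not free it.
static void prepare_default(DBT *d) {
    memset(d, 0, sizeof(*d));
}

Which branch the setter takes is driven purely by these flags, as the switch on d->flags in the hunk below shows.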
+int toku_dbt_set(uint32_t len, const void *val, DBT *d, struct simple_dbt *sdbt) { int r; - if (!d) r = 0; - else { + if (d == nullptr) { + r = 0; + } else { switch (d->flags) { case (DB_DBT_USERMEM): d->size = len; @@ -325,6 +312,12 @@ bool toku_dbt_is_infinite(const DBT *dbt) { return dbt == toku_dbt_positive_infinity() || dbt == toku_dbt_negative_infinity(); } +bool toku_dbt_is_empty(const DBT *dbt) { + // can't have a null data field with a non-zero size + paranoid_invariant(dbt->data != nullptr || dbt->size == 0); + return dbt->data == nullptr; +} + int toku_dbt_infinite_compare(const DBT *a, const DBT *b) { if (a == b) { return 0; diff --git a/storage/tokudb/ft-index/ft/ybt.h b/storage/tokudb/ft-index/util/dbt.h index ae19f527493..4d78068cb67 100644 --- a/storage/tokudb/ft-index/ft/ybt.h +++ b/storage/tokudb/ft-index/util/dbt.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_YBT_H -#define TOKU_YBT_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,11 +87,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -// fttypes.h must be first to make 64-bit file mode work right in linux. 
-#include "fttypes.h" #include <db.h> // TODO: John @@ -102,23 +100,24 @@ PATENT RIGHTS GRANT: DBT *toku_init_dbt(DBT *); +// returns: an initialized but empty dbt (for which toku_dbt_is_empty() is true) +DBT toku_empty_dbt(void); + DBT *toku_init_dbt_flags(DBT *, uint32_t flags); void toku_destroy_dbt(DBT *); -DBT *toku_fill_dbt(DBT *dbt, bytevec k, ITEMLEN len); +DBT *toku_fill_dbt(DBT *dbt, const void *k, uint32_t len); DBT *toku_memdup_dbt(DBT *dbt, const void *k, size_t len); DBT *toku_copyref_dbt(DBT *dst, const DBT src); -DBT *toku_copy_dbt(DBT *dst, const DBT &src); - DBT *toku_clone_dbt(DBT *dst, const DBT &src); -int toku_dbt_set(ITEMLEN len, bytevec val, DBT *d, struct simple_dbt *sdbt); +int toku_dbt_set(uint32_t len, const void *val, DBT *d, struct simple_dbt *sdbt); -int toku_dbt_set_value(DBT *, bytevec *val, ITEMLEN vallen, void **staticptrp, bool ybt1_disposable); +int toku_dbt_set_value(DBT *, const void **val, uint32_t vallen, void **staticptrp, bool dbt1_disposable); void toku_sdbt_cleanup(struct simple_dbt *sdbt); @@ -131,11 +130,12 @@ const DBT *toku_dbt_negative_infinity(void); // returns: true if the given dbt is either positive or negative infinity bool toku_dbt_is_infinite(const DBT *dbt); +// returns: true if the given dbt has no data (ie: dbt->data == nullptr) +bool toku_dbt_is_empty(const DBT *dbt); + // effect: compares two potentially infinity-valued dbts // requires: at least one is infinite (assert otherwise) int toku_dbt_infinite_compare(const DBT *a, const DBT *b); // returns: true if the given dbts have the same data pointer and size bool toku_dbt_equals(const DBT *a, const DBT *b); - -#endif /* TOKU_YBT_H */ diff --git a/storage/tokudb/ft-index/util/dmt.cc b/storage/tokudb/ft-index/util/dmt.cc index 8d10c5b921c..3e0b512d7a7 100644 --- a/storage/tokudb/ft-index/util/dmt.cc +++ b/storage/tokudb/ft-index/util/dmt.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: @@ -130,7 +130,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::create_from_sorted_memory_of_fix toku_mempool_construct(&this->mp, aligned_memsize); if (aligned_memsize > 0) { paranoid_invariant(numvalues > 0); - void *ptr = toku_mempool_malloc(&this->mp, aligned_memsize, 1); + void *ptr = toku_mempool_malloc(&this->mp, aligned_memsize); paranoid_invariant_notnull(ptr); uint8_t * const CAST_FROM_VOIDP(dest, ptr); const uint8_t * const CAST_FROM_VOIDP(src, mem); @@ -261,7 +261,7 @@ dmtdata_t * dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::alloc_array_value_end(voi paranoid_invariant(this->values_same_size); this->d.a.num_values++; - void *ptr = toku_mempool_malloc(&this->mp, align(this->value_length), 1); + void *ptr = toku_mempool_malloc(&this->mp, align(this->value_length)); paranoid_invariant_notnull(ptr); paranoid_invariant(reinterpret_cast<size_t>(ptr) % ALIGNMENT == 0); dmtdata_t *CAST_FROM_VOIDP(n, ptr); @@ -302,7 +302,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::maybe_resize_array_for_insert(vo paranoid_invariant(copy_bytes <= toku_mempool_get_used_size(&this->mp)); // Copy over to new mempool if (this->d.a.num_values > 0) { - void* dest = toku_mempool_malloc(&new_kvspace, copy_bytes, 1); + void* dest = toku_mempool_malloc(&new_kvspace, copy_bytes); invariant(dest!=nullptr); memcpy(dest, get_array_value(0), copy_bytes); } @@ -344,7 +344,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::convert_from_tree_to_array(void) const uint32_t fixed_aligned_len = align(this->value_length); size_t mem_needed = num_values * fixed_aligned_len; toku_mempool_construct(&new_mp, mem_needed); - uint8_t* CAST_FROM_VOIDP(dest, toku_mempool_malloc(&new_mp, mem_needed, 1)); + uint8_t* CAST_FROM_VOIDP(dest, toku_mempool_malloc(&new_mp, mem_needed)); paranoid_invariant_notnull(dest); for (uint32_t i = 0; i < num_values; i++) { const dmt_node &n = get_node(tmp_array[i]); @@ -588,7 +588,7 @@ node_offset dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::node_malloc_and_set_value size_t val_size = value.get_size(); size_t size_to_alloc = __builtin_offsetof(dmt_node, value) + val_size; size_to_alloc = align(size_to_alloc); - void* np = toku_mempool_malloc(&this->mp, size_to_alloc, 1); + void* np = toku_mempool_malloc(&this->mp, size_to_alloc); paranoid_invariant_notnull(np); dmt_node *CAST_FROM_VOIDP(n, np); node_set_value(n, value); @@ -645,7 +645,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::maybe_resize_tree(const dmtwrite dmt_node &node = get_node(tmp_array[i]); const size_t bytes_to_copy = __builtin_offsetof(dmt_node, value) + node.value_length; const size_t bytes_to_alloc = align(bytes_to_copy); - void* newdata = toku_mempool_malloc(&new_kvspace, bytes_to_alloc, 1); + void* newdata = toku_mempool_malloc(&new_kvspace, bytes_to_alloc); memcpy(newdata, &node, bytes_to_copy); tmp_array[i] = toku_mempool_get_offset_from_pointer_and_base(&new_kvspace, newdata); } @@ -1251,7 +1251,7 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::builder::build(dmt<dmtdata_t, dm invariant_zero(toku_mempool_get_frag_size(&this->temp.mp)); struct mempool new_mp; toku_mempool_construct(&new_mp, used); - void * newbase = toku_mempool_malloc(&new_mp, used, 1); + void * newbase = toku_mempool_malloc(&new_mp, used); invariant_notnull(newbase); memcpy(newbase, toku_mempool_get_base(&this->temp.mp), used); toku_mempool_destroy(&this->temp.mp); diff --git a/storage/tokudb/ft-index/util/dmt.h b/storage/tokudb/ft-index/util/dmt.h index 374fa785e42..d4b032f5d6f 100644 --- a/storage/tokudb/ft-index/util/dmt.h +++ 
b/storage/tokudb/ft-index/util/dmt.h @@ -1,6 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#pragma once /* COPYING CONDITIONS NOTICE: @@ -30,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,17 +85,22 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include <stdint.h> -#include <memory.h> -#include <toku_portability.h> -#include <toku_race_tools.h> -#include "growable_array.h" -#include "../ft/wbuf.h" #include <vector> +#include "portability/memory.h" +#include "portability/toku_portability.h" +#include "portability/toku_race_tools.h" +#include "portability/toku_stdint.h" + +#include "ft/serialize/wbuf.h" +#include "util/growable_array.h" +#include "util/mempool.h" + namespace toku { typedef uint32_t node_offset; diff --git a/storage/tokudb/ft-index/util/doubly_linked_list.h b/storage/tokudb/ft-index/util/doubly_linked_list.h index fb125d243be..738e2736fa1 100644 --- a/storage/tokudb/ft-index/util/doubly_linked_list.h +++ b/storage/tokudb/ft-index/util/doubly_linked_list.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef UTIL_DOUBLY_LINKED_LIST_H -#define UTIL_DOUBLY_LINKED_LIST_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,6 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -225,5 +225,3 @@ int DoublyLinkedList<T>::iterate(int (*fun)(T container, extra_t extra), extra_t } } - -#endif // UTIL_DOUBLY_LINKED_LIST_H diff --git a/storage/tokudb/ft-index/util/fmutex.h b/storage/tokudb/ft-index/util/fmutex.h index 075925dd03f..224a6972ba7 100644 --- a/storage/tokudb/ft-index/util/fmutex.h +++ b/storage/tokudb/ft-index/util/fmutex.h @@ -1,5 +1,4 @@ -#ifndef FMUTEX_H -#define FMUTEX_H +#pragma once // fair mutex struct fmutex { @@ -105,5 +104,3 @@ int fmutex_users(struct fmutex *fm) const { int fmutex_blocked_users(struct fmutex *fm) const { return fm->num_want_mutex; } - -#endif // FMUTEX_H diff --git a/storage/tokudb/ft-index/util/frwlock.cc b/storage/tokudb/ft-index/util/frwlock.cc index 7259c776f83..fac0c07967b 100644 --- a/storage/tokudb/ft-index/util/frwlock.cc +++ b/storage/tokudb/ft-index/util/frwlock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. 
Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/frwlock.h b/storage/tokudb/ft-index/util/frwlock.h index 7811e0d2427..985c92bccb4 100644 --- a/storage/tokudb/ft-index/util/frwlock.h +++ b/storage/tokudb/ft-index/util/frwlock.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef UTIL_FRWLOCK_H -#define UTIL_FRWLOCK_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,6 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -176,5 +176,3 @@ ENSURE_POD(frwlock); // include the implementation here // #include "frwlock.cc" - -#endif // UTIL_FRWLOCK_H diff --git a/storage/tokudb/ft-index/util/growable_array.h b/storage/tokudb/ft-index/util/growable_array.h index 763377d0ab0..b452f94366c 100644 --- a/storage/tokudb/ft-index/util/growable_array.h +++ b/storage/tokudb/ft-index/util/growable_array.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef UTIL_GROWABLE_ARRAY_H -#define UTIL_GROWABLE_ARRAY_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,6 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -175,5 +175,3 @@ template<typename T> class GrowableArray { }; } - -#endif // UTIL_GROWABLE_ARRAY_H diff --git a/storage/tokudb/ft-index/util/kibbutz.cc b/storage/tokudb/ft-index/util/kibbutz.cc index 8e69471e9ab..ad0c0b30788 100644 --- a/storage/tokudb/ft-index/util/kibbutz.cc +++ b/storage/tokudb/ft-index/util/kibbutz.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/kibbutz.h b/storage/tokudb/ft-index/util/kibbutz.h index 83e981b916c..25515887700 100644 --- a/storage/tokudb/ft-index/util/kibbutz.h +++ b/storage/tokudb/ft-index/util/kibbutz.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. 
All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef UTIL_KIBBUTZ_H -#define UTIL_KIBBUTZ_H - // // The kibbutz is another threadpool meant to do arbitrary work. // @@ -116,5 +115,3 @@ void toku_kibbutz_enq (KIBBUTZ k, void (*f)(void*), void *extra); // destroys the kibbutz // void toku_kibbutz_destroy (KIBBUTZ k); - -#endif // UTIL_KIBBUTZ_H diff --git a/storage/tokudb/ft-index/util/memarena.cc b/storage/tokudb/ft-index/util/memarena.cc index 773c949e8f4..d8c0daa0ba0 100644 --- a/storage/tokudb/ft-index/util/memarena.cc +++ b/storage/tokudb/ft-index/util/memarena.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,157 +89,142 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." +#include <algorithm> #include <string.h> #include <memory.h> #include <util/memarena.h> -struct memarena { - char *buf; - size_t buf_used, buf_size; - size_t size_of_other_bufs; // the buf_size of all the other bufs. - size_t footprint_of_other_bufs; // the footprint of all the other bufs. - char **other_bufs; - int n_other_bufs; -}; - -MEMARENA toku_memarena_create_presized (size_t initial_size) { - MEMARENA XMALLOC(result); - result->buf_size = initial_size; - result->buf_used = 0; - result->other_bufs = NULL; - result->size_of_other_bufs = 0; - result->footprint_of_other_bufs = 0; - result->n_other_bufs = 0; - XMALLOC_N(result->buf_size, result->buf); - return result; -} +void memarena::create(size_t initial_size) { + _current_chunk = arena_chunk(); + _other_chunks = nullptr; + _size_of_other_chunks = 0; + _footprint_of_other_chunks = 0; + _n_other_chunks = 0; -MEMARENA toku_memarena_create (void) { - return toku_memarena_create_presized(1024); + _current_chunk.size = initial_size; + if (_current_chunk.size > 0) { + XMALLOC_N(_current_chunk.size, _current_chunk.buf); + } } -void toku_memarena_clear (MEMARENA ma) { - // Free the other bufs. 
- int i; - for (i=0; i<ma->n_other_bufs; i++) { - toku_free(ma->other_bufs[i]); - ma->other_bufs[i]=0; +void memarena::destroy(void) { + if (_current_chunk.buf) { + toku_free(_current_chunk.buf); + } + for (int i = 0; i < _n_other_chunks; i++) { + toku_free(_other_chunks[i].buf); } - ma->n_other_bufs=0; - // But reuse the main buffer - ma->buf_used = 0; - ma->size_of_other_bufs = 0; - ma->footprint_of_other_bufs = 0; + if (_other_chunks) { + toku_free(_other_chunks); + } + _current_chunk = arena_chunk(); + _other_chunks = nullptr; + _n_other_chunks = 0; } -static size_t -round_to_page (size_t size) { - const size_t _PAGE_SIZE = 4096; - const size_t result = _PAGE_SIZE+((size-1)&~(_PAGE_SIZE-1)); - assert(0==(result&(_PAGE_SIZE-1))); // make sure it's aligned - assert(result>=size); // make sure it's not too small - assert(result<size+_PAGE_SIZE); // make sure we didn't grow by more than a page. - return result; +static size_t round_to_page(size_t size) { + const size_t page_size = 4096; + const size_t r = page_size + ((size - 1) & ~(page_size - 1)); + assert((r & (page_size - 1)) == 0); // make sure it's aligned + assert(r >= size); // make sure it's not too small + assert(r < size + page_size); // make sure we didn't grow by more than a page. + return r; } -void* toku_memarena_malloc (MEMARENA ma, size_t size) { - if (ma->buf_size < ma->buf_used + size) { +static const size_t MEMARENA_MAX_CHUNK_SIZE = 64 * 1024 * 1024; + +void *memarena::malloc_from_arena(size_t size) { + if (_current_chunk.buf == nullptr || _current_chunk.size < _current_chunk.used + size) { // The existing block isn't big enough. // Add the block to the vector of blocks. - if (ma->buf) { - int old_n = ma->n_other_bufs; - REALLOC_N(old_n+1, ma->other_bufs); - assert(ma->other_bufs); - ma->other_bufs[old_n]=ma->buf; - ma->n_other_bufs = old_n+1; - ma->size_of_other_bufs += ma->buf_size; - ma->footprint_of_other_bufs += toku_memory_footprint(ma->buf, ma->buf_used); + if (_current_chunk.buf) { + invariant(_current_chunk.size > 0); + int old_n = _n_other_chunks; + XREALLOC_N(old_n + 1, _other_chunks); + _other_chunks[old_n] = _current_chunk; + _n_other_chunks = old_n + 1; + _size_of_other_chunks += _current_chunk.size; + _footprint_of_other_chunks += toku_memory_footprint(_current_chunk.buf, _current_chunk.used); } - // Make a new one - { - size_t new_size = 2*ma->buf_size; - if (new_size<size) new_size=size; - new_size=round_to_page(new_size); // at least size, but round to the next page size - XMALLOC_N(new_size, ma->buf); - ma->buf_used = 0; - ma->buf_size = new_size; + + // Make a new one. Grow the buffer size exponentially until we hit + // the max chunk size, but make it at least `size' bytes so the + // current allocation always fit. + size_t new_size = std::min(MEMARENA_MAX_CHUNK_SIZE, 2 * _current_chunk.size); + if (new_size < size) { + new_size = size; } + new_size = round_to_page(new_size); // at least size, but round to the next page size + XMALLOC_N(new_size, _current_chunk.buf); + _current_chunk.used = 0; + _current_chunk.size = new_size; } + invariant(_current_chunk.buf != nullptr); + // allocate in the existing block. 
- char *result=ma->buf+ma->buf_used; - ma->buf_used+=size; - return result; + char *p = _current_chunk.buf + _current_chunk.used; + _current_chunk.used += size; + return p; } -void *toku_memarena_memdup (MEMARENA ma, const void *v, size_t len) { - void *r=toku_memarena_malloc(ma, len); - memcpy(r,v,len); - return r; +void memarena::move_memory(memarena *dest) { + // Move memory to dest + XREALLOC_N(dest->_n_other_chunks + _n_other_chunks + 1, dest->_other_chunks); + dest->_size_of_other_chunks += _size_of_other_chunks + _current_chunk.size; + dest->_footprint_of_other_chunks += _footprint_of_other_chunks + toku_memory_footprint(_current_chunk.buf, _current_chunk.used); + for (int i = 0; i < _n_other_chunks; i++) { + dest->_other_chunks[dest->_n_other_chunks++] = _other_chunks[i]; + } + dest->_other_chunks[dest->_n_other_chunks++] = _current_chunk; + + // Clear out this memarena's memory + toku_free(_other_chunks); + _current_chunk = arena_chunk(); + _other_chunks = nullptr; + _size_of_other_chunks = 0; + _footprint_of_other_chunks = 0; + _n_other_chunks = 0; } -void toku_memarena_destroy(MEMARENA *map) { - MEMARENA ma=*map; - if (ma->buf) { - toku_free(ma->buf); - ma->buf=0; - } - int i; - for (i=0; i<ma->n_other_bufs; i++) { - toku_free(ma->other_bufs[i]); - } - if (ma->other_bufs) toku_free(ma->other_bufs); - ma->other_bufs=0; - ma->n_other_bufs=0; - toku_free(ma); - *map = 0; +size_t memarena::total_memory_size(void) const { + return sizeof(*this) + + total_size_in_use() + + _n_other_chunks * sizeof(*_other_chunks); } -void toku_memarena_move_buffers(MEMARENA dest, MEMARENA source) { - int i; - char **other_bufs = dest->other_bufs; - static int move_counter = 0; - move_counter++; - REALLOC_N(dest->n_other_bufs + source->n_other_bufs + 1, other_bufs); - - dest ->size_of_other_bufs += source->size_of_other_bufs + source->buf_size; - dest ->footprint_of_other_bufs += source->footprint_of_other_bufs + toku_memory_footprint(source->buf, source->buf_used); - source->size_of_other_bufs = 0; - source->footprint_of_other_bufs = 0; - - assert(other_bufs); - dest->other_bufs = other_bufs; - for (i=0; i<source->n_other_bufs; i++) { - dest->other_bufs[dest->n_other_bufs++] = source->other_bufs[i]; - } - dest->other_bufs[dest->n_other_bufs++] = source->buf; - source->n_other_bufs = 0; - toku_free(source->other_bufs); - source->other_bufs = 0; - source->buf = 0; - source->buf_size = 0; - source->buf_used = 0; +size_t memarena::total_size_in_use(void) const { + return _size_of_other_chunks + _current_chunk.used; +} +size_t memarena::total_footprint(void) const { + return sizeof(*this) + + _footprint_of_other_chunks + + toku_memory_footprint(_current_chunk.buf, _current_chunk.used) + + _n_other_chunks * sizeof(*_other_chunks); } -size_t -toku_memarena_total_memory_size (MEMARENA m) -{ - return (toku_memarena_total_size_in_use(m) + - sizeof(*m) + - m->n_other_bufs * sizeof(*m->other_bufs)); +//////////////////////////////////////////////////////////////////////////////// + +const void *memarena::chunk_iterator::current(size_t *used) const { + if (_chunk_idx < 0) { + *used = _ma->_current_chunk.used; + return _ma->_current_chunk.buf; + } else if (_chunk_idx < _ma->_n_other_chunks) { + *used = _ma->_other_chunks[_chunk_idx].used; + return _ma->_other_chunks[_chunk_idx].buf; + } + *used = 0; + return nullptr; } -size_t -toku_memarena_total_size_in_use (MEMARENA m) -{ - return m->size_of_other_bufs + m->buf_used; +void memarena::chunk_iterator::next() { + _chunk_idx++; } -size_t 
-toku_memarena_total_footprint (MEMARENA m) -{ - return m->footprint_of_other_bufs + toku_memory_footprint(m->buf, m->buf_used) + - sizeof(*m) + - m->n_other_bufs * sizeof(*m->other_bufs); +bool memarena::chunk_iterator::more() const { + if (_chunk_idx < 0) { + return _ma->_current_chunk.buf != nullptr; + } + return _chunk_idx < _ma->_n_other_chunks; } diff --git a/storage/tokudb/ft-index/util/memarena.h b/storage/tokudb/ft-index/util/memarena.h index 0dac262ba46..8d1b577222e 100644 --- a/storage/tokudb/ft-index/util/memarena.h +++ b/storage/tokudb/ft-index/util/memarena.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_MEMARENA_H -#define TOKU_MEMARENA_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,48 +87,90 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -/* We have too many memory management tricks: - * memarena (this code) is for a collection of objects that cannot be moved. - * The pattern is allocate more and more stuff. - * Don't free items as you go. - * Free all the items at once. - * Then reuse the same buffer again. - * Allocated objects never move. - * A memarena (as currently implemented) is not suitable for interprocess memory sharing. No reason it couldn't be made to work though. +/* + * A memarena is used to efficiently store a collection of objects that never move + * The pattern is allocate more and more stuff and free all of the items at once. + * The underlying memory will store 1 or more objects per chunk. Each chunk is + * contiguously laid out in memory but chunks are not necessarily contiguous with + * each other. */ - -struct memarena; - -typedef struct memarena *MEMARENA; - -MEMARENA toku_memarena_create_presized (size_t initial_size); -// Effect: Create a memarena with initial size. In case of ENOMEM, aborts. - -MEMARENA toku_memarena_create (void); -// Effect: Create a memarena with default initial size. In case of ENOMEM, aborts. - -void toku_memarena_clear (MEMARENA ma); -// Effect: Reset the internal state so that the allocated memory can be used again. - -void* toku_memarena_malloc (MEMARENA ma, size_t size); -// Effect: Allocate some memory. The returned value remains valid until the memarena is cleared or closed. -// In case of ENOMEM, aborts. - -void *toku_memarena_memdup (MEMARENA ma, const void *v, size_t len); - -void toku_memarena_destroy(MEMARENA *ma); - -void toku_memarena_move_buffers(MEMARENA dest, MEMARENA source); -// Effect: Move all the memory from SOURCE into DEST. When SOURCE is closed the memory won't be freed. When DEST is closed, the memory will be freed. (Unless DEST moves its memory to another memarena...) - -size_t toku_memarena_total_memory_size (MEMARENA); -// Effect: Calculate the amount of memory used by a memory arena. 
- -size_t toku_memarena_total_size_in_use (MEMARENA); - -size_t toku_memarena_total_footprint (MEMARENA); - -#endif +class memarena { +public: + memarena() : + _current_chunk(arena_chunk()), + _other_chunks(nullptr), + _n_other_chunks(0), + _size_of_other_chunks(0), + _footprint_of_other_chunks(0) { + } + + // Effect: Create a memarena with the specified initial size + void create(size_t initial_size); + + void destroy(void); + + // Effect: Allocate some memory. The returned value remains valid until the memarena is cleared or closed. + // In case of ENOMEM, aborts. + void *malloc_from_arena(size_t size); + + // Effect: Move all the memory from this memarena into DEST. + // When SOURCE is closed the memory won't be freed. + // When DEST is closed, the memory will be freed, unless DEST moves its memory to another memarena... + void move_memory(memarena *dest); + + // Effect: Calculate the amount of memory used by a memory arena. + size_t total_memory_size(void) const; + + // Effect: Calculate the used space of the memory arena (ie: excludes unused space) + size_t total_size_in_use(void) const; + + // Effect: Calculate the amount of memory used, according to toku_memory_footprint(), + // which is a more expensive but more accurate count of memory used. + size_t total_footprint(void) const; + + // iterator over the underlying chunks that store objects in the memarena. + // a chunk is represented by a pointer to const memory and a usable byte count. + class chunk_iterator { + public: + chunk_iterator(const memarena *ma) : + _ma(ma), _chunk_idx(-1) { + } + + // returns: base pointer to the current chunk + // *used set to the number of usable bytes + // if more() is false, returns nullptr and *used = 0 + const void *current(size_t *used) const; + + // requires: more() is true + void next(); + + bool more() const; + + private: + // -1 represents the 'initial' chunk in a memarena, ie: ma->_current_chunk + // >= 0 represents the i'th chunk in the ma->_other_chunks array + const memarena *_ma; + int _chunk_idx; + }; + +private: + struct arena_chunk { + arena_chunk() : buf(nullptr), used(0), size(0) { } + char *buf; + size_t used; + size_t size; + }; + + struct arena_chunk _current_chunk; + struct arena_chunk *_other_chunks; + int _n_other_chunks; + size_t _size_of_other_chunks; // the buf_size of all the other chunks. + size_t _footprint_of_other_chunks; // the footprint of all the other chunks. + + friend class memarena_unit_test; +}; diff --git a/storage/tokudb/ft-index/util/mempool.cc b/storage/tokudb/ft-index/util/mempool.cc index 6f3e2c013db..23200ee41ad 100644 --- a/storage/tokudb/ft-index/util/mempool.cc +++ b/storage/tokudb/ft-index/util/mempool.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
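
Aside (not part of the patch): the rewrite above replaces the C-style MEMARENA handle with a memarena class whose chunks grow geometrically (doubling per chunk, capped at MEMARENA_MAX_CHUNK_SIZE = 64MB and rounded up to a page) so that allocated objects never move. A minimal usage sketch against the interface shown in the new memarena.h; the function name and the sizes are arbitrary.

#include <cstddef>
#include <cstring>
#include "util/memarena.h"

static void memarena_sketch(void) {
    memarena ma;
    ma.create(4096);                        // size of the first chunk, in bytes

    // Bump-allocate from the current chunk; when it fills up, a new chunk is
    // added and previously returned pointers stay valid until destroy().
    char *a = static_cast<char *>(ma.malloc_from_arena(64));
    memset(a, 0, 64);

    // Walk the chunks that back the arena.
    for (memarena::chunk_iterator it(&ma); it.more(); it.next()) {
        size_t used;
        const void *buf = it.current(&used);
        (void) buf;
        (void) used;
    }

    // Hand every chunk over to another arena; `ma' is left empty and the
    // memory is freed when `other' is destroyed.
    memarena other;
    other.create(0);
    ma.move_memory(&other);

    ma.destroy();
    other.destroy();
}
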
DISCLAIMER: @@ -207,24 +207,20 @@ size_t toku_mempool_get_allocated_size(const struct mempool *mp) { return mp->free_offset; } -void *toku_mempool_malloc(struct mempool *mp, size_t size, int alignment) { +void *toku_mempool_malloc(struct mempool *mp, size_t size) { paranoid_invariant(size < (1U<<31)); paranoid_invariant(mp->size < (1U<<31)); paranoid_invariant(mp->free_offset < (1U<<31)); paranoid_invariant(mp->free_offset <= mp->size); void *vp; - size_t offset = (mp->free_offset + (alignment-1)) & ~(alignment-1); - //printf("mempool_malloc size=%ld base=%p free_offset=%ld mp->size=%ld offset=%ld\n", size, mp->base, mp->free_offset, mp->size, offset); - if (offset + size > mp->size) { - vp = 0; + if (mp->free_offset + size > mp->size) { + vp = nullptr; } else { - vp = (char *)mp->base + offset; - mp->free_offset = offset + size; + vp = reinterpret_cast<char *>(mp->base) + mp->free_offset; + mp->free_offset += size; } paranoid_invariant(mp->free_offset <= mp->size); - paranoid_invariant(((long)vp & (alignment-1)) == 0); paranoid_invariant(vp == 0 || toku_mempool_inrange(mp, vp, size)); - //printf("mempool returning %p\n", vp); return vp; } @@ -232,7 +228,8 @@ void *toku_mempool_malloc(struct mempool *mp, size_t size, int alignment) { void toku_mempool_mfree(struct mempool *mp, void *vp, size_t size) { if (vp) { paranoid_invariant(toku_mempool_inrange(mp, vp, size)); } mp->frag_size += size; - paranoid_invariant(mp->frag_size <= mp->size); + invariant(mp->frag_size <= mp->free_offset); + invariant(mp->frag_size <= mp->size); } diff --git a/storage/tokudb/ft-index/util/mempool.h b/storage/tokudb/ft-index/util/mempool.h index c8be5e13297..e1a47e66591 100644 --- a/storage/tokudb/ft-index/util/mempool.h +++ b/storage/tokudb/ft-index/util/mempool.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef UTIL_MEMPOOL_H -#define UTIL_MEMPOOL_H - /* a memory pool is a contiguous region of memory that supports single allocations from the pool. these allocated regions are never recycled. when the memory pool no longer has free space, the allocated chunks @@ -164,8 +163,8 @@ size_t toku_mempool_get_free_size(const struct mempool *mp); /* get the amount of space that has been allocated for use (wasted or not) */ size_t toku_mempool_get_allocated_size(const struct mempool *mp); -/* allocate a chunk of memory from the memory pool suitably aligned */ -void *toku_mempool_malloc(struct mempool *mp, size_t size, int alignment); +/* allocate a chunk of memory from the memory pool */ +void *toku_mempool_malloc(struct mempool *mp, size_t size); /* free a previously allocated chunk of memory. the free only updates a count of the amount of free space in the memory pool. 
the memory @@ -181,7 +180,3 @@ static inline int toku_mempool_inrange(struct mempool *mp, void *vp, size_t size size_t toku_mempool_footprint(struct mempool *mp); void toku_mempool_clone(const struct mempool* orig_mp, struct mempool* new_mp); - - - -#endif // UTIL_MEMPOOL_H diff --git a/storage/tokudb/ft-index/ft/minicron.cc b/storage/tokudb/ft-index/util/minicron.cc index 03d4075e1b2..601e1fd40d4 100644 --- a/storage/tokudb/ft-index/ft/minicron.cc +++ b/storage/tokudb/ft-index/util/minicron.cc @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -92,9 +92,8 @@ PATENT RIGHTS GRANT: #include <errno.h> #include <string.h> -#include "toku_assert.h" -#include "fttypes.h" -#include "minicron.h" +#include "portability/toku_assert.h" +#include "util/minicron.h" static void toku_gettime (toku_timespec_t *a) { diff --git a/storage/tokudb/ft-index/ft/minicron.h b/storage/tokudb/ft-index/util/minicron.h index d6cb0f76c9f..b97c5687561 100644 --- a/storage/tokudb/ft-index/ft/minicron.h +++ b/storage/tokudb/ft-index/util/minicron.h @@ -28,7 +28,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,13 +88,10 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "$Id$" -#ifndef TOKU_MINICRON_H -#define TOKU_MINICRON_H +#pragma once #include <toku_pthread.h> #include <toku_time.h> -#include "fttypes.h" - // Specification: // A minicron is a miniature cron job for executing a job periodically inside a pthread. @@ -127,6 +124,3 @@ uint32_t toku_minicron_get_period_in_seconds_unlocked(struct minicron *p); uint32_t toku_minicron_get_period_in_ms_unlocked(struct minicron *p); int toku_minicron_shutdown(struct minicron *p); bool toku_minicron_has_been_shutdown(struct minicron *p); - - -#endif diff --git a/storage/tokudb/ft-index/util/nb_mutex.h b/storage/tokudb/ft-index/util/nb_mutex.h index f781e9d6dda..cc350813622 100644 --- a/storage/tokudb/ft-index/util/nb_mutex.h +++ b/storage/tokudb/ft-index/util/nb_mutex.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef UTIL_NB_MUTEX_H -#define UTIL_NB_MUTEX_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,6 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
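
Aside (not part of the patch): toku_mempool_malloc() loses its alignment argument in the hunks above, so callers such as dmt.cc now pass only a size and the pool simply bumps free_offset. A short sketch of the new calling convention, using only the mempool entry points that appear in this commit; the 4KB pool size and 128-byte request are arbitrary.

#include <cstring>
#include "util/mempool.h"

static void mempool_sketch(void) {
    struct mempool mp;
    toku_mempool_construct(&mp, 4096);      // pool backed by 4KB of memory

    // Bump allocation: returns nullptr once the request no longer fits.
    void *p = toku_mempool_malloc(&mp, 128);
    if (p != nullptr) {
        memset(p, 0, 128);
        // "Freeing" only accounts the bytes as fragmentation; the space is
        // not reused until the pool itself is rebuilt or destroyed.
        toku_mempool_mfree(&mp, p, 128);
    }

    toku_mempool_destroy(&mp);
}
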
@@ -155,5 +155,3 @@ static inline int nb_mutex_writers(NB_MUTEX nb_mutex) { static inline int nb_mutex_users(NB_MUTEX nb_mutex) { return rwlock_users(&nb_mutex->lock); } - -#endif // UTIL_NB_MUTEX_H diff --git a/storage/tokudb/ft-index/util/omt.cc b/storage/tokudb/ft-index/util/omt.cc index 92cda38aefe..bb3fc34c513 100644 --- a/storage/tokudb/ft-index/util/omt.cc +++ b/storage/tokudb/ft-index/util/omt.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -207,6 +207,9 @@ void omt<omtdata_t, omtdataout_t, supports_marks>::clone(const omt &src) { src.fill_array_with_subtree_values(&this->d.a.values[0], src.d.t.root); } this->d.a.num_values = src.size(); + if (supports_marks) { + this->convert_to_tree(); + } } template<typename omtdata_t, typename omtdataout_t, bool supports_marks> diff --git a/storage/tokudb/ft-index/util/omt.h b/storage/tokudb/ft-index/util/omt.h index 6e963badafa..02f3f0d759a 100644 --- a/storage/tokudb/ft-index/util/omt.h +++ b/storage/tokudb/ft-index/util/omt.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef UTIL_OMT_H -#define UTIL_OMT_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,6 +87,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -813,5 +813,3 @@ private: // include the implementation here #include "omt.cc" - -#endif // UTIL_OMT_H diff --git a/storage/tokudb/ft-index/util/partitioned_counter.cc b/storage/tokudb/ft-index/util/partitioned_counter.cc index 4ac60cc8e29..70dff209f3b 100644 --- a/storage/tokudb/ft-index/util/partitioned_counter.cc +++ b/storage/tokudb/ft-index/util/partitioned_counter.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/partitioned_counter.h b/storage/tokudb/ft-index/util/partitioned_counter.h index b7401080f11..4da0e084a82 100644 --- a/storage/tokudb/ft-index/util/partitioned_counter.h +++ b/storage/tokudb/ft-index/util/partitioned_counter.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -86,12 +86,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#ifndef UTIL_PARTITIONED_COUNTER_H -#define UTIL_PARTITIONED_COUNTER_H - // Overview: A partitioned_counter provides a counter that can be incremented and the running sum can be read at any time. // We assume that increments are frequent, whereas reading is infrequent. // Implementation hint: Use thread-local storage so each thread increments its own data. The increment does not require a lock or atomic operation. @@ -187,5 +186,3 @@ private: friend void destroy_thread_local_part_of_partitioned_counters (void *); }; #endif - -#endif // UTIL_PARTITIONED_COUNTER_H diff --git a/storage/tokudb/ft-index/ft/queue.cc b/storage/tokudb/ft-index/util/queue.cc index 37c3bc025f8..7a2fefaefec 100644 --- a/storage/tokudb/ft-index/ft/queue.cc +++ b/storage/tokudb/ft-index/util/queue.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -128,7 +128,7 @@ struct queue { // q->mutex and q->cond are used as condition variables. -int queue_create (QUEUE *q, uint64_t weight_limit) +int toku_queue_create (QUEUE *q, uint64_t weight_limit) { QUEUE CALLOC(result); if (result==NULL) return get_error_errno(); @@ -143,7 +143,7 @@ int queue_create (QUEUE *q, uint64_t weight_limit) return 0; } -int queue_destroy (QUEUE q) +int toku_queue_destroy (QUEUE q) { if (q->head) return EINVAL; assert(q->contents_weight==0); @@ -153,7 +153,7 @@ int queue_destroy (QUEUE q) return 0; } -int queue_enq (QUEUE q, void *item, uint64_t weight, uint64_t *total_weight_after_enq) +int toku_queue_enq (QUEUE q, void *item, uint64_t weight, uint64_t *total_weight_after_enq) { toku_mutex_lock(&q->mutex); assert(!q->eof); @@ -189,7 +189,7 @@ int queue_enq (QUEUE q, void *item, uint64_t weight, uint64_t *total_weight_afte return 0; } -int queue_eof (QUEUE q) +int toku_queue_eof (QUEUE q) { toku_mutex_lock(&q->mutex); assert(!q->eof); @@ -199,7 +199,7 @@ int queue_eof (QUEUE q) return 0; } -int queue_deq (QUEUE q, void **item, uint64_t *weight, uint64_t *total_weight_after_deq) +int toku_queue_deq (QUEUE q, void **item, uint64_t *weight, uint64_t *total_weight_after_deq) { toku_mutex_lock(&q->mutex); int result; diff --git a/storage/tokudb/ft-index/ft/queue.h b/storage/tokudb/ft-index/util/queue.h index ec12a0193d2..88c7d99c200 100644 --- a/storage/tokudb/ft-index/ft/queue.h +++ b/storage/tokudb/ft-index/util/queue.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef TOKU_QUEUE_H -#define TOKU_QUEUE_H #ident "$Id$" /* @@ -32,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,11 +87,11 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#include "fttypes.h" - // The abstraction: // // queue.h implements a queue suitable for a producer-consumer relationship between two pthreads. 
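
Aside (not part of the patch): before the queue.h hunks continue below, a note on the partitioned_counter overview quoted a little earlier: each thread increments its own thread-local slot without a lock or an atomic read-modify-write, and the rare reader sums all the slots. The sketch below is not TokuFT's implementation, only an illustration of that technique; the registry, the retired_sum bookkeeping and all names are invented (the real code funnels exiting threads through destroy_thread_local_part_of_partitioned_counters instead).

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <mutex>
#include <vector>

namespace sketch {

static std::mutex parts_lock;
static std::vector<const std::atomic<uint64_t> *> parts; // live per-thread slots
static uint64_t retired_sum = 0;                         // totals from exited threads

struct local_part {
    std::atomic<uint64_t> count{0};
    local_part() {
        std::lock_guard<std::mutex> lk(parts_lock);
        parts.push_back(&count);                         // register this thread once
    }
    ~local_part() {
        // Fold this thread's tally into the global total on thread exit.
        std::lock_guard<std::mutex> lk(parts_lock);
        retired_sum += count.load(std::memory_order_relaxed);
        parts.erase(std::find(parts.begin(), parts.end(), &count));
    }
};

// Fast path: only the owning thread writes its slot, so a relaxed load and
// store suffice -- no lock and no atomic read-modify-write on the hot path.
inline void increment_counter(uint64_t amount) {
    thread_local local_part mine;
    mine.count.store(mine.count.load(std::memory_order_relaxed) + amount,
                     std::memory_order_relaxed);
}

// Slow path: the infrequent reader sums every live slot plus the retired total.
inline uint64_t read_counter(void) {
    std::lock_guard<std::mutex> lk(parts_lock);
    uint64_t sum = retired_sum;
    for (const std::atomic<uint64_t> *p : parts) {
        sum += p->load(std::memory_order_relaxed);
    }
    return sum;
}

} // namespace sketch
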
@@ -110,21 +108,21 @@ PATENT RIGHTS GRANT: typedef struct queue *QUEUE; -int queue_create (QUEUE *q, uint64_t weight_limit); +int toku_queue_create (QUEUE *q, uint64_t weight_limit); // Effect: Create a queue with a given weight limit. The queue is initially empty. -int queue_enq (QUEUE q, void *item, uint64_t weight, uint64_t *total_weight_after_enq); +int toku_queue_enq (QUEUE q, void *item, uint64_t weight, uint64_t *total_weight_after_enq); // Effect: Insert ITEM of weight WEIGHT into queue. If the resulting contents weight too much then block (don't return) until the total weight is low enough. // If total_weight_after_enq!=NULL then return the current weight of the items in the queue (after finishing blocking on overweight, and after enqueueing the item). // If successful return 0. // If an error occurs, return the error number, and the state of the queue is undefined. The item may have been enqueued or not, and in fact the queue may be badly corrupted if the condition variables go awry. If it's just a matter of out-of-memory, then the queue is probably OK. // Requires: There is only a single consumer. (We wake up the consumer using a pthread_cond_signal (which is suitable only for single consumers.) -int queue_eof (QUEUE q); +int toku_queue_eof (QUEUE q); // Effect: Inform the queue that no more values will be inserted. After all the values that have been inserted are dequeued, further dequeue operations will return EOF. // Returns 0 on success. On failure, things are pretty bad (likely to be some sort of mutex failure). -int queue_deq (QUEUE q, void **item, uint64_t *weight, uint64_t *total_weight_after_deq); +int toku_queue_deq (QUEUE q, void **item, uint64_t *weight, uint64_t *total_weight_after_deq); // Effect: Wait until the queue becomes nonempty. Then dequeue and return the oldest item. The item and its weight are returned in *ITEM. // If weight!=NULL then return the item's weight in *weight. // If total_weight_after_deq!=NULL then return the current weight of the items in the queue (after dequeuing the item). @@ -132,9 +130,8 @@ int queue_deq (QUEUE q, void **item, uint64_t *weight, uint64_t *total_weight_af // Return EOF is we no more items will be returned. // Usage note: The queue should be destroyed only after any consumers will no longer look at it (for example, they saw EOF). -int queue_destroy (QUEUE q); +int toku_queue_destroy (QUEUE q); // Effect: Destroy the queue. // Requires: The queue must be empty and no consumer should try to dequeue after this (one way to do this is to make sure the consumer saw EOF). // Returns 0 on success. If the queue is not empty, returns EINVAL. Other errors are likely to be bad (some sort of mutex or condvar failure). -#endif diff --git a/storage/tokudb/ft-index/util/rwlock.h b/storage/tokudb/ft-index/util/rwlock.h index cb72e153eb6..14b447a450f 100644 --- a/storage/tokudb/ft-index/util/rwlock.h +++ b/storage/tokudb/ft-index/util/rwlock.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef UTIL_RWLOCK_H -#define UTIL_RWLOCK_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,6 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." 
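
Aside (not part of the patch): the queue functions above only gain a toku_ prefix; the documented contract (a single consumer, a weight limit that blocks the producer, EOF to terminate the stream) is unchanged. A small single-producer/single-consumer sketch against those semantics; the weight limit, item values and function names are arbitrary.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <toku_pthread.h>
#include "util/queue.h"

static void *consumer(void *arg) {
    QUEUE q = static_cast<QUEUE>(arg);
    while (true) {
        void *item;
        uint64_t weight;
        int r = toku_queue_deq(q, &item, &weight, nullptr); // blocks for an item or EOF
        if (r == EOF) break;
        assert(r == 0);
        // ... process item ...
    }
    return nullptr;
}

static void queue_sketch(void) {
    QUEUE q;
    int r = toku_queue_create(&q, 1024);        // weight limit of 1024
    assert(r == 0);

    toku_pthread_t thread;
    r = toku_pthread_create(&thread, nullptr, consumer, q);
    assert(r == 0);

    // toku_queue_enq blocks whenever the queued weight would exceed the
    // limit, which throttles the producer against the single consumer.
    for (long i = 0; i < 10; i++) {
        r = toku_queue_enq(q, reinterpret_cast<void *>(i), 1, nullptr);
        assert(r == 0);
    }
    r = toku_queue_eof(q);                      // no more items will be inserted
    assert(r == 0);

    void *result;
    r = toku_pthread_join(thread, &result);
    assert(r == 0);
    r = toku_queue_destroy(q);                  // requires the queue to be empty
    assert(r == 0);
}
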
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -99,7 +99,7 @@ PATENT RIGHTS GRANT: * Overview ***************************************** * - * TokuDB employs readers/writers locks for the ephemeral locks (e.g., + * TokuFT employs readers/writers locks for the ephemeral locks (e.g., * on FT nodes) Why not just use the toku_pthread_rwlock API? * * 1) we need multiprocess rwlocks (not just multithreaded) @@ -353,4 +353,3 @@ static inline void rwlock_wait_for_users( toku_cond_destroy(&cond); } -#endif // UTIL_RWLOCK_H diff --git a/storage/tokudb/ft-index/util/scoped_malloc.cc b/storage/tokudb/ft-index/util/scoped_malloc.cc index ed8a493233e..551bd944beb 100644 --- a/storage/tokudb/ft-index/util/scoped_malloc.cc +++ b/storage/tokudb/ft-index/util/scoped_malloc.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/scoped_malloc.h b/storage/tokudb/ft-index/util/scoped_malloc.h index ae8847731f3..dbd919d155e 100644 --- a/storage/tokudb/ft-index/util/scoped_malloc.h +++ b/storage/tokudb/ft-index/util/scoped_malloc.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/sort.h b/storage/tokudb/ft-index/util/sort.h index 825909d4e9f..2925f791029 100644 --- a/storage/tokudb/ft-index/util/sort.h +++ b/storage/tokudb/ft-index/util/sort.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,22 +89,11 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." 
-#ifndef UTIL_SORT_H -#define UTIL_SORT_H +#pragma once #include <string.h> #include <memory.h> -#if defined(HAVE_CILK) -#include <cilk/cilk.h> -#define cilk_worker_count (__cilkrts_get_nworkers()) -#else -#define cilk_spawn -#define cilk_sync -#define cilk_for for -#define cilk_worker_count 1 -#endif - namespace toku { template<typename sortdata_t, typename sortextra_t, int (*cmp)(sortextra_t &, const sortdata_t &, const sortdata_t &)> @@ -148,9 +137,8 @@ namespace toku { } const int mid = n / 2; sortdata_t *right_as[2] = { &(as[0])[mid], &(as[1])[mid] }; - const int r1 = cilk_spawn mergesort_internal(as, which, mid, extra); + const int r1 = mergesort_internal(as, which, mid, extra); const int r2 = mergesort_internal(right_as, which, n - mid, extra); - cilk_sync; if (r1 != r2) { // move everything to the same place (r2) memcpy(as[r2], as[r1], mid * (sizeof as[r2][0])); @@ -222,9 +210,8 @@ namespace toku { const int a2 = an / 2; const sortdata_t *akey = &a[a2]; const int b2 = binsearch(*akey, b, bn, 0, extra); - cilk_spawn merge(dest, a, a2, b, b2, extra); + merge(dest, a, a2, b, b2, extra); merge(&dest[a2 + b2], akey, an - a2, &b[b2], bn - b2, extra); - cilk_sync; } } @@ -272,5 +259,3 @@ namespace toku { }; }; - -#endif // UTIL_SORT_H diff --git a/storage/tokudb/ft-index/util/status.h b/storage/tokudb/ft-index/util/status.h index 16a709237dd..1ab6d35e560 100644 --- a/storage/tokudb/ft-index/util/status.h +++ b/storage/tokudb/ft-index/util/status.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -90,10 +90,11 @@ PATENT RIGHTS GRANT: #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #pragma once + #include <util/partitioned_counter.h> #include <util/constexpr.h> -#define TOKUDB_STATUS_INIT(array,k,c,t,l,inc) do { \ +#define TOKUFT_STATUS_INIT(array,k,c,t,l,inc) do { \ array.status[k].keyname = #k; \ array.status[k].columnname = #c; \ array.status[k].type = t; \ @@ -104,7 +105,7 @@ PATENT RIGHTS GRANT: constexpr_static_assert((inc) == TOKU_ENGINE_STATUS \ || strcmp(#c, "nullptr"), "Missing column name."); \ constexpr_static_assert(static_strncasecmp(#c, "TOKU", strlen("TOKU")), \ - "Do not start column names with toku/tokudb. Names get TOKUDB_ prefix automatically."); \ + "Do not start column names with toku."); \ array.status[k].include = static_cast<toku_engine_status_include_type>(inc); \ if (t == PARCOUNT) { \ array.status[k].value.parcount = create_partitioned_counter(); \ diff --git a/storage/tokudb/ft-index/util/tests/marked-omt-test.cc b/storage/tokudb/ft-index/util/tests/marked-omt-test.cc index 883a414c566..97e4cf72d61 100644 --- a/storage/tokudb/ft-index/util/tests/marked-omt-test.cc +++ b/storage/tokudb/ft-index/util/tests/marked-omt-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/memarena-test.cc b/storage/tokudb/ft-index/util/tests/memarena-test.cc new file mode 100644 index 00000000000..7374539d11a --- /dev/null +++ b/storage/tokudb/ft-index/util/tests/memarena-test.cc @@ -0,0 +1,234 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: + +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. + +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. 
If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#include <string.h> + +#include "portability/toku_assert.h" + +#include "util/memarena.h" + +class memarena_unit_test { +private: + static const int magic = 37; + + template <typename F> + void iterate_chunks(memarena *ma, F &fn) { + for (memarena::chunk_iterator it(ma); it.more(); it.next()) { + size_t used = 0; + const void *buf = it.current(&used); + fn(buf, used); + } + } + + void test_create(size_t size) { + memarena ma; + ma.create(size); + invariant(ma._current_chunk.size == size); + invariant(ma._current_chunk.used == 0); + if (size == 0) { + invariant_null(ma._current_chunk.buf); + } else { + invariant_notnull(ma._current_chunk.buf); + } + + // make sure memory was allocated ok by + // writing to buf and reading it back + if (size > 0) { + memset(ma._current_chunk.buf, magic, size); + } + for (size_t i = 0; i < size; i++) { + const char *buf = reinterpret_cast<char *>(ma._current_chunk.buf); + invariant(buf[i] == magic); + } + ma.destroy(); + } + + void test_malloc(size_t size) { + memarena ma; + ma.create(14); + void *v = ma.malloc_from_arena(size); + invariant_notnull(v); + + // make sure memory was allocated ok by + // writing to buf and reading it back + if (size > 0) { + memset(ma._current_chunk.buf, magic, size); + } + for (size_t i = 0; i < size; i++) { + const char *c = reinterpret_cast<char *>(ma._current_chunk.buf); + invariant(c[i] == magic); + } + ma.destroy(); + } + + static void test_iterate_fn(const void *buf, size_t used) { + for (size_t i = 0; i < used; i++) { + const char *c = reinterpret_cast<const char *>(buf); + invariant(c[i] == (char) ((intptr_t) &c[i])); + } + } + + void test_iterate(size_t size) { + memarena ma; + ma.create(14); + for (size_t k = 0; k < size / 64; k += 64) { + void *v = ma.malloc_from_arena(64); + for (size_t i = 0; i < 64; i++) { + char *c = reinterpret_cast<char *>(v); + c[i] = (char) ((intptr_t) &c[i]); + } + } + size_t rest = size % 64; + if (rest != 0) { + void *v = ma.malloc_from_arena(64); + for (size_t i = 0; i < 64; i++) { + char *c = reinterpret_cast<char *>(v); + c[i] = (char) ((intptr_t) &c[i]); + } + } + + iterate_chunks(&ma, test_iterate_fn); + ma.destroy(); + } + + void test_move_memory(size_t size) { + memarena ma; + ma.create(14); + for (size_t k = 0; k < size / 64; k += 64) { + void *v = ma.malloc_from_arena(64); + for (size_t i = 0; i < 64; i++) { + char *c = reinterpret_cast<char *>(v); + c[i] = (char) ((intptr_t) &c[i]); + } + } + size_t rest = size % 64; + if (rest != 0) { + void *v = ma.malloc_from_arena(64); + for (size_t i = 0; i < 64; i++) { + char *c = reinterpret_cast<char *>(v); + c[i] = (char) ((intptr_t) &c[i]); + } + } + + memarena ma2; + ma.move_memory(&ma2); + iterate_chunks(&ma2, test_iterate_fn); + + ma.destroy(); + ma2.destroy(); + } + +public: + void test() { + test_create(0); + test_create(64); + test_create(128 * 1024 * 1024); + test_malloc(0); + test_malloc(63); + test_malloc(64); + 
test_malloc(64 * 1024 * 1024); + test_malloc((64 * 1024 * 1024) + 1); + test_iterate(0); + test_iterate(63); + test_iterate(128 * 1024); + test_iterate(64 * 1024 * 1024); + test_iterate((64 * 1024 * 1024) + 1); + test_move_memory(0); + test_move_memory(1); + test_move_memory(63); + test_move_memory(65); + test_move_memory(65 * 1024 * 1024); + test_move_memory(101 * 1024 * 1024); + } +}; + +int main(void) { + memarena_unit_test test; + test.test(); + return 0; +} diff --git a/storage/tokudb/ft-index/ft/tests/minicron-test.cc b/storage/tokudb/ft-index/util/tests/minicron-test.cc index 5f953f1b694..7729edbda84 100644 --- a/storage/tokudb/ft-index/ft/tests/minicron-test.cc +++ b/storage/tokudb/ft-index/util/tests/minicron-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -90,7 +90,7 @@ PATENT RIGHTS GRANT: #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #include <toku_portability.h> #include "test.h" -#include "minicron.h" +#include "util/minicron.h" #include <unistd.h> #include <string.h> @@ -125,7 +125,7 @@ static void* test1 (void* v) { struct minicron m; - ZERO_STRUCT(m); + memset(&m, 0, sizeof(struct minicron)); int r = toku_minicron_setup(&m, 0, never_run, 0); assert(r==0); sleep(1); r = toku_minicron_shutdown(&m); assert(r==0); @@ -137,7 +137,7 @@ static void* test2 (void* v) { struct minicron m; - ZERO_STRUCT(m); + memset(&m, 0, sizeof(struct minicron)); int r = toku_minicron_setup(&m, 10000, never_run, 0); assert(r==0); sleep(2); r = toku_minicron_shutdown(&m); assert(r==0); @@ -174,7 +174,7 @@ test3 (void* v) struct tenx tx; gettimeofday(&tx.tv, 0); tx.counter=0; - ZERO_STRUCT(m); + memset(&m, 0, sizeof(struct minicron)); int r = toku_minicron_setup(&m, 1000, run_5x, &tx); assert(r==0); sleep(5); r = toku_minicron_shutdown(&m); assert(r==0); @@ -197,7 +197,7 @@ static void* test4 (void *v) { struct minicron m; int counter = 0; - ZERO_STRUCT(m); + memset(&m, 0, sizeof(struct minicron)); int r = toku_minicron_setup(&m, 2000, run_3sec, &counter); assert(r==0); sleep(10); r = toku_minicron_shutdown(&m); assert(r==0); @@ -209,7 +209,7 @@ static void* test5 (void *v) { struct minicron m; int counter = 0; - ZERO_STRUCT(m); + memset(&m, 0, sizeof(struct minicron)); int r = toku_minicron_setup(&m, 10000, run_3sec, &counter); assert(r==0); toku_minicron_change_period(&m, 2000); sleep(10); @@ -221,7 +221,7 @@ test5 (void *v) { static void* test6 (void *v) { struct minicron m; - ZERO_STRUCT(m); + memset(&m, 0, sizeof(struct minicron)); int r = toku_minicron_setup(&m, 5000, never_run, 0); assert(r==0); toku_minicron_change_period(&m, 0); sleep(7); @@ -233,8 +233,8 @@ test6 (void *v) { static void* test7 (void *v) { struct minicron m; + memset(&m, 0, sizeof(struct minicron)); int counter = 0; - ZERO_STRUCT(m); int r = toku_minicron_setup(&m, 5000, run_3sec, &counter); assert(r==0); sleep(17); r = toku_minicron_shutdown(&m); assert(r==0); diff --git a/storage/tokudb/ft-index/util/tests/omt-test.cc b/storage/tokudb/ft-index/util/tests/omt-test.cc index 9eeb7970a47..28daed80965 100644 --- a/storage/tokudb/ft-index/util/tests/omt-test.cc +++ 
b/storage/tokudb/ft-index/util/tests/omt-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/omt-tmpl-test.cc b/storage/tokudb/ft-index/util/tests/omt-tmpl-test.cc index 8a9e13af89d..455502d50e9 100644 --- a/storage/tokudb/ft-index/util/tests/omt-tmpl-test.cc +++ b/storage/tokudb/ft-index/util/tests/omt-tmpl-test.cc @@ -30,7 +30,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/ft/tests/queue-test.cc b/storage/tokudb/ft-index/util/tests/queue-test.cc index edc2c628f94..d15e9ccab96 100644 --- a/storage/tokudb/ft-index/ft/tests/queue-test.cc +++ b/storage/tokudb/ft-index/util/tests/queue-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -94,7 +94,7 @@ PATENT RIGHTS GRANT: #include <unistd.h> #include <toku_assert.h> #include <toku_pthread.h> -#include "queue.h" +#include "util/queue.h" static int verbose=1; @@ -108,7 +108,7 @@ static void *start_0 (void *arg) { long count = 0; while (1) { uint64_t this_max_weight; - int r=queue_deq(q, &item, &weight, &this_max_weight); + int r=toku_queue_deq(q, &item, &weight, &this_max_weight); if (r==EOF) break; assert(r==0); if (this_max_weight>d_max_weight) d_max_weight=this_max_weight; @@ -123,7 +123,7 @@ static void *start_0 (void *arg) { static void enq (QUEUE q, long v, uint64_t weight) { uint64_t this_max_weight; - int r = queue_enq(q, (void*)v, (weight==0)?0:1, &this_max_weight); + int r = toku_queue_enq(q, (void*)v, (weight==0)?0:1, &this_max_weight); assert(r==0); if (this_max_weight>e_max_weight) e_max_weight=this_max_weight; //printf("E(%ld)=%ld %ld\n", v, this_max_weight, e_max_weight); @@ -138,7 +138,7 @@ static void queue_test_0 (uint64_t weight) d_max_weight = 0; QUEUE q; int r; - r = queue_create(&q, weight); assert(r==0); + r = toku_queue_create(&q, weight); assert(r==0); toku_pthread_t thread; r = toku_pthread_create(&thread, NULL, start_0, q); assert(r==0); enq(q, 0L, weight); @@ -148,12 +148,12 @@ static void queue_test_0 (uint64_t weight) sleep(1); enq(q, 4L, weight); enq(q, 5L, weight); - r = queue_eof(q); assert(r==0); + r = toku_queue_eof(q); assert(r==0); void *result; r = toku_pthread_join(thread, &result); assert(r==0); assert(result==NULL); assert(count_0==6); - r = queue_destroy(q); + r = toku_queue_destroy(q); assert(d_max_weight <= weight); assert(e_max_weight <= weight); } diff --git a/storage/tokudb/ft-index/util/tests/rwlock_condvar.h b/storage/tokudb/ft-index/util/tests/rwlock_condvar.h index db4b759ea52..58e7a61ae42 100644 --- a/storage/tokudb/ft-index/util/tests/rwlock_condvar.h +++ b/storage/tokudb/ft-index/util/tests/rwlock_condvar.h @@ -33,7 +33,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/sort-tmpl-test.cc b/storage/tokudb/ft-index/util/tests/sort-tmpl-test.cc index a1be929fce0..7597c4fa5a7 100644 --- a/storage/tokudb/ft-index/util/tests/sort-tmpl-test.cc +++ b/storage/tokudb/ft-index/util/tests/sort-tmpl-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/test-kibbutz.cc b/storage/tokudb/ft-index/util/tests/test-kibbutz.cc index 1f73037892c..dd5a7facf64 100644 --- a/storage/tokudb/ft-index/util/tests/test-kibbutz.cc +++ b/storage/tokudb/ft-index/util/tests/test-kibbutz.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/test-kibbutz2.cc b/storage/tokudb/ft-index/util/tests/test-kibbutz2.cc index ce797c068d8..80b97ff69c6 100644 --- a/storage/tokudb/ft-index/util/tests/test-kibbutz2.cc +++ b/storage/tokudb/ft-index/util/tests/test-kibbutz2.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/test-rwlock-cheapness.cc b/storage/tokudb/ft-index/util/tests/test-rwlock-cheapness.cc index de54c21efd2..ac04da16b85 100644 --- a/storage/tokudb/ft-index/util/tests/test-rwlock-cheapness.cc +++ b/storage/tokudb/ft-index/util/tests/test-rwlock-cheapness.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/test-rwlock.cc b/storage/tokudb/ft-index/util/tests/test-rwlock.cc index 42ceb00ad19..c4988aab85d 100644 --- a/storage/tokudb/ft-index/util/tests/test-rwlock.cc +++ b/storage/tokudb/ft-index/util/tests/test-rwlock.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/test.h b/storage/tokudb/ft-index/util/tests/test.h index 0760b9bf1fb..6ca60105d93 100644 --- a/storage/tokudb/ft-index/util/tests/test.h +++ b/storage/tokudb/ft-index/util/tests/test.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/test_circular_buffer.cc b/storage/tokudb/ft-index/util/tests/test_circular_buffer.cc index 8bc239ac6fc..8bf0b646e4a 100644 --- a/storage/tokudb/ft-index/util/tests/test_circular_buffer.cc +++ b/storage/tokudb/ft-index/util/tests/test_circular_buffer.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/test_doubly_linked_list.cc b/storage/tokudb/ft-index/util/tests/test_doubly_linked_list.cc index 6fad884ed8e..94e6b0a3489 100644 --- a/storage/tokudb/ft-index/util/tests/test_doubly_linked_list.cc +++ b/storage/tokudb/ft-index/util/tests/test_doubly_linked_list.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/test_partitioned_counter.cc b/storage/tokudb/ft-index/util/tests/test_partitioned_counter.cc index 5af214f75ac..ce09aa04229 100644 --- a/storage/tokudb/ft-index/util/tests/test_partitioned_counter.cc +++ b/storage/tokudb/ft-index/util/tests/test_partitioned_counter.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -201,9 +201,6 @@ static inline void increment (void) { head->prev = cp; } head = cp; -#ifdef __INTEL_COMPILER - __memory_barrier(); // for some reason I don't understand, ICC needs a memory barrier here. -Bradley -#endif cp->counter = 0; cp->inited = true; cp->myid = idcounter++; diff --git a/storage/tokudb/ft-index/util/tests/test_partitioned_counter_5833.cc b/storage/tokudb/ft-index/util/tests/test_partitioned_counter_5833.cc index 419f992576b..2e42e4d4b4e 100644 --- a/storage/tokudb/ft-index/util/tests/test_partitioned_counter_5833.cc +++ b/storage/tokudb/ft-index/util/tests/test_partitioned_counter_5833.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/threadpool-nproc-limit.cc b/storage/tokudb/ft-index/util/tests/threadpool-nproc-limit.cc new file mode 100644 index 00000000000..3395a30238b --- /dev/null +++ b/storage/tokudb/ft-index/util/tests/threadpool-nproc-limit.cc @@ -0,0 +1,171 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/* +COPYING CONDITIONS NOTICE: + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation, and provided that the + following conditions are met: + + * Redistributions of source code must retain this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below). + + * Redistributions in binary form must reproduce this COPYING + CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the + DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the + PATENT MARKING NOTICE (below), and the PATENT RIGHTS + GRANT (below) in the documentation and/or other materials + provided with the distribution. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. + +COPYRIGHT NOTICE: + + TokuFT, Tokutek Fractal Tree Indexing Library. + Copyright (C) 2007-2013 Tokutek, Inc. 
+ +DISCLAIMER: + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +UNIVERSITY PATENT NOTICE: + + The technology is licensed by the Massachusetts Institute of + Technology, Rutgers State University of New Jersey, and the Research + Foundation of State University of New York at Stony Brook under + United States of America Serial No. 11/760379 and to the patents + and/or patent applications resulting from it. + +PATENT MARKING NOTICE: + + This software is covered by US Patent No. 8,185,551. + This software is covered by US Patent No. 8,489,638. + +PATENT RIGHTS GRANT: + + "THIS IMPLEMENTATION" means the copyrightable works distributed by + Tokutek as part of the Fractal Tree project. + + "PATENT CLAIMS" means the claims of patents that are owned or + licensable by Tokutek, both currently or in the future; and that in + the absence of this license would be infringed by THIS + IMPLEMENTATION or by using or running THIS IMPLEMENTATION. + + "PATENT CHALLENGE" shall mean a challenge to the validity, + patentability, enforceability and/or non-infringement of any of the + PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS. + + Tokutek hereby grants to you, for the term and geographical scope of + the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free, + irrevocable (except as stated in this section) patent license to + make, have made, use, offer to sell, sell, import, transfer, and + otherwise run, modify, and propagate the contents of THIS + IMPLEMENTATION, where such license applies only to the PATENT + CLAIMS. This grant does not include claims that would be infringed + only as a consequence of further modifications of THIS + IMPLEMENTATION. If you or your agent or licensee institute or order + or agree to the institution of patent litigation against any entity + (including a cross-claim or counterclaim in a lawsuit) alleging that + THIS IMPLEMENTATION constitutes direct or contributory patent + infringement, or inducement of patent infringement, then any rights + granted to you under this License shall terminate as of the date + such litigation is filed. If you or your agent or exclusive + licensee institute or order or agree to the institution of a PATENT + CHALLENGE, then Tokutek may terminate any rights granted to you + under this License. +*/ + +#ident "Copyright (c) 2014 Tokutek Inc. All rights reserved." + +// this test verifies that the toku thread pool is resilient when hitting the nproc limit. 
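The comment above states the whole idea of this new test: drive RLIMIT_NPROC low enough that thread creation starts failing, and check that the thread pool copes instead of asserting. The sketch below shows only that underlying mechanism in isolation; it is not part of the patch. The names worker/tight/saved are invented, and the exact point at which EAGAIN appears depends on the platform and on how many processes the (unprivileged) user already owns, since RLIMIT_NPROC is a per-user limit on Linux.

    // Illustration only: shrink RLIMIT_NPROC and observe pthread_create failing.
    #include <pthread.h>
    #include <sys/resource.h>
    #include <cstdio>
    #include <cerrno>

    static void *worker(void *arg) { return arg; }

    int main(void) {
        struct rlimit saved;
        if (getrlimit(RLIMIT_NPROC, &saved) != 0)
            return 1;

        struct rlimit tight = saved;
        tight.rlim_cur = 1;                      // allow (almost) no new threads
        if (setrlimit(RLIMIT_NPROC, &tight) != 0)
            return 1;

        pthread_t tid;
        int r = pthread_create(&tid, NULL, worker, NULL);
        if (r == 0) {
            pthread_join(tid, NULL);             // limit was not low enough to matter
        } else {
            printf("pthread_create failed: %d (EAGAIN=%d)\n", r, EAGAIN);
        }

        setrlimit(RLIMIT_NPROC, &saved);         // always restore the original limit
        return 0;
    }

The test file that follows does the same dance around toku_thread_pool_run(), retrying with progressively larger limits until thread creation fails only after at least one thread was created.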
+ +#include <util/threadpool.h> +#include <stdio.h> +#include <stdlib.h> +#include <assert.h> +#include <string.h> +#include <unistd.h> +#include <errno.h> +#include <sys/resource.h> + +int verbose = 0; + +static int usage(void) { + fprintf(stderr, "[-q] [-v] [--verbose] (%d)\n", verbose); + return 1; +} + +static void *f(void *arg) { + return arg; +} + +static int dotest(int the_limit) { + if (verbose) + fprintf(stderr, "%s:%u %d\n", __FILE__, __LINE__, the_limit); + int r; + struct toku_thread_pool *pool = nullptr; + r = toku_thread_pool_create(&pool, 10); + assert(r == 0 && pool != nullptr); + + struct rlimit current_nproc_limit; + r = getrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + struct rlimit new_nproc_limit = current_nproc_limit; + new_nproc_limit.rlim_cur = the_limit; + r = setrlimit(RLIMIT_NPROC, &new_nproc_limit); + assert(r == 0); + + int want_n = 20; + int got_n = want_n; + r = toku_thread_pool_run(pool, 0, &got_n, f, nullptr); + if (r == 0) + assert(want_n == got_n); + else { + assert(r == EWOULDBLOCK); + assert(got_n <= want_n); + } + + r = setrlimit(RLIMIT_NPROC, ¤t_nproc_limit); + assert(r == 0); + + if (verbose) + toku_thread_pool_print(pool, stderr); + toku_thread_pool_destroy(&pool); + return got_n > 0; +} + +int main(int argc, char *argv[]) { + // parse args + for (int i = 1; i < argc; i++) { + char *arg = argv[i]; + if (arg[0] != '-') + break; + if (strcmp(arg, "-v") == 0 || strcmp(arg, "--verbose") == 0) { + verbose = verbose+1; + continue; + } + if (strcmp(arg, "-q") == 0) { + verbose = verbose > 0 ? verbose-1 : 0; + continue; + } + return usage(); + } + // set increasing nproc limits until the test succeeds in hitting the limit after > 0 threads are created + for (int i = 0; 1; i++) { + if (dotest(i)) + break; + } + return 0; +} diff --git a/storage/tokudb/ft-index/util/tests/threadpool-test.cc b/storage/tokudb/ft-index/util/tests/threadpool-test.cc index 6815cce8f8f..b9bebc5db7d 100644 --- a/storage/tokudb/ft-index/util/tests/threadpool-test.cc +++ b/storage/tokudb/ft-index/util/tests/threadpool-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/threadpool-testrunf.cc b/storage/tokudb/ft-index/util/tests/threadpool-testrunf.cc index f4d875a8941..b7744cbf54c 100644 --- a/storage/tokudb/ft-index/util/tests/threadpool-testrunf.cc +++ b/storage/tokudb/ft-index/util/tests/threadpool-testrunf.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/tests/x1764-test.cc b/storage/tokudb/ft-index/util/tests/x1764-test.cc index d8a0b1d0eb3..5f47e007f50 100644 --- a/storage/tokudb/ft-index/util/tests/x1764-test.cc +++ b/storage/tokudb/ft-index/util/tests/x1764-test.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. 
DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/threadpool.cc b/storage/tokudb/ft-index/util/threadpool.cc index d6652b7a71c..7c8fade7ed6 100644 --- a/storage/tokudb/ft-index/util/threadpool.cc +++ b/storage/tokudb/ft-index/util/threadpool.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -132,13 +132,18 @@ static int toku_thread_create(struct toku_thread_pool *pool, struct toku_thread **toku_thread_return) { int r; struct toku_thread *MALLOC(thread); - if (thread == NULL) { + if (thread == nullptr) { r = get_error_errno(); } else { memset(thread, 0, sizeof *thread); thread->pool = pool; - toku_cond_init(&thread->wait, NULL); - r = toku_pthread_create(&thread->tid, NULL, toku_thread_run_internal, thread); resource_assert_zero(r); + toku_cond_init(&thread->wait, nullptr); + r = toku_pthread_create(&thread->tid, nullptr, toku_thread_run_internal, thread); + if (r) { + toku_cond_destroy(&thread->wait); + toku_free(thread); + thread = nullptr; + } *toku_thread_return = thread; } return r; @@ -192,7 +197,7 @@ toku_thread_run_internal(void *arg) { if (doexit) break; toku_thread_pool_lock(pool); - thread->f = NULL; + thread->f = nullptr; toku_list_push(&pool->free_threads, &thread->free_link); } return arg; @@ -202,13 +207,13 @@ int toku_thread_pool_create(struct toku_thread_pool **pool_return, int max_threads) { int r; struct toku_thread_pool *CALLOC(pool); - if (pool == NULL) { + if (pool == nullptr) { r = get_error_errno(); } else { - toku_mutex_init(&pool->lock, NULL); + toku_mutex_init(&pool->lock, nullptr); toku_list_init(&pool->free_threads); toku_list_init(&pool->all_threads); - toku_cond_init(&pool->wait_free, NULL); + toku_cond_init(&pool->wait_free, nullptr); pool->cur_threads = 0; pool->max_threads = max_threads; *pool_return = pool; @@ -230,7 +235,7 @@ toku_thread_pool_unlock(struct toku_thread_pool *pool) { void toku_thread_pool_destroy(struct toku_thread_pool **poolptr) { struct toku_thread_pool *pool = *poolptr; - *poolptr = NULL; + *poolptr = nullptr; // ask the threads to exit toku_thread_pool_lock(pool); @@ -260,7 +265,7 @@ toku_thread_pool_destroy(struct toku_thread_pool **poolptr) { static int toku_thread_pool_add(struct toku_thread_pool *pool) { - struct toku_thread *thread = NULL; + struct toku_thread *thread = nullptr; int r = toku_thread_create(pool, &thread); if (r == 0) { pool->cur_threads += 1; @@ -294,7 +299,7 @@ toku_thread_pool_get_one(struct toku_thread_pool *pool, int dowait, struct toku_ struct toku_thread *thread = toku_list_struct(list, struct toku_thread, free_link); *toku_thread_return = thread; } else - *toku_thread_return = NULL; + *toku_thread_return = nullptr; toku_thread_pool_unlock(pool); return r; } diff --git a/storage/tokudb/ft-index/util/threadpool.h b/storage/tokudb/ft-index/util/threadpool.h index 3fada1f6e54..ed43dea93be 100644 --- a/storage/tokudb/ft-index/util/threadpool.h +++ b/storage/tokudb/ft-index/util/threadpool.h @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -89,8 +89,7 @@ PATENT RIGHTS GRANT: #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." 
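The threadpool.cc hunk above replaces the old resource_assert_zero() after toku_pthread_create() with explicit cleanup, so a failed thread creation (for example under the nproc limit exercised by the new test) is reported to the caller instead of aborting the process. A minimal sketch of that pattern, with hypothetical names (worker_ctx, make_worker, run), standing in for the toku_thread/toku_thread_pool types:

    #include <pthread.h>
    #include <cstdlib>
    #include <cerrno>

    struct worker_ctx {
        pthread_t      tid;
        pthread_cond_t wait;
    };

    static void *run(void *arg) { return arg; }

    static int make_worker(worker_ctx **out) {
        worker_ctx *w = (worker_ctx *) calloc(1, sizeof(*w));
        if (w == NULL)
            return ENOMEM;
        pthread_cond_init(&w->wait, NULL);
        int r = pthread_create(&w->tid, NULL, run, w);
        if (r != 0) {
            // undo the partial initialization instead of asserting
            pthread_cond_destroy(&w->wait);
            free(w);
            w = NULL;
        }
        *out = w;       // caller sees NULL plus the error code on failure
        return r;
    }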
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." -#ifndef UTIL_THREADPOOL_H -#define UTIL_THREADPOOL_H +#pragma once #include <stdio.h> @@ -137,5 +136,3 @@ int toku_thread_pool_run(struct toku_thread_pool *pool, int dowait, int *nthread // Print the state of the thread pool void toku_thread_pool_print(struct toku_thread_pool *pool, FILE *out); - -#endif // UTIL_THREADPOOL_H diff --git a/storage/tokudb/ft-index/util/x1764.cc b/storage/tokudb/ft-index/util/x1764.cc index ef7e6576e4f..5fb20daccee 100644 --- a/storage/tokudb/ft-index/util/x1764.cc +++ b/storage/tokudb/ft-index/util/x1764.cc @@ -29,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: diff --git a/storage/tokudb/ft-index/util/x1764.h b/storage/tokudb/ft-index/util/x1764.h index 1f87f50f09e..1d83e5a1853 100644 --- a/storage/tokudb/ft-index/util/x1764.h +++ b/storage/tokudb/ft-index/util/x1764.h @@ -1,7 +1,5 @@ /* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ // vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: -#ifndef X1764_H -#define X1764_H #ident "$Id$" /* COPYING CONDITIONS NOTICE: @@ -31,7 +29,7 @@ COPYING CONDITIONS NOTICE: COPYRIGHT NOTICE: - TokuDB, Tokutek Fractal Tree Indexing Library. + TokuFT, Tokutek Fractal Tree Indexing Library. Copyright (C) 2007-2013 Tokutek, Inc. DISCLAIMER: @@ -88,6 +86,8 @@ PATENT RIGHTS GRANT: under this License. */ +#pragma once + #ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." @@ -121,6 +121,3 @@ void toku_x1764_add (struct x1764 *l, const void *vbuf, int len); uint32_t toku_x1764_finish (struct x1764 *l); // Effect: Return the final 32-bit result. 
- - -#endif diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc index 0cef79ed32c..0c2310f6685 100644 --- a/storage/tokudb/ha_tokudb.cc +++ b/storage/tokudb/ha_tokudb.cc @@ -120,14 +120,6 @@ extern "C" { #include "hatoku_defines.h" #include "hatoku_cmp.h" -static inline void *thd_data_get(THD *thd, int slot) { - return thd->ha_data[slot].ha_ptr; -} - -static inline void thd_data_set(THD *thd, int slot, void *data) { - thd->ha_data[slot].ha_ptr = data; -} - static inline uint get_key_parts(const KEY *key); #undef PACKAGE @@ -144,8 +136,8 @@ static inline uint get_key_parts(const KEY *key); #include "tokudb_buffer.h" #include "tokudb_status.h" #include "tokudb_card.h" -#include "hatoku_hton.h" #include "ha_tokudb.h" +#include "hatoku_hton.h" #include <mysql/plugin.h> static const char *ha_tokudb_exts[] = { @@ -477,10 +469,9 @@ typedef struct index_read_info { DBT* orig_key; } *INDEX_READ_INFO; - static int ai_poll_fun(void *extra, float progress) { LOADER_CONTEXT context = (LOADER_CONTEXT)extra; - if (context->thd->killed) { + if (thd_killed(context->thd)) { sprintf(context->write_status_msg, "The process has been killed, aborting add index."); return ER_ABORTING_CONNECTION; } @@ -495,7 +486,7 @@ static int ai_poll_fun(void *extra, float progress) { static int loader_poll_fun(void *extra, float progress) { LOADER_CONTEXT context = (LOADER_CONTEXT)extra; - if (context->thd->killed) { + if (thd_killed(context->thd)) { sprintf(context->write_status_msg, "The process has been killed, aborting bulk load."); return ER_ABORTING_CONNECTION; } @@ -1016,8 +1007,7 @@ static uchar* pack_toku_field_blob( static int create_tokudb_trx_data_instance(tokudb_trx_data** out_trx) { int error; - tokudb_trx_data* trx = NULL; - trx = (tokudb_trx_data *) tokudb_my_malloc(sizeof(*trx), MYF(MY_ZEROFILL)); + tokudb_trx_data* trx = (tokudb_trx_data *) tokudb_my_malloc(sizeof(*trx), MYF(MY_ZEROFILL)); if (!trx) { error = ENOMEM; goto cleanup; @@ -1259,6 +1249,7 @@ ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, t tokudb_active_index = MAX_KEY; invalidate_icp(); trx_handler_list.data = this; + in_rpl_write_rows = in_rpl_delete_rows = in_rpl_update_rows = false; TOKUDB_HANDLER_DBUG_VOID_RETURN; } @@ -1614,8 +1605,7 @@ int ha_tokudb::initialize_share( DB_TXN* txn = NULL; bool do_commit = false; THD* thd = ha_thd(); - tokudb_trx_data *trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton); if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) { txn = trx->sub_sp_level; } @@ -1649,8 +1639,7 @@ int ha_tokudb::initialize_share( #if WITH_PARTITION_STORAGE_ENGINE // verify frm data for non-partitioned tables - if (TOKU_PARTITION_WRITE_FRM_DATA || - IF_PARTITIONING(table->part_info, NULL) == NULL) { + if (TOKU_PARTITION_WRITE_FRM_DATA || table->part_info == NULL) { error = verify_frm_data(table->s->path.str, txn); if (error) goto exit; @@ -1727,7 +1716,7 @@ int ha_tokudb::initialize_share( } share->ref_length = ref_length; - error = estimate_num_rows(share->file,&num_rows, txn); + error = estimate_num_rows(share->file, &num_rows, txn); // // estimate_num_rows should not fail under normal conditions // @@ -1937,7 +1926,6 @@ exit: // int ha_tokudb::estimate_num_rows(DB* db, uint64_t* num_rows, DB_TXN* txn) { int error = ENOSYS; - DBC* crsr = NULL; bool do_commit = false; DB_BTREE_STAT64 dict_stats; DB_TXN* txn_to_use = NULL; @@ -1951,21 +1939,12 @@ 
int ha_tokudb::estimate_num_rows(DB* db, uint64_t* num_rows, DB_TXN* txn) { txn_to_use = txn; } - error = db->stat64( - share->file, - txn_to_use, - &dict_stats - ); + error = db->stat64(db, txn_to_use, &dict_stats); if (error) { goto cleanup; } *num_rows = dict_stats.bt_ndata; error = 0; cleanup: - if (crsr != NULL) { - int r = crsr->c_close(crsr); - assert(r==0); - crsr = NULL; - } if (do_commit) { commit_txn(txn_to_use, 0); txn_to_use = NULL; @@ -3271,7 +3250,7 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) { TOKUDB_HANDLER_DBUG_ENTER("%llu txn %p", (unsigned long long) rows, transaction); #endif THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); delay_updating_ai_metadata = true; ai_metadata_update_required = false; abort_loader = false; @@ -3281,7 +3260,7 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) { num_DBs_locked_in_bulk = true; lock_count = 0; - if (share->try_table_lock) { + if ((rows == 0 || rows > 1) && share->try_table_lock) { if (get_prelock_empty(thd) && may_table_be_empty(transaction)) { if (using_ignore || is_insert_ignore(thd) || thd->lex->duplicates != DUP_ERROR || table->s->next_number_key_offset) { @@ -3340,7 +3319,7 @@ int ha_tokudb::end_bulk_insert(bool abort) { TOKUDB_HANDLER_DBUG_ENTER(""); int error = 0; THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); bool using_loader = (loader != NULL); if (ai_metadata_update_required) { tokudb_pthread_mutex_lock(&share->mutex); @@ -3352,17 +3331,17 @@ int ha_tokudb::end_bulk_insert(bool abort) { ai_metadata_update_required = false; loader_error = 0; if (loader) { - if (!abort_loader && !thd->killed) { + if (!abort_loader && !thd_killed(thd)) { DBUG_EXECUTE_IF("tokudb_end_bulk_insert_sleep", { - const char *old_proc_info = tokudb_thd_get_proc_info(thd); + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); thd_proc_info(thd, "DBUG sleep"); my_sleep(20000000); - thd_proc_info(thd, old_proc_info); + thd_proc_info(thd, orig_proc_info); }); error = loader->close(loader); loader = NULL; if (error) { - if (thd->killed) { + if (thd_killed(thd)) { my_error(ER_QUERY_INTERRUPTED, MYF(0)); } goto cleanup; @@ -3374,12 +3353,8 @@ int ha_tokudb::end_bulk_insert(bool abort) { if (i == primary_key && !share->pk_has_string) { continue; } - error = is_index_unique( - &is_unique, - transaction, - share->key_file[i], - &table->key_info[i] - ); + error = is_index_unique(&is_unique, transaction, share->key_file[i], &table->key_info[i], + DB_PRELOCKED_WRITE); if (error) goto cleanup; if (!is_unique) { error = HA_ERR_FOUND_DUPP_KEY; @@ -3419,6 +3394,7 @@ cleanup: } } trx->stmt_progress.using_loader = false; + thd_proc_info(thd, 0); TOKUDB_HANDLER_DBUG_RETURN(error ? error : loader_error); } @@ -3426,7 +3402,7 @@ int ha_tokudb::end_bulk_insert() { return end_bulk_insert( false ); } -int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info) { +int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info, int lock_flags) { int error; DBC* tmp_cursor1 = NULL; DBC* tmp_cursor2 = NULL; @@ -3434,7 +3410,7 @@ int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_in uint64_t cnt = 0; char status_msg[MAX_ALIAS_NAME + 200]; //buffer of 200 should be a good upper bound. 
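Several hunks in ha_tokudb.cc (here and below) replace the handler's private thd_data_get()/thd_data_set() helpers, which poked thd->ha_data[hton->slot] directly, with the server-provided thd_get_ha_data()/thd_set_ha_data() calls. A hedged sketch of the per-connection-state pattern this standardizes on; THD, handlerton and the two thd_*_ha_data functions come from the MariaDB server/plugin headers, while my_engine_state and get_or_create_state are invented stand-ins for tokudb_trx_data and the code in external_lock():

    // Assumes the server headers declaring THD, handlerton,
    // thd_get_ha_data() and thd_set_ha_data() are available.
    struct my_engine_state {
        void *active_txn;   // whatever the engine must remember per connection
    };

    static my_engine_state *get_or_create_state(THD *thd, handlerton *hton) {
        my_engine_state *st = (my_engine_state *) thd_get_ha_data(thd, hton);
        if (st == NULL) {
            st = new my_engine_state();
            st->active_txn = NULL;
            thd_set_ha_data(thd, hton, st);  // server keeps the pointer per THD
        }
        return st;
    }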
THD* thd = ha_thd(); - const char *old_proc_info = tokudb_thd_get_proc_info(thd); + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); memset(&key1, 0, sizeof(key1)); memset(&key2, 0, sizeof(key2)); memset(&val, 0, sizeof(val)); @@ -3442,49 +3418,23 @@ int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_in memset(&packed_key2, 0, sizeof(packed_key2)); *is_unique = true; - error = db->cursor( - db, - txn, - &tmp_cursor1, - DB_SERIALIZABLE - ); + error = db->cursor(db, txn, &tmp_cursor1, DB_SERIALIZABLE); if (error) { goto cleanup; } - error = db->cursor( - db, - txn, - &tmp_cursor2, - DB_SERIALIZABLE - ); + error = db->cursor(db, txn, &tmp_cursor2, DB_SERIALIZABLE); if (error) { goto cleanup; } - - error = tmp_cursor1->c_get( - tmp_cursor1, - &key1, - &val, - DB_NEXT - ); + error = tmp_cursor1->c_get(tmp_cursor1, &key1, &val, DB_NEXT + lock_flags); if (error == DB_NOTFOUND) { *is_unique = true; error = 0; goto cleanup; } else if (error) { goto cleanup; } - error = tmp_cursor2->c_get( - tmp_cursor2, - &key2, - &val, - DB_NEXT - ); + error = tmp_cursor2->c_get(tmp_cursor2, &key2, &val, DB_NEXT + lock_flags); if (error) { goto cleanup; } - error = tmp_cursor2->c_get( - tmp_cursor2, - &key2, - &val, - DB_NEXT - ); + error = tmp_cursor2->c_get(tmp_cursor2, &key2, &val, DB_NEXT + lock_flags); if (error == DB_NOTFOUND) { *is_unique = true; error = 0; @@ -3496,59 +3446,25 @@ int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_in bool has_null1; bool has_null2; int cmp; - place_key_into_mysql_buff( - key_info, - table->record[0], - (uchar *) key1.data + 1 - ); - place_key_into_mysql_buff( - key_info, - table->record[1], - (uchar *) key2.data + 1 - ); + place_key_into_mysql_buff(key_info, table->record[0], (uchar *) key1.data + 1); + place_key_into_mysql_buff(key_info, table->record[1], (uchar *) key2.data + 1); - create_dbt_key_for_lookup( - &packed_key1, - key_info, - key_buff, - table->record[0], - &has_null1 - ); - create_dbt_key_for_lookup( - &packed_key2, - key_info, - key_buff2, - table->record[1], - &has_null2 - ); + create_dbt_key_for_lookup(&packed_key1, key_info, key_buff, table->record[0], &has_null1); + create_dbt_key_for_lookup(&packed_key2, key_info, key_buff2, table->record[1], &has_null2); if (!has_null1 && !has_null2) { cmp = tokudb_prefix_cmp_dbt_key(db, &packed_key1, &packed_key2); if (cmp == 0) { memcpy(key_buff, key1.data, key1.size); - place_key_into_mysql_buff( - key_info, - table->record[0], - (uchar *) key_buff + 1 - ); + place_key_into_mysql_buff(key_info, table->record[0], (uchar *) key_buff + 1); *is_unique = false; break; } } - error = tmp_cursor1->c_get( - tmp_cursor1, - &key1, - &val, - DB_NEXT - ); + error = tmp_cursor1->c_get(tmp_cursor1, &key1, &val, DB_NEXT + lock_flags); if (error) { goto cleanup; } - error = tmp_cursor2->c_get( - tmp_cursor2, - &key2, - &val, - DB_NEXT - ); + error = tmp_cursor2->c_get(tmp_cursor2, &key2, &val, DB_NEXT + lock_flags); if (error && (error != DB_NOTFOUND)) { goto cleanup; } cnt++; @@ -3560,7 +3476,7 @@ int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_in share->rows, key_info->name); thd_proc_info(thd, status_msg); - if (thd->killed) { + if (thd_killed(thd)) { my_error(ER_QUERY_INTERRUPTED, MYF(0)); error = ER_QUERY_INTERRUPTED; goto cleanup; @@ -3571,7 +3487,7 @@ int ha_tokudb::is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_in error = 0; cleanup: - thd_proc_info(thd, old_proc_info); + thd_proc_info(thd, orig_proc_info); if 
(tmp_cursor1) { tmp_cursor1->c_close(tmp_cursor1); tmp_cursor1 = NULL; @@ -3646,12 +3562,27 @@ cleanup: return error; } +static void maybe_do_unique_checks_delay(THD *thd) { + if (thd->slave_thread) { + uint64_t delay_ms = THDVAR(thd, rpl_unique_checks_delay); + if (delay_ms) + usleep(delay_ms * 1000); + } +} + +static bool do_unique_checks(THD *thd, bool do_rpl_event) { + if (do_rpl_event && thd->slave_thread && opt_readonly && !THDVAR(thd, rpl_unique_checks)) + return false; + else + return !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS); +} + int ha_tokudb::do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd) { - int error; + int error = 0; // // first do uniqueness checks // - if (share->has_unique_keys && !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) { + if (share->has_unique_keys && do_unique_checks(thd, in_rpl_write_rows)) { for (uint keynr = 0; keynr < table_share->keys; keynr++) { bool is_unique_key = (table->key_info[keynr].flags & HA_NOSAME) || (keynr == primary_key); bool is_unique = false; @@ -3664,13 +3595,18 @@ int ha_tokudb::do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd) { if (!is_unique_key) { continue; } + + maybe_do_unique_checks_delay(thd); + // // if unique key, check uniqueness constraint // but, we do not need to check it if the key has a null // and we do not need to check it if unique_checks is off // error = is_val_unique(&is_unique, record, &table->key_info[keynr], keynr, txn); - if (error) { goto cleanup; } + if (error) { + goto cleanup; + } if (!is_unique) { error = DB_KEYEXIST; last_dup_key = keynr; @@ -3678,7 +3614,6 @@ int ha_tokudb::do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd) { } } } - error = 0; cleanup: return error; } @@ -3781,15 +3716,8 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) { tokudb_my_free(tmp_pk_val_data); } -// // set the put flags for the main dictionary -// -void ha_tokudb::set_main_dict_put_flags( - THD* thd, - bool opt_eligible, - uint32_t* put_flags - ) -{ +void ha_tokudb::set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* put_flags) { uint32_t old_prelock_flags = 0; uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key); bool in_hot_index = share->num_DBs > curr_num_DBs; @@ -3809,8 +3737,7 @@ void ha_tokudb::set_main_dict_put_flags( { *put_flags = old_prelock_flags; } - else if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS) - && !is_replace_into(thd) && !is_insert_ignore(thd)) + else if (!do_unique_checks(thd, in_rpl_write_rows | in_rpl_update_rows) && !is_replace_into(thd) && !is_insert_ignore(thd)) { *put_flags = old_prelock_flags; } @@ -3832,22 +3759,18 @@ void ha_tokudb::set_main_dict_put_flags( int ha_tokudb::insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk_val, DB_TXN* txn) { int error = 0; - uint32_t put_flags = mult_put_flags[primary_key]; - THD *thd = ha_thd(); uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key); - assert(curr_num_DBs == 1); - + + uint32_t put_flags = mult_put_flags[primary_key]; + THD *thd = ha_thd(); set_main_dict_put_flags(thd, true, &put_flags); - error = share->file->put( - share->file, - txn, - pk_key, - pk_val, - put_flags - ); + // for test, make unique checks have a very long duration + if ((put_flags & DB_OPFLAGS_MASK) == DB_NOOVERWRITE) + maybe_do_unique_checks_delay(thd); + error = share->file->put(share->file, txn, pk_key, pk_val, put_flags); if (error) { last_dup_key = primary_key; goto cleanup; @@ -3861,14 +3784,18 @@ int 
ha_tokudb::insert_rows_to_dictionaries_mult(DBT* pk_key, DBT* pk_val, DB_TXN int error = 0; uint curr_num_DBs = share->num_DBs; set_main_dict_put_flags(thd, true, &mult_put_flags[primary_key]); - uint32_t i, flags = mult_put_flags[primary_key]; + uint32_t flags = mult_put_flags[primary_key]; + + // for test, make unique checks have a very long duration + if ((flags & DB_OPFLAGS_MASK) == DB_NOOVERWRITE) + maybe_do_unique_checks_delay(thd); // the insert ignore optimization uses DB_NOOVERWRITE_NO_ERROR, // which is not allowed with env->put_multiple. // we have to insert the rows one by one in this case. if (flags & DB_NOOVERWRITE_NO_ERROR) { DB * src_db = share->key_file[primary_key]; - for (i = 0; i < curr_num_DBs; i++) { + for (uint32_t i = 0; i < curr_num_DBs; i++) { DB * db = share->key_file[i]; if (i == primary_key) { // if it's the primary key, insert the rows @@ -3929,7 +3856,7 @@ out: // error otherwise // int ha_tokudb::write_row(uchar * record) { - TOKUDB_HANDLER_DBUG_ENTER(""); + TOKUDB_HANDLER_DBUG_ENTER("%p", record); DBT row, prim_key; int error; @@ -3967,10 +3894,7 @@ int ha_tokudb::write_row(uchar * record) { if (share->has_auto_inc && record == table->record[0]) { tokudb_pthread_mutex_lock(&share->mutex); ulonglong curr_auto_inc = retrieve_auto_increment( - table->field[share->ai_field_index]->key_type(), - field_offset(table->field[share->ai_field_index], table), - record - ); + table->field[share->ai_field_index]->key_type(), field_offset(table->field[share->ai_field_index], table), record); if (curr_auto_inc > share->last_auto_increment) { share->last_auto_increment = curr_auto_inc; if (delay_updating_ai_metadata) { @@ -4072,7 +3996,7 @@ int ha_tokudb::write_row(uchar * record) { } } - trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (!error) { added_rows++; trx->stmt_progress.inserted++; @@ -4129,7 +4053,7 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) { THD* thd = ha_thd(); DB_TXN* sub_trans = NULL; DB_TXN* txn = NULL; - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); uint curr_num_DBs; LINT_INIT(error); @@ -4138,7 +4062,6 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) { memset((void *) &prim_row, 0, sizeof(prim_row)); memset((void *) &old_prim_row, 0, sizeof(old_prim_row)); - ha_statistic_increment(&SSV::ha_update_count); #if MYSQL_VERSION_ID < 50600 if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) { @@ -4185,7 +4108,6 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) { } txn = using_ignore ? 
sub_trans : transaction; - if (hidden_primary_key) { memset((void *) &prim_key, 0, sizeof(prim_key)); prim_key.data = (void *) current_ident; @@ -4197,10 +4119,8 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) { create_dbt_key_from_table(&old_prim_key, primary_key, primary_key_buff, old_row, &has_null); } - // // do uniqueness checks - // - if (share->has_unique_keys && !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) { + if (share->has_unique_keys && do_unique_checks(thd, in_rpl_update_rows)) { for (uint keynr = 0; keynr < table_share->keys; keynr++) { bool is_unique_key = (table->key_info[keynr].flags & HA_NOSAME) || (keynr == primary_key); if (keynr == primary_key && !share->pk_has_string) { @@ -4241,6 +4161,10 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) { set_main_dict_put_flags(thd, false, &mult_put_flags[primary_key]); + // for test, make unique checks have a very long duration + if ((mult_put_flags[primary_key] & DB_OPFLAGS_MASK) == DB_NOOVERWRITE) + maybe_do_unique_checks_delay(thd); + error = db_env->update_multiple( db_env, share->key_file[primary_key], @@ -4303,7 +4227,7 @@ int ha_tokudb::delete_row(const uchar * record) { bool has_null; THD* thd = ha_thd(); uint curr_num_DBs; - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);; ha_statistic_increment(&SSV::ha_delete_count); @@ -4459,6 +4383,20 @@ static bool index_key_is_null(TABLE *table, uint keynr, const uchar *key, uint k return key_can_be_null && key_len > 0 && key[0] != 0; } +// Return true if bulk fetch can be used +static bool tokudb_do_bulk_fetch(THD *thd) { + switch (thd_sql_command(thd)) { + case SQLCOM_SELECT: + case SQLCOM_CREATE_TABLE: + case SQLCOM_INSERT_SELECT: + case SQLCOM_REPLACE_SELECT: + case SQLCOM_DELETE: + return THDVAR(thd, bulk_fetch) != 0; + default: + return false; + } +} + // // Notification that a range query getting all elements that equal a key // to take place. 
Will pre acquire read lock @@ -4467,7 +4405,7 @@ static bool index_key_is_null(TABLE *table, uint keynr, const uchar *key, uint k // error otherwise // int ha_tokudb::prepare_index_key_scan(const uchar * key, uint key_len) { - TOKUDB_HANDLER_DBUG_ENTER(""); + TOKUDB_HANDLER_DBUG_ENTER("%p %u", key, key_len); int error = 0; DBT start_key, end_key; THD* thd = ha_thd(); @@ -4491,7 +4429,7 @@ int ha_tokudb::prepare_index_key_scan(const uchar * key, uint key_len) { range_lock_grabbed = true; range_lock_grabbed_null = index_key_is_null(table, tokudb_active_index, key, key_len); - doing_bulk_fetch = (thd_sql_command(thd) == SQLCOM_SELECT); + doing_bulk_fetch = tokudb_do_bulk_fetch(thd); bulk_fetch_iteration = 0; rows_fetched_using_bulk_fetch = 0; error = 0; @@ -4603,6 +4541,7 @@ int ha_tokudb::index_init(uint keynr, bool sorted) { } invalidate_bulk_fetch(); doing_bulk_fetch = false; + maybe_index_scan = false; error = 0; exit: TOKUDB_HANDLER_DBUG_RETURN(error); @@ -4870,7 +4809,7 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_ int error = 0; uint32_t flags = 0; THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);; struct smart_dbt_info info; struct index_read_info ir_info; @@ -5345,86 +5284,91 @@ cleanup: } int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_key_read) { - int error = 0; - uint32_t flags = SET_PRELOCK_FLAG(0); - THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; - bool need_val; + int error = 0; HANDLE_INVALID_CURSOR(); - // we need to read the val of what we retrieve if - // we do NOT have a covering index AND we are using a clustering secondary - // key - need_val = (do_key_read == 0) && - (tokudb_active_index == primary_key || - key_is_clustering(&table->key_info[tokudb_active_index]) - ); - - if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0) { - error = read_data_from_range_query_buff(buf, need_val, do_key_read); - } - else if (icp_went_out_of_range) { - icp_went_out_of_range = false; - error = HA_ERR_END_OF_FILE; + if (maybe_index_scan) { + maybe_index_scan = false; + if (!range_lock_grabbed) { + error = prepare_index_scan(); + } } - else { - invalidate_bulk_fetch(); - if (doing_bulk_fetch) { - struct smart_dbt_bf_info bf_info; - bf_info.ha = this; - // you need the val if you have a clustering index and key_read is not 0; - bf_info.direction = direction; - bf_info.thd = ha_thd(); - bf_info.need_val = need_val; - bf_info.buf = buf; - bf_info.key_to_compare = key_to_compare; - // - // call c_getf_next with purpose of filling in range_query_buff - // - rows_fetched_using_bulk_fetch = 0; - // it is expected that we can do ICP in the smart_dbt_bf_callback - // as a result, it's possible we don't return any data because - // none of the rows matched the index condition. Therefore, we need - // this while loop. icp_out_of_range will be set if we hit a row that - // the index condition states is out of our range. 
When that hits, - // we know all the data in the buffer is the last data we will retrieve - while (bytes_used_in_range_query_buff == 0 && !icp_went_out_of_range && error == 0) { - if (direction > 0) { - error = cursor->c_getf_next(cursor, flags, smart_dbt_bf_callback, &bf_info); - } else { - error = cursor->c_getf_prev(cursor, flags, smart_dbt_bf_callback, &bf_info); - } - } - // if there is no data set and we went out of range, - // then there is nothing to return - if (bytes_used_in_range_query_buff == 0 && icp_went_out_of_range) { - icp_went_out_of_range = false; - error = HA_ERR_END_OF_FILE; - } - if (bulk_fetch_iteration < HA_TOKU_BULK_FETCH_ITERATION_MAX) { - bulk_fetch_iteration++; - } + + if (!error) { + uint32_t flags = SET_PRELOCK_FLAG(0); - error = handle_cursor_error(error, HA_ERR_END_OF_FILE,tokudb_active_index); - if (error) { goto cleanup; } - - // - // now that range_query_buff is filled, read an element - // + // we need to read the val of what we retrieve if + // we do NOT have a covering index AND we are using a clustering secondary + // key + bool need_val = (do_key_read == 0) && + (tokudb_active_index == primary_key || key_is_clustering(&table->key_info[tokudb_active_index])); + + if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0) { error = read_data_from_range_query_buff(buf, need_val, do_key_read); } + else if (icp_went_out_of_range) { + icp_went_out_of_range = false; + error = HA_ERR_END_OF_FILE; + } else { - struct smart_dbt_info info; - info.ha = this; - info.buf = buf; - info.keynr = tokudb_active_index; + invalidate_bulk_fetch(); + if (doing_bulk_fetch) { + struct smart_dbt_bf_info bf_info; + bf_info.ha = this; + // you need the val if you have a clustering index and key_read is not 0; + bf_info.direction = direction; + bf_info.thd = ha_thd(); + bf_info.need_val = need_val; + bf_info.buf = buf; + bf_info.key_to_compare = key_to_compare; + // + // call c_getf_next with purpose of filling in range_query_buff + // + rows_fetched_using_bulk_fetch = 0; + // it is expected that we can do ICP in the smart_dbt_bf_callback + // as a result, it's possible we don't return any data because + // none of the rows matched the index condition. Therefore, we need + // this while loop. icp_out_of_range will be set if we hit a row that + // the index condition states is out of our range. 
When that hits, + // we know all the data in the buffer is the last data we will retrieve + while (bytes_used_in_range_query_buff == 0 && !icp_went_out_of_range && error == 0) { + if (direction > 0) { + error = cursor->c_getf_next(cursor, flags, smart_dbt_bf_callback, &bf_info); + } else { + error = cursor->c_getf_prev(cursor, flags, smart_dbt_bf_callback, &bf_info); + } + } + // if there is no data set and we went out of range, + // then there is nothing to return + if (bytes_used_in_range_query_buff == 0 && icp_went_out_of_range) { + icp_went_out_of_range = false; + error = HA_ERR_END_OF_FILE; + } + if (bulk_fetch_iteration < HA_TOKU_BULK_FETCH_ITERATION_MAX) { + bulk_fetch_iteration++; + } - if (direction > 0) { - error = cursor->c_getf_next(cursor, flags, SMART_DBT_CALLBACK(do_key_read), &info); - } else { - error = cursor->c_getf_prev(cursor, flags, SMART_DBT_CALLBACK(do_key_read), &info); + error = handle_cursor_error(error, HA_ERR_END_OF_FILE,tokudb_active_index); + if (error) { goto cleanup; } + + // + // now that range_query_buff is filled, read an element + // + error = read_data_from_range_query_buff(buf, need_val, do_key_read); + } + else { + struct smart_dbt_info info; + info.ha = this; + info.buf = buf; + info.keynr = tokudb_active_index; + + if (direction > 0) { + error = cursor->c_getf_next(cursor, flags, SMART_DBT_CALLBACK(do_key_read), &info); + } else { + error = cursor->c_getf_prev(cursor, flags, SMART_DBT_CALLBACK(do_key_read), &info); + } + error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index); } - error = handle_cursor_error(error, HA_ERR_END_OF_FILE, tokudb_active_index); } } @@ -5436,12 +5380,15 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_ // read the full row by doing a point query into the // main table. 
// - if (!error && !do_key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) { error = read_full_row(buf); } - trx->stmt_progress.queried++; - track_progress(thd); + + if (!error) { + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton); + trx->stmt_progress.queried++; + track_progress(ha_thd()); + } cleanup: return error; } @@ -5501,7 +5448,7 @@ int ha_tokudb::index_first(uchar * buf) { struct smart_dbt_info info; uint32_t flags = SET_PRELOCK_FLAG(0); THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);; HANDLE_INVALID_CURSOR(); ha_statistic_increment(&SSV::ha_read_first_count); @@ -5510,8 +5457,7 @@ int ha_tokudb::index_first(uchar * buf) { info.buf = buf; info.keynr = tokudb_active_index; - error = cursor->c_getf_first(cursor, flags, - SMART_DBT_CALLBACK(key_read), &info); + error = cursor->c_getf_first(cursor, flags, SMART_DBT_CALLBACK(key_read), &info); error = handle_cursor_error(error,HA_ERR_END_OF_FILE,tokudb_active_index); // @@ -5521,9 +5467,11 @@ int ha_tokudb::index_first(uchar * buf) { if (!error && !key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) { error = read_full_row(buf); } - trx->stmt_progress.queried++; + if (trx) { + trx->stmt_progress.queried++; + } track_progress(thd); - + maybe_index_scan = true; cleanup: TOKUDB_HANDLER_DBUG_RETURN(error); } @@ -5544,7 +5492,7 @@ int ha_tokudb::index_last(uchar * buf) { struct smart_dbt_info info; uint32_t flags = SET_PRELOCK_FLAG(0); THD* thd = ha_thd(); - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);; + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);; HANDLE_INVALID_CURSOR(); ha_statistic_increment(&SSV::ha_read_last_count); @@ -5553,8 +5501,7 @@ int ha_tokudb::index_last(uchar * buf) { info.buf = buf; info.keynr = tokudb_active_index; - error = cursor->c_getf_last(cursor, flags, - SMART_DBT_CALLBACK(key_read), &info); + error = cursor->c_getf_last(cursor, flags, SMART_DBT_CALLBACK(key_read), &info); error = handle_cursor_error(error,HA_ERR_END_OF_FILE,tokudb_active_index); // // still need to get entire contents of the row if operation done on @@ -5568,6 +5515,7 @@ int ha_tokudb::index_last(uchar * buf) { trx->stmt_progress.queried++; } track_progress(thd); + maybe_index_scan = true; cleanup: TOKUDB_HANDLER_DBUG_RETURN(error); } @@ -5635,7 +5583,7 @@ int ha_tokudb::rnd_next(uchar * buf) { void ha_tokudb::track_progress(THD* thd) { - tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (trx) { ulonglong num_written = trx->stmt_progress.inserted + trx->stmt_progress.updated + trx->stmt_progress.deleted; bool update_status = @@ -5691,13 +5639,11 @@ DBT *ha_tokudb::get_pos(DBT * to, uchar * pos) { DBUG_RETURN(to); } -// // Retrieves a row with based on the primary key saved in pos // Returns: // 0 on success // HA_ERR_KEY_NOT_FOUND if not found // error otherwise -// int ha_tokudb::rnd_pos(uchar * buf, uchar * pos) { TOKUDB_HANDLER_DBUG_ENTER(""); DBT db_pos; @@ -5710,12 +5656,20 @@ int ha_tokudb::rnd_pos(uchar * buf, uchar * pos) { ha_statistic_increment(&SSV::ha_read_rnd_count); tokudb_active_index = MAX_KEY; + // test rpl slave by inducing a delay before the point query + THD *thd = 
ha_thd(); + if (thd->slave_thread && (in_rpl_delete_rows || in_rpl_update_rows)) { + uint64_t delay_ms = THDVAR(thd, rpl_lookup_rows_delay); + if (delay_ms) + usleep(delay_ms * 1000); + } + info.ha = this; info.buf = buf; info.keynr = primary_key; error = share->file->getf_set(share->file, transaction, - get_cursor_isolation_flags(lock.type, ha_thd()), + get_cursor_isolation_flags(lock.type, thd), key, smart_dbt_callback_rowread_ptquery, &info); if (error == DB_NOTFOUND) { @@ -5727,8 +5681,8 @@ cleanup: TOKUDB_HANDLER_DBUG_RETURN(error); } -int ha_tokudb::prelock_range( const key_range *start_key, const key_range *end_key) { - TOKUDB_HANDLER_DBUG_ENTER(""); +int ha_tokudb::prelock_range(const key_range *start_key, const key_range *end_key) { + TOKUDB_HANDLER_DBUG_ENTER("%p %p", start_key, end_key); THD* thd = ha_thd(); int error = 0; @@ -5793,11 +5747,8 @@ int ha_tokudb::prelock_range( const key_range *start_key, const key_range *end_k goto cleanup; } - // // at this point, determine if we will be doing bulk fetch - // as of now, only do it if we are doing a select - // - doing_bulk_fetch = (thd_sql_command(thd) == SQLCOM_SELECT); + doing_bulk_fetch = tokudb_do_bulk_fetch(thd); bulk_fetch_iteration = 0; rows_fetched_using_bulk_fetch = 0; @@ -5812,7 +5763,7 @@ cleanup: // Forward scans use read_range_first()/read_range_next(). // int ha_tokudb::prepare_range_scan( const key_range *start_key, const key_range *end_key) { - TOKUDB_HANDLER_DBUG_ENTER(""); + TOKUDB_HANDLER_DBUG_ENTER("%p %p", start_key, end_key); int error = prelock_range(start_key, end_key); if (!error) { range_lock_grabbed = true; @@ -5826,7 +5777,7 @@ int ha_tokudb::read_range_first( bool eq_range, bool sorted) { - TOKUDB_HANDLER_DBUG_ENTER(""); + TOKUDB_HANDLER_DBUG_ENTER("%p %p %u %u", start_key, end_key, eq_range, sorted); int error = prelock_range(start_key, end_key); if (error) { goto cleanup; } range_lock_grabbed = true; @@ -6225,12 +6176,11 @@ int ha_tokudb::external_lock(THD * thd, int lock_type) { } int error = 0; - tokudb_trx_data *trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (!trx) { error = create_tokudb_trx_data_instance(&trx); if (error) { goto cleanup; } - thd_data_set(thd, tokudb_hton->slot, trx); + thd_set_ha_data(thd, tokudb_hton, trx); } if (trx->all == NULL) { trx->sp_level = NULL; @@ -6304,7 +6254,7 @@ int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) { TOKUDB_HANDLER_TRACE("q %s", thd->query()); int error = 0; - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); DBUG_ASSERT(trx); /* @@ -6404,7 +6354,7 @@ uint32_t ha_tokudb::get_cursor_isolation_flags(enum thr_lock_type lock_type, THD lock (if we don't want to use MySQL table locks at all) or add locks for many tables (like we do when we are using a MERGE handler). - Tokudb DB changes all WRITE locks to TL_WRITE_ALLOW_WRITE (which + TokuDB changes all WRITE locks to TL_WRITE_ALLOW_WRITE (which signals that we are doing WRITES, but we are still allowing other reader's and writer's. 
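The comment above explains why store_lock() downgrades write locks: converting them to TL_WRITE_ALLOW_WRITE lets other readers and writers keep working while one statement writes. The reworked hunk that follows also special-cases online CREATE INDEX (hot indexing) and OPTIMIZE TABLE (TL_READ_NO_INSERT becomes TL_READ). A condensed sketch of just the core downgrade rule; downgrade_write_lock is a hypothetical helper, and thr_lock_type with the TL_* constants come from the server's thr_lock.h:

    static thr_lock_type downgrade_write_lock(thr_lock_type requested,
                                              bool in_lock_tables,
                                              bool is_truncate) {
        if (!in_lock_tables && !is_truncate &&
            requested >= TL_WRITE_CONCURRENT_INSERT && requested <= TL_WRITE) {
            // let other readers and writers proceed while this statement writes
            return TL_WRITE_ALLOW_WRITE;
        }
        return requested;   // LOCK TABLES, TRUNCATE, etc. keep the strong lock
    }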
@@ -6426,34 +6376,25 @@ THR_LOCK_DATA **ha_tokudb::store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_l } if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) { - // if creating a hot index - if (thd_sql_command(thd)== SQLCOM_CREATE_INDEX && get_create_index_online(thd)) { - rw_rdlock(&share->num_DBs_lock); - if (share->num_DBs == (table->s->keys + tokudb_test(hidden_primary_key))) { - lock_type = TL_WRITE_ALLOW_WRITE; - } - lock.type = lock_type; - rw_unlock(&share->num_DBs_lock); - } - - // 5.5 supports reads concurrent with alter table. just use the default lock type. -#if MYSQL_VERSION_ID < 50500 - else if (thd_sql_command(thd)== SQLCOM_CREATE_INDEX || - thd_sql_command(thd)== SQLCOM_ALTER_TABLE || - thd_sql_command(thd)== SQLCOM_DROP_INDEX) { - // force alter table to lock out other readers - lock_type = TL_WRITE; - lock.type = lock_type; - } -#endif - else { - // If we are not doing a LOCK TABLE, then allow multiple writers - if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) && - !thd->in_lock_tables && thd_sql_command(thd) != SQLCOM_TRUNCATE && !thd_tablespace_op(thd)) { + enum_sql_command sql_command = (enum_sql_command) thd_sql_command(thd); + if (!thd->in_lock_tables) { + if (sql_command == SQLCOM_CREATE_INDEX && get_create_index_online(thd)) { + // hot indexing + rw_rdlock(&share->num_DBs_lock); + if (share->num_DBs == (table->s->keys + tokudb_test(hidden_primary_key))) { + lock_type = TL_WRITE_ALLOW_WRITE; + } + rw_unlock(&share->num_DBs_lock); + } else if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) && + sql_command != SQLCOM_TRUNCATE && !thd_tablespace_op(thd)) { + // allow concurrent writes lock_type = TL_WRITE_ALLOW_WRITE; + } else if (sql_command == SQLCOM_OPTIMIZE && lock_type == TL_READ_NO_INSERT) { + // hot optimize table + lock_type = TL_READ; } - lock.type = lock_type; } + lock.type = lock_type; } *to++ = &lock; if (tokudb_debug & TOKUDB_DEBUG_LOCK) @@ -6909,7 +6850,7 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in newname = (char *)tokudb_my_malloc(get_max_dict_name_path_length(name),MYF(MY_WME)); if (newname == NULL){ error = ENOMEM; goto cleanup;} - trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot); + trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton); if (trx && trx->sub_sp_level && thd_sql_command(thd) == SQLCOM_CREATE_TABLE) { txn = trx->sub_sp_level; } @@ -6946,7 +6887,7 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in if (error) { goto cleanup; } #if WITH_PARTITION_STORAGE_ENGINE - if (TOKU_PARTITION_WRITE_FRM_DATA || IF_PARTITIONING(form->part_info, NULL) == NULL) { + if (TOKU_PARTITION_WRITE_FRM_DATA || form->part_info == NULL) { error = write_frm_data(status_block, txn, form->s->path.str); if (error) { goto cleanup; } } @@ -7099,7 +7040,7 @@ int ha_tokudb::delete_or_rename_table (const char* from_name, const char* to_nam DB_TXN *parent_txn = NULL; tokudb_trx_data *trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (thd_sql_command(ha_thd()) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) { parent_txn = trx->sub_sp_level; } @@ -7540,7 +7481,7 @@ int ha_tokudb::tokudb_add_index( DBC* tmp_cursor = NULL; int cursor_ret_val = 0; DBT curr_pk_key, curr_pk_val; - THD* thd = ha_thd(); + THD* thd = ha_thd(); DB_LOADER* loader = NULL; DB_INDEXER* indexer = NULL; bool loader_save_space = get_load_save_space(thd); @@ -7578,7 
+7519,7 @@ int ha_tokudb::tokudb_add_index( // // status message to be shown in "show process list" // - const char *old_proc_info = tokudb_thd_get_proc_info(thd); + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); char status_msg[MAX_ALIAS_NAME + 200]; //buffer of 200 should be a good upper bound. ulonglong num_processed = 0; //variable that stores number of elements inserted thus far thd_proc_info(thd, "Adding indexes"); @@ -7804,14 +7745,15 @@ int ha_tokudb::tokudb_add_index( num_processed++; if ((num_processed % 1000) == 0) { - sprintf(status_msg, "Adding indexes: Fetched %llu of about %llu rows, loading of data still remains.", num_processed, (long long unsigned) share->rows); + sprintf(status_msg, "Adding indexes: Fetched %llu of about %llu rows, loading of data still remains.", + num_processed, (long long unsigned) share->rows); thd_proc_info(thd, status_msg); #ifdef HA_TOKUDB_HAS_THD_PROGRESS thd_progress_report(thd, num_processed, (long long unsigned) share->rows); #endif - if (thd->killed) { + if (thd_killed(thd)) { error = ER_ABORTING_CONNECTION; goto cleanup; } @@ -7836,12 +7778,8 @@ int ha_tokudb::tokudb_add_index( for (uint i = 0; i < num_of_keys; i++, curr_index++) { if (key_info[i].flags & HA_NOSAME) { bool is_unique; - error = is_index_unique( - &is_unique, - txn, - share->key_file[curr_index], - &key_info[i] - ); + error = is_index_unique(&is_unique, txn, share->key_file[curr_index], &key_info[i], + creating_hot_index ? 0 : DB_PRELOCKED_WRITE); if (error) goto cleanup; if (!is_unique) { error = HA_ERR_FOUND_DUPP_KEY; @@ -7899,7 +7837,7 @@ cleanup: another transaction has accessed the table. \ To add indexes, make sure no transactions touch the table.", share->table_name); } - thd_proc_info(thd, old_proc_info); + thd_proc_info(thd, orig_proc_info); TOKUDB_HANDLER_DBUG_RETURN(error ? 
error : loader_error); } @@ -8251,15 +8189,46 @@ void ha_tokudb::cleanup_txn(DB_TXN *txn) { } void ha_tokudb::add_to_trx_handler_list() { - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton); trx->handlers = list_add(trx->handlers, &trx_handler_list); } void ha_tokudb::remove_from_trx_handler_list() { - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton); trx->handlers = list_delete(trx->handlers, &trx_handler_list); } +void ha_tokudb::rpl_before_write_rows() { + in_rpl_write_rows = true; +} + +void ha_tokudb::rpl_after_write_rows() { + in_rpl_write_rows = false; +} + +void ha_tokudb::rpl_before_delete_rows() { + in_rpl_delete_rows = true; +} + +void ha_tokudb::rpl_after_delete_rows() { + in_rpl_delete_rows = false; +} + +void ha_tokudb::rpl_before_update_rows() { + in_rpl_update_rows = true; +} + +void ha_tokudb::rpl_after_update_rows() { + in_rpl_update_rows = false; +} + +bool ha_tokudb::rpl_lookup_rows() { + if (!in_rpl_delete_rows && !in_rpl_update_rows) + return true; + else + return THDVAR(ha_thd(), rpl_lookup_rows); +} + // table admin #include "ha_tokudb_admin.cc" diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h index 220ea82f44b..061b28823d4 100644 --- a/storage/tokudb/ha_tokudb.h +++ b/storage/tokudb/ha_tokudb.h @@ -109,15 +109,6 @@ typedef struct loader_context { ha_tokudb* ha; } *LOADER_CONTEXT; -typedef struct hot_optimize_context { - THD *thd; - char* write_status_msg; - ha_tokudb *ha; - uint progress_stage; - uint current_table; - uint num_tables; -} *HOT_OPTIMIZE_CONTEXT; - // // This object stores table information that is to be shared // among all ha_tokudb objects. 
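The rpl_before_*/rpl_after_* methods added above are simple state toggles: they record whether this handler is currently applying a replicated write, delete or update row event, and rpl_lookup_rows() then reports whether the slave still has to fetch the old row image (it only consults the tokudb_rpl_lookup_rows session variable while a delete or update event is in flight). A purely hypothetical caller, sketched here only to show the intended bracketing; the real integration lives in the replication applier and is not part of these hunks:

    // Hypothetical sketch (not from this patch): how a row-event applier could
    // drive the new hooks when deleting a replicated row on the slave.
    static int apply_delete_row_event(ha_tokudb *h, uchar *before_image) {
        int err = 0;
        h->rpl_before_delete_rows();
        if (h->rpl_lookup_rows()) {
            // The slave is configured to verify the old row: the applier would
            // position on the primary key and read it back here; rnd_pos() above
            // optionally sleeps for tokudb_rpl_lookup_rows_delay milliseconds so
            // the tests can tell whether this branch ran.
        } else {
            // tokudb_rpl_lookup_rows=OFF: trust the before-image from the binlog
            // and skip the point query entirely.
        }
        // ... the actual delete of before_image would go here ...
        h->rpl_after_delete_rows();
        return err;
    }
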
@@ -260,6 +251,7 @@ private: uint64_t bulk_fetch_iteration; uint64_t rows_fetched_using_bulk_fetch; bool doing_bulk_fetch; + bool maybe_index_scan; // // buffer used to temporarily store a "packed key" @@ -475,7 +467,7 @@ private: ); int create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, toku_compression_method compression_method); void trace_create_table_info(const char *name, TABLE * form); - int is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info); + int is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info, int lock_flags); int is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn); int do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd); void set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* put_flags); @@ -804,7 +796,21 @@ private: void remove_from_trx_handler_list(); private: + int do_optimize(THD *thd); int map_to_handler_error(int error); + +public: + void rpl_before_write_rows(); + void rpl_after_write_rows(); + void rpl_before_delete_rows(); + void rpl_after_delete_rows(); + void rpl_before_update_rows(); + void rpl_after_update_rows(); + bool rpl_lookup_rows(); +private: + bool in_rpl_write_rows; + bool in_rpl_delete_rows; + bool in_rpl_update_rows; }; #if TOKU_INCLUDE_OPTION_STRUCTS diff --git a/storage/tokudb/ha_tokudb_admin.cc b/storage/tokudb/ha_tokudb_admin.cc index 8d202eeda41..100c88a76a8 100644 --- a/storage/tokudb/ha_tokudb_admin.cc +++ b/storage/tokudb/ha_tokudb_admin.cc @@ -128,8 +128,15 @@ static int analyze_progress(void *v_extra, uint64_t rows) { int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) { TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name); + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); uint64_t rec_per_key[table_share->key_parts]; int result = HA_ADMIN_OK; + + // stub out analyze if optimize is remapped to alter recreate + analyze + if (thd_sql_command(thd) != SQLCOM_ANALYZE) { + TOKUDB_HANDLER_DBUG_RETURN(result); + } + DB_TXN *txn = transaction; if (!txn) { result = HA_ADMIN_FAILED; @@ -168,9 +175,19 @@ int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) { if (error) result = HA_ADMIN_FAILED; } + thd_proc_info(thd, orig_proc_info); TOKUDB_HANDLER_DBUG_RETURN(result); } +typedef struct hot_optimize_context { + THD *thd; + char* write_status_msg; + ha_tokudb *ha; + uint progress_stage; + uint current_table; + uint num_tables; +} *HOT_OPTIMIZE_CONTEXT; + static int hot_poll_fun(void *extra, float progress) { HOT_OPTIMIZE_CONTEXT context = (HOT_OPTIMIZE_CONTEXT)extra; if (context->thd->killed) { @@ -194,9 +211,9 @@ static int hot_poll_fun(void *extra, float progress) { } // flatten all DB's in this table, to do so, peform hot optimize on each db -int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) { +int ha_tokudb::do_optimize(THD *thd) { TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name); - + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); int error; uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key); @@ -206,9 +223,7 @@ int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) { thd_progress_init(thd, curr_num_DBs); #endif - // // for each DB, run optimize and hot_optimize - // for (uint i = 0; i < curr_num_DBs; i++) { DB* db = share->key_file[i]; error = db->optimize(db); @@ -228,14 +243,24 @@ int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) { goto cleanup; } } - error = 0; -cleanup: +cleanup: #ifdef HA_TOKUDB_HAS_THD_PROGRESS 
thd_progress_end(thd); #endif + thd_proc_info(thd, orig_proc_info); + TOKUDB_HANDLER_DBUG_RETURN(error); +} +int ha_tokudb::optimize(THD *thd, HA_CHECK_OPT *check_opt) { + TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name); + int error; +#if TOKU_OPTIMIZE_WITH_RECREATE + error = HA_ADMIN_TRY_ALTER; +#else + error = do_optimize(thd); +#endif TOKUDB_HANDLER_DBUG_RETURN(error); } @@ -266,10 +291,7 @@ static void ha_tokudb_check_info(THD *thd, TABLE *table, const char *msg) { int ha_tokudb::check(THD *thd, HA_CHECK_OPT *check_opt) { TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name); - - const char *old_proc_info = tokudb_thd_get_proc_info(thd); - thd_proc_info(thd, "tokudb::check"); - + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); int result = HA_ADMIN_OK; int r; @@ -321,6 +343,6 @@ int ha_tokudb::check(THD *thd, HA_CHECK_OPT *check_opt) { } } } - thd_proc_info(thd, old_proc_info); + thd_proc_info(thd, orig_proc_info); TOKUDB_HANDLER_DBUG_RETURN(result); } diff --git a/storage/tokudb/ha_tokudb_alter_56.cc b/storage/tokudb/ha_tokudb_alter_56.cc index 8d8315c271c..2d5345e9584 100644 --- a/storage/tokudb/ha_tokudb_alter_56.cc +++ b/storage/tokudb/ha_tokudb_alter_56.cc @@ -122,6 +122,7 @@ public: expand_varchar_update_needed(false), expand_fixed_update_needed(false), expand_blob_update_needed(false), + optimize_needed(false), table_kc_info(NULL), altered_table_kc_info(NULL) { } @@ -141,6 +142,7 @@ public: bool expand_varchar_update_needed; bool expand_fixed_update_needed; bool expand_blob_update_needed; + bool optimize_needed; Dynamic_array<uint> changed_fields; KEY_AND_COL_INFO *table_kc_info; KEY_AND_COL_INFO *altered_table_kc_info; @@ -219,8 +221,10 @@ static bool change_type_is_supported(TABLE *table, TABLE *altered_table, Alter_i static ulong fix_handler_flags(THD *thd, TABLE *table, TABLE *altered_table, Alter_inplace_info *ha_alter_info) { ulong handler_flags = ha_alter_info->handler_flags; +#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099 // This is automatically supported, hide the flag from later checks handler_flags &= ~Alter_inplace_info::ALTER_PARTITIONED; +#endif // workaround for fill_alter_inplace_info bug (#5193) // the function erroneously sets the ADD_INDEX and DROP_INDEX flags for a column addition that does not @@ -437,7 +441,13 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(TABLE *alt result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK; } } + } +#if TOKU_OPTIMIZE_WITH_RECREATE + else if (only_flags(ctx->handler_flags, Alter_inplace_info::RECREATE_TABLE + Alter_inplace_info::ALTER_COLUMN_DEFAULT)) { + ctx->optimize_needed = true; + result = HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE; } +#endif if (result != HA_ALTER_INPLACE_NOT_SUPPORTED && table->s->null_bytes != altered_table->s->null_bytes && (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE)) { @@ -520,6 +530,9 @@ bool ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha if (error == 0 && ctx->reset_card) { error = tokudb::set_card_from_status(share->status_block, ctx->alter_txn, table->s, altered_table->s); } + if (error == 0 && ctx->optimize_needed) { + error = do_optimize(ha_thd()); + } #if (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \ (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799) @@ -707,27 +720,6 @@ bool ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_i tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx); bool result = false; // success THD *thd = ha_thd(); - MDL_ticket *ticket = 
table->mdl_ticket; - if (ticket->get_type() != MDL_EXCLUSIVE) { - // get exclusive lock no matter what -#if defined(MARIADB_BASE_VERSION) - killed_state saved_killed_state = thd->killed; - thd->killed = NOT_KILLED; - while (wait_while_table_is_used(thd, table, HA_EXTRA_NOT_USED) && thd->killed) - thd->killed = NOT_KILLED; - assert(ticket->get_type() == MDL_EXCLUSIVE); - if (thd->killed == NOT_KILLED) - thd->killed = saved_killed_state; -#else - THD::killed_state saved_killed_state = thd->killed; - thd->killed = THD::NOT_KILLED; - while (wait_while_table_is_used(thd, table, HA_EXTRA_NOT_USED) && thd->killed) - thd->killed = THD::NOT_KILLED; - assert(ticket->get_type() == MDL_EXCLUSIVE); - if (thd->killed == THD::NOT_KILLED) - thd->killed = saved_killed_state; -#endif - } if (commit) { #if (50613 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \ @@ -755,8 +747,37 @@ bool ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_i } if (!commit) { + if (table->mdl_ticket->get_type() != MDL_EXCLUSIVE && + (ctx->add_index_changed || ctx->drop_index_changed || ctx->compression_changed)) { + + // get exclusive lock no matter what +#if defined(MARIADB_BASE_VERSION) + killed_state saved_killed_state = thd->killed; + thd->killed = NOT_KILLED; + for (volatile uint i = 0; wait_while_table_is_used(thd, table, HA_EXTRA_NOT_USED); i++) { + if (thd->killed != NOT_KILLED) + thd->killed = NOT_KILLED; + sleep(1); + } + assert(table->mdl_ticket->get_type() == MDL_EXCLUSIVE); + if (thd->killed == NOT_KILLED) + thd->killed = saved_killed_state; +#else + THD::killed_state saved_killed_state = thd->killed; + thd->killed = THD::NOT_KILLED; + for (volatile uint i = 0; wait_while_table_is_used(thd, table, HA_EXTRA_NOT_USED); i++) { + if (thd->killed != THD::NOT_KILLED) + thd->killed = THD::NOT_KILLED; + sleep(1); + } + assert(table->mdl_ticket->get_type() == MDL_EXCLUSIVE); + if (thd->killed == THD::NOT_KILLED) + thd->killed = saved_killed_state; +#endif + } + // abort the alter transaction NOW so that any alters are rolled back. this allows the following restores to work. - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); assert(ctx->alter_txn == trx->stmt); assert(trx->tokudb_lock_count > 0); // for partitioned tables, we use a single transaction to do all of the partition changes. 
the tokudb_lock_count diff --git a/storage/tokudb/ha_tokudb_alter_common.cc b/storage/tokudb/ha_tokudb_alter_common.cc index ecef0fb7415..414e8280daf 100644 --- a/storage/tokudb/ha_tokudb_alter_common.cc +++ b/storage/tokudb/ha_tokudb_alter_common.cc @@ -814,7 +814,7 @@ int ha_tokudb::write_frm_data(const uchar *frm_data, size_t frm_len) { if (TOKU_PARTITION_WRITE_FRM_DATA || table->part_info == NULL) { // write frmdata to status THD *thd = ha_thd(); - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); assert(trx); DB_TXN *txn = trx->stmt; // use alter table transaction assert(txn); diff --git a/storage/tokudb/hatoku_defines.h b/storage/tokudb/hatoku_defines.h index 63c9f107e2e..ca25037cb0b 100644 --- a/storage/tokudb/hatoku_defines.h +++ b/storage/tokudb/hatoku_defines.h @@ -97,6 +97,11 @@ PATENT RIGHTS GRANT: #endif #if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100199 + +#if !defined(TOKUDB_CHECK_JEMALLOC) +#define TOKUDB_CHECK_JEMALLOC 1 +#endif + // mariadb 10.0 #define TOKU_USE_DB_TYPE_TOKUDB 1 #define TOKU_INCLUDE_ALTER_56 1 @@ -108,6 +113,7 @@ PATENT RIGHTS GRANT: #define TOKU_INCLUDE_EXTENDED_KEYS 1 #endif #define TOKU_INCLUDE_OPTION_STRUCTS 1 +#define TOKU_OPTIMIZE_WITH_RECREATE 1 #elif 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799 // mysql 5.7 with no patches @@ -130,17 +136,18 @@ PATENT RIGHTS GRANT: #define TOKU_PARTITION_WRITE_FRM_DATA 0 #else // mysql 5.6 with tokutek patches -#define TOKU_USE_DB_TYPE_TOKUDB 1 /* has DB_TYPE_TOKUDB patch */ +#define TOKU_USE_DB_TYPE_TOKUDB 1 // has DB_TYPE_TOKUDB patch #define TOKU_INCLUDE_ALTER_56 1 -#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1 /* has tokudb row format compression patch */ -#define TOKU_INCLUDE_XA 1 /* has patch that fixes TC_LOG_MMAP code */ +#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1 // has tokudb row format compression patch +#define TOKU_INCLUDE_XA 1 // has patch that fixes TC_LOG_MMAP code #define TOKU_PARTITION_WRITE_FRM_DATA 0 #define TOKU_INCLUDE_WRITE_FRM_DATA 0 -#define TOKU_INCLUDE_UPSERT 1 /* has tokudb upsert patch */ +#define TOKU_INCLUDE_UPSERT 1 // has tokudb upsert patch #if defined(HTON_SUPPORTS_EXTENDED_KEYS) #define TOKU_INCLUDE_EXTENDED_KEYS 1 #endif #endif +#define TOKU_OPTIMIZE_WITH_RECREATE 1 #elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599 #define TOKU_USE_DB_TYPE_TOKUDB 1 diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc index ea360812bb8..da4dc178d23 100644 --- a/storage/tokudb/hatoku_hton.cc +++ b/storage/tokudb/hatoku_hton.cc @@ -92,6 +92,7 @@ PATENT RIGHTS GRANT: #define MYSQL_SERVER 1 #include "hatoku_defines.h" #include <db.h> +#include <ctype.h> #include "stdint.h" #if defined(_WIN32) @@ -320,9 +321,25 @@ static void handle_ydb_error(int error) { sql_print_error(" "); sql_print_error("************************************************************"); break; + case TOKUDB_UPGRADE_FAILURE: + sql_print_error("%s upgrade failed. 
A clean shutdown of the previous version is required.", tokudb_hton_name); + break; + default: + sql_print_error("%s unknown error %d", tokudb_hton_name, error); + break; } } +static int tokudb_set_product_name(void) { + size_t n = strlen(tokudb_hton_name); + char tokudb_product_name[n+1]; + memset(tokudb_product_name, 0, sizeof tokudb_product_name); + for (size_t i = 0; i < n; i++) + tokudb_product_name[i] = tolower(tokudb_hton_name[i]); + int r = db_env_set_toku_product_name(tokudb_product_name); + return r; +} + static int tokudb_init_func(void *p) { TOKUDB_DBUG_ENTER("%p", p); int r; @@ -336,11 +353,17 @@ static int tokudb_init_func(void *p) { #if TOKUDB_CHECK_JEMALLOC if (tokudb_check_jemalloc && dlsym(RTLD_DEFAULT, "mallctl") == NULL) { - sql_print_error("%s not initialized because jemalloc is not loaded", tokudb_hton_name); + sql_print_error("%s is not initialized because jemalloc is not loaded", tokudb_hton_name); goto error; } #endif + r = tokudb_set_product_name(); + if (r) { + sql_print_error("%s can not set product name error %d", tokudb_hton_name, r); + goto error; + } + tokudb_pthread_mutex_init(&tokudb_mutex, MY_MUTEX_INIT_FAST); (void) my_hash_init(&tokudb_open_tables, table_alias_charset, 32, 0, 0, (my_hash_get_key) tokudb_get_key, 0, 0); @@ -522,6 +545,7 @@ static int tokudb_init_func(void *p) { if (r) { DBUG_PRINT("info", ("env->open %d", r)); + handle_ydb_error(r); goto error; } @@ -579,9 +603,6 @@ static int tokudb_done_func(void *p) { toku_global_status_rows = NULL; my_hash_free(&tokudb_open_tables); tokudb_pthread_mutex_destroy(&tokudb_mutex); -#if defined(_WIN64) - toku_ydb_destroy(); -#endif TOKUDB_DBUG_RETURN(0); } @@ -603,8 +624,35 @@ int tokudb_end(handlerton * hton, ha_panic_function type) { if (db_env) { if (tokudb_init_flags & DB_INIT_LOG) tokudb_cleanup_log_files(); - error = db_env->close(db_env, 0); // Error is logged - assert(error==0); +#if TOKU_INCLUDE_XA + long total_prepared = 0; // count the total number of prepared txn's that we discard + while (1) { + // get xid's + const long n_xid = 1; + TOKU_XA_XID xids[n_xid]; + long n_prepared = 0; + error = db_env->txn_xa_recover(db_env, xids, n_xid, &n_prepared, total_prepared == 0 ? DB_FIRST : DB_NEXT); + assert(error == 0); + if (n_prepared == 0) + break; + // discard xid's + for (long i = 0; i < n_xid; i++) { + DB_TXN *txn = NULL; + error = db_env->get_txn_from_xid(db_env, &xids[i], &txn); + assert(error == 0); + error = txn->discard(txn, 0); + assert(error == 0); + } + total_prepared += n_prepared; + } +#endif + error = db_env->close(db_env, total_prepared > 0 ? 
TOKUFT_DIRTY_SHUTDOWN : 0); +#if TOKU_INCLUDE_XA + if (error != 0 && total_prepared > 0) { + sql_print_error("%s: %ld prepared txns still live, please shutdown, error %d", tokudb_hton_name, total_prepared, error); + } else +#endif + assert(error == 0); db_env = NULL; } @@ -627,8 +675,7 @@ int tokudb_end(handlerton * hton, ha_panic_function type) { static int tokudb_close_connection(handlerton * hton, THD * thd) { int error = 0; - tokudb_trx_data* trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (trx && trx->checkpoint_lock_taken) { error = db_env->checkpointing_resume(db_env); } @@ -692,25 +739,27 @@ static void txn_progress_func(TOKU_TXN_PROGRESS progress, void* extra) { } static void commit_txn_with_progress(DB_TXN* txn, uint32_t flags, THD* thd) { - int r; + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); struct txn_progress_info info; info.thd = thd; - r = txn->commit_with_progress(txn, flags, txn_progress_func, &info); + int r = txn->commit_with_progress(txn, flags, txn_progress_func, &info); if (r != 0) { - sql_print_error("tried committing transaction %p and got error code %d", txn, r); + sql_print_error("%s: tried committing transaction %p and got error code %d", tokudb_hton_name, txn, r); } assert(r == 0); + thd_proc_info(thd, orig_proc_info); } static void abort_txn_with_progress(DB_TXN* txn, THD* thd) { - int r; + const char *orig_proc_info = tokudb_thd_get_proc_info(thd); struct txn_progress_info info; info.thd = thd; - r = txn->abort_with_progress(txn, txn_progress_func, &info); + int r = txn->abort_with_progress(txn, txn_progress_func, &info); if (r != 0) { - sql_print_error("tried aborting transaction %p and got error code %d", txn, r); + sql_print_error("%s: tried aborting transaction %p and got error code %d", tokudb_hton_name, txn, r); } assert(r == 0); + thd_proc_info(thd, orig_proc_info); } static void tokudb_cleanup_handlers(tokudb_trx_data *trx, DB_TXN *txn) { @@ -726,7 +775,7 @@ static int tokudb_commit(handlerton * hton, THD * thd, bool all) { TOKUDB_DBUG_ENTER(""); DBUG_PRINT("trans", ("ending transaction %s", all ? "all" : "stmt")); uint32_t syncflag = THDVAR(thd, commit_sync) ? 0 : DB_TXN_NOSYNC; - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); DB_TXN **txn = all ? &trx->all : &trx->stmt; DB_TXN *this_txn = *txn; if (this_txn) { @@ -755,7 +804,7 @@ static int tokudb_commit(handlerton * hton, THD * thd, bool all) { static int tokudb_rollback(handlerton * hton, THD * thd, bool all) { TOKUDB_DBUG_ENTER(""); DBUG_PRINT("trans", ("aborting transaction %s", all ? "all" : "stmt")); - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); DB_TXN **txn = all ? &trx->all : &trx->stmt; DB_TXN *this_txn = *txn; if (this_txn) { @@ -784,8 +833,14 @@ static int tokudb_rollback(handlerton * hton, THD * thd, bool all) { static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) { TOKUDB_DBUG_ENTER(""); int r = 0; + + /* if support_xa is disable, just return */ + if (!THDVAR(thd, support_xa)) { + TOKUDB_DBUG_RETURN(r); + } + DBUG_PRINT("trans", ("preparing transaction %s", all ? "all" : "stmt")); - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); DB_TXN* txn = all ? 
trx->all : trx->stmt; if (txn) { if (tokudb_debug & TOKUDB_DEBUG_TXN) { @@ -806,7 +861,7 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) { TOKUDB_DBUG_RETURN(r); } -static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) { +static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) { TOKUDB_DBUG_ENTER(""); int r = 0; if (len == 0 || xid_list == NULL) { @@ -864,7 +919,7 @@ static int tokudb_savepoint(handlerton * hton, THD * thd, void *savepoint) { TOKUDB_DBUG_ENTER(""); int error; SP_INFO save_info = (SP_INFO)savepoint; - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); if (thd->in_sub_stmt) { assert(trx->stmt); error = txn_begin(db_env, trx->sub_sp_level, &(save_info->txn), DB_INHERIT_ISOLATION, thd); @@ -895,7 +950,7 @@ static int tokudb_rollback_to_savepoint(handlerton * hton, THD * thd, void *save DB_TXN* parent = NULL; DB_TXN* txn_to_rollback = save_info->txn; - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); parent = txn_to_rollback->parent; if (!(error = txn_to_rollback->abort(txn_to_rollback))) { if (save_info->in_sub_stmt) { @@ -917,7 +972,7 @@ static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoin DB_TXN* parent = NULL; DB_TXN* txn_to_commit = save_info->txn; - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton); parent = txn_to_commit->parent; if (!(error = txn_to_commit->commit(txn_to_commit, 0))) { if (save_info->in_sub_stmt) { @@ -974,10 +1029,10 @@ static int tokudb_discover3(handlerton *hton, THD* thd, const char *db, const ch HA_METADATA_KEY curr_key = hatoku_frm_data; DBT key = {}; DBT value = {}; - bool do_commit; + bool do_commit = false; #if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100199 - tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) { do_commit = false; txn = trx->sub_sp_level; @@ -1132,15 +1187,14 @@ static bool tokudb_show_engine_status(THD * thd, stat_print_fn * stat_print) { static void tokudb_checkpoint_lock(THD * thd) { int error; const char *old_proc_info; - tokudb_trx_data* trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (!trx) { error = create_tokudb_trx_data_instance(&trx); // // can only fail due to memory allocation, so ok to assert // assert(!error); - thd_data_set(thd, tokudb_hton->slot, trx); + thd_set_ha_data(thd, tokudb_hton, trx); } if (trx->checkpoint_lock_taken) { @@ -1164,8 +1218,7 @@ cleanup: static void tokudb_checkpoint_unlock(THD * thd) { int error; const char *old_proc_info; - tokudb_trx_data* trx = NULL; - trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot); + tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); if (!trx) { error = 0; goto cleanup; @@ -1209,7 +1262,7 @@ static void tokudb_handle_fatal_signal(handlerton *hton __attribute__ ((__unused #endif static void tokudb_print_error(const DB_ENV * db_env, const char *db_errpfx, const char *buffer) { - sql_print_error("%s: %s", db_errpfx, buffer); + sql_print_error("%s: %s", 
db_errpfx, buffer); } static void tokudb_cleanup_log_files(void) { @@ -1388,9 +1441,36 @@ static struct st_mysql_sys_var *tokudb_system_variables[] = { #if TOKUDB_CHECK_JEMALLOC MYSQL_SYSVAR(check_jemalloc), #endif + MYSQL_SYSVAR(bulk_fetch), +#if TOKU_INCLUDE_XA + MYSQL_SYSVAR(support_xa), +#endif + MYSQL_SYSVAR(rpl_unique_checks), + MYSQL_SYSVAR(rpl_unique_checks_delay), + MYSQL_SYSVAR(rpl_lookup_rows), + MYSQL_SYSVAR(rpl_lookup_rows_delay), NULL }; +// Split ./database/table-dictionary into database, table and dictionary strings +static void tokudb_split_dname(const char *dname, String &database_name, String &table_name, String &dictionary_name) { + const char *splitter = strchr(dname, '/'); + if (splitter) { + const char *database_ptr = splitter+1; + const char *table_ptr = strchr(database_ptr, '/'); + if (table_ptr) { + database_name.append(database_ptr, table_ptr - database_ptr); + table_ptr += 1; + const char *dictionary_ptr = strchr(table_ptr, '-'); + if (dictionary_ptr) { + table_name.append(table_ptr, dictionary_ptr - table_ptr); + dictionary_ptr += 1; + dictionary_name.append(dictionary_ptr); + } + } + } +} + struct st_mysql_storage_engine tokudb_storage_engine = { MYSQL_HANDLERTON_INTERFACE_VERSION }; static struct st_mysql_information_schema tokudb_file_map_information_schema = { MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION }; @@ -1436,31 +1516,12 @@ static int tokudb_file_map(TABLE *table, THD *thd) { assert(iname_len == curr_val.size - 1); table->field[1]->store(iname, iname_len, system_charset_info); - // denormalize the dname - const char *database_name = NULL; - size_t database_len = 0; - const char *table_name = NULL; - size_t table_len = 0; - const char *dictionary_name = NULL; - size_t dictionary_len = 0; - database_name = strchr(dname, '/'); - if (database_name) { - database_name += 1; - table_name = strchr(database_name, '/'); - if (table_name) { - database_len = table_name - database_name; - table_name += 1; - dictionary_name = strchr(table_name, '-'); - if (dictionary_name) { - table_len = dictionary_name - table_name; - dictionary_name += 1; - dictionary_len = strlen(dictionary_name); - } - } - } - table->field[2]->store(database_name, database_len, system_charset_info); - table->field[3]->store(table_name, table_len, system_charset_info); - table->field[4]->store(dictionary_name, dictionary_len, system_charset_info); + // split the dname + String database_name, table_name, dictionary_name; + tokudb_split_dname(dname, database_name, table_name, dictionary_name); + table->field[2]->store(database_name.c_ptr(), database_name.length(), system_charset_info); + table->field[3]->store(table_name.c_ptr(), table_name.length(), system_charset_info); + table->field[4]->store(dictionary_name.c_ptr(), dictionary_name.length(), system_charset_info); error = schema_table_store_record(thd, table); } @@ -1491,10 +1552,12 @@ static int tokudb_file_map_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) rw_rdlock(&tokudb_hton_initialized_lock); if (!tokudb_hton_initialized) { - my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), "TokuDB"); - error = -1; + error = ER_PLUGIN_IS_NOT_LOADED; + my_error(error, MYF(0), tokudb_hton_name); } else { error = tokudb_file_map(table, thd); + if (error) + my_error(error, MYF(0)); } rw_unlock(&tokudb_hton_initialized_lock); @@ -1521,6 +1584,9 @@ static ST_FIELD_INFO tokudb_fractal_tree_info_field_info[] = { {"bt_num_blocks_in_use", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE }, {"bt_size_allocated", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, 
SKIP_OPEN_TABLE }, {"bt_size_in_use", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE }, + {"table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, + {"table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, + {"table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE} }; @@ -1558,25 +1624,25 @@ static int tokudb_report_fractal_tree_info_for_db(const DBT *dname, const DBT *i // Recalculate and check just to be safe. { size_t dname_len = strlen((const char *)dname->data); - size_t iname_len = strlen((const char *)iname->data); assert(dname_len == dname->size - 1); + table->field[0]->store((char *)dname->data, dname_len, system_charset_info); + size_t iname_len = strlen((const char *)iname->data); assert(iname_len == iname->size - 1); - table->field[0]->store( - (char *)dname->data, - dname_len, - system_charset_info - ); - table->field[1]->store( - (char *)iname->data, - iname_len, - system_charset_info - ); + table->field[1]->store((char *)iname->data, iname_len, system_charset_info); } table->field[2]->store(bt_num_blocks_allocated, false); table->field[3]->store(bt_num_blocks_in_use, false); table->field[4]->store(bt_size_allocated, false); table->field[5]->store(bt_size_in_use, false); + // split the dname + { + String database_name, table_name, dictionary_name; + tokudb_split_dname((const char *)dname->data, database_name, table_name, dictionary_name); + table->field[6]->store(database_name.c_ptr(), database_name.length(), system_charset_info); + table->field[7]->store(table_name.c_ptr(), table_name.length(), system_charset_info); + table->field[8]->store(dictionary_name.c_ptr(), dictionary_name.length(), system_charset_info); + } error = schema_table_store_record(thd, table); exit: @@ -1600,12 +1666,7 @@ static int tokudb_fractal_tree_info(TABLE *table, THD *thd) { goto cleanup; } while (error == 0) { - error = tmp_cursor->c_get( - tmp_cursor, - &curr_key, - &curr_val, - DB_NEXT - ); + error = tmp_cursor->c_get(tmp_cursor, &curr_key, &curr_val, DB_NEXT); if (!error) { error = tokudb_report_fractal_tree_info_for_db(&curr_key, &curr_val, table, thd); } @@ -1638,10 +1699,12 @@ static int tokudb_fractal_tree_info_fill_table(THD *thd, TABLE_LIST *tables, CON rw_rdlock(&tokudb_hton_initialized_lock); if (!tokudb_hton_initialized) { - my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), "TokuDB"); - error = -1; + error = ER_PLUGIN_IS_NOT_LOADED; + my_error(error, MYF(0), tokudb_hton_name); } else { error = tokudb_fractal_tree_info(table, thd); + if (error) + my_error(error, MYF(0)); } //3938: unlock the status flag lock @@ -1669,6 +1732,9 @@ static ST_FIELD_INFO tokudb_fractal_tree_block_map_field_info[] = { {"blocknum", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE }, {"offset", 0, MYSQL_TYPE_LONGLONG, 0, MY_I_S_MAYBE_NULL, NULL, SKIP_OPEN_TABLE }, {"size", 0, MYSQL_TYPE_LONGLONG, 0, MY_I_S_MAYBE_NULL, NULL, SKIP_OPEN_TABLE }, + {"table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, + {"table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, + {"table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE} }; @@ -1741,19 +1807,13 @@ static int tokudb_report_fractal_tree_block_map_for_db(const DBT *dname, const D // See #5789 // Recalculate and check just to be safe. 
size_t dname_len = strlen((const char *)dname->data); - size_t iname_len = strlen((const char *)iname->data); assert(dname_len == dname->size - 1); + table->field[0]->store((char *)dname->data, dname_len, system_charset_info); + + size_t iname_len = strlen((const char *)iname->data); assert(iname_len == iname->size - 1); - table->field[0]->store( - (char *)dname->data, - dname_len, - system_charset_info - ); - table->field[1]->store( - (char *)iname->data, - iname_len, - system_charset_info - ); + table->field[1]->store((char *)iname->data, iname_len, system_charset_info); + table->field[2]->store(e.checkpoint_counts[i], false); table->field[3]->store(e.blocknums[i], false); static const int64_t freelist_null = -1; @@ -1772,6 +1832,13 @@ static int tokudb_report_fractal_tree_block_map_for_db(const DBT *dname, const D table->field[5]->store(e.sizes[i], false); } + // split the dname + String database_name, table_name, dictionary_name; + tokudb_split_dname((const char *)dname->data, database_name, table_name,dictionary_name); + table->field[6]->store(database_name.c_ptr(), database_name.length(), system_charset_info); + table->field[7]->store(table_name.c_ptr(), table_name.length(), system_charset_info); + table->field[8]->store(dictionary_name.c_ptr(), dictionary_name.length(), system_charset_info); + error = schema_table_store_record(thd, table); } @@ -1812,12 +1879,7 @@ static int tokudb_fractal_tree_block_map(TABLE *table, THD *thd) { goto cleanup; } while (error == 0) { - error = tmp_cursor->c_get( - tmp_cursor, - &curr_key, - &curr_val, - DB_NEXT - ); + error = tmp_cursor->c_get(tmp_cursor, &curr_key, &curr_val, DB_NEXT); if (!error) { error = tokudb_report_fractal_tree_block_map_for_db(&curr_key, &curr_val, table, thd); } @@ -1850,10 +1912,12 @@ static int tokudb_fractal_tree_block_map_fill_table(THD *thd, TABLE_LIST *tables rw_rdlock(&tokudb_hton_initialized_lock); if (!tokudb_hton_initialized) { - my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), "TokuDB"); - error = -1; + error = ER_PLUGIN_IS_NOT_LOADED; + my_error(error, MYF(0), tokudb_hton_name); } else { error = tokudb_fractal_tree_block_map(table, thd); + if (error) + my_error(error, MYF(0)); } //3938: unlock the status flag lock @@ -1962,7 +2026,7 @@ static void tokudb_lock_timeout_callback(DB *db, uint64_t requesting_txnid, cons } // dump to stderr if (lock_timeout_debug & 2) { - TOKUDB_TRACE("%s", log_str.c_ptr()); + sql_print_error("%s: %s", tokudb_hton_name, log_str.c_ptr()); } } } @@ -2001,11 +2065,13 @@ static int tokudb_trx_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) { rw_rdlock(&tokudb_hton_initialized_lock); if (!tokudb_hton_initialized) { - my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), "TokuDB"); - error = -1; + error = ER_PLUGIN_IS_NOT_LOADED; + my_error(error, MYF(0), tokudb_hton_name); } else { struct tokudb_trx_extra e = { thd, tables->table }; error = db_env->iterate_live_transactions(db_env, tokudb_trx_callback, &e); + if (error) + my_error(error, MYF(0)); } rw_unlock(&tokudb_hton_initialized_lock); @@ -2032,6 +2098,9 @@ static ST_FIELD_INFO tokudb_lock_waits_field_info[] = { {"lock_waits_key_left", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, {"lock_waits_key_right", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, {"lock_waits_start_time", 0, MYSQL_TYPE_LONGLONG, 0, 0, NULL, SKIP_OPEN_TABLE }, + {"lock_waits_table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, + {"lock_waits_table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, + {"lock_waits_table_dictionary_name", 
256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE} }; @@ -2057,6 +2126,13 @@ static int tokudb_lock_waits_callback(DB *db, uint64_t requesting_txnid, const D tokudb_pretty_right_key(db, right_key, &right_str); table->field[4]->store(right_str.ptr(), right_str.length(), system_charset_info); table->field[5]->store(start_time, false); + + String database_name, table_name, dictionary_name; + tokudb_split_dname(dname, database_name, table_name, dictionary_name); + table->field[6]->store(database_name.c_ptr(), database_name.length(), system_charset_info); + table->field[7]->store(table_name.c_ptr(), table_name.length(), system_charset_info); + table->field[8]->store(dictionary_name.c_ptr(), dictionary_name.length(), system_charset_info); + int error = schema_table_store_record(thd, table); return error; } @@ -2072,11 +2148,13 @@ static int tokudb_lock_waits_fill_table(THD *thd, TABLE_LIST *tables, COND *cond rw_rdlock(&tokudb_hton_initialized_lock); if (!tokudb_hton_initialized) { - my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), "TokuDB"); - error = -1; + error = ER_PLUGIN_IS_NOT_LOADED; + my_error(error, MYF(0), tokudb_hton_name); } else { struct tokudb_lock_waits_extra e = { thd, tables->table }; error = db_env->iterate_pending_lock_requests(db_env, tokudb_lock_waits_callback, &e); + if (error) + my_error(error, MYF(0)); } rw_unlock(&tokudb_hton_initialized_lock); @@ -2102,6 +2180,9 @@ static ST_FIELD_INFO tokudb_locks_field_info[] = { {"locks_dname", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, {"locks_key_left", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, {"locks_key_right", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, + {"locks_table_schema", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, + {"locks_table_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, + {"locks_table_dictionary_name", 256, MYSQL_TYPE_STRING, 0, 0, NULL, SKIP_OPEN_TABLE }, {NULL, 0, MYSQL_TYPE_NULL, 0, 0, NULL, SKIP_OPEN_TABLE} }; @@ -2133,6 +2214,12 @@ static int tokudb_locks_callback(uint64_t txn_id, uint64_t client_id, iterate_ro tokudb_pretty_right_key(db, &right_key, &right_str); table->field[4]->store(right_str.ptr(), right_str.length(), system_charset_info); + String database_name, table_name, dictionary_name; + tokudb_split_dname(dname, database_name, table_name, dictionary_name); + table->field[5]->store(database_name.c_ptr(), database_name.length(), system_charset_info); + table->field[6]->store(table_name.c_ptr(), table_name.length(), system_charset_info); + table->field[7]->store(dictionary_name.c_ptr(), dictionary_name.length(), system_charset_info); + error = schema_table_store_record(thd, table); } return error; @@ -2149,11 +2236,13 @@ static int tokudb_locks_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) { rw_rdlock(&tokudb_hton_initialized_lock); if (!tokudb_hton_initialized) { - my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), "TokuDB"); - error = -1; + error = ER_PLUGIN_IS_NOT_LOADED; + my_error(error, MYF(0), tokudb_hton_name); } else { struct tokudb_locks_extra e = { thd, tables->table }; error = db_env->iterate_live_transactions(db_env, tokudb_locks_callback, &e); + if (error) + my_error(error, MYF(0)); } rw_unlock(&tokudb_hton_initialized_lock); diff --git a/storage/tokudb/hatoku_hton.h b/storage/tokudb/hatoku_hton.h index 054a5119702..58d34f01af6 100644 --- a/storage/tokudb/hatoku_hton.h +++ b/storage/tokudb/hatoku_hton.h @@ -450,17 +450,42 @@ static TYPELIB tokudb_empty_scan_typelib = { NULL }; 
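The table_schema / table_name / table_dictionary_name columns added to the TokuDB information-schema tables above are all derived with tokudb_split_dname(), which takes an internal dictionary name of the form ./<database>/<table>-<dictionary> apart (the layout is inferred from the parser itself). The same splitting, sketched standalone with std::string for anyone who wants to check the rules outside the server:

    // Standalone sketch of tokudb_split_dname() using std::string instead of the
    // server's String class.  Mirrors the parser above: fields are only filled
    // in as long as the expected separators are found.
    #include <string>

    static void split_dname(const std::string &dname, std::string &db,
                            std::string &table, std::string &dict) {
        size_t p1 = dname.find('/');                  // skip the leading "."
        if (p1 == std::string::npos) return;
        size_t p2 = dname.find('/', p1 + 1);          // end of the database name
        if (p2 == std::string::npos) return;
        db = dname.substr(p1 + 1, p2 - p1 - 1);
        size_t p3 = dname.find('-', p2 + 1);          // table/dictionary separator
        if (p3 == std::string::npos) return;
        table = dname.substr(p2 + 1, p3 - p2 - 1);
        dict = dname.substr(p3 + 1);
    }
    // e.g. "./test/t-main" splits into db="test", table="t", dict="main".
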
-static MYSQL_THDVAR_ENUM(empty_scan, - PLUGIN_VAR_OPCMDARG, +static MYSQL_THDVAR_ENUM(empty_scan, PLUGIN_VAR_OPCMDARG, "TokuDB algorithm to check if the table is empty when opened. ", NULL, NULL, TOKUDB_EMPTY_SCAN_RL, &tokudb_empty_scan_typelib ); #if TOKUDB_CHECK_JEMALLOC static uint tokudb_check_jemalloc; -static MYSQL_SYSVAR_UINT(check_jemalloc, tokudb_check_jemalloc, 0, "Check if jemalloc is linked", NULL, NULL, 1, 0, 1, 0); +static MYSQL_SYSVAR_UINT(check_jemalloc, tokudb_check_jemalloc, 0, "Check if jemalloc is linked", + NULL, NULL, 1, 0, 1, 0); +#endif + +static MYSQL_THDVAR_BOOL(bulk_fetch, PLUGIN_VAR_THDLOCAL, "enable bulk fetch", + NULL /*check*/, NULL /*update*/, true /*default*/); + +#if TOKU_INCLUDE_XA +static MYSQL_THDVAR_BOOL(support_xa, + PLUGIN_VAR_OPCMDARG, + "Enable TokuDB support for the XA two-phase commit", + NULL, // check + NULL, // update + true // default +); #endif +static MYSQL_THDVAR_BOOL(rpl_unique_checks, PLUGIN_VAR_THDLOCAL, "enable unique checks on replication slave", + NULL /*check*/, NULL /*update*/, true /*default*/); + +static MYSQL_THDVAR_ULONGLONG(rpl_unique_checks_delay, PLUGIN_VAR_THDLOCAL, "time in milliseconds to add to unique checks test on replication slave", + NULL, NULL, 0 /*default*/, 0 /*min*/, ~0ULL /*max*/, 1 /*blocksize*/); + +static MYSQL_THDVAR_BOOL(rpl_lookup_rows, PLUGIN_VAR_THDLOCAL, "lookup a row on rpl slave", + NULL /*check*/, NULL /*update*/, true /*default*/); + +static MYSQL_THDVAR_ULONGLONG(rpl_lookup_rows_delay, PLUGIN_VAR_THDLOCAL, "time in milliseconds to add to lookups on replication slave", + NULL, NULL, 0 /*default*/, 0 /*min*/, ~0ULL /*max*/, 1 /*blocksize*/); + extern HASH tokudb_open_tables; extern pthread_mutex_t tokudb_mutex; extern uint32_t tokudb_write_status_frequency; diff --git a/storage/tokudb/mysql-test/rpl/include/have_tokudb.opt b/storage/tokudb/mysql-test/rpl/include/have_tokudb.opt index 976f96f3f48..c22939cc965 100644 --- a/storage/tokudb/mysql-test/rpl/include/have_tokudb.opt +++ b/storage/tokudb/mysql-test/rpl/include/have_tokudb.opt @@ -1 +1 @@ ---loose-tokudb --plugin-load-add=$HA_TOKUDB_SO +--loose-tokudb --plugin-load-add=$HA_TOKUDB_SO --loose-tokudb-check-jemalloc=0 diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk.result new file mode 100644 index 00000000000..cd8608f4387 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk.result @@ -0,0 +1,17 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, primary key(a)) engine=tokudb; +insert into t values (1); +insert into t values (2),(3); +insert into t values (4); +include/diff_tables.inc [master:test.t, slave:test.t] +delete from t where a=2; +select unix_timestamp() into @tstart; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; +@tend-@tstart <= 5 +1 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk_lookup1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk_lookup1.result new file mode 100644 index 00000000000..ae2aea84287 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_delete_pk_lookup1.result @@ -0,0 +1,17 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, primary key(a)) engine=tokudb; +insert into t values (1); +insert into t values (2),(3); +insert into t values (4); 
+include/diff_tables.inc [master:test.t, slave:test.t] +delete from t where a=2; +select unix_timestamp() into @tstart; +select unix_timestamp() into @tend; +select @tend-@tstart > 5; +@tend-@tstart > 5 +1 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup0.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup0.result new file mode 100644 index 00000000000..fc961fd0c13 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup0.result @@ -0,0 +1,27 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb; +insert into t values (1,0); +insert into t values (2,0),(3,0); +insert into t values (4,0); +include/diff_tables.inc [master:test.t, slave:test.t] +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; +@tend-@tstart <= 5 +1 +select * from t; +a b +1 3 +2 2 +3 5 +4 3 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup1.result new file mode 100644 index 00000000000..5325f6c3c6d --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc0_lookup1.result @@ -0,0 +1,27 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb; +insert into t values (1,0); +insert into t values (2,0),(3,0); +insert into t values (4,0); +include/diff_tables.inc [master:test.t, slave:test.t] +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; +@tend-@tstart <= 5 +0 +select * from t; +a b +1 3 +2 2 +3 5 +4 3 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup0.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup0.result new file mode 100644 index 00000000000..5325f6c3c6d --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup0.result @@ -0,0 +1,27 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb; +insert into t values (1,0); +insert into t values (2,0),(3,0); +insert into t values (4,0); +include/diff_tables.inc [master:test.t, slave:test.t] +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; +@tend-@tstart <= 5 +0 +select * from t; +a b +1 3 +2 2 +3 5 +4 3 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git 
a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup1.result new file mode 100644 index 00000000000..5325f6c3c6d --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_pk_uc1_lookup1.result @@ -0,0 +1,27 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb; +insert into t values (1,0); +insert into t values (2,0),(3,0); +insert into t values (4,0); +include/diff_tables.inc [master:test.t, slave:test.t] +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; +@tend-@tstart <= 5 +0 +select * from t; +a b +1 3 +2 2 +3 5 +4 3 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup0.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup0.result new file mode 100644 index 00000000000..0b958b89d0f --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup0.result @@ -0,0 +1,27 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, b bigint not null, c bigint not null, primary key(a), unique key(c)) engine=tokudb; +insert into t values (1,0,-1); +insert into t values (2,0,-2),(3,0,-3); +insert into t values (4,0,-4); +include/diff_tables.inc [master:test.t, slave:test.t] +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; +@tend-@tstart <= 5 +1 +select * from t; +a b c +1 3 -1 +2 2 -2 +3 5 -3 +4 3 -4 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup1.result new file mode 100644 index 00000000000..83dcdb394df --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_update_unique_uc0_lookup1.result @@ -0,0 +1,27 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, b bigint not null, c bigint not null, primary key(a), unique key(c)) engine=tokudb; +insert into t values (1,0,-1); +insert into t values (2,0,-2),(3,0,-3); +insert into t values (4,0,-4); +include/diff_tables.inc [master:test.t, slave:test.t] +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; +@tend-@tstart <= 5 +0 +select * from t; +a b c +1 3 -1 +2 2 -2 +3 5 -3 +4 3 -4 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk.result new file mode 100644 index 00000000000..6db2036d933 --- /dev/null +++ 
b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk.result @@ -0,0 +1,14 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, primary key(a)) engine=tokudb; +select unix_timestamp() into @tstart; +insert into t values (1); +insert into t values (2),(3); +insert into t values (4); +select unix_timestamp()-@tstart <= 10; +unix_timestamp()-@tstart <= 10 +1 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk_uc1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk_uc1.result new file mode 100644 index 00000000000..3bcd3e8ccdd --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_pk_uc1.result @@ -0,0 +1,14 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, primary key(a)) engine=tokudb; +select unix_timestamp() into @tstart; +insert into t values (1); +insert into t values (2),(3); +insert into t values (4); +select unix_timestamp()-@tstart <= 10; +unix_timestamp()-@tstart <= 10 +0 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique.result new file mode 100644 index 00000000000..9eb1f2edf20 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique.result @@ -0,0 +1,14 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, b bigint not null, primary key(a), unique key(b)) engine=tokudb; +select unix_timestamp() into @tstart; +insert into t values (1,2); +insert into t values (2,3),(3,4); +insert into t values (4,5); +select unix_timestamp()-@tstart <= 10; +unix_timestamp()-@tstart <= 10 +1 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique_uc1.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique_uc1.result new file mode 100644 index 00000000000..3bed6ea282a --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_write_unique_uc1.result @@ -0,0 +1,14 @@ +include/master-slave.inc +[connection master] +drop table if exists t; +create table t (a bigint not null, b bigint not null, primary key(a), unique key(b)) engine=tokudb; +select unix_timestamp() into @tstart; +insert into t values (1,2); +insert into t values (2,3),(3,4); +insert into t values (4,5); +select unix_timestamp()-@tstart <= 10; +unix_timestamp()-@tstart <= 10 +0 +include/diff_tables.inc [master:test.t, slave:test.t] +drop table if exists t; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk-slave.opt new file mode 100644 index 00000000000..dc139282dc4 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk.test new file mode 100644 index 00000000000..fb42f40bb62 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk.test @@ -0,0 +1,63 @@ +# test replicated delete 
rows log events on a table with a primary key. +# the slave is read only with tokudb rpl row lookups OFF. + +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, primary key(a)) engine=$engine; +# show create table t; +insert into t values (1); +insert into t values (2),(3); +insert into t values (4); + +# wait for the inserts to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# delete a row +connection master; +delete from t where a=2; +select unix_timestamp() into @tstart; + +# wait for the delete to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; +connection master; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; # assert no delay in the delete time + +# diff tables +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1-slave.opt new file mode 100644 index 00000000000..4675b07763d --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=0 --tokudb-rpl-unique-checks=ON --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1.test new file mode 100644 index 00000000000..bf5edbd2c1b --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_delete_pk_lookup1.test @@ -0,0 +1,66 @@ +# test replicated delete rows log events on a table with a primary key. +# the slave is read only with tokudb rpl row lookups ON. +# this will cause SLOW deletes. 
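Both delete tests rely on the same timing trick: the -slave.opt file injects an artificial delay into the TokuDB row-lookup path on the slave (--tokudb-rpl-lookup-rows-delay=10000) and the test measures how long the slave takes to apply one replicated delete. A condensed sketch of that measurement, assuming only that sync_slave_with_master blocks until the slave has applied the pending events:

connection master;
delete from t where a=2;
select unix_timestamp() into @tstart;
sync_slave_with_master;          # returns once the slave has caught up
connection master;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5;       # rpl_tokudb_delete_pk (lookups OFF) expects 1;
                                 # rpl_tokudb_delete_pk_lookup1 (lookups ON) instead asserts @tend-@tstart > 5

With row lookups disabled the injected delay is never taken and the delete replicates promptly; with lookups enabled the delay is hit on the slave and the test expects it to lag.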
+ +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, primary key(a)) engine=$engine; +# show create table t; +insert into t values (1); +insert into t values (2),(3); +insert into t values (4); + +# wait for the inserts to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# delete a row +connection master; +delete from t where a=2; +select unix_timestamp() into @tstart; + +# wait for the delete to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +connection master; +select unix_timestamp() into @tend; +select @tend-@tstart > 5; # assert big delay in the delete time + +# diff tables +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0-slave.opt new file mode 100644 index 00000000000..dc139282dc4 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0.test new file mode 100644 index 00000000000..998987349c7 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup0.test @@ -0,0 +1,70 @@ +# test replicated update rows log events on a table with a primary key. 
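The four rpl_tokudb_update_pk_* tests that follow share an identical body (all reference blob 998987349c7); only the -slave.opt changes, covering every combination of the two slave-side TokuDB replication switches, each with a 10000 delay attached:

--tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows=OFF   (uc0_lookup0)
--tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows=ON    (uc0_lookup1)
--tokudb-rpl-unique-checks=ON  --tokudb-rpl-lookup-rows=OFF   (uc1_lookup0)
--tokudb-rpl-unique-checks=ON  --tokudb-rpl-lookup-rows=ON    (uc1_lookup1)

In every variant the statements update only the non-indexed column b of a table whose sole index is primary key(a), and the test asserts @tend-@tstart <= 5, i.e. the slave is expected to keep up whatever the switch settings. Note that the "# delete a row" and "wait for the delete" comments inside the body are carried over from the delete test; the statements they annotate are updates.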
+ +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, b bigint not null, primary key(a)) engine=$engine; +# show create table t; +insert into t values (1,0); +insert into t values (2,0),(3,0); +insert into t values (4,0); + +# wait for the inserts to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# delete a row +connection master; +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; + +# wait for the delete to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; +connection master; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; # assert no delay in the delete time + +connection slave; +select * from t; + +# diff tables +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1-slave.opt new file mode 100644 index 00000000000..d546dd00669 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1.test new file mode 100644 index 00000000000..998987349c7 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc0_lookup1.test @@ -0,0 +1,70 @@ +# test replicated update rows log events on a table with a primary key. 
+ +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, b bigint not null, primary key(a)) engine=$engine; +# show create table t; +insert into t values (1,0); +insert into t values (2,0),(3,0); +insert into t values (4,0); + +# wait for the inserts to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# delete a row +connection master; +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; + +# wait for the delete to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; +connection master; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; # assert no delay in the delete time + +connection slave; +select * from t; + +# diff tables +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0-slave.opt new file mode 100644 index 00000000000..5cfe5f83a91 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=ON --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0.test new file mode 100644 index 00000000000..998987349c7 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup0.test @@ -0,0 +1,70 @@ +# test replicated update rows log events on a table with a primary key. 
+ +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, b bigint not null, primary key(a)) engine=$engine; +# show create table t; +insert into t values (1,0); +insert into t values (2,0),(3,0); +insert into t values (4,0); + +# wait for the inserts to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# delete a row +connection master; +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; + +# wait for the delete to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; +connection master; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; # assert no delay in the delete time + +connection slave; +select * from t; + +# diff tables +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1-slave.opt new file mode 100644 index 00000000000..7cd575c52bb --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=ON --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1.test new file mode 100644 index 00000000000..998987349c7 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_pk_uc1_lookup1.test @@ -0,0 +1,70 @@ +# test replicated update rows log events on a table with a primary key. 
+ +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, b bigint not null, primary key(a)) engine=$engine; +# show create table t; +insert into t values (1,0); +insert into t values (2,0),(3,0); +insert into t values (4,0); + +# wait for the inserts to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# delete a row +connection master; +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; + +# wait for the delete to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; +connection master; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; # assert no delay in the delete time + +connection slave; +select * from t; + +# diff tables +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0-slave.opt new file mode 100644 index 00000000000..dc139282dc4 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0.test new file mode 100644 index 00000000000..11401ac0ce0 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup0.test @@ -0,0 +1,70 @@ +# test replicated update rows log events on a table with a primary key. 
+ +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, b bigint not null, c bigint not null, primary key(a), unique key(c)) engine=$engine; +# show create table t; +insert into t values (1,0,-1); +insert into t values (2,0,-2),(3,0,-3); +insert into t values (4,0,-4); + +# wait for the inserts to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# delete a row +connection master; +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; + +# wait for the delete to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; +connection master; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; # assert no delay in the delete time + +connection slave; +select * from t; + +# diff tables +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1-slave.opt new file mode 100644 index 00000000000..d546dd00669 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1.test new file mode 100644 index 00000000000..ea77447bc75 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_update_unique_uc0_lookup1.test @@ -0,0 +1,69 @@ +# test replicated update rows log events on a table with a primary key. 
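The rpl_tokudb_update_unique_* variants reuse the same update workload but add a third column carrying a unique secondary key, so the slave-side switches are exercised in the presence of a unique index:

create table t (a bigint not null, b bigint not null, c bigint not null, primary key(a), unique key(c)) engine=tokudb;

As in the primary-key-only variants, only the non-indexed column b is updated and the test asserts @tend-@tstart <= 5.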
+ +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, b bigint not null, c bigint not null, primary key(a), unique key(c)) engine=$engine; +# show create table t; +insert into t values (1,0,-1); +insert into t values (2,0,-2),(3,0,-3); +insert into t values (4,0,-4); + +# wait for the inserts to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# delete a row +connection master; +update t set b=b+1 where a=2; +update t set b=b+2 where a=1; +update t set b=b+3 where a=4; +update t set b=b+4 where a=3; +update t set b=b+1 where 1<=a and a<=3; +select unix_timestamp() into @tstart; + +# wait for the delete to finish on the slave +connection master; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; +connection master; +select unix_timestamp() into @tend; +select @tend-@tstart <= 5; # assert no delay in the delete time + +connection slave; +select * from t; + +# diff tables +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk-slave.opt new file mode 100644 index 00000000000..9baf0d65ecf --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk.test new file mode 100644 index 00000000000..c77e4b49605 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk.test @@ -0,0 +1,53 @@ +# test replicated write rows log events on a table with a primary key. +# the slave is read only with tokudb unique checks disabled. 
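The rpl_tokudb_write_* tests that follow come in pairs with identical bodies: the plain variant disables unique checks on the slave (--tokudb-rpl-unique-checks=OFF) while the _uc1 variant keeps them enabled, both with a delay injected through --tokudb-rpl-unique-checks-delay (the _uc1 headers still carry the "unique checks disabled" comment, but their .opt files enable the checks). The new .result files recorded at the start of this section differ only in the outcome of the elapsed-time check, which is what each pair is meant to demonstrate:

rpl_tokudb_write_pk.result:      unix_timestamp()-@tstart <= 10  ->  1   (checks skipped, inserts replicate promptly)
rpl_tokudb_write_pk_uc1.result:  unix_timestamp()-@tstart <= 10  ->  0   (checks enabled, the injected delay makes the slave lag)

The rpl_tokudb_write_unique / _unique_uc1 pair records the same 1 / 0 split for a table that also has a unique secondary key.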
+ +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_unique_checks%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, primary key(a)) engine=$engine; +# show create table t; +select unix_timestamp() into @tstart; +insert into t values (1); +insert into t values (2),(3); +insert into t values (4); + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +connection master; +select unix_timestamp()-@tstart <= 10; + +connection slave; +# insert into t values (5); # test read-only +# show create table t; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1-slave.opt new file mode 100644 index 00000000000..b1df0b6daf0 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=ON diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1.test new file mode 100644 index 00000000000..c77e4b49605 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_pk_uc1.test @@ -0,0 +1,53 @@ +# test replicated write rows log events on a table with a primary key. +# the slave is read only with tokudb unique checks disabled. 
+ +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_unique_checks%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, primary key(a)) engine=$engine; +# show create table t; +select unix_timestamp() into @tstart; +insert into t values (1); +insert into t values (2),(3); +insert into t values (4); + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +connection master; +select unix_timestamp()-@tstart <= 10; + +connection slave; +# insert into t values (5); # test read-only +# show create table t; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique-slave.opt new file mode 100644 index 00000000000..9baf0d65ecf --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique.test new file mode 100644 index 00000000000..cf6a26b423d --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique.test @@ -0,0 +1,52 @@ +# test replicated write rows log events on a table with a primary key and a unique secondary key. +# the slave is read only with tokudb unique checks disabled. 
+ +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_unique_checks%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, b bigint not null, primary key(a), unique key(b)) engine=$engine; +# show create table t; +select unix_timestamp() into @tstart; +insert into t values (1,2); +insert into t values (2,3),(3,4); +insert into t values (4,5); + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +connection master; +select unix_timestamp()-@tstart <= 10; + +connection slave; +# show create table t; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1-slave.opt new file mode 100644 index 00000000000..0518efd3da5 --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1-slave.opt @@ -0,0 +1 @@ +--read-only=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=ON diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1.test b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1.test new file mode 100644 index 00000000000..cf6a26b423d --- /dev/null +++ b/storage/tokudb/mysql-test/rpl/t/rpl_tokudb_write_unique_uc1.test @@ -0,0 +1,52 @@ +# test replicated write rows log events on a table with a primary key and a unique secondary key. +# the slave is read only with tokudb unique checks disabled. 
+ +source include/have_tokudb.inc; +let $engine=tokudb; +source include/have_binlog_format_row.inc; +source include/master-slave.inc; + +# initialize +connection master; +disable_warnings; +drop table if exists t; +enable_warnings; + +connection slave; +# show variables like 'read_only'; +# show variables like 'tokudb_rpl_unique_checks%'; + +# insert some rows +connection master; +# select @@binlog_format; +# select @@autocommit; +eval create table t (a bigint not null, b bigint not null, primary key(a), unique key(b)) engine=$engine; +# show create table t; +select unix_timestamp() into @tstart; +insert into t values (1,2); +insert into t values (2,3),(3,4); +insert into t values (4,5); + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +connection master; +select unix_timestamp()-@tstart <= 10; + +connection slave; +# show create table t; + +# diff tables +connection master; +--let $diff_tables= master:test.t, slave:test.t +source include/diff_tables.inc; + +# cleanup +connection master; +drop table if exists t; + +sync_slave_with_master; +# source include/sync_slave_sql_with_master.inc; + +source include/rpl_end.inc; + diff --git a/storage/tokudb/mysql-test/rpl/t/tokudb_innodb_xa_crash.test b/storage/tokudb/mysql-test/rpl/t/tokudb_innodb_xa_crash.test index 97818a597e2..07b117947a0 100644 --- a/storage/tokudb/mysql-test/rpl/t/tokudb_innodb_xa_crash.test +++ b/storage/tokudb/mysql-test/rpl/t/tokudb_innodb_xa_crash.test @@ -1,6 +1,6 @@ ---source include/master-slave.inc --source include/have_tokudb.inc --source include/have_innodb.inc +--source include/master-slave.inc eval CREATE TABLE t1(`a` INT) ENGINE=TokuDB; eval CREATE TABLE t2(`a` INT) ENGINE=InnoDB; diff --git a/storage/tokudb/mysql-test/tokudb/disabled.def b/storage/tokudb/mysql-test/tokudb/disabled.def index 7a36839f262..a396de4d9c3 100644 --- a/storage/tokudb/mysql-test/tokudb/disabled.def +++ b/storage/tokudb/mysql-test/tokudb/disabled.def @@ -25,3 +25,4 @@ fast_upsert_values: No UPSERT in MariaDB 5.5 mvcc-27: No online OPTIMIZE in MariaDB 5.5 mvcc-19: how this could work, if alter needs an exclusive mdl lock? mvcc-20: how this could work, if alter needs an exclusive mdl lock? 
+cluster_key_part: engine options on partitioned tables diff --git a/storage/tokudb/mysql-test/tokudb/include/have_tokudb.inc b/storage/tokudb/mysql-test/tokudb/include/have_tokudb.inc index e69de29bb2d..12b29a22d2c 100644 --- a/storage/tokudb/mysql-test/tokudb/include/have_tokudb.inc +++ b/storage/tokudb/mysql-test/tokudb/include/have_tokudb.inc @@ -0,0 +1 @@ +let $datadir=`select @@datadir`; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_create_select.result b/storage/tokudb/mysql-test/tokudb/r/bf_create_select.result new file mode 100644 index 00000000000..adacf1ed6aa --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_create_select.result @@ -0,0 +1,242 @@ +set default_storage_engine='tokudb'; +drop table if exists t,t1,t2; +CREATE TABLE `t` ( +`num` int(10) unsigned auto_increment NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +); +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; +count(*) +8388608 +CREATE TABLE `t1` ( +`num` int(10) unsigned NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +) as select * from t; +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +CREATE TABLE t2 AS SELECT count(*) from t1; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1; +DROP TABLE t2; +1 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; 
+count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; 
+CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TABLE t2; +1 +drop table t,t1; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_create_select_hash_part.result b/storage/tokudb/mysql-test/tokudb/r/bf_create_select_hash_part.result new file mode 100644 index 00000000000..f8c8e6c596d --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_create_select_hash_part.result @@ -0,0 +1,328 @@ +set default_storage_engine='tokudb'; +drop table if exists t,t1,t2,t3; +CREATE TABLE `t` ( +`num` int(10) unsigned auto_increment NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +); +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; +count(*) +8388608 +CREATE TABLE `t1` ( +`num` int(10) unsigned NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +) as select * from t; +CREATE TABLE `t2` ( +`num` int(10) unsigned NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +) PARTITION BY HASH (num) +PARTITIONS 8 as select * from t; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2; +DROP 
TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2; +DROP TABLE t3; +1 +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; 
+CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t3; +1 +drop table t,t1,t2; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_create_select_range_part.result b/storage/tokudb/mysql-test/tokudb/r/bf_create_select_range_part.result new file mode 100644 index 00000000000..0e055e76d3f --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_create_select_range_part.result @@ -0,0 +1,335 @@ +set default_storage_engine='tokudb'; +drop table if exists t,t1,t2; +CREATE TABLE `t` ( +`num` int(10) unsigned auto_increment NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +); +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT 
INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; +count(*) +8388608 +CREATE TABLE `t1` ( +`num` int(10) unsigned NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +) as select * from t; +CREATE TABLE `t2` ( +`num` int(10) unsigned NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +) PARTITION BY RANGE (num) +(PARTITION p0 VALUES LESS THAN (1000000), +PARTITION p1 VALUES LESS THAN (2000000), +PARTITION p2 VALUES LESS THAN (3000000), +PARTITION p3 VALUES LESS THAN (4000000), +PARTITION p4 VALUES LESS THAN (5000000), +PARTITION p5 VALUES LESS THAN (6000000), +PARTITION p6 VALUES LESS THAN (7000000), +PARTITION p7 VALUES LESS THAN MAXVALUE) as select * from t; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1; +count(*) +8388608 +DROP TABLE t3; +CREATE TABLE t4 AS SELECT count(*) from t2; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2; +DROP TABLE t4; +1 +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 
7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE `t3` (`x` bigint); +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +DROP TABLE t3; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 
7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; +DROP TABLE t4; +1 +drop table t,t1,t2; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_create_temp_select.result b/storage/tokudb/mysql-test/tokudb/r/bf_create_temp_select.result new file mode 100644 index 00000000000..6eddfaa9e82 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_create_temp_select.result @@ -0,0 +1,242 @@ +set default_storage_engine='tokudb'; +drop table if exists t,t1,t2; +CREATE TABLE `t` ( +`num` int(10) unsigned auto_increment NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +); +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; +count(*) +8388608 +CREATE TABLE `t1` ( +`num` int(10) unsigned NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +) as select * from t; +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1; +DROP TEMPORARY TABLE 
t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1; +DROP TEMPORARY TABLE t2; +1 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT 
count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; +DROP TEMPORARY TABLE t2; +1 +drop table t,t1; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_delete.result b/storage/tokudb/mysql-test/tokudb/r/bf_delete.result new file mode 100644 index 00000000000..e83b150c543 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_delete.result @@ -0,0 +1,54 @@ +set default_storage_engine='tokudb'; +drop table if exists t; +CREATE TABLE `t` (id bigint not null auto_increment primary key, val bigint not null default 0); +INSERT INTO t (id) values (null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; +count(*) +8388608 +set tokudb_bulk_fetch = ON; +delete from t where 
val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +set tokudb_bulk_fetch = OFF; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +delete from t where val > 0; +1 +drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_delete_trigger.result b/storage/tokudb/mysql-test/tokudb/r/bf_delete_trigger.result new file mode 100644 index 00000000000..78bd8d2fe14 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_delete_trigger.result @@ -0,0 +1,54 @@ +set default_storage_engine='tokudb'; +drop table if exists t; +create table t (id bigint not null primary key, x bigint not null); +insert into t values (1,0),(2,0),(3,0),(4,0); +create trigger t_delete before delete on t for each row insert into t values (1000000,0); +begin; +delete from t where x=0; +ERROR HY000: Can't update table 't' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_delete; +create trigger t_delete after delete on t for each row insert into t values (1000000,0); +begin; +delete from t where x=0; +ERROR HY000: Can't update table 't' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_delete; +create trigger t_delete before delete on t for each row delete from t where id=1000000; +begin; +delete from t where x=0; +ERROR HY000: Can't update table 't' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_delete; +create trigger t_delete after delete on t for each row delete from t where id=1000000; +begin; +delete from t where x=0; +ERROR HY000: Can't update table 't' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_delete; +create trigger t_delete before delete on t for each row update t set x=x+1 where id=1000000; +begin; +delete from t where x=0; +ERROR HY000: Can't update table 't' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_delete; +create trigger t_delete after delete on t for each row update t set x=x+1 where id=10000000; +begin; +delete from t where x=0; +ERROR HY000: Can't update table 't' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. 
+rollback; +drop trigger t_delete; +create table count (count bigint not null); +create trigger t_delete before delete on t for each row insert into count select count(*) from t; +begin; +delete from t where x=0; +select * from count; +count +4 +3 +2 +1 +rollback; +drop trigger t_delete; +drop table count; +drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_insert_select.result b/storage/tokudb/mysql-test/tokudb/r/bf_insert_select.result new file mode 100644 index 00000000000..ba7d0f63cd6 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_insert_select.result @@ -0,0 +1,205 @@ +set default_storage_engine='tokudb'; +drop table if exists t,t1,t2; +CREATE TABLE `t` ( +`num` int(10) unsigned auto_increment NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +); +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; +count(*) +8388608 +CREATE TABLE `t1` ( +`num` int(10) unsigned NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +) as select * from t; +CREATE TABLE `t2` ( +`count` bigint(20) NOT NULL +) ENGINE=TokuDB DEFAULT CHARSET=latin1; +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +INSERT into t2 SELECT count(*) from t1; +INSERT into t2 SELECT count(*) from t1; +INSERT into t2 SELECT count(*) from t1; +INSERT into t2 SELECT count(*) from t1; +INSERT into t2 SELECT count(*) from t1; +INSERT into t2 SELECT count(*) from t1; +INSERT into t2 SELECT count(*) from t1; +INSERT into t2 SELECT count(*) from t1; +INSERT into t2 SELECT count(*) from t1; +INSERT into t2 SELECT count(*) from t1; +1 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 
7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +INSERT into t2 SELECT count(*) from t1 where num > 7000000; +1 +drop table t,t1,t2; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_insert_select_dup_key.result b/storage/tokudb/mysql-test/tokudb/r/bf_insert_select_dup_key.result new file mode 100644 index 00000000000..22e2846d181 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_insert_select_dup_key.result @@ -0,0 +1,207 @@ +set default_storage_engine='tokudb'; +drop table if exists t,t1,t2; +CREATE TABLE `t` ( +`num` int(10) unsigned auto_increment NOT 
NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +); +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; +count(*) +8388608 +CREATE TABLE `t1` ( +`num` int(10) unsigned NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +) as select * from t; +CREATE TABLE `t2` ( +`num` int(10) unsigned auto_increment NOT NULL, +`count` bigint(20) NOT NULL, +UNIQUE (num) +) ENGINE=TokuDB DEFAULT CHARSET=latin1; +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 on DUPLICATE KEY UPDATE count=count+1; +1 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num 
> 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY 
UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; +1 +drop table t,t1,t2; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_insert_select_trigger.result b/storage/tokudb/mysql-test/tokudb/r/bf_insert_select_trigger.result new file mode 100644 index 00000000000..860d26602dd --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_insert_select_trigger.result @@ -0,0 +1,45 @@ +set default_storage_engine='tokudb'; +drop table if exists s,t; +create table s (id bigint not null primary key, x bigint); +insert into s values (1,0),(2,0),(3,0),(4,0); +create table t like s; +begin; +insert into t select * from s; +rollback; +create trigger t_trigger before insert on t for each row insert into s values (1000000,0); +begin; +insert into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row insert into s values (1000000,0); +begin; +insert into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before insert on t for each row delete from s where id=1000000; +begin; +insert into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row delete from s where id=1000000; +begin; +insert into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before insert on t for each row update s set x=x+1 where id=1000000; +begin; +insert into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row update s set x=x+1 where id=1000000; +begin; +insert into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. 
+rollback; +drop trigger t_trigger; +drop table s,t; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_insert_select_update_trigger.result b/storage/tokudb/mysql-test/tokudb/r/bf_insert_select_update_trigger.result new file mode 100644 index 00000000000..d7588441d92 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_insert_select_update_trigger.result @@ -0,0 +1,121 @@ +set default_storage_engine='tokudb'; +drop table if exists s,t; +create table s (id bigint not null primary key, x bigint); +insert into s values (1,0),(2,0),(3,0),(4,0); +create table t like s; +begin; +insert into t select * from s; +rollback; +create trigger t_trigger before insert on t for each row insert into s values (1000000,0); +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row insert into s values (1000000,0); +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before insert on t for each row delete from s where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row delete from s where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before insert on t for each row update s set x=x+1 where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row update s set x=x+1 where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +truncate table t; +insert into t values (1,0); +create trigger t_trigger before insert on t for each row insert into s values (1000000,0); +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row insert into s values (1000000,0); +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. 
+rollback; +drop trigger t_trigger; +create trigger t_trigger before insert on t for each row delete from s where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row delete from s where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before insert on t for each row update s set x=x+1 where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row update s set x=x+1 where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +truncate table t; +insert into t values (1,0); +create trigger t_trigger before update on t for each row insert into s values (1000000,0); +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after update on t for each row insert into s values (1000000,0); +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before update on t for each row delete from s where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after update on t for each row delete from s where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before update on t for each row update s set x=x+1 where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after update on t for each row update s set x=x+1 where id=1000000; +begin; +insert into t select * from s on duplicate key update x=t.x+1; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. 
+rollback; +drop trigger t_trigger; +drop table s,t; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_replace_select.result b/storage/tokudb/mysql-test/tokudb/r/bf_replace_select.result new file mode 100644 index 00000000000..eab0f103ed6 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_replace_select.result @@ -0,0 +1,367 @@ +set default_storage_engine='tokudb'; +drop table if exists t,t1,t2; +CREATE TABLE `t` ( +`num` int(10) unsigned auto_increment NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +); +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; +count(*) +8388608 +CREATE TABLE `t1` ( +`num` int(10) unsigned NOT NULL, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +) as select * from t; +CREATE TABLE `t2` ( +`count` bigint(20) NOT NULL +) ENGINE=TokuDB DEFAULT CHARSET=latin1; +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +REPLACE into t2 SELECT count(*) from t1; +REPLACE into t2 SELECT count(*) from t1; +REPLACE into t2 SELECT count(*) from t1; +REPLACE into t2 SELECT count(*) from t1; +REPLACE into t2 SELECT count(*) from t1; +REPLACE into t2 SELECT count(*) from t1; +REPLACE into t2 SELECT count(*) from t1; +REPLACE into t2 SELECT count(*) from t1; +REPLACE into t2 SELECT count(*) from t1; +REPLACE into t2 SELECT count(*) from t1; +1 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +SELECT count(*) from t1; +count(*) +8388608 +INSERT IGNORE into t2 SELECT count(*) from t1; +INSERT IGNORE into t2 SELECT count(*) from t1; +INSERT IGNORE into t2 SELECT count(*) from t1; +INSERT IGNORE into t2 SELECT count(*) from t1; +INSERT IGNORE into t2 SELECT count(*) from t1; +INSERT IGNORE into t2 SELECT count(*) from t1; +INSERT IGNORE into t2 SELECT count(*) from t1; +INSERT IGNORE into t2 SELECT count(*) from t1; +INSERT IGNORE into t2 SELECT count(*) from t1; +INSERT IGNORE into t2 SELECT count(*) from t1; +1 +SELECT 
count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; 
+REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +REPLACE into t2 SELECT count(*) from t1 where num > 7000000; +1 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +SELECT count(*) from t1 where num > 7000000; +count(*) +1847274 +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 
7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; +1 +drop table t,t1,t2; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_replace_select_trigger.result b/storage/tokudb/mysql-test/tokudb/r/bf_replace_select_trigger.result new file mode 100644 index 00000000000..acd17170301 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_replace_select_trigger.result @@ -0,0 +1,121 @@ +set default_storage_engine='tokudb'; +drop table if exists s,t; +create table s (id bigint not null primary key, x bigint); +insert into s values (1,0),(2,0),(3,0),(4,0); +create table t like s; +begin; +replace into t select * from s; +rollback; +create trigger t_trigger before insert on t for each row replace into s values (1000000,0); +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row replace into s values (1000000,0); +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before insert on t for each row delete from s where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row delete from s where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before insert on t for each row update s set x=x+1 where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row update s set x=x+1 where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +truncate table t; +insert into t values (1,1); +create trigger t_trigger before insert on t for each row replace into s values (1000000,0); +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. 
+rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row replace into s values (1000000,0); +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before insert on t for each row delete from s where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row delete from s where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before insert on t for each row update s set x=x+1 where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after insert on t for each row update s set x=x+1 where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +truncate table t; +insert into t values (1,1); +create trigger t_trigger before delete on t for each row replace into s values (1000000,0); +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after delete on t for each row replace into s values (1000000,0); +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before delete on t for each row delete from s where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after delete on t for each row delete from s where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger before delete on t for each row update s set x=x+1 where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. +rollback; +drop trigger t_trigger; +create trigger t_trigger after delete on t for each row update s set x=x+1 where id=1000000; +begin; +replace into t select * from s; +ERROR HY000: Can't update table 's' in stored function/trigger because it is already used by statement which invoked this stored function/trigger. 
+rollback; +drop trigger t_trigger; +drop table s,t; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_select_hash_part.result b/storage/tokudb/mysql-test/tokudb/r/bf_select_hash_part.result new file mode 100644 index 00000000000..2c72c7129db --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_select_hash_part.result @@ -0,0 +1,278 @@ +set default_storage_engine='tokudb'; +drop table if exists t; +CREATE TABLE `t` ( +`num` int(10) unsigned NOT NULL auto_increment, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +) PARTITION BY HASH (num) PARTITIONS 8; +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; +count(*) +1048576 +set tokudb_bulk_fetch=ON; +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +set tokudb_bulk_fetch=OFF; +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +1 +set tokudb_bulk_fetch=ON; +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where 
num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +set tokudb_bulk_fetch=OFF; +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +SELECT count(*) from t where num > 500000; +count(*) +548576 +1 +drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/bf_select_range_part.result b/storage/tokudb/mysql-test/tokudb/r/bf_select_range_part.result new file mode 100644 index 00000000000..c13324aa34e --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bf_select_range_part.result @@ -0,0 +1,286 @@ +set default_storage_engine='tokudb'; +drop table if exists t; +CREATE TABLE `t` ( +`num` int(10) unsigned NOT NULL auto_increment, +`val` varchar(32) DEFAULT NULL, +PRIMARY KEY (`num`) +) PARTITION BY RANGE (num) +(PARTITION p0 VALUES LESS THAN (100000), +PARTITION p1 VALUES LESS THAN (200000), +PARTITION p2 VALUES LESS THAN (300000), +PARTITION p3 VALUES LESS THAN (400000), +PARTITION p4 VALUES LESS THAN (500000), +PARTITION p5 VALUES LESS THAN (600000), +PARTITION p6 VALUES LESS THAN (700000), +PARTITION p7 VALUES LESS THAN MAXVALUE); +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; 
+INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; +count(*) +1048576 +set tokudb_bulk_fetch=ON; +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +set tokudb_bulk_fetch=OFF; +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +SELECT count(*) from t; +count(*) +1048576 +1 +set tokudb_bulk_fetch=ON; +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +set tokudb_bulk_fetch=OFF; +SELECT count(*) from t where num > 700000; 
+count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +SELECT count(*) from t where num > 700000; +count(*) +348576 +1 +drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_key_part.result b/storage/tokudb/mysql-test/tokudb/r/cluster_key_part.result new file mode 100644 index 00000000000..cd8fc340314 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/cluster_key_part.result @@ -0,0 +1,28 @@ +set default_storage_engine='tokudb'; +drop table if exists t; +create table t ( +x int not null, +y int not null, +primary key(x)) +partition by hash(x) partitions 2; +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `x` int(11) NOT NULL, + `y` int(11) NOT NULL, + PRIMARY KEY (`x`) +) ENGINE=TokuDB DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY HASH (x) +PARTITIONS 2 */ +alter table t add clustering key(y); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `x` int(11) NOT NULL, + `y` int(11) NOT NULL, + PRIMARY KEY (`x`), + CLUSTERING KEY `y` (`y`) +) ENGINE=TokuDB DEFAULT CHARSET=latin1 +/*!50100 PARTITION BY HASH (x) +PARTITIONS 2 */ +drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result deleted file mode 100644 index d289229653a..00000000000 --- a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result +++ /dev/null @@ -1,107 +0,0 @@ -drop table if exists t; -set session optimizer_switch='extended_keys=on'; -select @@optimizer_switch; -@@optimizer_switch -index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on -create table t (id int not null, x int not null, y int not null, primary key(id), key(x)) engine=innodb; -insert into t values (0,0,0),(1,1,1),(2,2,2),(3,2,3),(4,2,4); -explain select x,id from t force index (x) where x=0 and id=0; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 Using 
index -flush status; -select x,id from t force index (x) where x=0 and id=0; -x id -0 0 -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select y,id from t force index (x) where x=0 and id=0; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 -flush status; -select y,id from t force index (x) where x=0 and id=0; -y id -0 0 -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select x,id from t force index (x) where x=0 and id=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 Using index -flush status; -select x,id from t force index (x) where x=0 and id=1; -x id -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select y,id from t force index (x)where x=0 and id=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 -flush status; -select y,id from t force index(x) where x=0 and id=1; -y id -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select x,id from t force index (x) where x=2 and id=3; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 Using index -flush status; -select x,id from t force index (x) where x=2 and id=3; -x id -2 3 -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select x,id from t force index (x) where x=2 and id=0; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 Using index -flush status; -select x,id from t force index (x) where x=2 and id=0; -x id -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result deleted file mode 100644 index 0e14eb28c42..00000000000 --- a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result +++ /dev/null @@ -1,107 +0,0 @@ -drop table if exists t; -set session optimizer_switch='extended_keys=on'; -select @@optimizer_switch; -@@optimizer_switch 
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on -create table t (id int not null, x int not null, y int not null, primary key(id), key(x)) engine=tokudb; -insert into t values (0,0,0),(1,1,1),(2,2,2),(3,2,3),(4,2,4); -explain select x,id from t force index (x) where x=0 and id=0; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 Using index -flush status; -select x,id from t force index (x) where x=0 and id=0; -x id -0 0 -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select y,id from t force index (x) where x=0 and id=0; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 -flush status; -select y,id from t force index (x) where x=0 and id=0; -y id -0 0 -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select x,id from t force index (x) where x=0 and id=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 Using index -flush status; -select x,id from t force index (x) where x=0 and id=1; -x id -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select y,id from t force index (x)where x=0 and id=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 -flush status; -select y,id from t force index(x) where x=0 and id=1; -y id -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select x,id from t force index (x) where x=2 and id=3; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 Using index -flush status; -select x,id from t force index (x) where x=2 and id=3; -x id -2 3 -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select x,id from t force index (x) where x=2 and id=0; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const x x 8 const,const 1 Using index -flush status; -select x,id from t force index (x) where x=2 and id=0; -x id -show status like 'handler_read%'; -Variable_name Value 
-Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result deleted file mode 100644 index 84fd6ed8ecc..00000000000 --- a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result +++ /dev/null @@ -1,42 +0,0 @@ -drop table if exists t; -set session optimizer_switch='extended_keys=on'; -select @@optimizer_switch; -@@optimizer_switch -index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on -create table t (a int not null, b int not null, c int not null, d int not null, primary key(a,b), key(c,a)) engine=innodb; -insert into t values (0,0,0,0),(0,1,0,1); -explain select c,a,b from t where c=0 and a=0 and b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const PRIMARY,c PRIMARY 8 const,const 1 -flush status; -select c,a,b from t where c=0 and a=0 and b=1; -c a b -0 0 1 -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select c,a,b from t force index (c) where c=0 and a=0 and b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const c c 12 const,const,const 1 Using index -flush status; -select c,a,b from t force index (c) where c=0 and a=0 and b=1; -c a b -0 0 1 -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result deleted file mode 100644 index 89ca5307525..00000000000 --- a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result +++ /dev/null @@ -1,42 +0,0 @@ -drop table if exists t; -set session optimizer_switch='extended_keys=on'; -select @@optimizer_switch; -@@optimizer_switch -index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on -create table t (a int not null, b int not null, c int not null, d int not null, primary key(a,b), key(c,a)) engine=tokudb; 
-insert into t values (0,0,0,0),(0,1,0,1); -explain select c,a,b from t where c=0 and a=0 and b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const PRIMARY,c PRIMARY 8 const,const 1 -flush status; -select c,a,b from t where c=0 and a=0 and b=1; -c a b -0 0 1 -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -explain select c,a,b from t force index (c) where c=0 and a=0 and b=1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t const c c 12 const,const,const 1 Using index -flush status; -select c,a,b from t force index (c) where c=0 and a=0 and b=1; -c a b -0 0 1 -show status like 'handler_read%'; -Variable_name Value -Handler_read_first 0 -Handler_read_key 1 -Handler_read_last 0 -Handler_read_next 0 -Handler_read_prev 0 -Handler_read_rnd 0 -Handler_read_rnd_deleted 0 -Handler_read_rnd_next 0 -drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result index f84be01163f..018900c7b98 100644 --- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result +++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_released.result @@ -5,63 +5,64 @@ create table t (id int primary key); select * from information_schema.tokudb_trx; trx_id trx_mysql_thread_id select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name select * from information_schema.tokudb_lock_waits; -requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time +requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name set autocommit=0; +set tokudb_prelock_empty=OFF; insert into t values (1); set autocommit=0; insert into t values (1); select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right -TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name +TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main select * from information_schema.tokudb_lock_waits; -requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time -REQUEST_TRX_ID BLOCK_TRX_ID ./test/t-main 0001000000 0001000000 LOCK_WAITS_START_TIME +requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name +REQUEST_TRX_ID BLOCK_TRX_ID ./test/t-main 0001000000 0001000000 LOCK_WAITS_START_TIME test t main select * from information_schema.tokudb_trx; trx_id trx_mysql_thread_id TRX_ID MYSQL_ID TRX_ID MYSQL_ID commit; select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right -TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 -TRX_ID MYSQL_ID 
./test/t-main 0001000000 0001000000 +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name +TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main select * from information_schema.tokudb_lock_waits; -requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time +requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name ERROR 23000: Duplicate entry '1' for key 'PRIMARY' commit; select * from information_schema.tokudb_trx; trx_id trx_mysql_thread_id select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name select * from information_schema.tokudb_lock_waits; -requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time +requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name set autocommit=0; +set tokudb_prelock_empty=OFF; replace into t values (1); set autocommit=0; replace into t values (1); select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right -TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name +TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main select * from information_schema.tokudb_lock_waits; -requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time -REQUEST_TRX_ID BLOCK_TRX_ID ./test/t-main 0001000000 0001000000 LOCK_WAITS_START_TIME +requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name +REQUEST_TRX_ID BLOCK_TRX_ID ./test/t-main 0001000000 0001000000 LOCK_WAITS_START_TIME test t main select * from information_schema.tokudb_trx; trx_id trx_mysql_thread_id TRX_ID MYSQL_ID TRX_ID MYSQL_ID commit; select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right -TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name +TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main select * from information_schema.tokudb_lock_waits; -requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time +requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name commit; select * from information_schema.tokudb_trx; trx_id trx_mysql_thread_id select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left 
locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name select * from information_schema.tokudb_lock_waits; -requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time +requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result index 1e0668164ff..b9fca50b507 100644 --- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result +++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_lock_waits_timeout.result @@ -5,34 +5,35 @@ create table t (id int primary key); select * from information_schema.tokudb_trx; trx_id trx_mysql_thread_id select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name select * from information_schema.tokudb_lock_waits; -requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time +requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name set autocommit=0; +set tokudb_prelock_empty=OFF; insert into t values (1); set autocommit=0; insert into t values (1); select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right -TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name +TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main select * from information_schema.tokudb_lock_waits; -requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time -REQUEST_TRX_ID BLOCK_TRX_ID ./test/t-main 0001000000 0001000000 LOCK_WAITS_START_TIME +requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name +REQUEST_TRX_ID BLOCK_TRX_ID ./test/t-main 0001000000 0001000000 LOCK_WAITS_START_TIME test t main select * from information_schema.tokudb_trx; trx_id trx_mysql_thread_id TRX_ID MYSQL_ID TRX_ID MYSQL_ID commit; select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name select * from information_schema.tokudb_lock_waits; -requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time +requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name ERROR HY000: Lock wait timeout exceeded; try restarting transaction commit; select * from information_schema.tokudb_trx; trx_id trx_mysql_thread_id select * from information_schema.tokudb_locks; 
-locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name select * from information_schema.tokudb_lock_waits; -requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time +requesting_trx_id blocking_trx_id lock_waits_dname lock_waits_key_left lock_waits_key_right lock_waits_start_time lock_waits_table_schema lock_waits_table_name lock_waits_table_dictionary_name drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result index ad252da448f..a07f7ba52fe 100644 --- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result +++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks.result @@ -4,7 +4,7 @@ drop table if exists t; create table t (id int primary key); set autocommit=0; select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name insert into t values (1); insert into t values (3); insert into t values (5); @@ -12,17 +12,17 @@ set autocommit=0; insert into t values (2); insert into t values (4); insert into t values (6); -select * from information_schema.tokudb_locks order by locks_trx_id; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right -TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 -TRX_ID MYSQL_ID ./test/t-main 0003000000 0003000000 -TRX_ID MYSQL_ID ./test/t-main 0005000000 0005000000 -TRX_ID MYSQL_ID ./test/t-main 0002000000 0002000000 -TRX_ID MYSQL_ID ./test/t-main 0004000000 0004000000 -TRX_ID MYSQL_ID ./test/t-main 0006000000 0006000000 +select * from information_schema.tokudb_locks order by locks_trx_id,locks_key_left; +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name +TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main +TRX_ID MYSQL_ID ./test/t-main 0003000000 0003000000 test t main +TRX_ID MYSQL_ID ./test/t-main 0005000000 0005000000 test t main +TRX_ID MYSQL_ID ./test/t-main 0002000000 0002000000 test t main +TRX_ID MYSQL_ID ./test/t-main 0004000000 0004000000 test t main +TRX_ID MYSQL_ID ./test/t-main 0006000000 0006000000 test t main commit; commit; select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name commit; drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result index 21a6b5d308c..0a5862e9322 100644 --- a/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result +++ b/storage/tokudb/mysql-test/tokudb/r/i_s_tokudb_locks_released.result @@ -4,21 +4,21 @@ drop table if exists t; create table t (id int primary key); set autocommit=0; select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name set 
autocommit=0; +set tokudb_prelock_empty=OFF; insert into t values (1); set autocommit=0; insert into t values (1); select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right -TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name +TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main commit; select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right -TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 -TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name +TRX_ID MYSQL_ID ./test/t-main 0001000000 0001000000 test t main ERROR 23000: Duplicate entry '1' for key 'PRIMARY' commit; select * from information_schema.tokudb_locks; -locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right +locks_trx_id locks_mysql_thread_id locks_dname locks_key_left locks_key_right locks_table_schema locks_table_name locks_table_dictionary_name drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/r/information-schema-global-status.result b/storage/tokudb/mysql-test/tokudb/r/information-schema-global-status.result index 438d10ac282..369c14fe4fe 100644 --- a/storage/tokudb/mysql-test/tokudb/r/information-schema-global-status.result +++ b/storage/tokudb/mysql-test/tokudb/r/information-schema-global-status.result @@ -45,6 +45,7 @@ TOKUDB_CACHETABLE_MISS TOKUDB_CACHETABLE_MISS_TIME TOKUDB_CACHETABLE_PREFETCHES TOKUDB_CACHETABLE_SIZE_CACHEPRESSURE +TOKUDB_CACHETABLE_SIZE_CLONED TOKUDB_CACHETABLE_SIZE_CURRENT TOKUDB_CACHETABLE_SIZE_LEAF TOKUDB_CACHETABLE_SIZE_LIMIT diff --git a/storage/tokudb/mysql-test/tokudb/r/mvcc-26.result b/storage/tokudb/mysql-test/tokudb/r/mvcc-26.result index d861a972388..ba5e6ab69f8 100644 --- a/storage/tokudb/mysql-test/tokudb/r/mvcc-26.result +++ b/storage/tokudb/mysql-test/tokudb/r/mvcc-26.result @@ -16,6 +16,7 @@ select * from foo; a optimize table foo; Table Op Msg_type Msg_text +test.foo optimize note Table does not support optimize, doing recreate + analyze instead test.foo optimize status OK select * from foo; a @@ -27,6 +28,7 @@ a 3 optimize table foo; Table Op Msg_type Msg_text +test.foo optimize note Table does not support optimize, doing recreate + analyze instead test.foo optimize status OK select * from foo; a diff --git a/storage/tokudb/mysql-test/tokudb/r/tokudb_support_xa.result b/storage/tokudb/mysql-test/tokudb/r/tokudb_support_xa.result new file mode 100644 index 00000000000..c265f38cdc2 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/tokudb_support_xa.result @@ -0,0 +1,126 @@ +'#--------------------begin------------------------#' +SET @session_start_value = @@session.tokudb_support_xa; +SELECT @session_start_value; +@session_start_value +1 +SET @global_start_value = @@global.tokudb_support_xa; +SELECT @global_start_value; +@global_start_value +1 +SET @@session.tokudb_support_xa = 0; +SET @@session.tokudb_support_xa = DEFAULT; +SELECT @@session.tokudb_support_xa; +@@session.tokudb_support_xa +1 +SET @@global.tokudb_support_xa = 0; +SET @@global.tokudb_support_xa = DEFAULT; +SELECT @@global.tokudb_support_xa; +@@global.tokudb_support_xa +1 +'#--------------------case#1 valid set support_xa------------------------#' +SET 
@@session.tokudb_support_xa = 0; +SELECT @@session.tokudb_support_xa; +@@session.tokudb_support_xa +0 +SET @@session.tokudb_support_xa = 1; +SELECT @@session.tokudb_support_xa; +@@session.tokudb_support_xa +1 +SET @@global.tokudb_support_xa = 0; +SELECT @@global.tokudb_support_xa; +@@global.tokudb_support_xa +0 +SET @@global.tokudb_support_xa = 1; +SELECT @@global.tokudb_support_xa; +@@global.tokudb_support_xa +1 +'#--------------------case#2 invalid set support_xa------------------------#' +SET @@session.tokudb_support_xa = -0.6; +ERROR 42000: Incorrect argument type to variable 'tokudb_support_xa' +SET @@session.tokudb_support_xa = 1.6; +ERROR 42000: Incorrect argument type to variable 'tokudb_support_xa' +SET @@session.tokudb_support_xa = "T"; +ERROR 42000: Variable 'tokudb_support_xa' can't be set to the value of 'T' +SET @@session.tokudb_support_xa = "Y"; +ERROR 42000: Variable 'tokudb_support_xa' can't be set to the value of 'Y' +SET @@session.tokudb_support_xa = OF; +SELECT @@session.tokudb_support_xa; +@@session.tokudb_support_xa +0 +SET @@global.tokudb_support_xa = 2; +ERROR 42000: Variable 'tokudb_support_xa' can't be set to the value of '2' +SET @@global.tokudb_support_xa = "T"; +ERROR 42000: Variable 'tokudb_support_xa' can't be set to the value of 'T' +SET @@global.tokudb_support_xa = "Y"; +ERROR 42000: Variable 'tokudb_support_xa' can't be set to the value of 'Y' +'#--------------------case#3 xa.test port from tokudb_mariadb/xa.test ------------------------#' +'#--------------------xa.test with tokudb_support_xa OFF ------------------------#' +SET @@global.tokudb_support_xa = OFF; +SELECT @@global.tokudb_support_xa; +@@global.tokudb_support_xa +0 +create table t1 (a int) engine=tokudb; +xa start 'test1'; +insert t1 values (10); +xa end 'test1'; +xa prepare 'test1'; +xa rollback 'test1'; +select * from t1; +a +xa start 'test2'; +xa start 'test-bad'; +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ACTIVE state +insert t1 values (20); +xa prepare 'test2'; +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ACTIVE state +xa end 'test2'; +xa prepare 'test2'; +xa commit 'test2'; +select * from t1; +a +20 +xa start 'testa','testb'; +insert t1 values (30); +commit; +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the ACTIVE state +xa end 'testa','testb'; +begin; +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the IDLE state +create table t2 (a int); +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the IDLE state +xa start 'testa','testb'; +ERROR XAE08: XAER_DUPID: The XID already exists +xa start 'testa','testb', 123; +ERROR XAE08: XAER_DUPID: The XID already exists +xa start 0x7465737462, 0x2030405060, 0xb; +insert t1 values (40); +xa end 'testb',' 0@P`',11; +xa prepare 'testb',0x2030405060,11; +start transaction; +ERROR XAE07: XAER_RMFAIL: The command cannot be executed when global transaction is in the PREPARED state +xa recover; +formatID gtrid_length bqual_length data +11 5 5 testb 0@P` +xa prepare 'testa','testb'; +xa recover; +formatID gtrid_length bqual_length data +11 5 5 testb 0@P` +1 5 5 testatestb +xa commit 'testb',0x2030405060,11; +ERROR XAE04: XAER_NOTA: Unknown XID +xa rollback 'testa','testb'; +xa start 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your XYZ 
server version for the right syntax to use near '' at line 1 +select * from t1; +a +20 +drop table t1; +'#--------------------end------------------------#' +SET @@session.tokudb_support_xa = @session_start_value; +SELECT @@session.tokudb_support_xa; +@@session.tokudb_support_xa +1 +SET @@global.tokudb_support_xa = @global_start_value; +SELECT @@global.tokudb_support_xa; +@@global.tokudb_support_xa +1 diff --git a/storage/tokudb/mysql-test/tokudb/r/type_datetime.result b/storage/tokudb/mysql-test/tokudb/r/type_datetime.result index e6701b0a0b5..82f5ebe9600 100644 --- a/storage/tokudb/mysql-test/tokudb/r/type_datetime.result +++ b/storage/tokudb/mysql-test/tokudb/r/type_datetime.result @@ -21,6 +21,7 @@ t delete from t1 where t > 0; optimize table t1; Table Op Msg_type Msg_text +test.t1 optimize note Table does not support optimize, doing recreate + analyze instead test.t1 optimize status OK check table t1; Table Op Msg_type Msg_text diff --git a/storage/tokudb/mysql-test/tokudb/r/type_decimal.result b/storage/tokudb/mysql-test/tokudb/r/type_decimal.result index 647065bdedc..a432927eda2 100644 --- a/storage/tokudb/mysql-test/tokudb/r/type_decimal.result +++ b/storage/tokudb/mysql-test/tokudb/r/type_decimal.result @@ -799,10 +799,10 @@ ROUND(qty,3) dps ROUND(qty,dps) DROP TABLE t1; SELECT 1 % .123456789123456789123456789123456789123456789123456789123456789123456789123456789 AS '%'; % -0.012345687012345687012345687012345687012345687012345687012345687012345687000000000 +0.012345687012345687012345687012 SELECT MOD(1, .123456789123456789123456789123456789123456789123456789123456789123456789123456789) AS 'MOD()'; MOD() -0.012345687012345687012345687012345687012345687012345687012345687012345687000000000 +0.012345687012345687012345687012 create table t1 (f1 decimal(6,6),f2 decimal(6,6) zerofill); insert into t1 values (-0.123456,0.123456); select group_concat(f1),group_concat(f2) from t1; diff --git a/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result b/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result index 14ff0fbd1ca..3f76f54609f 100644 --- a/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result +++ b/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result @@ -704,7 +704,7 @@ select .7777777777777777777777777777777777777 * 777777777777777777.777777777777777777700000000000 select .7777777777777777777777777777777777777 - 0.1; .7777777777777777777777777777777777777 - 0.1 -0.6777777777777777777777777777777777777 +0.677777777777777777777777777778 select .343434343434343434 + .343434343434343434; .343434343434343434 + .343434343434343434 0.686868686868686868 @@ -1841,7 +1841,7 @@ Warnings: Note 1265 Data truncated for column 'c1' at row 4 DESC t2; Field Type Null Key Default Extra -c1 decimal(32,30) YES NULL +c1 decimal(33,30) YES NULL DROP TABLE t1,t2; CREATE TABLE t1 (a DECIMAL(30,30)); INSERT INTO t1 VALUES (0.1),(0.2),(0.3); @@ -1852,7 +1852,7 @@ Note 1265 Data truncated for column 'c1' at row 2 Note 1265 Data truncated for column 'c1' at row 3 DESC t2; Field Type Null Key Default Extra -c1 decimal(34,0) YES NULL +c1 decimal(33,30) YES NULL DROP TABLE t1,t2; CREATE TABLE t1 (a DECIMAL(30,30)); INSERT INTO t1 VALUES (0.1),(0.2),(0.3); diff --git a/storage/tokudb/mysql-test/tokudb/suite.opt b/storage/tokudb/mysql-test/tokudb/suite.opt index 8cfa7cacb1f..ea8042b7740 100644 --- a/storage/tokudb/mysql-test/tokudb/suite.opt +++ b/storage/tokudb/mysql-test/tokudb/suite.opt @@ -1 +1 @@ ---tokudb --plugin-load-add=$HA_TOKUDB_SO +--tokudb --plugin-load-add=$HA_TOKUDB_SO 
--loose-tokudb-check-jemalloc=0 diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_create_select.test b/storage/tokudb/mysql-test/tokudb/t/bf_create_select.test new file mode 100644 index 00000000000..a8b7f9c9f6a --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_create_select.test @@ -0,0 +1,118 @@ +# Verify that index and range scans are not slow +# on tables during create select statements +# due to tokudb bulk fetch not being used + +source include/have_tokudb.inc; +source include/big_test.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists t,t1,t2; +enable_warnings; + +let $maxq = 10; + +CREATE TABLE `t` ( + `num` int(10) unsigned auto_increment NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +); + +# put 8M rows into t +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; + +# Create first table from source table t +CREATE TABLE `t1` ( + `num` int(10) unsigned NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +) as select * from t; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t1; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_select seconds.; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + CREATE TABLE t2 AS SELECT count(*) from t1; + DROP TABLE t2; + inc $i; +} + +let $time_elapsed_create_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_create_select seconds.; + +# This check evaluates whether the time elapsed during the create select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +let $maxrq = 30; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + SELECT count(*) from t1 where num > 7000000; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. 
+#echo Index scans took $time_elapsed_select seconds.; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000; + DROP TABLE t2; + inc $i; +} + +let $time_elapsed_create_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_create_select seconds.; + +# This check evaluates whether the time elapsed during the create select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +drop table t,t1; diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_create_select_hash_part.test b/storage/tokudb/mysql-test/tokudb/t/bf_create_select_hash_part.test new file mode 100644 index 00000000000..b9ec8c52aa8 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_create_select_hash_part.test @@ -0,0 +1,143 @@ +# Verify that index and range scans are not slow +# on tables during create select statements +# due to tokudb bulk fetch not being used + +source include/have_tokudb.inc; +source include/have_partition.inc; +source include/big_test.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists t,t1,t2,t3; +enable_warnings; + +let $maxq = 10; + +CREATE TABLE `t` ( + `num` int(10) unsigned auto_increment NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +); + +# put 8M rows into t +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; + +# Create base table (control table) from source table t +CREATE TABLE `t1` ( + `num` int(10) unsigned NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +) as select * from t; + +# Create source hash partitioned table from source table t +CREATE TABLE `t2` ( + `num` int(10) unsigned NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +) PARTITION BY HASH (num) +PARTITIONS 8 as select * from t; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + CREATE TABLE `t3` (`x` bigint); + SELECT count(*) from t1; + DROP TABLE t3; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. 
+#echo Index scans took $time_elapsed_select seconds.; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + CREATE TABLE t3 AS SELECT count(*) from t2; + DROP TABLE t3; + inc $i; +} + +let $time_elapsed_create_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_create_select seconds.; + +# This check evaluates whether the time elapsed during the create select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +# Additionally, it is important to note that 1.5 is the multiplier applied to the time_elapsed_select +# value because it appears that MySQL 5.5.39 uses a sorted index scan during the create select statement +# while Percona Server 5.6 uses an unsorted index scan. +# The issue has been resolved in MySQL 5.6 but still persists in Maria 10.0.12 +# in the defect found at https://mariadb.atlassian.net/browse/MDEV-6547. +let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= 1.5 * $time_elapsed_select`; +echo $verdict; +if (!$verdict) { echo index scan t2 $time_elapsed_create_select $time_elapsed_select; } + +let $maxrq = 30; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + CREATE TABLE `t3` (`x` bigint); + SELECT count(*) from t1 where num > 7000000; + DROP TABLE t3; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_select seconds.; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000; + DROP TABLE t3; + inc $i; +} + +let $time_elapsed_create_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_create_select seconds.; + +# This check evaluates whether the time elapsed during the create select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +# Additionally, it is important to note that 1.5 is the multiplier applied to the time_elapsed_select +# value because it appears that MySQL 5.5.39 uses a sorted index scan during the create select statement +# while Percona Server 5.6 uses an unsorted index scan. +# The issue has been resolved in MySQL 5.6 but still persists in Maria 10.0.12 +# in the defect found at https://mariadb.atlassian.net/browse/MDEV-6547. 
+let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= 1.5 * $time_elapsed_select`; +echo $verdict; +if (!$verdict) { echo range scan t2 $time_elapsed_create_select $time_elapsed_select; } + +drop table t,t1,t2; diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_create_select_range_part.test b/storage/tokudb/mysql-test/tokudb/t/bf_create_select_range_part.test new file mode 100644 index 00000000000..a48f272b84f --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_create_select_range_part.test @@ -0,0 +1,138 @@ +# Verify that index and range scans are not slow +# on tables during create select statements +# due to tokudb bulk fetch not being used + +source include/have_tokudb.inc; +source include/have_partition.inc; +source include/big_test.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists t,t1,t2; +enable_warnings; + +let $maxq = 10; + +CREATE TABLE `t` ( + `num` int(10) unsigned auto_increment NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +); + +# put 8M rows into t +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; + +# Create base table (control table) from source table t +CREATE TABLE `t1` ( + `num` int(10) unsigned NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +) as select * from t; + +# Create source range partitioned table from source table t +CREATE TABLE `t2` ( + `num` int(10) unsigned NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +) PARTITION BY RANGE (num) +(PARTITION p0 VALUES LESS THAN (1000000), + PARTITION p1 VALUES LESS THAN (2000000), + PARTITION p2 VALUES LESS THAN (3000000), + PARTITION p3 VALUES LESS THAN (4000000), + PARTITION p4 VALUES LESS THAN (5000000), + PARTITION p5 VALUES LESS THAN (6000000), + PARTITION p6 VALUES LESS THAN (7000000), + PARTITION p7 VALUES LESS THAN MAXVALUE) as select * from t; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + CREATE TABLE `t3` (`x` bigint); + SELECT count(*) from t1; + DROP TABLE t3; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_select seconds.; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + CREATE TABLE t4 AS SELECT count(*) from t2; + DROP TABLE t4; + inc $i; +} + +let $time_elapsed_create_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. 
+#echo Index scans took $time_elapsed_create_select seconds.; + +# This check evaluates whether the time elapsed during the create select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +let $maxrq = 30; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + CREATE TABLE `t3` (`x` bigint); + SELECT count(*) from t1 where num > 7000000; + DROP TABLE t3; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_select seconds.; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + CREATE TABLE t4 AS SELECT count(*) from t2 where num > 7000000; + DROP TABLE t4; + inc $i; +} + +let $time_elapsed_create_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_create_select seconds.; + +# This check evaluates whether the time elapsed during the create select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +drop table t,t1,t2; diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_create_temp_select.test b/storage/tokudb/mysql-test/tokudb/t/bf_create_temp_select.test new file mode 100644 index 00000000000..fdd665076c9 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_create_temp_select.test @@ -0,0 +1,118 @@ +# Verify that index and range scans are not slow +# on temporary tables during create select statements +# due to tokudb bulk fetch not being used + +source include/have_tokudb.inc; +source include/big_test.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists t,t1,t2; +enable_warnings; + +let $maxq = 10; + +CREATE TABLE `t` ( + `num` int(10) unsigned auto_increment NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +); + +# put 8M rows into t +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; + +# Create first table from source table t +CREATE TABLE `t1` ( + `num` int(10) unsigned NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +) as select * from t; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + SELECT 
count(*) from t1; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_select seconds.; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1; + DROP TEMPORARY TABLE t2; + inc $i; +} + +let $time_elapsed_create_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_create_select seconds.; + +# This check evaluates whether the time elapsed during the create select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +let $maxrq = 30; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + SELECT count(*) from t1 where num > 7000000; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Range scans took $time_elapsed_select seconds.; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + CREATE TEMPORARY TABLE t2 AS SELECT count(*) from t1 where num > 7000000; + DROP TEMPORARY TABLE t2; + inc $i; +} + +let $time_elapsed_create_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Range scans took $time_elapsed_create_select seconds.; + +# This check evaluates whether the time elapsed during the create select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. 
+let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +drop table t,t1; diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_delete.test b/storage/tokudb/mysql-test/tokudb/t/bf_delete.test new file mode 100644 index 00000000000..65b26df230e --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_delete.test @@ -0,0 +1,68 @@ +# Verify that index scans for delete statements use bulk fetch and are +# at least twice as fast + +source include/have_tokudb.inc; +source include/big_test.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists t; +enable_warnings; + +CREATE TABLE `t` (id bigint not null auto_increment primary key, val bigint not null default 0); + +# put 8M rows into t +INSERT INTO t (id) values (null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; + +# run $maxq measurements +let $maxq = 10; + +# measure the time to do $maxq deletes from t that affect no rows with bulk fetch ON +set tokudb_bulk_fetch = ON; +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + delete from t where val > 0; + inc $i; +} +let $time_elapsed_bf_on = `select to_seconds(now()) - $s`; + +# measure the time to do $maxq deletes from t that affect no rows with bulk fetch OFF +set tokudb_bulk_fetch = OFF; +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + delete from t where val > 0; + inc $i; +} +let $time_elapsed_bf_off = `select to_seconds(now()) - $s`; + +# verify that a delete scan with bulk fetch ON is at least 2 times faster than with bulk fetch OFF +let $verdict = `select $time_elapsed_bf_off > $time_elapsed_bf_on && ($time_elapsed_bf_off - $time_elapsed_bf_on) / $time_elapsed_bf_on >= 2`; +echo $verdict; +if (!$verdict) { echo $time_elapsed_bf_on $time_elapsed_bf_off; } + +drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_delete_trigger.test b/storage/tokudb/mysql-test/tokudb/t/bf_delete_trigger.test new file mode 100644 index 00000000000..031a48feab1 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_delete_trigger.test @@ -0,0 +1,70 @@ +# verify that delete triggers can not insert, delete, or update rows in the target table + +source include/have_tokudb.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists t; +enable_warnings; + +create table t (id bigint not null primary key, x bigint not null); +insert into t values (1,0),(2,0),(3,0),(4,0); + +# verify that a before delete trigger can not insert into the target table +create trigger t_delete before delete on t for each row insert into t values (1000000,0); +begin; +error 1442; +delete from t where 
x=0;
+rollback;
+drop trigger t_delete;
+
+# verify that an after delete trigger can not insert into the target table
+create trigger t_delete after delete on t for each row insert into t values (1000000,0);
+begin;
+error 1442;
+delete from t where x=0;
+rollback;
+drop trigger t_delete;
+
+# verify that a before delete trigger can not delete from the target table
+create trigger t_delete before delete on t for each row delete from t where id=1000000;
+begin;
+error 1442;
+delete from t where x=0;
+rollback;
+drop trigger t_delete;
+
+# verify that an after delete trigger can not delete from the target table
+create trigger t_delete after delete on t for each row delete from t where id=1000000;
+begin;
+error 1442;
+delete from t where x=0;
+rollback;
+drop trigger t_delete;
+
+# verify that a before delete trigger can not update the target table
+create trigger t_delete before delete on t for each row update t set x=x+1 where id=1000000;
+begin;
+error 1442;
+delete from t where x=0;
+rollback;
+drop trigger t_delete;
+
+# verify that an after delete trigger can not update the target table
+create trigger t_delete after delete on t for each row update t set x=x+1 where id=10000000;
+begin;
+error 1442;
+delete from t where x=0;
+rollback;
+drop trigger t_delete;
+
+# can execute select on the target table in a delete trigger. it better use a different handler.
+create table count (count bigint not null);
+create trigger t_delete before delete on t for each row insert into count select count(*) from t;
+begin;
+delete from t where x=0;
+select * from count;
+rollback;
+drop trigger t_delete;
+drop table count;
+
+drop table t;
\ No newline at end of file diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_insert_select.test b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select.test new file mode 100644 index 00000000000..11f14013a30 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select.test @@ -0,0 +1,122 @@ +# Verify that index and range scans are not slow +# on tables during insert select statements +# due to tokudb bulk fetch not being used + +source include/have_tokudb.inc; +source include/big_test.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists t,t1,t2; +enable_warnings; + +let $maxq = 10; + +CREATE TABLE `t` ( + `num` int(10) unsigned auto_increment NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +); + +# put 8M rows into t +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; + +# Create first table from source table t +CREATE TABLE `t1` ( + `num` int(10) unsigned NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +) as select * from t; + +# Create second table t2 that will serve as the target for the insert select statment +CREATE TABLE `t2` ( + `count` bigint(20) NOT NULL + ) ENGINE=TokuDB DEFAULT CHARSET=latin1; + + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t1; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_select seconds.; + + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + INSERT into t2 SELECT count(*) from t1; + inc $i; +} +let $time_elapsed_insert_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_insert_select seconds.; + +# This check evaluates whether the time elapsed during the insert select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +let $verdict = `select abs($time_elapsed_insert_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +let $maxrq = 30; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + SELECT count(*) from t1 where num > 7000000; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# This check evaluates whether the time elapsed during the insert select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. 
+#echo Range scans took $time_elapsed_select seconds.;
+
+
+let $s = `select to_seconds(now())`;
+let $i = 0;
+while ($i < $maxrq) {
+ INSERT into t2 SELECT count(*) from t1 where num > 7000000;
+ inc $i;
+}
+let $time_elapsed_insert_select = `select to_seconds(now()) - $s`;
+
+# The following line can be used to display the time elapsed data
+# which could be useful for debugging.
+#echo Range scans took $time_elapsed_insert_select seconds.;
+
+# This check evaluates whether the time elapsed during the insert select statement is on par
+# with the select statement, which will confirm that bulk fetch is in fact being used.
+let $verdict = `select abs($time_elapsed_insert_select - $time_elapsed_select) <= $time_elapsed_select`;
+echo $verdict;
+
+drop table t,t1,t2;
diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_dup_key.test b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_dup_key.test
new file mode 100644
index 00000000000..3200beeaba9
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_dup_key.test
@@ -0,0 +1,127 @@
+# Verify that index and range scans are not slow
+# on tables during insert select on duplicate key statements
+# due to tokudb bulk fetch not being used.
+# In this test case, the on duplicate key condition does not need to fire
+# since the performance of the embedded select statement is all we are measuring.
+
+source include/have_tokudb.inc;
+source include/big_test.inc;
+set default_storage_engine='tokudb';
+disable_warnings;
+drop table if exists t,t1,t2;
+enable_warnings;
+
+let $maxq = 10;
+
+CREATE TABLE `t` (
+ `num` int(10) unsigned auto_increment NOT NULL,
+ `val` varchar(32) DEFAULT NULL,
+ PRIMARY KEY (`num`)
+);
+
+# put 8M rows into t
+INSERT INTO t values (null,null);
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+INSERT INTO t SELECT null,val FROM t;
+SELECT count(*) FROM t;
+
+# Create first table from source table t
+CREATE TABLE `t1` (
+ `num` int(10) unsigned NOT NULL,
+ `val` varchar(32) DEFAULT NULL,
+ PRIMARY KEY (`num`)
+) as select * from t;
+
+# Create second table t2 that will serve as the target for the insert select statement
+CREATE TABLE `t2` (
+ `num` int(10) unsigned auto_increment NOT NULL,
+ `count` bigint(20) NOT NULL,
+ UNIQUE (num)
+ ) ENGINE=TokuDB DEFAULT CHARSET=latin1;
+
+
+let $s = `select to_seconds(now())`;
+let $i = 0;
+while ($i < $maxq) {
+ SELECT count(*) from t1;
+ inc $i;
+}
+let $time_elapsed_select = `select to_seconds(now()) - $s`;
+
+# The following line can be used to display the time elapsed data
+# which could be useful for debugging.
+#echo Index scans took $time_elapsed_select seconds.; + + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + INSERT into t2 (num,count) SELECT NULL,count(*) from t1 on DUPLICATE KEY UPDATE count=count+1; + inc $i; +} +let $time_elapsed_insert_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_insert_select seconds.; + +# This check evaluates whether the time elapsed during the insert select on duplicate key statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +let $verdict = `select abs($time_elapsed_insert_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +let $maxrq = 30; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + SELECT count(*) from t1 where num > 7000000; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Range scans took $time_elapsed_select seconds.; + + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + INSERT into t2 (num,count) SELECT NULL,count(*) from t1 where num > 7000000 on DUPLICATE KEY UPDATE count=count+1; + inc $i; +} +let $time_elapsed_insert_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Range scans took $time_elapsed_insert_select seconds.; + +# This check evaluates whether the time elapsed during the insert select on duplicate key statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. 
+let $verdict = `select abs($time_elapsed_insert_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +enable_warnings; +drop table t,t1,t2; diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_trigger.test b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_trigger.test new file mode 100644 index 00000000000..337013c9dad --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_trigger.test @@ -0,0 +1,65 @@ +# verify that various insert triggers can not execute on the source table for an insert select statement + +source include/have_tokudb.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists s,t; +enable_warnings; + +create table s (id bigint not null primary key, x bigint); +insert into s values (1,0),(2,0),(3,0),(4,0); + +create table t like s; +begin; +insert into t select * from s; +rollback; + +# verify that before insert triggers can not insert into the source table +create trigger t_trigger before insert on t for each row insert into s values (1000000,0); +begin; +error 1442; +insert into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not insert into the source table +create trigger t_trigger after insert on t for each row insert into s values (1000000,0); +begin; +error 1442; +insert into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that before insert triggers can not delete from the source table +create trigger t_trigger before insert on t for each row delete from s where id=1000000; +begin; +error 1442; +insert into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not delete from the source table +create trigger t_trigger after insert on t for each row delete from s where id=1000000; +begin; +error 1442; +insert into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that before insert triggers can not update the source table +create trigger t_trigger before insert on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +insert into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not update the source table +create trigger t_trigger after insert on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +insert into t select * from s; +rollback; +drop trigger t_trigger; + +drop table s,t; diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_update_trigger.test b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_update_trigger.test new file mode 100644 index 00000000000..d5addc69076 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_insert_select_update_trigger.test @@ -0,0 +1,170 @@ +# verify that various insert triggers can not execute on the source table for an insert select statement + +source include/have_tokudb.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists s,t; +enable_warnings; + +create table s (id bigint not null primary key, x bigint); +insert into s values (1,0),(2,0),(3,0),(4,0); + +create table t like s; +begin; +insert into t select * from s; +rollback; +# insert into t values (1,0); + +# verify that before insert triggers can not insert into the source table +create trigger t_trigger before insert on t for each row insert into s values (1000000,0); +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers 
can not insert into the source table +create trigger t_trigger after insert on t for each row insert into s values (1000000,0); +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that before insert triggers can not delete from the source table +create trigger t_trigger before insert on t for each row delete from s where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not delete from the source table +create trigger t_trigger after insert on t for each row delete from s where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that before insert triggers can not update the source table +create trigger t_trigger before insert on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not update the source table +create trigger t_trigger after insert on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# force duplicate keys +truncate table t; +insert into t values (1,0); + +# verify that before insert triggers can not insert into the source table +create trigger t_trigger before insert on t for each row insert into s values (1000000,0); +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not insert into the source table +create trigger t_trigger after insert on t for each row insert into s values (1000000,0); +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that before insert triggers can not delete from the source table +create trigger t_trigger before insert on t for each row delete from s where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not delete from the source table +create trigger t_trigger after insert on t for each row delete from s where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that before insert triggers can not update the source table +create trigger t_trigger before insert on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not update the source table +create trigger t_trigger after insert on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# force duplicate keys +truncate table t; +insert into t values (1,0); + +# verify that before insert triggers can not insert into the source table +create trigger t_trigger before update on t for each row insert into s values (1000000,0); +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger 
t_trigger; + +# verify that after insert triggers can not insert into the source table +create trigger t_trigger after update on t for each row insert into s values (1000000,0); +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that before update triggers can not delete from the source table +create trigger t_trigger before update on t for each row delete from s where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not delete from the source table +create trigger t_trigger after update on t for each row delete from s where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that before update triggers can not update the source table +create trigger t_trigger before update on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not update the source table +create trigger t_trigger after update on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +insert into t select * from s on duplicate key update x=t.x+1; +rollback; +drop trigger t_trigger; + +drop table s,t; diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_replace_select.test b/storage/tokudb/mysql-test/tokudb/t/bf_replace_select.test new file mode 100644 index 00000000000..380eb5adda8 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_replace_select.test @@ -0,0 +1,190 @@ +# Verify that index and range scans are not slow +# on tables during replace select and insert ignore statements +# due to tokudb bulk fetch not being used + +source include/have_tokudb.inc; +source include/big_test.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists t,t1,t2; +enable_warnings; + +let $maxq = 10; + +CREATE TABLE `t` ( + `num` int(10) unsigned auto_increment NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +); + +# put 8M rows into t +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; + +# Create first table from source table t +CREATE TABLE `t1` ( + `num` int(10) unsigned NOT NULL, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +) as select * from t; + +# Create second table t2 that will serve as the target for the replace select statment +CREATE TABLE `t2` ( + `count` bigint(20) NOT NULL + ) ENGINE=TokuDB DEFAULT 
CHARSET=latin1; + + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t1; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_select seconds.; + + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + REPLACE into t2 SELECT count(*) from t1; + inc $i; +} +let $time_elapsed_replace_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_replace_select seconds.; + +# This check evaluates whether the time elapsed during the replace select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +let $verdict = `select abs($time_elapsed_replace_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +############################################################## + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t1; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_select seconds.; + + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxq) { + INSERT IGNORE into t2 SELECT count(*) from t1; + inc $i; +} +let $time_elapsed_insert_ignore_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Index scans took $time_elapsed_insert_ignore_select seconds.; + +# This check evaluates whether the time elapsed during the insert ignore select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +let $verdict = `select abs($time_elapsed_insert_ignore_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +################################################################## + +let $maxrq = 30; + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + SELECT count(*) from t1 where num > 7000000; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Range scans took $time_elapsed_select seconds.; + + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + REPLACE into t2 SELECT count(*) from t1 where num > 7000000; + inc $i; +} +let $time_elapsed_replace_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Range scans took $time_elapsed_replace_select seconds.; + +# This check evaluates whether the time elapsed during the replace select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. 
+let $verdict = `select abs($time_elapsed_replace_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +#################################################################### + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + SELECT count(*) from t1 where num > 7000000; + inc $i; +} +let $time_elapsed_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Range scans took $time_elapsed_select seconds.; + + +let $s = `select to_seconds(now())`; +let $i = 0; +while ($i < $maxrq) { + INSERT IGNORE into t2 SELECT count(*) from t1 where num > 7000000; + inc $i; +} +let $time_elapsed_insert_ignore_select = `select to_seconds(now()) - $s`; + +# The following line can be used to display the time elapsed data +# which could be useful for debugging. +#echo Range scans took $time_elapsed_insert_ignore_select seconds.; + +# This check evaluates whether the time elapsed during the insert ignore select statement is on par +# with the select statement, which will confirm that bulk fetch is in fact being used. +let $verdict = `select abs($time_elapsed_insert_ignore_select - $time_elapsed_select) <= $time_elapsed_select`; +echo $verdict; + +######################################################################### + +drop table t,t1,t2; diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_replace_select_trigger.test b/storage/tokudb/mysql-test/tokudb/t/bf_replace_select_trigger.test new file mode 100644 index 00000000000..6b098ce04af --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_replace_select_trigger.test @@ -0,0 +1,169 @@ +# verify that various insert and update triggers can not execute on the source table +# for a replace select statement + +source include/have_tokudb.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists s,t; +enable_warnings; + +create table s (id bigint not null primary key, x bigint); +insert into s values (1,0),(2,0),(3,0),(4,0); + +create table t like s; +begin; +replace into t select * from s; +rollback; + +# verify that before insert triggers can not replace into the source table +create trigger t_trigger before insert on t for each row replace into s values (1000000,0); +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not replace into the source table +create trigger t_trigger after insert on t for each row replace into s values (1000000,0); +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that before insert triggers can not delete from the source table +create trigger t_trigger before insert on t for each row delete from s where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not delete from the source table +create trigger t_trigger after insert on t for each row delete from s where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that before insert triggers can not update the source table +create trigger t_trigger before insert on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not update the source table +create trigger t_trigger after insert on t for each row 
update s set x=x+1 where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +truncate table t; +insert into t values (1,1); + +# verify that before insert triggers can not replace into the source table +create trigger t_trigger before insert on t for each row replace into s values (1000000,0); +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not replace into the source table +create trigger t_trigger after insert on t for each row replace into s values (1000000,0); +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that before insert triggers can not delete from the source table +create trigger t_trigger before insert on t for each row delete from s where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not delete from the source table +create trigger t_trigger after insert on t for each row delete from s where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that before insert triggers can not update the source table +create trigger t_trigger before insert on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after insert triggers can not update the source table +create trigger t_trigger after insert on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +truncate table t; +insert into t values (1,1); + +# verify that before delete triggers can not replace into the source table +create trigger t_trigger before delete on t for each row replace into s values (1000000,0); +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after delete triggers can not replace into the source table +create trigger t_trigger after delete on t for each row replace into s values (1000000,0); +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that before delete triggers can not delete from the source table +create trigger t_trigger before delete on t for each row delete from s where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after delete triggers can not delete from the source table +create trigger t_trigger after delete on t for each row delete from s where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that before delete triggers can not update the source table +create trigger t_trigger before delete on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + +# verify that after delete triggers can not update the source table +create trigger t_trigger after delete on t for each row update s set x=x+1 where id=1000000; +begin; +error 1442; +replace into t select * from s; +rollback; +drop trigger t_trigger; + + +drop table s,t; diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_select_hash_part.test b/storage/tokudb/mysql-test/tokudb/t/bf_select_hash_part.test new file mode 100644 index 00000000000..67fefcb45bd --- /dev/null +++ 
b/storage/tokudb/mysql-test/tokudb/t/bf_select_hash_part.test @@ -0,0 +1,100 @@ +# Verify that index and range scans on a hash partitioned tokudb table are not slow on tables +# due to tokudb bulk fetch not being used. + +source include/have_tokudb.inc; +source include/have_partition.inc; +source include/big_test.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists t; +enable_warnings; + +let $maxq = 20; +let $debug = 0; + +# create the hash partition table +CREATE TABLE `t` ( + `num` int(10) unsigned NOT NULL auto_increment, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +) PARTITION BY HASH (num) PARTITIONS 8; + +# put 1M rows into t +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; + +set tokudb_bulk_fetch=ON; +let $s = `select unix_timestamp()`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t; + inc $i; +} +let $time_bf_on = `select unix_timestamp() - $s`; + +if ($debug) { echo index scans took $time_bf_on; } + +set tokudb_bulk_fetch=OFF; +let $s = `select unix_timestamp()`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t; + inc $i; +} +let $time_bf_off = `select unix_timestamp() - $s`; + +if ($debug) { echo index scans took $time_bf_off.; } + +# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on +let $verdict = `select $time_bf_off > $time_bf_on && $time_bf_off >= 1.5 * $time_bf_on`; +echo $verdict; +if (!$verdict) { echo index scan $time_bf_on $time_bf_off; } + +set tokudb_bulk_fetch=ON; +let $s = `select unix_timestamp()`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t where num > 500000; + inc $i; +} +let $time_bf_on = `select unix_timestamp() - $s`; + +if ($debug) { echo range scans took $time_bf_on; } + +set tokudb_bulk_fetch=OFF; +let $s = `select unix_timestamp()`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t where num > 500000; + inc $i; +} +let $time_bf_off = `select unix_timestamp() - $s`; + +if ($debug) { echo range scans took $time_bf_off.; } + +# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on +let $verdict = `select $time_bf_off > $time_bf_on && $time_bf_off >= 1.5 * $time_bf_on`; +echo $verdict; +if (!$verdict) { echo range scan $time_bf_on $time_bf_off; } + +drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/t/bf_select_range_part.test b/storage/tokudb/mysql-test/tokudb/t/bf_select_range_part.test new file mode 100644 index 00000000000..0a1d7de3747 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bf_select_range_part.test @@ -0,0 +1,108 @@ +# Verify that index and range scans on a range partitioned tokudb table are not slow on tables +# due to tokudb bulk fetch not being used. 
+ +source include/have_tokudb.inc; +source include/have_partition.inc; +source include/big_test.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists t; +enable_warnings; + +let $maxq = 20; +let $debug = 0; + +# create the range partition table +CREATE TABLE `t` ( + `num` int(10) unsigned NOT NULL auto_increment, + `val` varchar(32) DEFAULT NULL, + PRIMARY KEY (`num`) +) PARTITION BY RANGE (num) +(PARTITION p0 VALUES LESS THAN (100000), + PARTITION p1 VALUES LESS THAN (200000), + PARTITION p2 VALUES LESS THAN (300000), + PARTITION p3 VALUES LESS THAN (400000), + PARTITION p4 VALUES LESS THAN (500000), + PARTITION p5 VALUES LESS THAN (600000), + PARTITION p6 VALUES LESS THAN (700000), + PARTITION p7 VALUES LESS THAN MAXVALUE); + +# put 1M rows into t +INSERT INTO t values (null,null); +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +INSERT INTO t SELECT null,val FROM t; +SELECT count(*) FROM t; + +set tokudb_bulk_fetch=ON; +let $s = `select unix_timestamp()`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t; + inc $i; +} +let $time_bf_on = `select unix_timestamp() - $s`; + +if ($debug) { echo index scans took $time_bf_on; } + +set tokudb_bulk_fetch=OFF; +let $s = `select unix_timestamp()`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t; + inc $i; +} +let $time_bf_off = `select unix_timestamp() - $s`; + +if ($debug) { echo index scans took $time_bf_off.; } + +# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on +let $verdict = `select $time_bf_off > $time_bf_on && $time_bf_off >= 1.5 * $time_bf_on`; +echo $verdict; +if (!$verdict) { echo index scan $time_bf_on $time_bf_off; } + +set tokudb_bulk_fetch=ON; +let $s = `select unix_timestamp()`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t where num > 700000; + inc $i; +} +let $time_bf_on = `select unix_timestamp() - $s`; + +if ($debug) { echo range scans took $time_bf_on; } + +set tokudb_bulk_fetch=OFF; +let $s = `select unix_timestamp()`; +let $i = 0; +while ($i < $maxq) { + SELECT count(*) from t where num > 700000; + inc $i; +} +let $time_bf_off = `select unix_timestamp() - $s`; + +if ($debug) { echo range scans took $time_bf_off.; } + +# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on +let $verdict = `select $time_bf_off > $time_bf_on && $time_bf_off >= 1.5 * $time_bf_on`; +echo $verdict; +if (!$verdict) { echo range scan $time_bf_on $time_bf_off; } + +drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/t/cluster_key_part.test b/storage/tokudb/mysql-test/tokudb/t/cluster_key_part.test new file mode 100644 index 00000000000..8da7aa1078f --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/cluster_key_part.test @@ -0,0 +1,23 @@ +# Test that clustering keys can be created on partitioned tokudb 
tables + +source include/have_tokudb.inc; +source include/have_partition.inc; +set default_storage_engine='tokudb'; + +disable_warnings; +drop table if exists t; +enable_warnings; + +create table t ( + x int not null, + y int not null, + primary key(x)) +partition by hash(x) partitions 2; + +show create table t; + +alter table t add key(y) clustering=yes; + +show create table t; + +drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/t/ext_key_1_innodb.test b/storage/tokudb/mysql-test/tokudb/t/ext_key_1_innodb.test deleted file mode 100644 index f23e02ddff3..00000000000 --- a/storage/tokudb/mysql-test/tokudb/t/ext_key_1_innodb.test +++ /dev/null @@ -1,44 +0,0 @@ -source include/have_innodb.inc; - -disable_warnings; -drop table if exists t; -enable_warnings; - -set session optimizer_switch='extended_keys=on'; -select @@optimizer_switch; - -create table t (id int not null, x int not null, y int not null, primary key(id), key(x)) engine=innodb; - -insert into t values (0,0,0),(1,1,1),(2,2,2),(3,2,3),(4,2,4); - -explain select x,id from t force index (x) where x=0 and id=0; -flush status; -select x,id from t force index (x) where x=0 and id=0; -show status like 'handler_read%'; - -explain select y,id from t force index (x) where x=0 and id=0; -flush status; -select y,id from t force index (x) where x=0 and id=0; -show status like 'handler_read%'; - -explain select x,id from t force index (x) where x=0 and id=1; -flush status; -select x,id from t force index (x) where x=0 and id=1; -show status like 'handler_read%'; - -explain select y,id from t force index (x)where x=0 and id=1; -flush status; -select y,id from t force index(x) where x=0 and id=1; -show status like 'handler_read%'; - -explain select x,id from t force index (x) where x=2 and id=3; -flush status; -select x,id from t force index (x) where x=2 and id=3; -show status like 'handler_read%'; - -explain select x,id from t force index (x) where x=2 and id=0; -flush status; -select x,id from t force index (x) where x=2 and id=0; -show status like 'handler_read%'; - -drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/t/ext_key_1_tokudb.test b/storage/tokudb/mysql-test/tokudb/t/ext_key_1_tokudb.test deleted file mode 100644 index 802385e8e9a..00000000000 --- a/storage/tokudb/mysql-test/tokudb/t/ext_key_1_tokudb.test +++ /dev/null @@ -1,44 +0,0 @@ -#source include/have_tokudb.inc; - -disable_warnings; -drop table if exists t; -enable_warnings; - -set session optimizer_switch='extended_keys=on'; -select @@optimizer_switch; - -create table t (id int not null, x int not null, y int not null, primary key(id), key(x)) engine=tokudb; - -insert into t values (0,0,0),(1,1,1),(2,2,2),(3,2,3),(4,2,4); - -explain select x,id from t force index (x) where x=0 and id=0; -flush status; -select x,id from t force index (x) where x=0 and id=0; -show status like 'handler_read%'; - -explain select y,id from t force index (x) where x=0 and id=0; -flush status; -select y,id from t force index (x) where x=0 and id=0; -show status like 'handler_read%'; - -explain select x,id from t force index (x) where x=0 and id=1; -flush status; -select x,id from t force index (x) where x=0 and id=1; -show status like 'handler_read%'; - -explain select y,id from t force index (x)where x=0 and id=1; -flush status; -select y,id from t force index(x) where x=0 and id=1; -show status like 'handler_read%'; - -explain select x,id from t force index (x) where x=2 and id=3; -flush status; -select x,id from t force index (x) where x=2 and id=3; -show status like 
'handler_read%'; - -explain select x,id from t force index (x) where x=2 and id=0; -flush status; -select x,id from t force index (x) where x=2 and id=0; -show status like 'handler_read%'; - -drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/t/ext_key_2_innodb.test b/storage/tokudb/mysql-test/tokudb/t/ext_key_2_innodb.test deleted file mode 100644 index 265275c0e50..00000000000 --- a/storage/tokudb/mysql-test/tokudb/t/ext_key_2_innodb.test +++ /dev/null @@ -1,24 +0,0 @@ -source include/have_innodb.inc; - -disable_warnings; -drop table if exists t; -enable_warnings; - -set session optimizer_switch='extended_keys=on'; -select @@optimizer_switch; - -create table t (a int not null, b int not null, c int not null, d int not null, primary key(a,b), key(c,a)) engine=innodb; - -insert into t values (0,0,0,0),(0,1,0,1); - -explain select c,a,b from t where c=0 and a=0 and b=1; -flush status; -select c,a,b from t where c=0 and a=0 and b=1; -show status like 'handler_read%'; - -explain select c,a,b from t force index (c) where c=0 and a=0 and b=1; -flush status; -select c,a,b from t force index (c) where c=0 and a=0 and b=1; -show status like 'handler_read%'; - -drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/t/ext_key_2_tokudb.test b/storage/tokudb/mysql-test/tokudb/t/ext_key_2_tokudb.test deleted file mode 100644 index b12f056f1a6..00000000000 --- a/storage/tokudb/mysql-test/tokudb/t/ext_key_2_tokudb.test +++ /dev/null @@ -1,24 +0,0 @@ -#source include/have_tokudb.inc; - -disable_warnings; -drop table if exists t; -enable_warnings; - -set session optimizer_switch='extended_keys=on'; -select @@optimizer_switch; - -create table t (a int not null, b int not null, c int not null, d int not null, primary key(a,b), key(c,a)) engine=tokudb; - -insert into t values (0,0,0,0),(0,1,0,1); - -explain select c,a,b from t where c=0 and a=0 and b=1; -flush status; -select c,a,b from t where c=0 and a=0 and b=1; -show status like 'handler_read%'; - -explain select c,a,b from t force index (c) where c=0 and a=0 and b=1; -flush status; -select c,a,b from t force index (c) where c=0 and a=0 and b=1; -show status like 'handler_read%'; - -drop table t; diff --git a/storage/tokudb/mysql-test/tokudb/t/fast_update_binlog_mixed.test b/storage/tokudb/mysql-test/tokudb/t/fast_update_binlog_mixed.test index 28a11cf302c..9e48d056417 100644 --- a/storage/tokudb/mysql-test/tokudb/t/fast_update_binlog_mixed.test +++ b/storage/tokudb/mysql-test/tokudb/t/fast_update_binlog_mixed.test @@ -1,6 +1,6 @@ -source include/master-slave.inc; source include/have_binlog_format_mixed.inc; source include/have_tokudb.inc; +source include/master-slave.inc; set default_storage_engine='tokudb'; create table tt (id int primary key, x int); diff --git a/storage/tokudb/mysql-test/tokudb/t/fast_update_binlog_statement.test b/storage/tokudb/mysql-test/tokudb/t/fast_update_binlog_statement.test index bab2aadb340..90575c615af 100644 --- a/storage/tokudb/mysql-test/tokudb/t/fast_update_binlog_statement.test +++ b/storage/tokudb/mysql-test/tokudb/t/fast_update_binlog_statement.test @@ -1,6 +1,6 @@ -source include/master-slave.inc; source include/have_binlog_format_statement.inc; source include/have_tokudb.inc; +source include/master-slave.inc; set default_storage_engine='tokudb'; create table tt (id int primary key, x int); diff --git a/storage/tokudb/mysql-test/tokudb/t/hotindex-insert-bigchar.opt b/storage/tokudb/mysql-test/tokudb/t/hotindex-insert-bigchar.opt deleted file mode 100644 index d76fda471ca..00000000000 --- 
a/storage/tokudb/mysql-test/tokudb/t/hotindex-insert-bigchar.opt +++ /dev/null @@ -1 +0,0 @@ ---loose-tokudb-max-lock-memory=320M diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test index 6b4e5d88673..e217ad38fd2 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test @@ -1,7 +1,7 @@ # verify that information_schema.tokudb_locks gets populated with locks, information_schema.tokudb_lock_waits gets -if (`select @@tokudb_version <= "7.1.6"`) +if (`select @@tokudb_version <= "7.1.8"`) { - --skip Race condition in the test in TokuDB 7.1.6 or earlier + --skip Race condition in the test in TokuDB 7.1.8 or earlier } # populated with 1 lock_wait and all transactions are present in information_schema.tokudb_trx for 2 clients @@ -23,6 +23,7 @@ select * from information_schema.tokudb_lock_waits; connect (conn_a,localhost,root,,); set autocommit=0; +set tokudb_prelock_empty=OFF; # disable the bulk loader insert into t values (1); connect (conn_b,localhost,root,,); @@ -36,10 +37,12 @@ source include/wait_condition.inc; real_sleep 1; # delay a little to shorten the update -> write row -> lock wait race replace_column 1 TRX_ID 2 MYSQL_ID; +replace_result $datadir ./; select * from information_schema.tokudb_locks; # should find the presence of a lock_wait on the 2nd transaction replace_column 1 REQUEST_TRX_ID 2 BLOCK_TRX_ID 6 LOCK_WAITS_START_TIME; +replace_result $datadir ./; select * from information_schema.tokudb_lock_waits; # should find the presence of two transactions @@ -50,6 +53,7 @@ connection conn_a; commit; # verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction replace_column 1 TRX_ID 2 MYSQL_ID; +replace_result $datadir ./; select * from information_schema.tokudb_locks; select * from information_schema.tokudb_lock_waits; @@ -72,6 +76,7 @@ select * from information_schema.tokudb_lock_waits; connect (conn_a,localhost,root,,); set autocommit=0; +set tokudb_prelock_empty=OFF; # disable the bulk loader replace into t values (1); connect (conn_b,localhost,root,,); @@ -85,10 +90,12 @@ source include/wait_condition.inc; real_sleep 1; # delay a little to shorten the update -> write row -> lock wait race replace_column 1 TRX_ID 2 MYSQL_ID; +replace_result $datadir ./; select * from information_schema.tokudb_locks; # should find the presence of a lock_wait on the 2nd transaction replace_column 1 REQUEST_TRX_ID 2 BLOCK_TRX_ID 6 LOCK_WAITS_START_TIME; +replace_result $datadir ./; select * from information_schema.tokudb_lock_waits; # should find the presence of two transactions @@ -99,6 +106,7 @@ connection conn_a; commit; # verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction replace_column 1 TRX_ID 2 MYSQL_ID; +replace_result $datadir ./; select * from information_schema.tokudb_locks; select * from information_schema.tokudb_lock_waits; diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test index ea7eb9a2c89..75929fa3b3d 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test @@ -16,6 +16,7 @@ select * from information_schema.tokudb_lock_waits; connect (conn_a,localhost,root,,); set autocommit=0; 
+set tokudb_prelock_empty=OFF; insert into t values (1); connect (conn_b,localhost,root,,); @@ -29,10 +30,12 @@ source include/wait_condition.inc; real_sleep 1; # delay a little to shorten the update -> write row -> lock wait race replace_column 1 TRX_ID 2 MYSQL_ID; +replace_result $datadir ./; select * from information_schema.tokudb_locks; # should find the presence of a lock_wait on the 2nd transaction replace_column 1 REQUEST_TRX_ID 2 BLOCK_TRX_ID 6 LOCK_WAITS_START_TIME; +replace_result $datadir ./; select * from information_schema.tokudb_lock_waits; # should find the presence of two transactions diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test index a3745b5471b..617b487f043 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test @@ -29,7 +29,8 @@ insert into t values (6); # should find 3 locks for 2 transactions connection default; replace_column 1 TRX_ID 2 MYSQL_ID; -eval select * from information_schema.tokudb_locks order by locks_trx_id; +replace_result $datadir ./; +eval select * from information_schema.tokudb_locks order by locks_trx_id,locks_key_left; connection conn_a; commit; @@ -42,4 +43,4 @@ commit; disconnect conn_a; -drop table t;
\ No newline at end of file
+drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test
index 3a1cf2023da..6df23e7edb3 100644
--- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test
+++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test
@@ -1,6 +1,6 @@
-if (`select @@tokudb_version <= "7.1.6"`)
+if (`select @@tokudb_version <= "7.1.8"`)
{
- --skip Race condition in the test in TokuDB 7.1.6 or earlier
+ --skip Race condition in the test in TokuDB 7.1.8 or earlier
}
# verify that information_schema.tokudb_locks gets populated with locks for 2 clients
@@ -18,20 +18,17 @@ let $default_id=`select connection_id()`;
# should be empty
select * from information_schema.tokudb_locks;
-
connect (conn_a,localhost,root,,);
set autocommit=0;
-let $a_id=`select connection_id()`;
+set tokudb_prelock_empty=OFF; # disable bulk loader
insert into t values (1);
connect (conn_b,localhost,root,,);
set autocommit=0;
-let $b_id=`select connection_id()`;
send insert into t values (1);
-
# should find the presence of a lock on 2nd transaction
connection default;
let $wait_condition= select count(*)=1 from information_schema.processlist where info='insert into t values (1)' and state='update';
@@ -39,12 +36,14 @@ source include/wait_condition.inc;
real_sleep 1; # delay a little to shorten the update -> write row -> lock wait race
replace_column 1 TRX_ID 2 MYSQL_ID;
+replace_result $datadir ./;
eval select * from information_schema.tokudb_locks;
connection conn_a;
commit;
# verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction
replace_column 1 TRX_ID 2 MYSQL_ID;
+replace_result $datadir ./;
select * from information_schema.tokudb_locks;
connection conn_b;
diff --git a/storage/tokudb/mysql-test/tokudb/t/tokudb_support_xa.test b/storage/tokudb/mysql-test/tokudb/t/tokudb_support_xa.test
new file mode 100644
index 00000000000..ba0b1f92a13
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/tokudb_support_xa.test
@@ -0,0 +1,133 @@
+--source include/load_sysvars.inc
+let $engine=TokuDB;
+
+--echo '#--------------------begin------------------------#'
+SET @session_start_value = @@session.tokudb_support_xa;
+SELECT @session_start_value;
+
+SET @global_start_value = @@global.tokudb_support_xa;
+SELECT @global_start_value;
+
+SET @@session.tokudb_support_xa = 0;
+SET @@session.tokudb_support_xa = DEFAULT;
+SELECT @@session.tokudb_support_xa;
+SET @@global.tokudb_support_xa = 0;
+SET @@global.tokudb_support_xa = DEFAULT;
+SELECT @@global.tokudb_support_xa;
+
+--echo '#--------------------case#1 valid set support_xa------------------------#'
+# for session
+SET @@session.tokudb_support_xa = 0;
+SELECT @@session.tokudb_support_xa;
+SET @@session.tokudb_support_xa = 1;
+SELECT @@session.tokudb_support_xa;
+
+# for global
+SET @@global.tokudb_support_xa = 0;
+SELECT @@global.tokudb_support_xa;
+SET @@global.tokudb_support_xa = 1;
+SELECT @@global.tokudb_support_xa;
+
+--echo '#--------------------case#2 invalid set support_xa------------------------#'
+# for session
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@session.tokudb_support_xa = -0.6;
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@session.tokudb_support_xa = 1.6;
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@session.tokudb_support_xa = "T";
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@session.tokudb_support_xa = "Y";
+SET @@session.tokudb_support_xa = OF;
+SELECT @@session.tokudb_support_xa;
+
+# for global
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@global.tokudb_support_xa = 2;
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@global.tokudb_support_xa = "T";
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@global.tokudb_support_xa = "Y";
+
+
+--echo '#--------------------case#3 xa.test port from tokudb_mariadb/xa.test ------------------------#'
+--echo '#--------------------xa.test with tokudb_support_xa OFF ------------------------#'
+SET @@global.tokudb_support_xa = OFF;
+SELECT @@global.tokudb_support_xa;
+create table t1 (a int) engine=tokudb;
+xa start 'test1';
+insert t1 values (10);
+xa end 'test1';
+xa prepare 'test1';
+xa rollback 'test1';
+select * from t1;
+
+xa start 'test2';
+--error ER_XAER_RMFAIL
+xa start 'test-bad';
+insert t1 values (20);
+--error ER_XAER_RMFAIL
+xa prepare 'test2';
+xa end 'test2';
+xa prepare 'test2';
+xa commit 'test2';
+select * from t1;
+
+xa start 'testa','testb';
+insert t1 values (30);
+
+--error ER_XAER_RMFAIL
+commit;
+
+xa end 'testa','testb';
+
+--error ER_XAER_RMFAIL
+begin;
+--error ER_XAER_RMFAIL
+create table t2 (a int);
+
+connect (con1,localhost,root,,);
+connection con1;
+
+--error ER_XAER_DUPID
+xa start 'testa','testb';
+--error ER_XAER_DUPID
+xa start 'testa','testb', 123;
+
+# gtrid [ , bqual [ , formatID ] ]
+xa start 0x7465737462, 0x2030405060, 0xb;
+insert t1 values (40);
+xa end 'testb',' 0@P`',11;
+xa prepare 'testb',0x2030405060,11;
+
+--error ER_XAER_RMFAIL
+start transaction;
+
+xa recover;
+
+connection default;
+
+xa prepare 'testa','testb';
+
+xa recover;
+
+--error ER_XAER_NOTA
+xa commit 'testb',0x2030405060,11;
+xa rollback 'testa','testb';
+
+--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/
+--error ER_PARSE_ERROR
+xa start 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz';
+
+select * from t1;
+
+disconnect con1;
+connection default;
+drop table t1;
+
+--echo '#--------------------end------------------------#'
+SET @@session.tokudb_support_xa = @session_start_value;
+SELECT @@session.tokudb_support_xa;
+
+SET @@global.tokudb_support_xa = @global_start_value;
+SELECT @@global.tokudb_support_xa;
diff --git a/storage/tokudb/mysql-test/tokudb_add_index/suite.opt b/storage/tokudb/mysql-test/tokudb_add_index/suite.opt
index 8cfa7cacb1f..ea8042b7740 100644
--- a/storage/tokudb/mysql-test/tokudb_add_index/suite.opt
+++ b/storage/tokudb/mysql-test/tokudb_add_index/suite.opt
@@ -1 +1 @@
---tokudb --plugin-load-add=$HA_TOKUDB_SO
+--tokudb --plugin-load-add=$HA_TOKUDB_SO --loose-tokudb-check-jemalloc=0
diff --git a/storage/tokudb/mysql-test/tokudb_alter_table/r/fractional_time_alter_table.result b/storage/tokudb/mysql-test/tokudb_alter_table/r/fractional_time_alter_table.result
deleted file mode 100644
index 830cf373732..00000000000
--- a/storage/tokudb/mysql-test/tokudb_alter_table/r/fractional_time_alter_table.result
+++ /dev/null
@@ -1,174 +0,0 @@
-SET DEFAULT_STORAGE_ENGINE = 'tokudb';
-DROP TABLE IF EXISTS foo;
-set tokudb_disable_slow_alter=on;
-create table foo (
-a timestamp,
-b timestamp(1),
-c timestamp(2),
-d timestamp(3),
-e timestamp(4),
-f timestamp(5),
-g timestamp(6)
-) engine=TokuDB;
-alter table foo change a a timestamp(1);
-ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version
-alter table foo change a a timestamp(2);
-ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version
-alter table foo change a a timestamp(3);
-ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version
-alter table foo change a a timestamp(4);
-ERROR 42000: Table 'foo' uses
an extension that doesn't exist in this MariaDB version -alter table foo change a a timestamp(5); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change a a timestamp(6); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change b b timestamp(2); -show create table foo; -Table Create Table -foo CREATE TABLE `foo` ( - `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, - `b` timestamp(2) NOT NULL DEFAULT '0000-00-00 00:00:00.00', - `c` timestamp(2) NOT NULL DEFAULT '0000-00-00 00:00:00.00', - `d` timestamp(3) NOT NULL DEFAULT '0000-00-00 00:00:00.000', - `e` timestamp(4) NOT NULL DEFAULT '0000-00-00 00:00:00.0000', - `f` timestamp(5) NOT NULL DEFAULT '0000-00-00 00:00:00.00000', - `g` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000' -) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' -alter table foo change b b timestamp(1); -alter table foo change b b timestamp(3); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change b b timestamp(4); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change b b timestamp(5); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change b b timestamp(6); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change d d timestamp(2); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change d d timestamp(5); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change d d timestamp(6); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change f f timestamp(6); -alter table foo change f f timestamp(5); -alter table foo change f f timestamp(4); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -drop table foo; -create table foo ( -a datetime, -b datetime(1), -c datetime(2), -d datetime(3), -e datetime(4), -f datetime(5), -g datetime(6) -) engine=TokuDB; -alter table foo change a a datetime(1); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change a a datetime(2); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change a a datetime(3); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change a a datetime(4); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change a a datetime(5); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change a a datetime(6); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change b b datetime(2); -show create table foo; -Table Create Table -foo CREATE TABLE `foo` ( - `a` datetime DEFAULT NULL, - `b` datetime(2) DEFAULT NULL, - `c` datetime(2) DEFAULT NULL, - `d` datetime(3) DEFAULT NULL, - `e` datetime(4) DEFAULT NULL, - `f` datetime(5) DEFAULT NULL, - `g` datetime(6) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' -alter table foo change b b datetime(1); -alter table foo change b b datetime(3); -ERROR 42000: Table 'foo' 
uses an extension that doesn't exist in this MariaDB version -alter table foo change b b datetime(4); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change b b datetime(5); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change b b datetime(6); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change d d datetime(2); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change d d datetime(5); -alter table foo change d d datetime(6); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change g g datetime(5); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -drop table foo; -create table foo ( -a time, -b time(1), -c time(2), -d time(3), -e time(4), -f time(5), -g time(6) -) engine=TokuDB; -alter table foo change a a time(1); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change a a time(2); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change a a time(3); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change a a time(4); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change a a time(5); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change a a time(6); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change b b time(2); -show create table foo; -Table Create Table -foo CREATE TABLE `foo` ( - `a` time DEFAULT NULL, - `b` time(2) DEFAULT NULL, - `c` time(2) DEFAULT NULL, - `d` time(3) DEFAULT NULL, - `e` time(4) DEFAULT NULL, - `f` time(5) DEFAULT NULL, - `g` time(6) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' -alter table foo change b b time(1); -alter table foo change b b time(3); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change b b time(4); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change b b time(5); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change b b time(6); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change d d time(2); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change d d time(5); -alter table foo change d d time(6); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -alter table foo change g g time(5); -ERROR 42000: Table 'foo' uses an extension that doesn't exist in this MariaDB version -drop table foo; -create table foo (a int, b int) engine=TokuDB; -insert into foo values (1,2); -alter table foo add column tt timestamp(3) NOT NULL Default '1981-11-01 10:11:13.123' after a; -select * from foo; -a tt b -1 1981-11-01 10:11:13.123 2 -alter table foo drop column tt; -alter table foo add column tt datetime(3) NOT NULL Default '1981-11-01 10:11:13.123' after a; -select * from foo; -a tt b -1 1981-11-01 10:11:13.123 2 -alter table foo drop column tt; -alter table foo add column 
tt time(3) NOT NULL Default '10:11:13.123' after a; -select * from foo; -a tt b -1 10:11:13.123 2 -alter table foo drop column tt; -drop table foo; diff --git a/storage/tokudb/mysql-test/tokudb_alter_table/suite.opt b/storage/tokudb/mysql-test/tokudb_alter_table/suite.opt index 8cfa7cacb1f..ea8042b7740 100644 --- a/storage/tokudb/mysql-test/tokudb_alter_table/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_alter_table/suite.opt @@ -1 +1 @@ ---tokudb --plugin-load-add=$HA_TOKUDB_SO +--tokudb --plugin-load-add=$HA_TOKUDB_SO --loose-tokudb-check-jemalloc=0 diff --git a/storage/tokudb/mysql-test/tokudb_alter_table/t/fractional_time_alter_table.test b/storage/tokudb/mysql-test/tokudb_alter_table/t/fractional_time_alter_table.test deleted file mode 100644 index eee661023a8..00000000000 --- a/storage/tokudb/mysql-test/tokudb_alter_table/t/fractional_time_alter_table.test +++ /dev/null @@ -1,164 +0,0 @@ -SET DEFAULT_STORAGE_ENGINE = 'tokudb'; - ---disable_warnings -DROP TABLE IF EXISTS foo; ---enable_warnings -set tokudb_disable_slow_alter=on; - -create table foo ( - a timestamp, - b timestamp(1), - c timestamp(2), - d timestamp(3), - e timestamp(4), - f timestamp(5), - g timestamp(6) -) engine=TokuDB; - ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a timestamp(1); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a timestamp(2); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a timestamp(3); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a timestamp(4); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a timestamp(5); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a timestamp(6); - -alter table foo change b b timestamp(2); -show create table foo; -alter table foo change b b timestamp(1); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b timestamp(3); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b timestamp(4); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b timestamp(5); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b timestamp(6); - ---error ER_UNSUPPORTED_EXTENSION -alter table foo change d d timestamp(2); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change d d timestamp(5); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change d d timestamp(6); - -alter table foo change f f timestamp(6); -alter table foo change f f timestamp(5); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change f f timestamp(4); -drop table foo; - - -create table foo ( - a datetime, - b datetime(1), - c datetime(2), - d datetime(3), - e datetime(4), - f datetime(5), - g datetime(6) -) engine=TokuDB; - ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a datetime(1); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a datetime(2); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a datetime(3); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a datetime(4); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a datetime(5); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a datetime(6); - -alter table foo change b b datetime(2); -show create table foo; -alter table foo change b b datetime(1); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b datetime(3); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b datetime(4); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b datetime(5); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b datetime(6); - ---error ER_UNSUPPORTED_EXTENSION 
-alter table foo change d d datetime(2); -alter table foo change d d datetime(5); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change d d datetime(6); - ---error ER_UNSUPPORTED_EXTENSION -alter table foo change g g datetime(5); -drop table foo; - - -create table foo ( - a time, - b time(1), - c time(2), - d time(3), - e time(4), - f time(5), - g time(6) -) engine=TokuDB; - ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a time(1); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a time(2); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a time(3); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a time(4); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a time(5); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change a a time(6); - -alter table foo change b b time(2); -show create table foo; -alter table foo change b b time(1); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b time(3); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b time(4); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b time(5); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change b b time(6); - ---error ER_UNSUPPORTED_EXTENSION -alter table foo change d d time(2); -alter table foo change d d time(5); ---error ER_UNSUPPORTED_EXTENSION -alter table foo change d d time(6); - ---error ER_UNSUPPORTED_EXTENSION -alter table foo change g g time(5); -drop table foo; - - -create table foo (a int, b int) engine=TokuDB; -insert into foo values (1,2); -alter table foo add column tt timestamp(3) NOT NULL Default '1981-11-01 10:11:13.123' after a; -select * from foo; -alter table foo drop column tt; - -alter table foo add column tt datetime(3) NOT NULL Default '1981-11-01 10:11:13.123' after a; -select * from foo; -alter table foo drop column tt; - - -alter table foo add column tt time(3) NOT NULL Default '10:11:13.123' after a; -select * from foo; -alter table foo drop column tt; - -drop table foo; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/4676.result b/storage/tokudb/mysql-test/tokudb_bugs/r/4676.result deleted file mode 100644 index 2d9161dc653..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/4676.result +++ /dev/null @@ -1,13 +0,0 @@ -DROP TABLE IF EXISTS t; -CREATE TABLE t (a INT) ENGINE='tokudb' PARTITION BY KEY (a) (PARTITION part0, PARTITION part1); -SHOW CREATE TABLE t; -Table Create Table -t CREATE TABLE `t` ( - `a` int(11) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' -/*!50100 PARTITION BY KEY (a) -(PARTITION part0 ENGINE = TokuDB, - PARTITION part1 ENGINE = TokuDB) */ -ALTER TABLE t TRUNCATE PARTITION part0; -ALTER TABLE t TRUNCATE PARTITION part1; -DROP TABLE IF EXISTS t; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/4677.result b/storage/tokudb/mysql-test/tokudb_bugs/r/4677.result deleted file mode 100644 index 72fa3c12696..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/4677.result +++ /dev/null @@ -1,11 +0,0 @@ -drop table if exists t; -create table t (a int primary key) engine='tokudb'; -begin; -insert into t values (1); -insert into t values (3); -begin; -insert into t values (2); -insert into t values (4); -commit; -commit; -drop table t; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/fractional_time.result b/storage/tokudb/mysql-test/tokudb_bugs/r/fractional_time.result deleted file mode 100644 index ae75be98c14..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/fractional_time.result +++ /dev/null @@ -1,62 +0,0 @@ 
-SET DEFAULT_STORAGE_ENGINE = 'tokudb'; -DROP TABLE IF EXISTS foo; -create table foo (a timestamp(6), b timestamp(4), c timestamp(5), primary key (a))engine=tokudb; -insert into foo values ('2010-12-10 14:12:09.123452', '2010-12-10 14:12:09.123416', '2010-12-10 14:12:09.123451'); -insert into foo values ('2010-12-10 14:12:09.123454', '2010-12-10 14:12:09.123416', '2010-12-10 14:12:09.123451'); -insert into foo values ('2010-12-10 14:12:09.123451', '2010-12-10 14:12:09.123416', '2010-12-10 14:12:09.123451'); -insert into foo values ('2010-12-10 14:12:09.123453', '2010-12-10 14:12:09.123416', '2010-12-10 14:12:09.123451'); -select * from foo; -a b c -2010-12-10 14:12:09.123451 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -2010-12-10 14:12:09.123452 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -2010-12-10 14:12:09.123453 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -2010-12-10 14:12:09.123454 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -explain select * from foo where a > '2010-12-10 14:12:09.123452'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE foo range PRIMARY PRIMARY 7 NULL 2 Using where -select * from foo where a > '2010-12-10 14:12:09.123452'; -a b c -2010-12-10 14:12:09.123453 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -2010-12-10 14:12:09.123454 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -alter table foo change a a datetime(6), change b b datetime(4), change c c datetime(5); -show create table foo; -Table Create Table -foo CREATE TABLE `foo` ( - `a` datetime(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000', - `b` datetime(4) DEFAULT NULL, - `c` datetime(5) DEFAULT NULL, - PRIMARY KEY (`a`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' -select * from foo; -a b c -2010-12-10 14:12:09.123451 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -2010-12-10 14:12:09.123452 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -2010-12-10 14:12:09.123453 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -2010-12-10 14:12:09.123454 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -explain select * from foo where a > '2010-12-10 14:12:09.123452'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE foo range PRIMARY PRIMARY 8 NULL 2 Using where -select * from foo where a > '2010-12-10 14:12:09.123452'; -a b c -2010-12-10 14:12:09.123453 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -2010-12-10 14:12:09.123454 2010-12-10 14:12:09.1234 2010-12-10 14:12:09.12345 -drop table foo; -create table foo (a time(6), b time(4), c time(5), primary key (a))engine=TokuDB; -insert into foo values ('14:12:09.123452', '14:12:09.123416', '14:12:09.123451'); -insert into foo values ('14:12:09.123454', '14:12:09.123416', '14:12:09.123451'); -insert into foo values ('14:12:09.123451', '14:12:09.123416', '14:12:09.123451'); -insert into foo values ('14:12:09.123453', '14:12:09.123416', '14:12:09.123451'); -select * from foo; -a b c -14:12:09.123451 14:12:09.1234 14:12:09.12345 -14:12:09.123452 14:12:09.1234 14:12:09.12345 -14:12:09.123453 14:12:09.1234 14:12:09.12345 -14:12:09.123454 14:12:09.1234 14:12:09.12345 -explain select * from foo where a > '14:12:09.123452'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE foo range PRIMARY PRIMARY 6 NULL 2 Using where -select * from foo where a > '14:12:09.123452'; -a b c -14:12:09.123453 14:12:09.1234 14:12:09.12345 -14:12:09.123454 14:12:09.1234 14:12:09.12345 -DROP TABLE foo; diff --git 
a/storage/tokudb/mysql-test/tokudb_bugs/r/lock_uniq_key_empty.result b/storage/tokudb/mysql-test/tokudb_bugs/r/lock_uniq_key_empty.result index 6966aa24ff8..325aef46afe 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/lock_uniq_key_empty.result +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/lock_uniq_key_empty.result @@ -1,6 +1,7 @@ set default_storage_engine=tokudb; drop table if exists t; create table t (id int, unique key(id)); +set tokudb_prelock_empty=OFF; begin; insert into t values (1); begin; @@ -13,6 +14,7 @@ id 2 drop table if exists t; create table t (id int not null, unique key(id)); +set tokudb_prelock_empty=OFF; begin; insert into t values (1); begin; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/mdev5932.result b/storage/tokudb/mysql-test/tokudb_bugs/r/mdev5932.result index 2aaa321fed6..f179ee36f95 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/mdev5932.result +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/mdev5932.result @@ -1,5 +1,6 @@ drop table if exists t1,t2; drop table if exists t1i,t2i; +drop table if exists tsub,t3; CREATE TABLE t1 (a CHAR(3), INDEX(a)) ENGINE=TokuDB; INSERT INTO t1 VALUES ('foo'),( NULL); SELECT * FROM t1 WHERE 'bar' NOT IN ( SELECT t1_1.a FROM t1 AS t1_1, t1 AS t1_2 ); diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/optimize_temp_table_tokudb.result b/storage/tokudb/mysql-test/tokudb_bugs/r/optimize_temp_table_tokudb.result index fe6dba5214e..f3ac15b220d 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/optimize_temp_table_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/optimize_temp_table_tokudb.result @@ -2,5 +2,6 @@ drop table if exists t; create temporary table t (x int) engine=tokudb; optimize table t; Table Op Msg_type Msg_text +test.t optimize note Table does not support optimize, doing recreate + analyze instead test.t optimize status OK drop table t; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result b/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result deleted file mode 100644 index 7b4e16df207..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result +++ /dev/null @@ -1,178 +0,0 @@ -drop table if exists a,b,c,foo; -create table a (a int auto_increment, primary key (a)) engine=TokuDB; -create table b (a int auto_increment, primary key (a)) engine=TokuDB; -create table c (a int auto_increment, primary key (a)) engine=TokuDB; -insert into a values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); -insert into b values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); -insert into c values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); -create table foo (a int, b int, c int, d int, e int, key(a,b,c)) engine=TokuDB; -insert into foo (a,b,c) select * from a,b,c; -flush status; -show status like '%Handler_read_next%'; -Variable_name Value -Handler_read_next 0 -explain select * from foo where a between 5 and 6 and c=10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE foo range a a 5 NULL 800 Using where -select * from foo where a between 5 and 6 and c=10; -a b c d e -5 1 10 NULL NULL -5 2 10 NULL NULL -5 3 10 NULL NULL -5 4 10 NULL NULL -5 5 10 NULL NULL -5 6 10 NULL NULL -5 7 10 NULL NULL -5 8 10 NULL NULL -5 9 10 NULL NULL -5 10 10 NULL NULL -5 11 10 NULL NULL -5 12 10 NULL NULL -5 13 10 NULL NULL -5 14 10 NULL NULL -5 15 10 NULL NULL -5 16 10 NULL NULL -5 17 10 NULL NULL -5 18 10 NULL NULL -5 19 10 NULL NULL -5 20 10 NULL NULL -6 1 10 NULL NULL -6 2 10 NULL NULL -6 3 10 NULL NULL -6 4 10 NULL NULL 
-6 5 10 NULL NULL -6 6 10 NULL NULL -6 7 10 NULL NULL -6 8 10 NULL NULL -6 9 10 NULL NULL -6 10 10 NULL NULL -6 11 10 NULL NULL -6 12 10 NULL NULL -6 13 10 NULL NULL -6 14 10 NULL NULL -6 15 10 NULL NULL -6 16 10 NULL NULL -6 17 10 NULL NULL -6 18 10 NULL NULL -6 19 10 NULL NULL -6 20 10 NULL NULL -show status like '%Handler_read_next%'; -Variable_name Value -Handler_read_next 41 -flush status; -show status like '%Handler_read_prev%'; -Variable_name Value -Handler_read_prev 0 -explain select * from foo where a between 5 and 6 and c=10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE foo range a a 5 NULL 800 Using where -select * from foo where a between 5 and 6 and c=10 order by a desc; -a b c d e -6 20 10 NULL NULL -6 19 10 NULL NULL -6 18 10 NULL NULL -6 17 10 NULL NULL -6 16 10 NULL NULL -6 15 10 NULL NULL -6 14 10 NULL NULL -6 13 10 NULL NULL -6 12 10 NULL NULL -6 11 10 NULL NULL -6 10 10 NULL NULL -6 9 10 NULL NULL -6 8 10 NULL NULL -6 7 10 NULL NULL -6 6 10 NULL NULL -6 5 10 NULL NULL -6 4 10 NULL NULL -6 3 10 NULL NULL -6 2 10 NULL NULL -6 1 10 NULL NULL -5 20 10 NULL NULL -5 19 10 NULL NULL -5 18 10 NULL NULL -5 17 10 NULL NULL -5 16 10 NULL NULL -5 15 10 NULL NULL -5 14 10 NULL NULL -5 13 10 NULL NULL -5 12 10 NULL NULL -5 11 10 NULL NULL -5 10 10 NULL NULL -5 9 10 NULL NULL -5 8 10 NULL NULL -5 7 10 NULL NULL -5 6 10 NULL NULL -5 5 10 NULL NULL -5 4 10 NULL NULL -5 3 10 NULL NULL -5 2 10 NULL NULL -5 1 10 NULL NULL -show status like '%Handler_read_prev%'; -Variable_name Value -Handler_read_prev 41 -flush status; -show status like '%Handler_read_prev%'; -Variable_name Value -Handler_read_prev 0 -explain select * from foo where a > 19 and c=10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE foo range a a 5 NULL 1713 Using where -select * from foo where a > 19 and c=10 order by a desc; -a b c d e -20 20 10 NULL NULL -20 19 10 NULL NULL -20 18 10 NULL NULL -20 17 10 NULL NULL -20 16 10 NULL NULL -20 15 10 NULL NULL -20 14 10 NULL NULL -20 13 10 NULL NULL -20 12 10 NULL NULL -20 11 10 NULL NULL -20 10 10 NULL NULL -20 9 10 NULL NULL -20 8 10 NULL NULL -20 7 10 NULL NULL -20 6 10 NULL NULL -20 5 10 NULL NULL -20 4 10 NULL NULL -20 3 10 NULL NULL -20 2 10 NULL NULL -20 1 10 NULL NULL -show status like '%Handler_read_prev%'; -Variable_name Value -Handler_read_prev 21 -flush status; -show status like '%Handler_read_next%'; -Variable_name Value -Handler_read_next 0 -explain select * from foo where a > 19 and c=10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE foo range a a 5 NULL 1402 Using where -select * from foo where a > 19 and c=10; -a b c d e -20 1 10 NULL NULL -20 2 10 NULL NULL -20 3 10 NULL NULL -20 4 10 NULL NULL -20 5 10 NULL NULL -20 6 10 NULL NULL -20 7 10 NULL NULL -20 8 10 NULL NULL -20 9 10 NULL NULL -20 10 10 NULL NULL -20 11 10 NULL NULL -20 12 10 NULL NULL -20 13 10 NULL NULL -20 14 10 NULL NULL -20 15 10 NULL NULL -20 16 10 NULL NULL -20 17 10 NULL NULL -20 18 10 NULL NULL -20 19 10 NULL NULL -20 20 10 NULL NULL -show status like '%Handler_read_next%'; -Variable_name Value -Handler_read_next 21 -drop table foo,a,b,c; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/tokudb718.result b/storage/tokudb/mysql-test/tokudb_bugs/r/tokudb718.result new file mode 100644 index 00000000000..022a4d56d75 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/tokudb718.result @@ -0,0 +1,9 @@ +set default_storage_engine='tokudb'; +drop table if exists t; +create table t (id int primary 
key); +begin; +insert into t values (1),(2); +select * from information_schema.tokudb_fractal_tree_info; +ERROR HY000: Unknown error -30994 +commit; +drop table t; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/tokudb_mrr.result b/storage/tokudb/mysql-test/tokudb_bugs/r/tokudb_mrr.result deleted file mode 100644 index 57ae386a78f..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/tokudb_mrr.result +++ /dev/null @@ -1,851 +0,0 @@ -drop table if exists t1,t2,t3,t4; -set @save_storage_engine= @@storage_engine; -set storage_engine=TokuDB; -set @innodb_mrr_tmp=@@optimizer_switch; -set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on'; -create table t1(a int); -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' -insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t2(a int); -insert into t2 select A.a + 10*(B.a + 10*C.a) from t1 A, t1 B, t1 C; -create table t3 ( -a char(8) not null, b char(8) not null, filler char(200), -key(a) -); -insert into t3 select @a:=concat('c-', 1000+ A.a, '=w'), @a, 'filler' from t2 A; -insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 2000+A.a, '=w'), -'filler-1' from t2 A; -insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 3000+A.a, '=w'), -'filler-2' from t2 A; -select a,filler from t3 where a >= 'c-9011=w'; -a filler -select a,filler from t3 where a >= 'c-1011=w' and a <= 'c-1015=w'; -a filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1011=w filler-1 -c-1012=w filler-1 -c-1013=w filler-1 -c-1014=w filler-1 -c-1015=w filler-1 -c-1011=w filler-2 -c-1012=w filler-2 -c-1013=w filler-2 -c-1014=w filler-2 -c-1015=w filler-2 -select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or -(a>='c-1014=w' and a <= 'c-1015=w'); -a filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1011=w filler-1 -c-1012=w filler-1 -c-1013=w filler-1 -c-1014=w filler-1 -c-1015=w filler-1 -c-1011=w filler-2 -c-1012=w filler-2 -c-1013=w filler-2 -c-1014=w filler-2 -c-1015=w filler-2 -insert into t3 values ('c-1013=z', 'c-1013=z', 'err'); -insert into t3 values ('a-1014=w', 'a-1014=w', 'err'); -select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or -(a>='c-1014=w' and a <= 'c-1015=w'); -a filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1011=w filler-1 -c-1012=w filler-1 -c-1013=w filler-1 -c-1014=w filler-1 -c-1015=w filler-1 -c-1011=w filler-2 -c-1012=w filler-2 -c-1013=w filler-2 -c-1014=w filler-2 -c-1015=w filler-2 -delete from t3 where b in ('c-1013=z', 'a-1014=w'); -select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or -a='c-1014=w' or a='c-1015=w'; -a filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1011=w filler-1 -c-1012=w filler-1 -c-1013=w filler-1 -c-1014=w filler-1 -c-1015=w filler-1 -c-1011=w filler-2 -c-1012=w filler-2 -c-1013=w filler-2 -c-1014=w filler-2 -c-1015=w filler-2 -insert into t3 values ('c-1013=w', 'del-me', 'inserted'); -select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or -a='c-1014=w' or a='c-1015=w'; -a filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1011=w filler-1 -c-1012=w filler-1 -c-1013=w filler-1 -c-1014=w filler-1 -c-1015=w filler-1 -c-1011=w filler-2 -c-1012=w 
filler-2 -c-1013=w filler-2 -c-1014=w filler-2 -c-1015=w filler-2 -c-1013=w inserted -delete from t3 where b='del-me'; -alter table t3 add primary key(b); -select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or -b IN ('c-1019=w', 'c-1020=w', 'c-1021=w', -'c-1022=w', 'c-1023=w', 'c-1024=w'); -b filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1016=w filler -c-1017=w filler -c-1018=w filler -c-1019=w filler -c-1020=w filler -c-1021=w filler -c-1022=w filler -c-1023=w filler -c-1024=w filler -select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1020=w') or -b IN ('c-1021=w', 'c-1022=w', 'c-1023=w'); -b filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1016=w filler -c-1017=w filler -c-1018=w filler -c-1019=w filler -c-1020=w filler -c-1021=w filler -c-1022=w filler -c-1023=w filler -select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or -b IN ('c-1019=w', 'c-1020=w') or -(b>='c-1021=w' and b<= 'c-1023=w'); -b filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1016=w filler -c-1017=w filler -c-1018=w filler -c-1019=w filler -c-1020=w filler -c-1021=w filler -c-1022=w filler -c-1023=w filler -drop table if exists t4; -create table t4 (a varchar(10), b int, c char(10), filler char(200), -key idx1 (a, b, c)); -insert into t4 (filler) select concat('NULL-', 15-a) from t2 order by a limit 15; -insert into t4 (a,b,c,filler) -select 'b-1',NULL,'c-1', concat('NULL-', 15-a) from t2 order by a limit 15; -insert into t4 (a,b,c,filler) -select 'b-1',NULL,'c-222', concat('NULL-', 15-a) from t2 order by a limit 15; -insert into t4 (a,b,c,filler) -select 'bb-1',NULL,'cc-2', concat('NULL-', 15-a) from t2 order by a limit 15; -insert into t4 (a,b,c,filler) -select 'zz-1',NULL,'cc-2', 'filler-data' from t2 order by a limit 500; -explain -select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1' - or c='no-such-row2'); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Rowid-ordered scan -select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1' - or c='no-such-row2'); -a b c filler -NULL NULL NULL NULL-15 -NULL NULL NULL NULL-14 -NULL NULL NULL NULL-13 -NULL NULL NULL NULL-12 -NULL NULL NULL NULL-11 -NULL NULL NULL NULL-10 -NULL NULL NULL NULL-9 -NULL NULL NULL NULL-8 -NULL NULL NULL NULL-7 -NULL NULL NULL NULL-6 -NULL NULL NULL NULL-5 -NULL NULL NULL NULL-4 -NULL NULL NULL NULL-3 -NULL NULL NULL NULL-2 -NULL NULL NULL NULL-1 -explain -select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2'); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Rowid-ordered scan -select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2'); -a b c filler -b-1 NULL c-1 NULL-15 -b-1 NULL c-1 NULL-14 -b-1 NULL c-1 NULL-13 -b-1 NULL c-1 NULL-12 -b-1 NULL c-1 NULL-11 -b-1 NULL c-1 NULL-10 -b-1 NULL c-1 NULL-9 -b-1 NULL c-1 NULL-8 -b-1 NULL c-1 NULL-7 -b-1 NULL c-1 NULL-6 -b-1 NULL c-1 NULL-5 -b-1 NULL c-1 NULL-4 -b-1 NULL c-1 NULL-3 -b-1 NULL c-1 NULL-2 -b-1 NULL c-1 NULL-1 -bb-1 NULL cc-2 NULL-15 -bb-1 NULL cc-2 NULL-14 -bb-1 NULL cc-2 NULL-13 -bb-1 NULL cc-2 NULL-12 -bb-1 NULL cc-2 NULL-11 -bb-1 NULL cc-2 NULL-10 -bb-1 NULL cc-2 NULL-9 -bb-1 NULL cc-2 NULL-8 -bb-1 NULL cc-2 NULL-7 -bb-1 NULL cc-2 NULL-6 -bb-1 NULL cc-2 NULL-5 -bb-1 
NULL cc-2 NULL-4 -bb-1 NULL cc-2 NULL-3 -bb-1 NULL cc-2 NULL-2 -bb-1 NULL cc-2 NULL-1 -select * from t4 ignore index(idx1) where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2'); -a b c filler -b-1 NULL c-1 NULL-15 -b-1 NULL c-1 NULL-14 -b-1 NULL c-1 NULL-13 -b-1 NULL c-1 NULL-12 -b-1 NULL c-1 NULL-11 -b-1 NULL c-1 NULL-10 -b-1 NULL c-1 NULL-9 -b-1 NULL c-1 NULL-8 -b-1 NULL c-1 NULL-7 -b-1 NULL c-1 NULL-6 -b-1 NULL c-1 NULL-5 -b-1 NULL c-1 NULL-4 -b-1 NULL c-1 NULL-3 -b-1 NULL c-1 NULL-2 -b-1 NULL c-1 NULL-1 -bb-1 NULL cc-2 NULL-15 -bb-1 NULL cc-2 NULL-14 -bb-1 NULL cc-2 NULL-13 -bb-1 NULL cc-2 NULL-12 -bb-1 NULL cc-2 NULL-11 -bb-1 NULL cc-2 NULL-10 -bb-1 NULL cc-2 NULL-9 -bb-1 NULL cc-2 NULL-8 -bb-1 NULL cc-2 NULL-7 -bb-1 NULL cc-2 NULL-6 -bb-1 NULL cc-2 NULL-5 -bb-1 NULL cc-2 NULL-4 -bb-1 NULL cc-2 NULL-3 -bb-1 NULL cc-2 NULL-2 -bb-1 NULL cc-2 NULL-1 -drop table t1, t2, t3, t4; -create table t1 (a int, b int not null,unique key (a,b),index(b)); -insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6); -Warnings: -Warning 1062 Duplicate entry '6-6' for key 'a' -create table t2 like t1; -insert into t2 select * from t1; -alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10)); -select * from t1 where a is null; -a b c -NULL 7 0 -NULL 9 0 -NULL 9 0 -select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3; -a b c -NULL 9 0 -NULL 9 0 -select * from t1 where a is null and b=9 or a is null and b=7 limit 3; -a b c -NULL 7 0 -NULL 9 0 -NULL 9 0 -drop table t1, t2; -set storage_engine= @save_storage_engine; -set @mrr_buffer_size_save= @@mrr_buffer_size; -set mrr_buffer_size=64; -Warnings: -Warning 1292 Truncated incorrect mrr_buffer_size value: '64' -create table t1(a int); -insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t2(a char(8), b char(8), c char(8), filler char(100), key(a,b,c) ) engine=TokuDB; -insert into t2 select -concat('a-', 1000 + A.a, '-a'), -concat('b-', 1000 + B.a, '-b'), -concat('c-', 1000 + C.a, '-c'), -'filler' -from t1 A, t1 B, t1 C; -explain -select count(length(a) + length(filler)) from t2 where a>='a-1000-a' and a <'a-1001-a'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range a a 9 NULL 100 Using where; Rowid-ordered scan -select count(length(a) + length(filler)) from t2 where a>='a-1000-a' and a <'a-1001-a'; -count(length(a) + length(filler)) -100 -drop table t2; -create table t2 (a char(100), b char(100), c char(100), d int, -filler char(10), key(d), primary key (a,b,c)) engine= tokudb; -insert into t2 select A.a, B.a, B.a, A.a, 'filler' from t1 A, t1 B; -explain select * from t2 force index (d) where d < 10; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range d d 5 NULL # Using where; Rowid-ordered scan -drop table t2; -drop table t1; -set @@mrr_buffer_size= @mrr_buffer_size_save; -create table t1 (f1 int not null, f2 int not null,f3 int not null, f4 char(1), primary key (f1,f2), key ix(f3))Engine=tokuDB; -select * from t1 where (f3>=5 and f3<=10) or (f3>=1 and f3<=4); -f1 f2 f3 f4 -1 1 1 A -2 2 2 A -3 3 3 A -4 4 4 A -5 5 5 A -6 6 6 A -7 7 7 A -8 8 8 A -9 9 9 A -10 10 10 A -drop table t1; - -BUG#37977: Wrong result returned on GROUP BY + OR + innodb - -CREATE TABLE t1 ( -`pk` int(11) NOT NULL AUTO_INCREMENT, -`int_nokey` int(11) NOT NULL, -`int_key` int(11) NOT NULL, -`date_key` date NOT NULL, -`date_nokey` date 
NOT NULL, -`time_key` time NOT NULL, -`time_nokey` time NOT NULL, -`datetime_key` datetime NOT NULL, -`datetime_nokey` datetime NOT NULL, -`varchar_key` varchar(5) DEFAULT NULL, -`varchar_nokey` varchar(5) DEFAULT NULL, -PRIMARY KEY (`pk`), -KEY `int_key` (`int_key`), -KEY `date_key` (`date_key`), -KEY `time_key` (`time_key`), -KEY `datetime_key` (`datetime_key`), -KEY `varchar_key` (`varchar_key`) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES -(1,5,5,'2009-10-16','2009-10-16','09:28:15','09:28:15','2007-09-14 05:34:08','2007-09-14 05:34:08','qk','qk'), -(2,6,6,'0000-00-00','0000-00-00','23:06:39','23:06:39','0000-00-00 00:00:00','0000-00-00 00:00:00','j','j'), -(3,10,10,'2000-12-18','2000-12-18','22:16:19','22:16:19','2006-11-04 15:42:50','2006-11-04 15:42:50','aew','aew'), -(4,0,0,'2001-09-18','2001-09-18','00:00:00','00:00:00','2004-03-23 13:23:35','2004-03-23 13:23:35',NULL,NULL), -(5,6,6,'2007-08-16','2007-08-16','22:13:38','22:13:38','2004-08-19 11:01:28','2004-08-19 11:01:28','qu','qu'); -select pk from t1 WHERE `varchar_key` > 'kr' group by pk; -pk -1 -5 -select pk from t1 WHERE `int_nokey` IS NULL OR `varchar_key` > 'kr' group by pk; -pk -1 -5 -drop table t1; -# -# BUG#39447: Error with NOT NULL condition and LIMIT 1 -# -CREATE TABLE t1 ( -id int(11) NOT NULL, -parent_id int(11) DEFAULT NULL, -name varchar(10) DEFAULT NULL, -PRIMARY KEY (id), -KEY ind_parent_id (parent_id) -) ENGINE=TokuDB; -insert into t1 (id, parent_id, name) values -(10,NULL,'A'), -(20,10,'B'), -(30,10,'C'), -(40,NULL,'D'), -(50,40,'E'), -(60,40,'F'), -(70,NULL,'J'); -SELECT id FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1; -id -60 -This must show type=index, extra=Using where -explain SELECT * FROM t1 FORCE INDEX (PRIMARY) WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL PRIMARY 4 NULL 1 Using where -SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1; -id parent_id name -60 40 F -drop table t1; -# -# BUG#628785: multi_range_read.cc:430: int DsMrr_impl::dsmrr_init(): Assertion `do_sort_keys || do_rowid_fetch' failed -# -set @save_join_cache_level= @@join_cache_level; -set @save_optimizer_switch= @@optimizer_switch; -SET SESSION join_cache_level=9; -Warnings: -Warning 1292 Truncated incorrect join_cache_level value: '9' -SET SESSION optimizer_switch='mrr_sort_keys=off'; -CREATE TABLE `t1` ( -`pk` int(11) NOT NULL AUTO_INCREMENT, -`col_int_nokey` int(11) DEFAULT NULL, -`col_int_key` int(11) DEFAULT NULL, -`col_varchar_key` varchar(1) DEFAULT NULL, -`col_varchar_nokey` varchar(1) DEFAULT NULL, -PRIMARY KEY (`pk`), -KEY `col_varchar_key` (`col_varchar_key`,`col_int_key`) -) ENGINE=TokuDB AUTO_INCREMENT=101 DEFAULT CHARSET=latin1; -INSERT INTO `t1` VALUES (1,6,NULL,'r','r'); -INSERT INTO `t1` VALUES (2,8,0,'c','c'); -INSERT INTO `t1` VALUES (97,7,0,'z','z'); -INSERT INTO `t1` VALUES (98,1,1,'j','j'); -INSERT INTO `t1` VALUES (99,7,8,'c','c'); -INSERT INTO `t1` VALUES (100,2,5,'f','f'); -SELECT table1 .`col_varchar_key` -FROM t1 table1 STRAIGHT_JOIN ( t1 table3 JOIN t1 table4 ON table4 .`pk` = table3 .`col_int_nokey` ) ON table4 .`col_varchar_nokey` ; -col_varchar_key -Warnings: -Warning 1292 Truncated incorrect DOUBLE value: 'r' -Warning 1292 Truncated incorrect DOUBLE value: 'r' -Warning 1292 Truncated incorrect DOUBLE value: 'r' -Warning 1292 Truncated incorrect DOUBLE value: 'r' -Warning 1292 Truncated incorrect DOUBLE value: 'r' -Warning 1292 Truncated incorrect DOUBLE value: 'r' -Warning 
1292 Truncated incorrect DOUBLE value: 'c' -Warning 1292 Truncated incorrect DOUBLE value: 'c' -Warning 1292 Truncated incorrect DOUBLE value: 'c' -Warning 1292 Truncated incorrect DOUBLE value: 'c' -Warning 1292 Truncated incorrect DOUBLE value: 'c' -Warning 1292 Truncated incorrect DOUBLE value: 'c' -DROP TABLE t1; -set join_cache_level=@save_join_cache_level; -set optimizer_switch=@save_optimizer_switch; -# -# BUG#623300: Query with join_cache_level = 6 returns extra rows in maria-5.3-dsmrr-cpk -# -CREATE TABLE t1 ( -pk int(11) NOT NULL AUTO_INCREMENT, -col_int_nokey int(11) DEFAULT NULL, -PRIMARY KEY (pk) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES (10,7); -INSERT INTO t1 VALUES (11,1); -INSERT INTO t1 VALUES (12,5); -INSERT INTO t1 VALUES (13,3); -INSERT INTO t1 VALUES (14,6); -INSERT INTO t1 VALUES (15,92); -INSERT INTO t1 VALUES (16,7); -INSERT INTO t1 VALUES (17,NULL); -INSERT INTO t1 VALUES (18,3); -INSERT INTO t1 VALUES (19,5); -INSERT INTO t1 VALUES (20,1); -INSERT INTO t1 VALUES (21,2); -INSERT INTO t1 VALUES (22,NULL); -INSERT INTO t1 VALUES (23,1); -INSERT INTO t1 VALUES (24,0); -INSERT INTO t1 VALUES (25,210); -INSERT INTO t1 VALUES (26,8); -INSERT INTO t1 VALUES (27,7); -INSERT INTO t1 VALUES (28,5); -INSERT INTO t1 VALUES (29,NULL); -CREATE TABLE t2 ( -pk int(11) NOT NULL AUTO_INCREMENT, -col_int_nokey int(11) DEFAULT NULL, -PRIMARY KEY (pk) -) ENGINE=TokuDB; -INSERT INTO t2 VALUES (1,NULL); -INSERT INTO t2 VALUES (2,7); -INSERT INTO t2 VALUES (3,9); -INSERT INTO t2 VALUES (4,7); -INSERT INTO t2 VALUES (5,4); -INSERT INTO t2 VALUES (6,2); -INSERT INTO t2 VALUES (7,6); -INSERT INTO t2 VALUES (8,8); -INSERT INTO t2 VALUES (9,NULL); -INSERT INTO t2 VALUES (10,5); -INSERT INTO t2 VALUES (11,NULL); -INSERT INTO t2 VALUES (12,6); -INSERT INTO t2 VALUES (13,188); -INSERT INTO t2 VALUES (14,2); -INSERT INTO t2 VALUES (15,1); -INSERT INTO t2 VALUES (16,1); -INSERT INTO t2 VALUES (17,0); -INSERT INTO t2 VALUES (18,9); -INSERT INTO t2 VALUES (19,NULL); -INSERT INTO t2 VALUES (20,4); -set @my_save_join_cache_level= @@join_cache_level; -SET join_cache_level = 0; -SELECT table2.col_int_nokey -FROM t1 table1 JOIN t2 table2 ON table2.pk = table1.col_int_nokey -WHERE table1.pk ; -col_int_nokey -2 -4 -4 -4 -6 -6 -6 -7 -8 -9 -9 -NULL -NULL -NULL -SET join_cache_level = 6; -SELECT table2.col_int_nokey -FROM t1 table1 JOIN t2 table2 ON table2.pk = table1.col_int_nokey -WHERE table1.pk ; -col_int_nokey -2 -4 -4 -4 -6 -6 -6 -7 -8 -9 -9 -NULL -NULL -NULL -set join_cache_level= @my_save_join_cache_level; -drop table t1, t2; -# -# BUG#623315: Query returns less rows when run with join_cache_level=6 on maria-5.3-dsmrr-cpk -# -CREATE TABLE t1 ( -pk int(11) NOT NULL AUTO_INCREMENT, -col_int_nokey int(11) DEFAULT NULL, -col_int_key int(11) DEFAULT NULL, -col_varchar_key varchar(1) DEFAULT NULL, -PRIMARY KEY (pk), -KEY col_int_key (col_int_key), -KEY col_varchar_key (col_varchar_key,col_int_key) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES (10,7,8,'v'); -INSERT INTO t1 VALUES (11,1,9,'r'); -INSERT INTO t1 VALUES (12,5,9,'a'); -INSERT INTO t1 VALUES (13,3,186,'m'); -INSERT INTO t1 VALUES (14,6,NULL,'y'); -INSERT INTO t1 VALUES (15,92,2,'j'); -INSERT INTO t1 VALUES (16,7,3,'d'); -INSERT INTO t1 VALUES (17,NULL,0,'z'); -INSERT INTO t1 VALUES (18,3,133,'e'); -INSERT INTO t1 VALUES (19,5,1,'h'); -INSERT INTO t1 VALUES (20,1,8,'b'); -INSERT INTO t1 VALUES (21,2,5,'s'); -INSERT INTO t1 VALUES (22,NULL,5,'e'); -INSERT INTO t1 VALUES (23,1,8,'j'); -INSERT INTO t1 VALUES (24,0,6,'e'); -INSERT INTO t1 VALUES 
(25,210,51,'f'); -INSERT INTO t1 VALUES (26,8,4,'v'); -INSERT INTO t1 VALUES (27,7,7,'x'); -INSERT INTO t1 VALUES (28,5,6,'m'); -INSERT INTO t1 VALUES (29,NULL,4,'c'); -set @my_save_join_cache_level= @@join_cache_level; -SET join_cache_level=6; -select count(*) from -(SELECT table2.pk FROM -t1 LEFT JOIN t1 table2 JOIN t1 table3 ON table3.col_varchar_key = table2.col_varchar_key -ON table3.col_int_nokey) foo; -count(*) -480 -SET join_cache_level=0; -select count(*) from -(SELECT table2.pk FROM -t1 LEFT JOIN t1 table2 JOIN t1 table3 ON table3.col_varchar_key = table2.col_varchar_key -ON table3.col_int_nokey) foo; -count(*) -480 -set join_cache_level= @my_save_join_cache_level; -drop table t1; -# -# BUG#671340: Diverging results in with mrr_sort_keys=ON|OFF and join_cache_level=5 -# -CREATE TABLE t1 ( -pk int(11) NOT NULL AUTO_INCREMENT, -col_int_key int(11) NOT NULL, -col_varchar_key varchar(1) NOT NULL, -col_varchar_nokey varchar(1) NOT NULL, -PRIMARY KEY (pk), -KEY col_int_key (col_int_key), -KEY col_varchar_key (col_varchar_key,col_int_key) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES -(10,8,'v','v'), -(11,8,'f','f'), -(12,5,'v','v'), -(13,8,'s','s'), -(14,8,'a','a'), -(15,6,'p','p'), -(16,7,'z','z'), -(17,2,'a','a'), -(18,5,'h','h'), -(19,7,'h','h'), -(20,2,'v','v'), -(21,9,'v','v'), -(22,142,'b','b'), -(23,3,'y','y'), -(24,0,'v','v'), -(25,3,'m','m'), -(26,5,'z','z'), -(27,9,'n','n'), -(28,1,'d','d'), -(29,107,'a','a'); -CREATE TABLE t2 ( -pk int(11) NOT NULL AUTO_INCREMENT, -col_int_key int(11) NOT NULL, -col_varchar_key varchar(1) NOT NULL, -col_varchar_nokey varchar(1) NOT NULL, -PRIMARY KEY (pk), -KEY col_int_key (col_int_key), -KEY col_varchar_key (col_varchar_key,col_int_key) -) ENGINE=TokuDB; -INSERT INTO t2 VALUES -(1,9,'x','x'), -(2,5,'g','g'), -(3,1,'o','o'), -(4,0,'g','g'), -(5,1,'v','v'), -(6,190,'m','m'), -(7,6,'x','x'), -(8,3,'c','c'), -(9,4,'z','z'), -(10,3,'i','i'), -(11,186,'x','x'), -(12,1,'g','g'), -(13,8,'q','q'), -(14,226,'m','m'), -(15,133,'p','p'), -(16,6,'e','e'), -(17,3,'t','t'), -(18,8,'j','j'), -(19,5,'h','h'), -(20,7,'w','w'); -SELECT count(*), sum(table1.col_int_key*table2.pk) -FROM -t2 AS table1, t1 AS table2, t2 AS table3 -WHERE -table3.col_varchar_nokey = table2.col_varchar_key AND table3.pk > table2.col_varchar_nokey ; -count(*) sum(table1.col_int_key*table2.pk) -240 185955 -Warnings: -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' 
-Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'v' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'p' -Warning 1292 Truncated incorrect DOUBLE value: 'z' -Warning 1292 Truncated incorrect DOUBLE value: 'z' -Warning 1292 Truncated incorrect DOUBLE value: 'z' -Warning 1292 Truncated incorrect DOUBLE value: 'z' -set @my_save_join_cache_level= @@join_cache_level; -set @my_save_join_buffer_size= @@join_buffer_size; -set join_cache_level=6; -set join_buffer_size=1536; -SELECT count(*), sum(table1.col_int_key*table2.pk) -FROM -t2 AS table1, t1 AS table2, t2 AS table3 -WHERE -table3.col_varchar_nokey = table2.col_varchar_key AND table3.pk > table2.col_varchar_nokey ; -count(*) sum(table1.col_int_key*table2.pk) -240 185955 -drop table t1,t2; -set join_cache_level=@my_save_join_cache_level; -set join_buffer_size=@my_save_join_buffer_size; -# -# BUG#665669: Result differences on query re-execution -# -create table t1 (pk int primary key, b int, c int default 0, index idx(b)) engine=Tokudb; -insert into t1(pk,b) values (3, 30), (2, 20), (9, 90), (7, 70), (4, 40), (5, 50), (10, 100), (12, 120); -set @bug665669_tmp=@@optimizer_switch; -set optimizer_switch='mrr=off'; -explain select * from t1 where b > 1000; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range idx idx 5 NULL 1 Using where -# The following two must produce indentical results: -select * from t1 where pk < 2 or pk between 3 and 4; -pk b c -3 30 0 -4 40 0 -select * from t1 where pk < 2 or pk between 3 and 4; -pk b c -3 30 0 -4 40 0 -drop table t1; -set optimizer_switch = @bug665669_tmp; -# -# Bug#43360 - Server crash with a simple multi-table update -# -CREATE TABLE t1 ( -a CHAR(2) NOT NULL PRIMARY KEY, -b VARCHAR(20) 
NOT NULL, -KEY (b) -) ENGINE=TokuDB; -CREATE TABLE t2 ( -a CHAR(2) NOT NULL PRIMARY KEY, -b VARCHAR(20) NOT NULL, -KEY (b) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES -('AB','MySQLAB'), -('JA','Sun Microsystems'), -('MS','Microsoft'), -('IB','IBM- Inc.'), -('GO','Google Inc.'); -INSERT INTO t2 VALUES -('AB','Sweden'), -('JA','USA'), -('MS','United States of America'), -('IB','North America'), -('GO','South America'); -Warnings: -Warning 1265 Data truncated for column 'b' at row 3 -UPDATE t1,t2 SET t1.b=UPPER(t1.b) WHERE t1.b LIKE 'United%'; -SELECT * FROM t1; -a b -GO Google Inc. -IB IBM- Inc. -MS Microsoft -AB MySQLAB -JA Sun Microsystems -SELECT * FROM t2; -a b -IB North America -GO South America -AB Sweden -MS United States of Ame -JA USA -DROP TABLE t1,t2; -# -# Testcase backport: Bug#43249 -# (Note: Fixed by patch for BUG#42580) -# -CREATE TABLE t1(c1 TIME NOT NULL, c2 TIME NULL, c3 DATE, PRIMARY KEY(c1), UNIQUE INDEX(c2)) engine=Tokudb; -INSERT INTO t1 VALUES('8:29:45',NULL,'2009-02-01'); -SELECT * FROM t1 WHERE c2 <=> NULL ORDER BY c2 LIMIT 2; -c1 c2 c3 -08:29:45 NULL 2009-02-01 -SELECT * FROM t1 WHERE c2 <=> NULL ORDER BY c2 LIMIT 2; -c1 c2 c3 -08:29:45 NULL 2009-02-01 -drop table `t1`; -# -# BUG#707925: Wrong result with join_cache_level=6 optimizer_use_mrr = -# force (incremental, BKA join) -# -set @_save_join_cache_level= @@join_cache_level; -set join_cache_level = 6; -CREATE TABLE t1 ( -f1 int(11), f2 int(11), f3 varchar(1), f4 varchar(1), -PRIMARY KEY (f1), -KEY (f3), -KEY (f2) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES ('11','8','f','f'),('12','5','v','v'),('13','8','s','s'), -('14','8','a','a'),('15','6','p','p'),('16','7','z','z'),('17','2','a','a'), -('18','5','h','h'),('19','7','h','h'),('20','2','v','v'),('21','9','v','v'), -('22','142','b','b'),('23','3','y','y'),('24','0','v','v'),('25','3','m','m'), -('26','5','z','z'),('27','9','n','n'),('28','1','d','d'),('29','107','a','a'); -select count(*) from ( -SELECT alias1.f2 -FROM -t1 AS alias1 JOIN ( -t1 AS alias2 FORCE KEY (f3) JOIN -t1 AS alias3 FORCE KEY (f2) ON alias3.f2 = alias2.f2 AND alias3.f4 = alias2.f3 -) ON alias3.f1 <= alias2.f1 -) X; -count(*) -361 -set join_cache_level=@_save_join_cache_level; -set optimizer_switch= @innodb_mrr_tmp; -drop table t1; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/tokudb_mrr2.result b/storage/tokudb/mysql-test/tokudb_bugs/r/tokudb_mrr2.result deleted file mode 100644 index 99c72264e06..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/tokudb_mrr2.result +++ /dev/null @@ -1,441 +0,0 @@ -drop table if exists t1,t2,t3,t4; -set @maria_mrr_tmp=@@optimizer_switch; -set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on'; -set @mrr_buffer_size_save= @@mrr_buffer_size; -set @save_storage_engine= @@storage_engine; -set storage_engine=TokuDB; -create table t1(a int); -show create table t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' -insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t2(a int); -insert into t2 select A.a + 10*(B.a + 10*C.a) from t1 A, t1 B, t1 C; -create table t3 ( -a char(8) not null, b char(8) not null, filler char(200), -key(a) -); -insert into t3 select @a:=concat('c-', 1000+ A.a, '=w'), @a, 'filler' from t2 A; -insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 2000+A.a, '=w'), -'filler-1' from t2 A; -insert into t3 select concat('c-', 1000+A.a, '=w'), concat('c-', 3000+A.a, '=w'), -'filler-2' from t2 A; 
-select a,filler from t3 where a >= 'c-9011=w'; -a filler -select a,filler from t3 where a >= 'c-1011=w' and a <= 'c-1015=w'; -a filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1011=w filler-1 -c-1012=w filler-1 -c-1013=w filler-1 -c-1014=w filler-1 -c-1015=w filler-1 -c-1011=w filler-2 -c-1012=w filler-2 -c-1013=w filler-2 -c-1014=w filler-2 -c-1015=w filler-2 -select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or -(a>='c-1014=w' and a <= 'c-1015=w'); -a filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1011=w filler-1 -c-1012=w filler-1 -c-1013=w filler-1 -c-1014=w filler-1 -c-1015=w filler-1 -c-1011=w filler-2 -c-1012=w filler-2 -c-1013=w filler-2 -c-1014=w filler-2 -c-1015=w filler-2 -insert into t3 values ('c-1013=z', 'c-1013=z', 'err'); -insert into t3 values ('a-1014=w', 'a-1014=w', 'err'); -select a,filler from t3 where (a>='c-1011=w' and a <= 'c-1013=w') or -(a>='c-1014=w' and a <= 'c-1015=w'); -a filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1011=w filler-1 -c-1012=w filler-1 -c-1013=w filler-1 -c-1014=w filler-1 -c-1015=w filler-1 -c-1011=w filler-2 -c-1012=w filler-2 -c-1013=w filler-2 -c-1014=w filler-2 -c-1015=w filler-2 -delete from t3 where b in ('c-1013=z', 'a-1014=w'); -select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or -a='c-1014=w' or a='c-1015=w'; -a filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1011=w filler-1 -c-1012=w filler-1 -c-1013=w filler-1 -c-1014=w filler-1 -c-1015=w filler-1 -c-1011=w filler-2 -c-1012=w filler-2 -c-1013=w filler-2 -c-1014=w filler-2 -c-1015=w filler-2 -insert into t3 values ('c-1013=w', 'del-me', 'inserted'); -select a,filler from t3 where a='c-1011=w' or a='c-1012=w' or a='c-1013=w' or -a='c-1014=w' or a='c-1015=w'; -a filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1011=w filler-1 -c-1012=w filler-1 -c-1013=w filler-1 -c-1014=w filler-1 -c-1015=w filler-1 -c-1011=w filler-2 -c-1012=w filler-2 -c-1013=w filler-2 -c-1014=w filler-2 -c-1015=w filler-2 -c-1013=w inserted -delete from t3 where b='del-me'; -alter table t3 add primary key(b); -select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or -b IN ('c-1019=w', 'c-1020=w', 'c-1021=w', -'c-1022=w', 'c-1023=w', 'c-1024=w'); -b filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1016=w filler -c-1017=w filler -c-1018=w filler -c-1019=w filler -c-1020=w filler -c-1021=w filler -c-1022=w filler -c-1023=w filler -c-1024=w filler -select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1020=w') or -b IN ('c-1021=w', 'c-1022=w', 'c-1023=w'); -b filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1016=w filler -c-1017=w filler -c-1018=w filler -c-1019=w filler -c-1020=w filler -c-1021=w filler -c-1022=w filler -c-1023=w filler -select b,filler from t3 where (b>='c-1011=w' and b<= 'c-1018=w') or -b IN ('c-1019=w', 'c-1020=w') or -(b>='c-1021=w' and b<= 'c-1023=w'); -b filler -c-1011=w filler -c-1012=w filler -c-1013=w filler -c-1014=w filler -c-1015=w filler -c-1016=w filler -c-1017=w filler -c-1018=w filler -c-1019=w filler -c-1020=w filler -c-1021=w filler -c-1022=w filler -c-1023=w filler -drop table if exists t4; -create table t4 (a varchar(10), b int, c char(10), filler char(200), -key idx1 (a, b, c)); -insert into t4 
(filler) select concat('NULL-', 15-a) from t2 order by a limit 15; -insert into t4 (a,b,c,filler) -select 'b-1',NULL,'c-1', concat('NULL-', 15-a) from t2 order by a limit 15; -insert into t4 (a,b,c,filler) -select 'b-1',NULL,'c-222', concat('NULL-', 15-a) from t2 order by a limit 15; -insert into t4 (a,b,c,filler) -select 'bb-1',NULL,'cc-2', concat('NULL-', 15-a) from t2 order by a limit 15; -insert into t4 (a,b,c,filler) -select 'zz-1',NULL,'cc-2', 'filler-data' from t2 order by a limit 500; -explain -select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1' - or c='no-such-row2'); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t4 range idx1 idx1 29 NULL 16 Using where; Rowid-ordered scan -select * from t4 where a IS NULL and b IS NULL and (c IS NULL or c='no-such-row1' - or c='no-such-row2'); -a b c filler -NULL NULL NULL NULL-15 -NULL NULL NULL NULL-14 -NULL NULL NULL NULL-13 -NULL NULL NULL NULL-12 -NULL NULL NULL NULL-11 -NULL NULL NULL NULL-10 -NULL NULL NULL NULL-9 -NULL NULL NULL NULL-8 -NULL NULL NULL NULL-7 -NULL NULL NULL NULL-6 -NULL NULL NULL NULL-5 -NULL NULL NULL NULL-4 -NULL NULL NULL NULL-3 -NULL NULL NULL NULL-2 -NULL NULL NULL NULL-1 -explain -select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2'); -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t4 range idx1 idx1 29 NULL 32 Using where; Rowid-ordered scan -select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2'); -a b c filler -b-1 NULL c-1 NULL-15 -b-1 NULL c-1 NULL-14 -b-1 NULL c-1 NULL-13 -b-1 NULL c-1 NULL-12 -b-1 NULL c-1 NULL-11 -b-1 NULL c-1 NULL-10 -b-1 NULL c-1 NULL-9 -b-1 NULL c-1 NULL-8 -b-1 NULL c-1 NULL-7 -b-1 NULL c-1 NULL-6 -b-1 NULL c-1 NULL-5 -b-1 NULL c-1 NULL-4 -b-1 NULL c-1 NULL-3 -b-1 NULL c-1 NULL-2 -b-1 NULL c-1 NULL-1 -bb-1 NULL cc-2 NULL-15 -bb-1 NULL cc-2 NULL-14 -bb-1 NULL cc-2 NULL-13 -bb-1 NULL cc-2 NULL-12 -bb-1 NULL cc-2 NULL-11 -bb-1 NULL cc-2 NULL-10 -bb-1 NULL cc-2 NULL-9 -bb-1 NULL cc-2 NULL-8 -bb-1 NULL cc-2 NULL-7 -bb-1 NULL cc-2 NULL-6 -bb-1 NULL cc-2 NULL-5 -bb-1 NULL cc-2 NULL-4 -bb-1 NULL cc-2 NULL-3 -bb-1 NULL cc-2 NULL-2 -bb-1 NULL cc-2 NULL-1 -select * from t4 ignore index(idx1) where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2'); -a b c filler -b-1 NULL c-1 NULL-15 -b-1 NULL c-1 NULL-14 -b-1 NULL c-1 NULL-13 -b-1 NULL c-1 NULL-12 -b-1 NULL c-1 NULL-11 -b-1 NULL c-1 NULL-10 -b-1 NULL c-1 NULL-9 -b-1 NULL c-1 NULL-8 -b-1 NULL c-1 NULL-7 -b-1 NULL c-1 NULL-6 -b-1 NULL c-1 NULL-5 -b-1 NULL c-1 NULL-4 -b-1 NULL c-1 NULL-3 -b-1 NULL c-1 NULL-2 -b-1 NULL c-1 NULL-1 -bb-1 NULL cc-2 NULL-15 -bb-1 NULL cc-2 NULL-14 -bb-1 NULL cc-2 NULL-13 -bb-1 NULL cc-2 NULL-12 -bb-1 NULL cc-2 NULL-11 -bb-1 NULL cc-2 NULL-10 -bb-1 NULL cc-2 NULL-9 -bb-1 NULL cc-2 NULL-8 -bb-1 NULL cc-2 NULL-7 -bb-1 NULL cc-2 NULL-6 -bb-1 NULL cc-2 NULL-5 -bb-1 NULL cc-2 NULL-4 -bb-1 NULL cc-2 NULL-3 -bb-1 NULL cc-2 NULL-2 -bb-1 NULL cc-2 NULL-1 -drop table t1, t2, t3, t4; -create table t1 (a int, b int not null,unique key (a,b),index(b)); -insert ignore into t1 values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(null,7),(9,9),(8,8),(7,7),(null,9),(null,9),(6,6); -Warnings: -Warning 1062 Duplicate entry '6-6' for key 'a' -create table t2 like t1; -insert into t2 select * from t1; -alter table t1 modify b blob not null, add c int not null, drop key a, add unique key (a,b(20),c), drop key b, add key (b(10)); -select * from t1 where a is null; -a b c -NULL 7 0 -NULL 9 0 
-NULL 9 0 -select * from t1 where (a is null or a > 0 and a < 3) and b > 7 limit 3; -a b c -NULL 9 0 -NULL 9 0 -select * from t1 where a is null and b=9 or a is null and b=7 limit 3; -a b c -NULL 7 0 -NULL 9 0 -NULL 9 0 -drop table t1, t2; -set storage_engine= @save_storage_engine; -set @@mrr_buffer_size= @mrr_buffer_size_save; -# -# Crash in quick_range_seq_next() in maria-5.3-dsmrr-cpk with join_cache_level = {8,1} -# -set @save_join_cache_level= @@join_cache_level; -SET SESSION join_cache_level = 8; -CREATE TABLE `t1` ( -`col_int_key` int(11) DEFAULT NULL, -`col_datetime_key` datetime DEFAULT NULL, -`col_varchar_key` varchar(1) DEFAULT NULL, -`col_varchar_nokey` varchar(1) DEFAULT NULL, -KEY `col_varchar_key` (`col_varchar_key`,`col_int_key`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1; -INSERT INTO `t1` VALUES (6,'2005-10-07 00:00:00','e','e'); -INSERT INTO `t1` VALUES (51,'2000-07-15 05:00:34','f','f'); -CREATE TABLE `t2` ( -`col_int_key` int(11) DEFAULT NULL, -`col_datetime_key` datetime DEFAULT NULL, -`col_varchar_key` varchar(1) DEFAULT NULL, -`col_varchar_nokey` varchar(1) DEFAULT NULL, -KEY `col_varchar_key` (`col_varchar_key`,`col_int_key`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1; -INSERT INTO `t2` VALUES (2,'2004-10-11 18:13:16','w','w'); -INSERT INTO `t2` VALUES (2,'1900-01-01 00:00:00','d','d'); -SELECT table2 .`col_datetime_key` -FROM t2 JOIN ( t1 table2 JOIN t2 table3 ON table3 .`col_varchar_key` < table2 .`col_varchar_key` ) ON table3 .`col_varchar_nokey` ; -col_datetime_key -Warnings: -Warning 1292 Truncated incorrect DOUBLE value: 'd' -Warning 1292 Truncated incorrect DOUBLE value: 'd' -Warning 1292 Truncated incorrect DOUBLE value: 'd' -Warning 1292 Truncated incorrect DOUBLE value: 'd' -drop table t1, t2; -set join_cache_level=@save_join_cache_level; -CREATE TABLE t1( -pk int NOT NULL, i int NOT NULL, v varchar(1) NOT NULL, -PRIMARY KEY (pk), INDEX idx (v, i) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES -(1,9,'x'), (2,5,'g'), (3,1,'o'), (4,0,'g'), (5,1,'v'), -(6,190,'m'), (7,6,'x'), (8,3,'c'), (9,4,'z'), (10,3,'i'), -(11,186,'x'), (12,1,'g'), (13,8,'q'), (14,226,'m'), (15,133,'p'); -CREATE TABLE t2( -pk int NOT NULL, i int NOT NULL, v varchar(1) NOT NULL, -PRIMARY KEY (pk), INDEX idx (v, i) -) ENGINE=TokuDB; -INSERT INTO t2 SELECT * FROM t1; -INSERT INTO t2 VALUES (77, 333, 'z'); -CREATE TABLE t3( -pk int NOT NULL, i int NOT NULL, v varchar(1) NOT NULL, -PRIMARY KEY (pk), INDEX idx (v, i) -) ENGINE=TokuDB; -INSERT INTO t3 SELECT * FROM t1; -INSERT INTO t3 VALUES -(88, 442, 'y'), (99, 445, 'w'), (87, 442, 'z'), (98, 445, 'v'), (86, 442, 'x'), -(97, 445, 't'), (85, 442, 'b'), (96, 445, 'l'), (84, 442, 'a'), (95, 445, 'k'); -set @save_join_cache_level=@@join_cache_level; -set join_cache_level=1; -SELECT COUNT(t1.v) FROM t1, t2 IGNORE INDEX (idx), t3 IGNORE INDEX (idx) -WHERE t3.v = t2.v AND t3.i < t2.i AND t3.pk > 0 AND t2.pk > 0; -COUNT(t1.v) -120 -EXPLAIN -SELECT COUNT(t1.v) FROM t1, t2 IGNORE INDEX (idx), t3 IGNORE INDEX (idx) -WHERE t3.v = t2.v AND t3.i < t2.i AND t3.pk > 0 AND t2.pk > 0; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL idx 7 NULL 15 Using index -1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 16 Using where; Using join buffer (flat, BNL join) -1 SIMPLE t3 range PRIMARY PRIMARY 4 NULL 25 Using where; Using join buffer (flat, BNL join) -SELECT COUNT(t1.v) FROM t1, t2, t3 -WHERE t3.v = t2.v AND t3.i < t2.i AND t3.pk > 0 AND t2.pk > 0; -COUNT(t1.v) -120 -EXPLAIN -SELECT COUNT(t1.v) FROM t1, t2, t3 -WHERE t3.v = 
t2.v AND t3.i < t2.i AND t3.pk > 0 AND t2.pk > 0; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL idx 7 NULL 15 Using index -1 SIMPLE t2 index PRIMARY,idx idx 7 NULL 16 Using where; Using index; Using join buffer (flat, BNL join) -1 SIMPLE t3 ref PRIMARY,idx idx 3 test.t2.v 3 Using where; Using index -set join_cache_level=@save_join_cache_level; -DROP TABLE t1,t2,t3; -# -# BUG#671361: virtual int Mrr_ordered_index_reader::refill_buffer(): Assertion `!know_key_tuple_params -# (works only on Maria because we need 1024-byte long key) -# -SET SESSION join_cache_level = 6; -SET SESSION join_buffer_size = 1024; -CREATE TABLE t1 ( -pk int(11) NOT NULL AUTO_INCREMENT, -col_varchar_1024_latin1_key varchar(1024) DEFAULT NULL, -PRIMARY KEY (pk), -KEY col_varchar_1024_latin1_key (col_varchar_1024_latin1_key) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES -(1,'z'), (2,'abcdefjhjkl'), (3,'in'), (4,'abcdefjhjkl'), (6,'abcdefjhjkl'), -(11,'zx'), (12,'abcdefjhjm'), (13,'jn'), (14,'abcdefjhjp'), (16,'abcdefjhjr'); -CREATE TABLE t2 ( -col_varchar_10_latin1 varchar(10) DEFAULT NULL -) ENGINE=TokuDB; -INSERT INTO t2 VALUES ('foo'), ('foo'); -EXPLAIN SELECT count(*) -FROM t1 AS table1, t2 AS table2 -WHERE -table1.col_varchar_1024_latin1_key = table2.col_varchar_10_latin1 AND table1.pk<>0 ; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE table2 ALL NULL NULL NULL NULL 2 Using where -1 SIMPLE table1 ref PRIMARY,col_varchar_1024_latin1_key col_varchar_1024_latin1_key 1027 test.table2.col_varchar_10_latin1 2 Using where; Using index -SELECT count(*) -FROM t1 AS table1, t2 AS table2 -WHERE -table1.col_varchar_1024_latin1_key = table2.col_varchar_10_latin1 AND table1.pk<>0 ; -count(*) -0 -drop table t1, t2; -# -# BUG#693747: Assertion multi_range_read.cc:908: int DsMrr_impl::dsmrr_init( -# -set @_save_join_cache_level= @@join_cache_level; -set @_save_join_buffer_size= @@join_buffer_size; -set join_cache_level=8; -set join_buffer_size=10240; -CREATE TABLE t1 ( -f2 varchar(32) COLLATE latin1_swedish_ci, -f3 int(11), -f4 varchar(1024) COLLATE utf8_bin, -f5 varchar(1024) COLLATE latin1_bin, -KEY (f5) -) ENGINE=TokuDB; -# Fill the table with some data -SELECT alias2.* , alias1.f2 -FROM -t1 AS alias1 -LEFT JOIN t1 AS alias2 ON alias1.f2 = alias2.f5 -WHERE -alias2.f3 < 0; -f2 f3 f4 f5 f2 -set join_cache_level=@_save_join_cache_level; -set join_buffer_size=@_save_join_buffer_size; -set optimizer_switch=@maria_mrr_tmp; -drop table t1; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/suite.opt b/storage/tokudb/mysql-test/tokudb_bugs/suite.opt index 8cfa7cacb1f..ea8042b7740 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_bugs/suite.opt @@ -1 +1 @@ ---tokudb --plugin-load-add=$HA_TOKUDB_SO +--tokudb --plugin-load-add=$HA_TOKUDB_SO --loose-tokudb-check-jemalloc=0 diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/4676.test b/storage/tokudb/mysql-test/tokudb_bugs/t/4676.test deleted file mode 100644 index ac827ddee51..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/4676.test +++ /dev/null @@ -1,15 +0,0 @@ ---source include/have_partition.inc - -let $engine='tokudb'; - ---disable_warnings -DROP TABLE IF EXISTS t; ---enable_warnings - -eval CREATE TABLE t (a INT) ENGINE=$engine PARTITION BY KEY (a) (PARTITION part0, PARTITION part1); -SHOW CREATE TABLE t; - -ALTER TABLE t TRUNCATE PARTITION part0; -ALTER TABLE t TRUNCATE PARTITION part1; - -DROP TABLE IF EXISTS t;
\ No newline at end of file diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/4677.test b/storage/tokudb/mysql-test/tokudb_bugs/t/4677.test deleted file mode 100644 index 2a05e104a9b..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/4677.test +++ /dev/null @@ -1,30 +0,0 @@ -let $engine='tokudb'; - ---disable_warnings -drop table if exists t; ---enable_warnings - -eval create table t (a int primary key) engine=$engine; - -connect (conn1,localhost,root,,); - -connection default; -begin; -insert into t values (1); -insert into t values (3); - -connection conn1; -begin; -insert into t values (2); -insert into t values (4); - -connection default; -commit; - -connection conn1; -commit; - -connection default; -disconnect conn1; - -drop table t;
\ No newline at end of file diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/fractional_time.test b/storage/tokudb/mysql-test/tokudb_bugs/t/fractional_time.test deleted file mode 100644 index c31bf8fc66b..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/fractional_time.test +++ /dev/null @@ -1,36 +0,0 @@ -SET DEFAULT_STORAGE_ENGINE = 'tokudb'; - ---disable_warnings -DROP TABLE IF EXISTS foo; ---enable_warnings -create table foo (a timestamp(6), b timestamp(4), c timestamp(5), primary key (a))engine=tokudb; - -insert into foo values ('2010-12-10 14:12:09.123452', '2010-12-10 14:12:09.123416', '2010-12-10 14:12:09.123451'); -insert into foo values ('2010-12-10 14:12:09.123454', '2010-12-10 14:12:09.123416', '2010-12-10 14:12:09.123451'); -insert into foo values ('2010-12-10 14:12:09.123451', '2010-12-10 14:12:09.123416', '2010-12-10 14:12:09.123451'); -insert into foo values ('2010-12-10 14:12:09.123453', '2010-12-10 14:12:09.123416', '2010-12-10 14:12:09.123451'); - -select * from foo; -explain select * from foo where a > '2010-12-10 14:12:09.123452'; -select * from foo where a > '2010-12-10 14:12:09.123452'; - -alter table foo change a a datetime(6), change b b datetime(4), change c c datetime(5); -show create table foo; -select * from foo; -explain select * from foo where a > '2010-12-10 14:12:09.123452'; -select * from foo where a > '2010-12-10 14:12:09.123452'; -drop table foo; - -create table foo (a time(6), b time(4), c time(5), primary key (a))engine=TokuDB; -insert into foo values ('14:12:09.123452', '14:12:09.123416', '14:12:09.123451'); -insert into foo values ('14:12:09.123454', '14:12:09.123416', '14:12:09.123451'); -insert into foo values ('14:12:09.123451', '14:12:09.123416', '14:12:09.123451'); -insert into foo values ('14:12:09.123453', '14:12:09.123416', '14:12:09.123451'); - -select * from foo; -explain select * from foo where a > '14:12:09.123452'; -select * from foo where a > '14:12:09.123452'; - - -# Final cleanup. -DROP TABLE foo;
\ No newline at end of file diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/lock_uniq_key_empty.test b/storage/tokudb/mysql-test/tokudb_bugs/t/lock_uniq_key_empty.test index 3f8d7113dff..0a001c2736d 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/lock_uniq_key_empty.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/lock_uniq_key_empty.test @@ -7,6 +7,7 @@ enable_warnings; create table t (id int, unique key(id)); connect(c1,localhost,root,,); +set tokudb_prelock_empty=OFF; # disable the tokudb bulk loader begin; insert into t values (1); connect(c2,localhost,root,,); @@ -24,6 +25,7 @@ drop table if exists t; create table t (id int not null, unique key(id)); connect(c1,localhost,root,,); +set tokudb_prelock_empty=OFF; # disable the tokudb bulk loader begin; insert into t values (1); connect(c2,localhost,root,,); diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/mdev5932.test b/storage/tokudb/mysql-test/tokudb_bugs/t/mdev5932.test index 34a1f7eab82..879b57ade63 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/mdev5932.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/mdev5932.test @@ -4,6 +4,7 @@ source include/have_innodb.inc; disable_warnings; drop table if exists t1,t2; drop table if exists t1i,t2i; +drop table if exists tsub,t3; enable_warnings; CREATE TABLE t1 (a CHAR(3), INDEX(a)) ENGINE=TokuDB; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_mixed_replace_into.test b/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_mixed_replace_into.test index 05e6e2fb228..2f53ee5f118 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_mixed_replace_into.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_mixed_replace_into.test @@ -1,6 +1,6 @@ source include/have_tokudb.inc; -source include/master-slave.inc; source include/have_binlog_format_mixed.inc; +source include/master-slave.inc; set default_storage_engine='tokudb'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_row_replace_into.test b/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_row_replace_into.test index 062f11e0ed9..b44877e9fa5 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_row_replace_into.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_row_replace_into.test @@ -1,6 +1,6 @@ source include/have_tokudb.inc; -source include/master-slave.inc; source include/have_binlog_format_row.inc; +source include/master-slave.inc; set default_storage_engine='tokudb'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_stmt_replace_into.test b/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_stmt_replace_into.test index f7e4c7a09e0..d1d0ef11f1b 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_stmt_replace_into.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/rpl_stmt_replace_into.test @@ -1,6 +1,6 @@ source include/have_tokudb.inc; -source include/master-slave.inc; source include/have_binlog_format_statement.inc; +source include/master-slave.inc; set default_storage_engine='tokudb'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/simple_icp.test b/storage/tokudb/mysql-test/tokudb_bugs/t/simple_icp.test deleted file mode 100644 index afc98affa85..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/simple_icp.test +++ /dev/null @@ -1,43 +0,0 @@ -#-- source include/have_tokudb.inc - ---disable_warnings -drop table if exists a,b,c,foo; ---enable_warnings - -create table a (a int auto_increment, primary key (a)) engine=TokuDB; -create table b (a int auto_increment, primary key (a)) engine=TokuDB; -create table c (a int auto_increment, primary key (a)) engine=TokuDB; - -insert into a 
values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); -insert into b values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); -insert into c values (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); - -create table foo (a int, b int, c int, d int, e int, key(a,b,c)) engine=TokuDB; - -insert into foo (a,b,c) select * from a,b,c; - -flush status; -show status like '%Handler_read_next%'; -explain select * from foo where a between 5 and 6 and c=10; -select * from foo where a between 5 and 6 and c=10; -show status like '%Handler_read_next%'; - -flush status; -show status like '%Handler_read_prev%'; -explain select * from foo where a between 5 and 6 and c=10; -select * from foo where a between 5 and 6 and c=10 order by a desc; -show status like '%Handler_read_prev%'; - -flush status; -show status like '%Handler_read_prev%'; -explain select * from foo where a > 19 and c=10; -select * from foo where a > 19 and c=10 order by a desc; -show status like '%Handler_read_prev%'; - -flush status; -show status like '%Handler_read_next%'; -explain select * from foo where a > 19 and c=10; -select * from foo where a > 19 and c=10; -show status like '%Handler_read_next%'; - -drop table foo,a,b,c; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test b/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test new file mode 100644 index 00000000000..877087776b2 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test @@ -0,0 +1,13 @@ +# test DB-718, a crash caused by broken error handling in tokudb's fractal_tree_info information schema +source include/have_tokudb.inc; +set default_storage_engine='tokudb'; +disable_warnings; +drop table if exists t; +enable_warnings; +create table t (id int primary key); +begin; +insert into t values (1),(2); +--error 34542 +select * from information_schema.tokudb_fractal_tree_info; +commit; +drop table t; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb_mrr.test b/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb_mrr.test deleted file mode 100644 index a4859838965..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb_mrr.test +++ /dev/null @@ -1,462 +0,0 @@ -#-- source include/have_tokudb.inc - ---disable_warnings -drop table if exists t1,t2,t3,t4; ---enable_warnings - -set @save_storage_engine= @@storage_engine; -set storage_engine=TokuDB; - -set @innodb_mrr_tmp=@@optimizer_switch; -set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on'; - ---source include/mrr_tests.inc - -set storage_engine= @save_storage_engine; - -# Try big rowid sizes -set @mrr_buffer_size_save= @@mrr_buffer_size; -set mrr_buffer_size=64; - -# By default InnoDB will fill values only for key parts used by the query, -# which will cause DS-MRR to supply an invalid tuple on scan restoration. 
-# Verify that DS-MRR's code extra(HA_EXTRA_RETRIEVE_ALL_COLS) call has effect: -create table t1(a int); -insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); -create table t2(a char(8), b char(8), c char(8), filler char(100), key(a,b,c) ) engine=TokuDB; - -insert into t2 select - concat('a-', 1000 + A.a, '-a'), - concat('b-', 1000 + B.a, '-b'), - concat('c-', 1000 + C.a, '-c'), - 'filler' -from t1 A, t1 B, t1 C; - -explain -select count(length(a) + length(filler)) from t2 where a>='a-1000-a' and a <'a-1001-a'; -select count(length(a) + length(filler)) from t2 where a>='a-1000-a' and a <'a-1001-a'; -drop table t2; - -# Try a very big rowid -create table t2 (a char(100), b char(100), c char(100), d int, - filler char(10), key(d), primary key (a,b,c)) engine= tokudb; -insert into t2 select A.a, B.a, B.a, A.a, 'filler' from t1 A, t1 B; ---replace_column 9 # -explain select * from t2 force index (d) where d < 10; -drop table t2; - -drop table t1; -set @@mrr_buffer_size= @mrr_buffer_size_save; - -# -# BUG#33033 "MySQL/InnoDB crashes with simple select range query" -# -create table t1 (f1 int not null, f2 int not null,f3 int not null, f4 char(1), primary key (f1,f2), key ix(f3))Engine=tokuDB; - ---disable_query_log -let $1=55; - -while ($1) -{ - eval insert into t1(f1,f2,f3,f4) values ($1,$1,$1,'A'); - dec $1; -} ---enable_query_log - -# The following must not crash: -select * from t1 where (f3>=5 and f3<=10) or (f3>=1 and f3<=4); - -drop table t1; - ---echo ---echo BUG#37977: Wrong result returned on GROUP BY + OR + innodb ---echo -CREATE TABLE t1 ( - `pk` int(11) NOT NULL AUTO_INCREMENT, - `int_nokey` int(11) NOT NULL, - `int_key` int(11) NOT NULL, - `date_key` date NOT NULL, - `date_nokey` date NOT NULL, - `time_key` time NOT NULL, - `time_nokey` time NOT NULL, - `datetime_key` datetime NOT NULL, - `datetime_nokey` datetime NOT NULL, - `varchar_key` varchar(5) DEFAULT NULL, - `varchar_nokey` varchar(5) DEFAULT NULL, - PRIMARY KEY (`pk`), - KEY `int_key` (`int_key`), - KEY `date_key` (`date_key`), - KEY `time_key` (`time_key`), - KEY `datetime_key` (`datetime_key`), - KEY `varchar_key` (`varchar_key`) -) ENGINE=TokuDB; - -INSERT INTO t1 VALUES -(1,5,5,'2009-10-16','2009-10-16','09:28:15','09:28:15','2007-09-14 05:34:08','2007-09-14 05:34:08','qk','qk'), -(2,6,6,'0000-00-00','0000-00-00','23:06:39','23:06:39','0000-00-00 00:00:00','0000-00-00 00:00:00','j','j'), -(3,10,10,'2000-12-18','2000-12-18','22:16:19','22:16:19','2006-11-04 15:42:50','2006-11-04 15:42:50','aew','aew'), -(4,0,0,'2001-09-18','2001-09-18','00:00:00','00:00:00','2004-03-23 13:23:35','2004-03-23 13:23:35',NULL,NULL), -(5,6,6,'2007-08-16','2007-08-16','22:13:38','22:13:38','2004-08-19 11:01:28','2004-08-19 11:01:28','qu','qu'); -select pk from t1 WHERE `varchar_key` > 'kr' group by pk; -select pk from t1 WHERE `int_nokey` IS NULL OR `varchar_key` > 'kr' group by pk; -drop table t1; - ---echo # ---echo # BUG#39447: Error with NOT NULL condition and LIMIT 1 ---echo # -CREATE TABLE t1 ( - id int(11) NOT NULL, - parent_id int(11) DEFAULT NULL, - name varchar(10) DEFAULT NULL, - PRIMARY KEY (id), - KEY ind_parent_id (parent_id) -) ENGINE=TokuDB; - -insert into t1 (id, parent_id, name) values -(10,NULL,'A'), -(20,10,'B'), -(30,10,'C'), -(40,NULL,'D'), -(50,40,'E'), -(60,40,'F'), -(70,NULL,'J'); - -SELECT id FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1; ---echo This must show type=index, extra=Using where -explain SELECT * FROM t1 FORCE INDEX (PRIMARY) WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1; 
-SELECT * FROM t1 WHERE parent_id IS NOT NULL ORDER BY id DESC LIMIT 1; -drop table t1; - - --- echo # --- echo # BUG#628785: multi_range_read.cc:430: int DsMrr_impl::dsmrr_init(): Assertion `do_sort_keys || do_rowid_fetch' failed --- echo # -set @save_join_cache_level= @@join_cache_level; -set @save_optimizer_switch= @@optimizer_switch; -SET SESSION join_cache_level=9; -SET SESSION optimizer_switch='mrr_sort_keys=off'; - -CREATE TABLE `t1` ( - `pk` int(11) NOT NULL AUTO_INCREMENT, - `col_int_nokey` int(11) DEFAULT NULL, - `col_int_key` int(11) DEFAULT NULL, - `col_varchar_key` varchar(1) DEFAULT NULL, - `col_varchar_nokey` varchar(1) DEFAULT NULL, - PRIMARY KEY (`pk`), - KEY `col_varchar_key` (`col_varchar_key`,`col_int_key`) -) ENGINE=TokuDB AUTO_INCREMENT=101 DEFAULT CHARSET=latin1; -INSERT INTO `t1` VALUES (1,6,NULL,'r','r'); -INSERT INTO `t1` VALUES (2,8,0,'c','c'); -INSERT INTO `t1` VALUES (97,7,0,'z','z'); -INSERT INTO `t1` VALUES (98,1,1,'j','j'); -INSERT INTO `t1` VALUES (99,7,8,'c','c'); -INSERT INTO `t1` VALUES (100,2,5,'f','f'); -SELECT table1 .`col_varchar_key` -FROM t1 table1 STRAIGHT_JOIN ( t1 table3 JOIN t1 table4 ON table4 .`pk` = table3 .`col_int_nokey` ) ON table4 .`col_varchar_nokey` ; -DROP TABLE t1; -set join_cache_level=@save_join_cache_level; -set optimizer_switch=@save_optimizer_switch; - ---echo # ---echo # BUG#623300: Query with join_cache_level = 6 returns extra rows in maria-5.3-dsmrr-cpk ---echo # -CREATE TABLE t1 ( - pk int(11) NOT NULL AUTO_INCREMENT, - col_int_nokey int(11) DEFAULT NULL, - PRIMARY KEY (pk) -) ENGINE=TokuDB; - -INSERT INTO t1 VALUES (10,7); -INSERT INTO t1 VALUES (11,1); -INSERT INTO t1 VALUES (12,5); -INSERT INTO t1 VALUES (13,3); -INSERT INTO t1 VALUES (14,6); -INSERT INTO t1 VALUES (15,92); -INSERT INTO t1 VALUES (16,7); -INSERT INTO t1 VALUES (17,NULL); -INSERT INTO t1 VALUES (18,3); -INSERT INTO t1 VALUES (19,5); -INSERT INTO t1 VALUES (20,1); -INSERT INTO t1 VALUES (21,2); -INSERT INTO t1 VALUES (22,NULL); -INSERT INTO t1 VALUES (23,1); -INSERT INTO t1 VALUES (24,0); -INSERT INTO t1 VALUES (25,210); -INSERT INTO t1 VALUES (26,8); -INSERT INTO t1 VALUES (27,7); -INSERT INTO t1 VALUES (28,5); -INSERT INTO t1 VALUES (29,NULL); - -CREATE TABLE t2 ( - pk int(11) NOT NULL AUTO_INCREMENT, - col_int_nokey int(11) DEFAULT NULL, - PRIMARY KEY (pk) -) ENGINE=TokuDB; -INSERT INTO t2 VALUES (1,NULL); -INSERT INTO t2 VALUES (2,7); -INSERT INTO t2 VALUES (3,9); -INSERT INTO t2 VALUES (4,7); -INSERT INTO t2 VALUES (5,4); -INSERT INTO t2 VALUES (6,2); -INSERT INTO t2 VALUES (7,6); -INSERT INTO t2 VALUES (8,8); -INSERT INTO t2 VALUES (9,NULL); -INSERT INTO t2 VALUES (10,5); -INSERT INTO t2 VALUES (11,NULL); -INSERT INTO t2 VALUES (12,6); -INSERT INTO t2 VALUES (13,188); -INSERT INTO t2 VALUES (14,2); -INSERT INTO t2 VALUES (15,1); -INSERT INTO t2 VALUES (16,1); -INSERT INTO t2 VALUES (17,0); -INSERT INTO t2 VALUES (18,9); -INSERT INTO t2 VALUES (19,NULL); -INSERT INTO t2 VALUES (20,4); - -set @my_save_join_cache_level= @@join_cache_level; -SET join_cache_level = 0; - ---sorted_result -SELECT table2.col_int_nokey -FROM t1 table1 JOIN t2 table2 ON table2.pk = table1.col_int_nokey -WHERE table1.pk ; - -SET join_cache_level = 6; - ---sorted_result -SELECT table2.col_int_nokey -FROM t1 table1 JOIN t2 table2 ON table2.pk = table1.col_int_nokey -WHERE table1.pk ; - -set join_cache_level= @my_save_join_cache_level; -drop table t1, t2; - ---echo # ---echo # BUG#623315: Query returns less rows when run with join_cache_level=6 on maria-5.3-dsmrr-cpk ---echo # 
-CREATE TABLE t1 ( - pk int(11) NOT NULL AUTO_INCREMENT, - col_int_nokey int(11) DEFAULT NULL, - col_int_key int(11) DEFAULT NULL, - col_varchar_key varchar(1) DEFAULT NULL, - PRIMARY KEY (pk), - KEY col_int_key (col_int_key), - KEY col_varchar_key (col_varchar_key,col_int_key) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES (10,7,8,'v'); -INSERT INTO t1 VALUES (11,1,9,'r'); -INSERT INTO t1 VALUES (12,5,9,'a'); -INSERT INTO t1 VALUES (13,3,186,'m'); -INSERT INTO t1 VALUES (14,6,NULL,'y'); -INSERT INTO t1 VALUES (15,92,2,'j'); -INSERT INTO t1 VALUES (16,7,3,'d'); -INSERT INTO t1 VALUES (17,NULL,0,'z'); -INSERT INTO t1 VALUES (18,3,133,'e'); -INSERT INTO t1 VALUES (19,5,1,'h'); -INSERT INTO t1 VALUES (20,1,8,'b'); -INSERT INTO t1 VALUES (21,2,5,'s'); -INSERT INTO t1 VALUES (22,NULL,5,'e'); -INSERT INTO t1 VALUES (23,1,8,'j'); -INSERT INTO t1 VALUES (24,0,6,'e'); -INSERT INTO t1 VALUES (25,210,51,'f'); -INSERT INTO t1 VALUES (26,8,4,'v'); -INSERT INTO t1 VALUES (27,7,7,'x'); -INSERT INTO t1 VALUES (28,5,6,'m'); -INSERT INTO t1 VALUES (29,NULL,4,'c'); - -set @my_save_join_cache_level= @@join_cache_level; -SET join_cache_level=6; -select count(*) from -(SELECT table2.pk FROM - t1 LEFT JOIN t1 table2 JOIN t1 table3 ON table3.col_varchar_key = table2.col_varchar_key - ON table3.col_int_nokey) foo; - -SET join_cache_level=0; -select count(*) from -(SELECT table2.pk FROM - t1 LEFT JOIN t1 table2 JOIN t1 table3 ON table3.col_varchar_key = table2.col_varchar_key - ON table3.col_int_nokey) foo; - -set join_cache_level= @my_save_join_cache_level; -drop table t1; - - ---echo # ---echo # BUG#671340: Diverging results in with mrr_sort_keys=ON|OFF and join_cache_level=5 ---echo # -CREATE TABLE t1 ( - pk int(11) NOT NULL AUTO_INCREMENT, - col_int_key int(11) NOT NULL, - col_varchar_key varchar(1) NOT NULL, - col_varchar_nokey varchar(1) NOT NULL, - PRIMARY KEY (pk), - KEY col_int_key (col_int_key), - KEY col_varchar_key (col_varchar_key,col_int_key) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES - (10,8,'v','v'), - (11,8,'f','f'), - (12,5,'v','v'), - (13,8,'s','s'), - (14,8,'a','a'), - (15,6,'p','p'), - (16,7,'z','z'), - (17,2,'a','a'), - (18,5,'h','h'), - (19,7,'h','h'), - (20,2,'v','v'), - (21,9,'v','v'), - (22,142,'b','b'), - (23,3,'y','y'), - (24,0,'v','v'), - (25,3,'m','m'), - (26,5,'z','z'), - (27,9,'n','n'), - (28,1,'d','d'), - (29,107,'a','a'); - -CREATE TABLE t2 ( - pk int(11) NOT NULL AUTO_INCREMENT, - col_int_key int(11) NOT NULL, - col_varchar_key varchar(1) NOT NULL, - col_varchar_nokey varchar(1) NOT NULL, - PRIMARY KEY (pk), - KEY col_int_key (col_int_key), - KEY col_varchar_key (col_varchar_key,col_int_key) -) ENGINE=TokuDB; -INSERT INTO t2 VALUES - (1,9,'x','x'), - (2,5,'g','g'), - (3,1,'o','o'), - (4,0,'g','g'), - (5,1,'v','v'), - (6,190,'m','m'), - (7,6,'x','x'), - (8,3,'c','c'), - (9,4,'z','z'), - (10,3,'i','i'), - (11,186,'x','x'), - (12,1,'g','g'), - (13,8,'q','q'), - (14,226,'m','m'), - (15,133,'p','p'), - (16,6,'e','e'), - (17,3,'t','t'), - (18,8,'j','j'), - (19,5,'h','h'), - (20,7,'w','w'); - -SELECT count(*), sum(table1.col_int_key*table2.pk) -FROM - t2 AS table1, t1 AS table2, t2 AS table3 -WHERE - table3.col_varchar_nokey = table2.col_varchar_key AND table3.pk > table2.col_varchar_nokey ; - -set @my_save_join_cache_level= @@join_cache_level; -set @my_save_join_buffer_size= @@join_buffer_size; -set join_cache_level=6; -set join_buffer_size=1536; ---disable_warnings -SELECT count(*), sum(table1.col_int_key*table2.pk) -FROM - t2 AS table1, t1 AS table2, t2 AS table3 -WHERE - 
table3.col_varchar_nokey = table2.col_varchar_key AND table3.pk > table2.col_varchar_nokey ; ---enable_warnings -drop table t1,t2; -set join_cache_level=@my_save_join_cache_level; -set join_buffer_size=@my_save_join_buffer_size; - - ---echo # ---echo # BUG#665669: Result differences on query re-execution ---echo # -create table t1 (pk int primary key, b int, c int default 0, index idx(b)) engine=Tokudb; -insert into t1(pk,b) values (3, 30), (2, 20), (9, 90), (7, 70), (4, 40), (5, 50), (10, 100), (12, 120); -set @bug665669_tmp=@@optimizer_switch; -set optimizer_switch='mrr=off'; -explain select * from t1 where b > 1000; ---echo # The following two must produce indentical results: -select * from t1 where pk < 2 or pk between 3 and 4; -select * from t1 where pk < 2 or pk between 3 and 4; -drop table t1; -set optimizer_switch = @bug665669_tmp; ---echo # ---echo # Bug#43360 - Server crash with a simple multi-table update ---echo # -CREATE TABLE t1 ( - a CHAR(2) NOT NULL PRIMARY KEY, - b VARCHAR(20) NOT NULL, - KEY (b) -) ENGINE=TokuDB; - -CREATE TABLE t2 ( - a CHAR(2) NOT NULL PRIMARY KEY, - b VARCHAR(20) NOT NULL, - KEY (b) -) ENGINE=TokuDB; - -INSERT INTO t1 VALUES -('AB','MySQLAB'), -('JA','Sun Microsystems'), -('MS','Microsoft'), -('IB','IBM- Inc.'), -('GO','Google Inc.'); - -INSERT INTO t2 VALUES -('AB','Sweden'), -('JA','USA'), -('MS','United States of America'), -('IB','North America'), -('GO','South America'); - -UPDATE t1,t2 SET t1.b=UPPER(t1.b) WHERE t1.b LIKE 'United%'; - -SELECT * FROM t1; - -SELECT * FROM t2; - -DROP TABLE t1,t2; - ---echo # ---echo # Testcase backport: Bug#43249 ---echo # (Note: Fixed by patch for BUG#42580) ---echo # -CREATE TABLE t1(c1 TIME NOT NULL, c2 TIME NULL, c3 DATE, PRIMARY KEY(c1), UNIQUE INDEX(c2)) engine=Tokudb; -INSERT INTO t1 VALUES('8:29:45',NULL,'2009-02-01'); -# first time, good results: -SELECT * FROM t1 WHERE c2 <=> NULL ORDER BY c2 LIMIT 2; -# second time, bad results: -SELECT * FROM t1 WHERE c2 <=> NULL ORDER BY c2 LIMIT 2; -drop table `t1`; - ---echo # ---echo # BUG#707925: Wrong result with join_cache_level=6 optimizer_use_mrr = ---echo # force (incremental, BKA join) ---echo # -set @_save_join_cache_level= @@join_cache_level; -set join_cache_level = 6; -CREATE TABLE t1 ( - f1 int(11), f2 int(11), f3 varchar(1), f4 varchar(1), - PRIMARY KEY (f1), - KEY (f3), - KEY (f2) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES ('11','8','f','f'),('12','5','v','v'),('13','8','s','s'), -('14','8','a','a'),('15','6','p','p'),('16','7','z','z'),('17','2','a','a'), -('18','5','h','h'),('19','7','h','h'),('20','2','v','v'),('21','9','v','v'), -('22','142','b','b'),('23','3','y','y'),('24','0','v','v'),('25','3','m','m'), -('26','5','z','z'),('27','9','n','n'),('28','1','d','d'),('29','107','a','a'); - -select count(*) from ( - SELECT alias1.f2 - FROM - t1 AS alias1 JOIN ( - t1 AS alias2 FORCE KEY (f3) JOIN - t1 AS alias3 FORCE KEY (f2) ON alias3.f2 = alias2.f2 AND alias3.f4 = alias2.f3 - ) ON alias3.f1 <= alias2.f1 -) X; - -set join_cache_level=@_save_join_cache_level; -set optimizer_switch= @innodb_mrr_tmp; -drop table t1; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb_mrr2.test b/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb_mrr2.test deleted file mode 100644 index 3be77674f22..00000000000 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb_mrr2.test +++ /dev/null @@ -1,213 +0,0 @@ --- source include/have_maria.inc -# -# MRR/Maria tests. 
-# - ---disable_warnings -drop table if exists t1,t2,t3,t4; ---enable_warnings - -set @maria_mrr_tmp=@@optimizer_switch; -set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on'; - -set @mrr_buffer_size_save= @@mrr_buffer_size; - -set @save_storage_engine= @@storage_engine; -set storage_engine=TokuDB; - ---source include/mrr_tests.inc -set storage_engine= @save_storage_engine; - -set @@mrr_buffer_size= @mrr_buffer_size_save; - ---echo # ---echo # Crash in quick_range_seq_next() in maria-5.3-dsmrr-cpk with join_cache_level = {8,1} ---echo # -set @save_join_cache_level= @@join_cache_level; -SET SESSION join_cache_level = 8; -CREATE TABLE `t1` ( - `col_int_key` int(11) DEFAULT NULL, - `col_datetime_key` datetime DEFAULT NULL, - `col_varchar_key` varchar(1) DEFAULT NULL, - `col_varchar_nokey` varchar(1) DEFAULT NULL, - KEY `col_varchar_key` (`col_varchar_key`,`col_int_key`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1; -INSERT INTO `t1` VALUES (6,'2005-10-07 00:00:00','e','e'); -INSERT INTO `t1` VALUES (51,'2000-07-15 05:00:34','f','f'); -CREATE TABLE `t2` ( - `col_int_key` int(11) DEFAULT NULL, - `col_datetime_key` datetime DEFAULT NULL, - `col_varchar_key` varchar(1) DEFAULT NULL, - `col_varchar_nokey` varchar(1) DEFAULT NULL, - KEY `col_varchar_key` (`col_varchar_key`,`col_int_key`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1; -INSERT INTO `t2` VALUES (2,'2004-10-11 18:13:16','w','w'); -INSERT INTO `t2` VALUES (2,'1900-01-01 00:00:00','d','d'); -SELECT table2 .`col_datetime_key` -FROM t2 JOIN ( t1 table2 JOIN t2 table3 ON table3 .`col_varchar_key` < table2 .`col_varchar_key` ) ON table3 .`col_varchar_nokey` ; - -drop table t1, t2; -set join_cache_level=@save_join_cache_level; - -# -# Bug #665049: index condition pushdown with Maria -# - -CREATE TABLE t1( - pk int NOT NULL, i int NOT NULL, v varchar(1) NOT NULL, - PRIMARY KEY (pk), INDEX idx (v, i) -) ENGINE=TokuDB; -INSERT INTO t1 VALUES - (1,9,'x'), (2,5,'g'), (3,1,'o'), (4,0,'g'), (5,1,'v'), - (6,190,'m'), (7,6,'x'), (8,3,'c'), (9,4,'z'), (10,3,'i'), - (11,186,'x'), (12,1,'g'), (13,8,'q'), (14,226,'m'), (15,133,'p'); - -CREATE TABLE t2( - pk int NOT NULL, i int NOT NULL, v varchar(1) NOT NULL, - PRIMARY KEY (pk), INDEX idx (v, i) -) ENGINE=TokuDB; -INSERT INTO t2 SELECT * FROM t1; -INSERT INTO t2 VALUES (77, 333, 'z'); - -CREATE TABLE t3( - pk int NOT NULL, i int NOT NULL, v varchar(1) NOT NULL, - PRIMARY KEY (pk), INDEX idx (v, i) -) ENGINE=TokuDB; -INSERT INTO t3 SELECT * FROM t1; -INSERT INTO t3 VALUES - (88, 442, 'y'), (99, 445, 'w'), (87, 442, 'z'), (98, 445, 'v'), (86, 442, 'x'), - (97, 445, 't'), (85, 442, 'b'), (96, 445, 'l'), (84, 442, 'a'), (95, 445, 'k'); - -set @save_join_cache_level=@@join_cache_level; -set join_cache_level=1; - -SELECT COUNT(t1.v) FROM t1, t2 IGNORE INDEX (idx), t3 IGNORE INDEX (idx) - WHERE t3.v = t2.v AND t3.i < t2.i AND t3.pk > 0 AND t2.pk > 0; -EXPLAIN -SELECT COUNT(t1.v) FROM t1, t2 IGNORE INDEX (idx), t3 IGNORE INDEX (idx) - WHERE t3.v = t2.v AND t3.i < t2.i AND t3.pk > 0 AND t2.pk > 0; - -SELECT COUNT(t1.v) FROM t1, t2, t3 - WHERE t3.v = t2.v AND t3.i < t2.i AND t3.pk > 0 AND t2.pk > 0; -EXPLAIN - SELECT COUNT(t1.v) FROM t1, t2, t3 - WHERE t3.v = t2.v AND t3.i < t2.i AND t3.pk > 0 AND t2.pk > 0; - -set join_cache_level=@save_join_cache_level; - -DROP TABLE t1,t2,t3; - ---echo # ---echo # BUG#671361: virtual int Mrr_ordered_index_reader::refill_buffer(): Assertion `!know_key_tuple_params ---echo # (works only on Maria because we need 1024-byte long key) ---echo # - -SET 
SESSION join_cache_level = 6; -SET SESSION join_buffer_size = 1024; -CREATE TABLE t1 ( - pk int(11) NOT NULL AUTO_INCREMENT, - col_varchar_1024_latin1_key varchar(1024) DEFAULT NULL, - PRIMARY KEY (pk), - KEY col_varchar_1024_latin1_key (col_varchar_1024_latin1_key) -) ENGINE=TokuDB; - -INSERT INTO t1 VALUES - (1,'z'), (2,'abcdefjhjkl'), (3,'in'), (4,'abcdefjhjkl'), (6,'abcdefjhjkl'), - (11,'zx'), (12,'abcdefjhjm'), (13,'jn'), (14,'abcdefjhjp'), (16,'abcdefjhjr'); - -CREATE TABLE t2 ( - col_varchar_10_latin1 varchar(10) DEFAULT NULL -) ENGINE=TokuDB; -INSERT INTO t2 VALUES ('foo'), ('foo'); - -EXPLAIN SELECT count(*) -FROM t1 AS table1, t2 AS table2 -WHERE - table1.col_varchar_1024_latin1_key = table2.col_varchar_10_latin1 AND table1.pk<>0 ; - -SELECT count(*) -FROM t1 AS table1, t2 AS table2 -WHERE - table1.col_varchar_1024_latin1_key = table2.col_varchar_10_latin1 AND table1.pk<>0 ; - -drop table t1, t2; - ---echo # ---echo # BUG#693747: Assertion multi_range_read.cc:908: int DsMrr_impl::dsmrr_init( ---echo # -set @_save_join_cache_level= @@join_cache_level; -set @_save_join_buffer_size= @@join_buffer_size; - -set join_cache_level=8; -set join_buffer_size=10240; - -CREATE TABLE t1 ( - f2 varchar(32) COLLATE latin1_swedish_ci, - f3 int(11), - f4 varchar(1024) COLLATE utf8_bin, - f5 varchar(1024) COLLATE latin1_bin, - KEY (f5) -) ENGINE=TokuDB; - ---echo # Fill the table with some data ---disable_query_log -INSERT IGNORE INTO t1 VALUES -('cueikuirqr','0','f4-data','hcueikuirqrzflno'),('her','0','f4-data','ehcueikuirqrzfln'), -('YKAOE','0','f4-data','qieehcueikuirqrz'),('youre','0','f4-data','nkqieehcueikuirq'), -('b','0','f4-data','the'),('MGUDG','0','f4-data','m'), -('UXAGU','0','f4-data','HZXVA'),('bwbgsnkqie','0','f4-data','something'), -('s','0','f4-data','slelfhjawbwbgsnk'),('the','0','f4-data','if'), -('TDLKE','0','f4-data','MGWNJ'),('do','0','f4-data','see'), -('why','0','f4-data','mean'),('THKCG','0','f4-data','YFLDY'), -('x','0','f4-data','e'),('yncitaeysb','0','f4-data','tgyncitaeysbgucs'), -('ZEOXX','0','f4-data','jawbwbgsnkqieehc'),('hjawbwbgsn','0','f4-data','fhjawbwbgsnkqiee'), -('all','0','f4-data','sbgucsgqslelfhja'),('the','0','f4-data','would'), -('mtgyncitae','0','f4-data','ISNQQ'),('KNCUI','0','f4-data','want'), -('is','0','f4-data','i'),('out','0','f4-data','jvcmjlmtgyncitae'), -('it','0','f4-data','you'),('LHDIH','0','f4-data','txmtxyjvcmjlmtgy'), -('z','0','f4-data','ntxmtxyjvcmjlmtg'),('vyhnmvgmcn','0','f4-data','AIGQK'), -('ytvyhnmvgm','0','f4-data','z'),('t','0','f4-data','on'), -('xqegbytvyh','0','f4-data','ixqegbytvyhnmvgm'),('WGVRU','0','f4-data','h'), -('b','0','f4-data','z'),('who','0','f4-data','gddixqegbytvy'), -('PMLFL','0','f4-data','vgmcntxmtxyjvcmj'),('back','0','f4-data','n'), -('i','0','f4-data','PZGUB'),('f','0','f4-data','the'), -('PNXVP','0','f4-data','v'),('MAKKL','0','f4-data','CGCWF'), -('RMDAV','0','f4-data','v'),('l','0','f4-data','n'), -('rhnoypgddi','0','f4-data','VIZNE'),('t','0','f4-data','a'), -('like','0','f4-data','JSHPZ'),('pskeywslmk','0','f4-data','q'), -('QZZJJ','0','f4-data','c'),('atlxepskey','0','f4-data','YJRMA'), -('YUVOU','0','f4-data','eywslmkdrhnoypgd'),('some','0','f4-data','r'), -('c','0','f4-data','her'),('o','0','f4-data','EMURT'), -('if','0','f4-data','had'),('when','0','f4-data','CLVWT'), -('blfufrcdjm','0','f4-data','IZCZN'),('vutblfufrc','0','f4-data','how'), -('why','0','f4-data','I'),('IXLYQ','0','f4-data','weuwuvutblfufrcd'), -('here','0','f4-data','m'),('ZOCTJ','0','f4-data','IDSFD'), 
-('kqsweuwuvu','0','f4-data','oh'),('ykqsweuwuv','0','f4-data','zykqsweuwuvutblf'), -('zezykqsweu','0','f4-data','t'),('q','0','f4-data','o'), -('IBKAU','0','f4-data','oh'),('ivjisuzezy','0','f4-data','XHXKE'), -('xsivjisuze','0','f4-data','plxsivjisuzezykq'),('have','0','f4-data','uvplxsivjisuzezy'), -('on','0','f4-data','me'),('ijkfuvplxs','0','f4-data','OGEHV'), -('u','0','f4-data','okay'),('i','0','f4-data','pajzbbojshnijkfu'), -('of','0','f4-data','g'),('for','0','f4-data','Im'), -('or','0','f4-data','ZOJHX'),('n','0','f4-data','you'), -('that','0','f4-data','just'),('bbojshnijk','0','f4-data','JYGSJ'), -('k','0','f4-data','y'),('k','0','f4-data','y'), -('be','0','f4-data','m'),('fnbmxwicrk','0','f4-data','t'), -('yaffpegvav','0','f4-data','have'),('crkdymahya','0','f4-data','QQWQI'), -('t','0','f4-data','hnijkfuvplxsivji'),('dgxpajzbbo','0','f4-data','vavdgxpajzbbojsh'), -('g','0','f4-data','pegvavdgxpajzbbo'),('Im','0','f4-data','ffpegvavdgxpajzb'); ---enable_query_log - - -SELECT alias2.* , alias1.f2 -FROM - t1 AS alias1 - LEFT JOIN t1 AS alias2 ON alias1.f2 = alias2.f5 -WHERE - alias2.f3 < 0; - -set join_cache_level=@_save_join_cache_level; -set join_buffer_size=@_save_join_buffer_size; -set optimizer_switch=@maria_mrr_tmp; - -drop table t1; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test index 4bca18ad109..116d5b5d99b 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test @@ -1,6 +1,7 @@ -- source include/have_innodb.inc -- source include/have_tokudb.inc -- source include/have_debug.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1, t2; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test index 5e4071c7b93..014b371630f 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test @@ -1,6 +1,7 @@ -- source include/have_innodb.inc -- source include/have_tokudb.inc -- source include/have_debug.inc +-- source include/not_embedded.inc --disable_warnings drop table if exists t1, t2; diff --git a/storage/tokudb/mysql-test/tokudb_mariadb/r/autoinc.result b/storage/tokudb/mysql-test/tokudb_mariadb/r/autoinc.result new file mode 100644 index 00000000000..3d424357736 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_mariadb/r/autoinc.result @@ -0,0 +1,36 @@ +create table t1 (a int auto_increment, b bigint(20), primary key (b,a)) engine=tokudb; +start transaction; +insert t1 (b) values (1); +set tokudb_lock_timeout=1; +insert t1 (b) values (1); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +set tokudb_lock_timeout=default; +insert t1 (b) values (1); +insert t1 (b) values (1); +commit; +commit; +select * from t1; +a b +1 1 +2 1 +3 1 +alter table t1 partition by range (b) (partition p0 values less than (9)); +start transaction; +insert t1 (b) values (2); +set tokudb_lock_timeout=1; +insert t1 (b) values (2); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +set tokudb_lock_timeout=default; +insert t1 (b) values (2); +insert t1 (b) values (2); +commit; +commit; +select * from t1; +a b +1 1 +2 1 +3 1 +1 2 +2 2 +3 2 +drop table t1; diff --git a/storage/tokudb/mysql-test/tokudb_mariadb/r/optimize.result b/storage/tokudb/mysql-test/tokudb_mariadb/r/optimize.result index 5151f2bc895..c32a7d61129 100644 --- a/storage/tokudb/mysql-test/tokudb_mariadb/r/optimize.result +++ 
b/storage/tokudb/mysql-test/tokudb_mariadb/r/optimize.result @@ -9,6 +9,7 @@ a 3 set debug_sync='now SIGNAL go'; Table Op Msg_type Msg_text +test.t1 optimize note Table does not support optimize, doing recreate + analyze instead test.t1 optimize status OK drop table t1; set debug_sync='reset'; diff --git a/storage/tokudb/mysql-test/tokudb_mariadb/suite.opt b/storage/tokudb/mysql-test/tokudb_mariadb/suite.opt index 8cfa7cacb1f..ea8042b7740 100644 --- a/storage/tokudb/mysql-test/tokudb_mariadb/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_mariadb/suite.opt @@ -1 +1 @@ ---tokudb --plugin-load-add=$HA_TOKUDB_SO +--tokudb --plugin-load-add=$HA_TOKUDB_SO --loose-tokudb-check-jemalloc=0 diff --git a/storage/tokudb/mysql-test/tokudb_mariadb/t/autoinc.test b/storage/tokudb/mysql-test/tokudb_mariadb/t/autoinc.test new file mode 100644 index 00000000000..99fd5333920 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_mariadb/t/autoinc.test @@ -0,0 +1,51 @@ +# +# MDEV-6605 Multiple Clients Inserting Causing Error: Failed to read auto-increment value from storage engine +# + +--source include/have_partition.inc +create table t1 (a int auto_increment, b bigint(20), primary key (b,a)) engine=tokudb; + +# first, without partitions +start transaction; +insert t1 (b) values (1); + +--connect(con2,localhost,root) +set tokudb_lock_timeout=1; +# auto-inc value is locked +--error ER_LOCK_WAIT_TIMEOUT +insert t1 (b) values (1); +# but no deadlock +set tokudb_lock_timeout=default; +--send insert t1 (b) values (1) +--connection default +insert t1 (b) values (1); +commit; +--connection con2 +--reap +commit; +select * from t1; + +# now with partitions +--connection default +alter table t1 partition by range (b) (partition p0 values less than (9)); +start transaction; +insert t1 (b) values (2); + +--connection con2 +set tokudb_lock_timeout=1; +# auto-inc value is locked +--error ER_LOCK_WAIT_TIMEOUT +insert t1 (b) values (2); +# but no deadlock +set tokudb_lock_timeout=default; +--send insert t1 (b) values (2) +--connection default +insert t1 (b) values (2); +commit; +--connection con2 +--reap +commit; +select * from t1; + +drop table t1; + diff --git a/storage/tokudb/scripts/atc.ontime/atc_ontime_create.sql b/storage/tokudb/scripts/atc.ontime/atc_ontime_create.sql deleted file mode 100644 index 72148f81d6f..00000000000 --- a/storage/tokudb/scripts/atc.ontime/atc_ontime_create.sql +++ /dev/null @@ -1,95 +0,0 @@ -CREATE TABLE `ontime` ( - `Year` year(4) DEFAULT NULL, - `Quarter` tinyint(4) DEFAULT NULL, - `Month` tinyint(4) DEFAULT NULL, - `DayofMonth` tinyint(4) DEFAULT NULL, - `DayOfWeek` tinyint(4) DEFAULT NULL, - `FlightDate` date DEFAULT NULL, - `UniqueCarrier` char(7) DEFAULT NULL, - `AirlineID` int(11) DEFAULT NULL, - `Carrier` char(2) DEFAULT NULL, - `TailNum` varchar(50) DEFAULT NULL, - `FlightNum` varchar(10) DEFAULT NULL, - `Origin` char(5) DEFAULT NULL, - `OriginCityName` varchar(100) DEFAULT NULL, - `OriginState` char(2) DEFAULT NULL, - `OriginStateFips` varchar(10) DEFAULT NULL, - `OriginStateName` varchar(100) DEFAULT NULL, - `OriginWac` int(11) DEFAULT NULL, - `Dest` char(5) DEFAULT NULL, - `DestCityName` varchar(100) DEFAULT NULL, - `DestState` char(2) DEFAULT NULL, - `DestStateFips` varchar(10) DEFAULT NULL, - `DestStateName` varchar(100) DEFAULT NULL, - `DestWac` int(11) DEFAULT NULL, - `CRSDepTime` int(11) DEFAULT NULL, - `DepTime` int(11) DEFAULT NULL, - `DepDelay` int(11) DEFAULT NULL, - `DepDelayMinutes` int(11) DEFAULT NULL, - `DepDel15` int(11) DEFAULT NULL, - `DepartureDelayGroups` 
int(11) DEFAULT NULL, - `DepTimeBlk` varchar(20) DEFAULT NULL, - `TaxiOut` int(11) DEFAULT NULL, - `WheelsOff` int(11) DEFAULT NULL, - `WheelsOn` int(11) DEFAULT NULL, - `TaxiIn` int(11) DEFAULT NULL, - `CRSArrTime` int(11) DEFAULT NULL, - `ArrTime` int(11) DEFAULT NULL, - `ArrDelay` int(11) DEFAULT NULL, - `ArrDelayMinutes` int(11) DEFAULT NULL, - `ArrDel15` int(11) DEFAULT NULL, - `ArrivalDelayGroups` int(11) DEFAULT NULL, - `ArrTimeBlk` varchar(20) DEFAULT NULL, - `Cancelled` tinyint(4) DEFAULT NULL, - `CancellationCode` char(1) DEFAULT NULL, - `Diverted` tinyint(4) DEFAULT NULL, - `CRSElapsedTime` INT(11) DEFAULT NULL, - `ActualElapsedTime` INT(11) DEFAULT NULL, - `AirTime` INT(11) DEFAULT NULL, - `Flights` INT(11) DEFAULT NULL, - `Distance` INT(11) DEFAULT NULL, - `DistanceGroup` TINYINT(4) DEFAULT NULL, - `CarrierDelay` INT(11) DEFAULT NULL, - `WeatherDelay` INT(11) DEFAULT NULL, - `NASDelay` INT(11) DEFAULT NULL, - `SecurityDelay` INT(11) DEFAULT NULL, - `LateAircraftDelay` INT(11) DEFAULT NULL, - `FirstDepTime` varchar(10) DEFAULT NULL, - `TotalAddGTime` varchar(10) DEFAULT NULL, - `LongestAddGTime` varchar(10) DEFAULT NULL, - `DivAirportLandings` varchar(10) DEFAULT NULL, - `DivReachedDest` varchar(10) DEFAULT NULL, - `DivActualElapsedTime` varchar(10) DEFAULT NULL, - `DivArrDelay` varchar(10) DEFAULT NULL, - `DivDistance` varchar(10) DEFAULT NULL, - `Div1Airport` varchar(10) DEFAULT NULL, - `Div1WheelsOn` varchar(10) DEFAULT NULL, - `Div1TotalGTime` varchar(10) DEFAULT NULL, - `Div1LongestGTime` varchar(10) DEFAULT NULL, - `Div1WheelsOff` varchar(10) DEFAULT NULL, - `Div1TailNum` varchar(10) DEFAULT NULL, - `Div2Airport` varchar(10) DEFAULT NULL, - `Div2WheelsOn` varchar(10) DEFAULT NULL, - `Div2TotalGTime` varchar(10) DEFAULT NULL, - `Div2LongestGTime` varchar(10) DEFAULT NULL, - `Div2WheelsOff` varchar(10) DEFAULT NULL, - `Div2TailNum` varchar(10) DEFAULT NULL, - `Div3Airport` varchar(10) DEFAULT NULL, - `Div3WheelsOn` varchar(10) DEFAULT NULL, - `Div3TotalGTime` varchar(10) DEFAULT NULL, - `Div3LongestGTime` varchar(10) DEFAULT NULL, - `Div3WheelsOff` varchar(10) DEFAULT NULL, - `Div3TailNum` varchar(10) DEFAULT NULL, - `Div4Airport` varchar(10) DEFAULT NULL, - `Div4WheelsOn` varchar(10) DEFAULT NULL, - `Div4TotalGTime` varchar(10) DEFAULT NULL, - `Div4LongestGTime` varchar(10) DEFAULT NULL, - `Div4WheelsOff` varchar(10) DEFAULT NULL, - `Div4TailNum` varchar(10) DEFAULT NULL, - `Div5Airport` varchar(10) DEFAULT NULL, - `Div5WheelsOn` varchar(10) DEFAULT NULL, - `Div5TotalGTime` varchar(10) DEFAULT NULL, - `Div5LongestGTime` varchar(10) DEFAULT NULL, - `Div5WheelsOff` varchar(10) DEFAULT NULL, - `Div5TailNum` varchar(10) DEFAULT NULL -) ENGINE=TOKUDB;
\ No newline at end of file diff --git a/storage/tokudb/scripts/atc.ontime/atc_ontime_create_covered.sql b/storage/tokudb/scripts/atc.ontime/atc_ontime_create_covered.sql deleted file mode 100644 index 4ea091409c5..00000000000 --- a/storage/tokudb/scripts/atc.ontime/atc_ontime_create_covered.sql +++ /dev/null @@ -1,103 +0,0 @@ -CREATE TABLE `ontime` ( - `Year` year(4) DEFAULT NULL, - `Quarter` tinyint(4) DEFAULT NULL, - `Month` tinyint(4) DEFAULT NULL, - `DayofMonth` tinyint(4) DEFAULT NULL, - `DayOfWeek` tinyint(4) DEFAULT NULL, - `FlightDate` date DEFAULT NULL, - `UniqueCarrier` char(7) DEFAULT NULL, - `AirlineID` int(11) DEFAULT NULL, - `Carrier` char(2) DEFAULT NULL, - `TailNum` varchar(50) DEFAULT NULL, - `FlightNum` varchar(10) DEFAULT NULL, - `Origin` char(5) DEFAULT NULL, - `OriginCityName` varchar(100) DEFAULT NULL, - `OriginState` char(2) DEFAULT NULL, - `OriginStateFips` varchar(10) DEFAULT NULL, - `OriginStateName` varchar(100) DEFAULT NULL, - `OriginWac` int(11) DEFAULT NULL, - `Dest` char(5) DEFAULT NULL, - `DestCityName` varchar(100) DEFAULT NULL, - `DestState` char(2) DEFAULT NULL, - `DestStateFips` varchar(10) DEFAULT NULL, - `DestStateName` varchar(100) DEFAULT NULL, - `DestWac` int(11) DEFAULT NULL, - `CRSDepTime` int(11) DEFAULT NULL, - `DepTime` int(11) DEFAULT NULL, - `DepDelay` int(11) DEFAULT NULL, - `DepDelayMinutes` int(11) DEFAULT NULL, - `DepDel15` int(11) DEFAULT NULL, - `DepartureDelayGroups` int(11) DEFAULT NULL, - `DepTimeBlk` varchar(20) DEFAULT NULL, - `TaxiOut` int(11) DEFAULT NULL, - `WheelsOff` int(11) DEFAULT NULL, - `WheelsOn` int(11) DEFAULT NULL, - `TaxiIn` int(11) DEFAULT NULL, - `CRSArrTime` int(11) DEFAULT NULL, - `ArrTime` int(11) DEFAULT NULL, - `ArrDelay` int(11) DEFAULT NULL, - `ArrDelayMinutes` int(11) DEFAULT NULL, - `ArrDel15` int(11) DEFAULT NULL, - `ArrivalDelayGroups` int(11) DEFAULT NULL, - `ArrTimeBlk` varchar(20) DEFAULT NULL, - `Cancelled` tinyint(4) DEFAULT NULL, - `CancellationCode` char(1) DEFAULT NULL, - `Diverted` tinyint(4) DEFAULT NULL, - `CRSElapsedTime` INT(11) DEFAULT NULL, - `ActualElapsedTime` INT(11) DEFAULT NULL, - `AirTime` INT(11) DEFAULT NULL, - `Flights` INT(11) DEFAULT NULL, - `Distance` INT(11) DEFAULT NULL, - `DistanceGroup` TINYINT(4) DEFAULT NULL, - `CarrierDelay` INT(11) DEFAULT NULL, - `WeatherDelay` INT(11) DEFAULT NULL, - `NASDelay` INT(11) DEFAULT NULL, - `SecurityDelay` INT(11) DEFAULT NULL, - `LateAircraftDelay` INT(11) DEFAULT NULL, - `FirstDepTime` varchar(10) DEFAULT NULL, - `TotalAddGTime` varchar(10) DEFAULT NULL, - `LongestAddGTime` varchar(10) DEFAULT NULL, - `DivAirportLandings` varchar(10) DEFAULT NULL, - `DivReachedDest` varchar(10) DEFAULT NULL, - `DivActualElapsedTime` varchar(10) DEFAULT NULL, - `DivArrDelay` varchar(10) DEFAULT NULL, - `DivDistance` varchar(10) DEFAULT NULL, - `Div1Airport` varchar(10) DEFAULT NULL, - `Div1WheelsOn` varchar(10) DEFAULT NULL, - `Div1TotalGTime` varchar(10) DEFAULT NULL, - `Div1LongestGTime` varchar(10) DEFAULT NULL, - `Div1WheelsOff` varchar(10) DEFAULT NULL, - `Div1TailNum` varchar(10) DEFAULT NULL, - `Div2Airport` varchar(10) DEFAULT NULL, - `Div2WheelsOn` varchar(10) DEFAULT NULL, - `Div2TotalGTime` varchar(10) DEFAULT NULL, - `Div2LongestGTime` varchar(10) DEFAULT NULL, - `Div2WheelsOff` varchar(10) DEFAULT NULL, - `Div2TailNum` varchar(10) DEFAULT NULL, - `Div3Airport` varchar(10) DEFAULT NULL, - `Div3WheelsOn` varchar(10) DEFAULT NULL, - `Div3TotalGTime` varchar(10) DEFAULT NULL, - `Div3LongestGTime` varchar(10) DEFAULT NULL, - `Div3WheelsOff` 
varchar(10) DEFAULT NULL, - `Div3TailNum` varchar(10) DEFAULT NULL, - `Div4Airport` varchar(10) DEFAULT NULL, - `Div4WheelsOn` varchar(10) DEFAULT NULL, - `Div4TotalGTime` varchar(10) DEFAULT NULL, - `Div4LongestGTime` varchar(10) DEFAULT NULL, - `Div4WheelsOff` varchar(10) DEFAULT NULL, - `Div4TailNum` varchar(10) DEFAULT NULL, - `Div5Airport` varchar(10) DEFAULT NULL, - `Div5WheelsOn` varchar(10) DEFAULT NULL, - `Div5TotalGTime` varchar(10) DEFAULT NULL, - `Div5LongestGTime` varchar(10) DEFAULT NULL, - `Div5WheelsOff` varchar(10) DEFAULT NULL, - `Div5TailNum` varchar(10) DEFAULT NULL, - KEY `Year` (`Year`,`Month`), - KEY `Year_2` (`Year`,`DayOfWeek`), - KEY `Year_3` (`Year`,`DepDelay`,`DayOfWeek`), - KEY `DayOfWeek` (`DayOfWeek`,`Year`,`DepDelay`), - KEY `Year_4` (`Year`,`DepDelay`,`Origin`,`Carrier`), - KEY `DepDelay` (`DepDelay`,`Year`), - KEY `Year_5` (`Year`,`DestCityName`,`OriginCityName`), - KEY `DestCityName` (`DestCityName`,`OriginCityName`,`Year`) -) ENGINE=TOKUDB;
\ No newline at end of file diff --git a/storage/tokudb/scripts/atc.ontime/nodistinct.q8.sql b/storage/tokudb/scripts/atc.ontime/nodistinct.q8.sql deleted file mode 100644 index 547d6fa08e0..00000000000 --- a/storage/tokudb/scripts/atc.ontime/nodistinct.q8.sql +++ /dev/null @@ -1,2 +0,0 @@ -# Q8: As final I tested most popular destination in sense count of direct connected cities for different diapason of years. -SELECT DestCityName, COUNT( OriginCityName) FROM ontime WHERE Year BETWEEN 2006 and 2007 GROUP BY DestCityName ORDER BY 2 DESC LIMIT 10; diff --git a/storage/tokudb/scripts/atc.ontime/q0.result b/storage/tokudb/scripts/atc.ontime/q0.result deleted file mode 100644 index 457e3b525de..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q0.result +++ /dev/null @@ -1,2 +0,0 @@ -avg(c1) -485021.3730 diff --git a/storage/tokudb/scripts/atc.ontime/q0.sql b/storage/tokudb/scripts/atc.ontime/q0.sql deleted file mode 100644 index 258c88927b2..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q0.sql +++ /dev/null @@ -1 +0,0 @@ -select avg(c1) from (select year,month,count(*) as c1 from ontime group by YEAR,month) t; diff --git a/storage/tokudb/scripts/atc.ontime/q1.result b/storage/tokudb/scripts/atc.ontime/q1.result deleted file mode 100644 index e1f9df4d004..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q1.result +++ /dev/null @@ -1,8 +0,0 @@ -DayOfWeek c -5 8732424 -1 8730614 -4 8710843 -3 8685626 -2 8639632 -7 8274367 -6 7514194 diff --git a/storage/tokudb/scripts/atc.ontime/q1.sql b/storage/tokudb/scripts/atc.ontime/q1.sql deleted file mode 100644 index e155da8fc3d..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q1.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DayOfWeek, count(*) AS c FROM ontime WHERE Year BETWEEN 2000 AND 2008 GROUP BY DayOfWeek ORDER BY c DESC; diff --git a/storage/tokudb/scripts/atc.ontime/q2.result b/storage/tokudb/scripts/atc.ontime/q2.result deleted file mode 100644 index 0f88b842f45..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q2.result +++ /dev/null @@ -1,8 +0,0 @@ -DayOfWeek c -5 2088300 -4 1918325 -1 1795120 -7 1782292 -3 1640798 -2 1538291 -6 1391984 diff --git a/storage/tokudb/scripts/atc.ontime/q2.sql b/storage/tokudb/scripts/atc.ontime/q2.sql deleted file mode 100644 index 9d31ecd0ec5..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q2.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DayOfWeek, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year BETWEEN 2000 AND 2008 GROUP BY DayOfWeek ORDER BY c DESC; diff --git a/storage/tokudb/scripts/atc.ontime/q3.result b/storage/tokudb/scripts/atc.ontime/q3.result deleted file mode 100644 index ecacefe261e..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q3.result +++ /dev/null @@ -1,11 +0,0 @@ -Origin c -ORD 846692 -ATL 822955 -DFW 601318 -LAX 391247 -PHX 391191 -LAS 351713 -DEN 345108 -EWR 292916 -DTW 289233 -IAH 283861 diff --git a/storage/tokudb/scripts/atc.ontime/q3.sql b/storage/tokudb/scripts/atc.ontime/q3.sql deleted file mode 100644 index 9daa49cda68..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q3.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT Origin, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year BETWEEN 2000 AND 2008 GROUP BY Origin ORDER BY c DESC LIMIT 10; diff --git a/storage/tokudb/scripts/atc.ontime/q4.result b/storage/tokudb/scripts/atc.ontime/q4.result deleted file mode 100644 index 1de2cf2c5d2..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q4.result +++ /dev/null @@ -1,21 +0,0 @@ -carrier count(*) -WN 296293 -AA 176203 -MQ 145630 -US 135987 -UA 128174 -OO 127426 -EV 
101796 -XE 99915 -DL 93675 -NW 90429 -CO 76662 -YV 67905 -FL 59460 -OH 59034 -B6 50740 -9E 46948 -AS 42830 -F9 23035 -AQ 4299 -HA 2746 diff --git a/storage/tokudb/scripts/atc.ontime/q4.sql b/storage/tokudb/scripts/atc.ontime/q4.sql deleted file mode 100644 index 20447cd17ce..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q4.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT carrier, count(*) FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY carrier ORDER BY 2 DESC; diff --git a/storage/tokudb/scripts/atc.ontime/q5.result b/storage/tokudb/scripts/atc.ontime/q5.result deleted file mode 100644 index a790800d53f..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q5.result +++ /dev/null @@ -1,21 +0,0 @@ -carrier c c2 c3 -EV 101796 286234 355.6391 -US 135987 485447 280.1274 -AA 176203 633857 277.9854 -MQ 145630 540494 269.4387 -AS 42830 160185 267.3783 -B6 50740 191450 265.0300 -UA 128174 490002 261.5785 -WN 296293 1168871 253.4865 -OH 59034 236032 250.1102 -CO 76662 323151 237.2327 -F9 23035 97760 235.6281 -YV 67905 294362 230.6853 -XE 99915 434773 229.8096 -FL 59460 263159 225.9471 -NW 90429 414526 218.1504 -OO 127426 597880 213.1297 -DL 93675 475889 196.8421 -9E 46948 258851 181.3707 -AQ 4299 46360 92.7308 -HA 2746 56175 48.8830 diff --git a/storage/tokudb/scripts/atc.ontime/q5.sql b/storage/tokudb/scripts/atc.ontime/q5.sql deleted file mode 100644 index 59e5c8b95db..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q5.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT t.carrier, c, c2, c*1000/c2 as c3 FROM (SELECT carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year=2007 GROUP BY carrier) t JOIN (SELECT carrier, count(*) AS c2 FROM ontime WHERE Year=2007 GROUP BY carrier) t2 ON (t.Carrier=t2.Carrier) ORDER BY c3 DESC; diff --git a/storage/tokudb/scripts/atc.ontime/q6.result b/storage/tokudb/scripts/atc.ontime/q6.result deleted file mode 100644 index 85a1db42079..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q6.result +++ /dev/null @@ -1,21 +0,0 @@ -carrier c c2 c3 -UA 1096646 490002 2238.0439 -AS 354145 160185 2210.8500 -DL 1050448 475889 2207.3383 -AA 1276555 633857 2013.9479 -US 909154 485447 1872.8182 -WN 2165483 1168871 1852.6279 -NW 725076 414526 1749.1689 -MQ 876799 540494 1622.2178 -CO 522219 323151 1616.0216 -EV 461050 286234 1610.7451 -OH 301681 236032 1278.1360 -FL 298916 263159 1135.8760 -B6 197249 191450 1030.2899 -OO 556247 597880 930.3656 -F9 72150 97760 738.0319 -YV 198787 294362 675.3147 -XE 233488 434773 537.0343 -AQ 17239 46360 371.8507 -9E 89391 258851 345.3377 -HA 15968 56175 284.2546 diff --git a/storage/tokudb/scripts/atc.ontime/q6.sql b/storage/tokudb/scripts/atc.ontime/q6.sql deleted file mode 100644 index 9ac157c9ac0..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q6.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT t.carrier, c, c2, c*1000/c2 as c3 FROM (SELECT carrier, count(*) AS c FROM ontime WHERE DepDelay>10 AND Year BETWEEN 2000 and 2008 GROUP BY carrier) t JOIN (SELECT carrier, count(*) AS c2 FROM ontime WHERE Year=2007 GROUP BY carrier) t2 ON (t.Carrier=t2.Carrier) ORDER BY c3 DESC; diff --git a/storage/tokudb/scripts/atc.ontime/q7.result b/storage/tokudb/scripts/atc.ontime/q7.result deleted file mode 100644 index ffc0236300d..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q7.result +++ /dev/null @@ -1,22 +0,0 @@ -Year c1/c2 -1988 166.1709 -1989 199.5009 -1990 166.4513 -1991 147.2163 -1992 146.7543 -1993 154.2498 -1994 165.6803 -1995 193.9344 -1996 221.8281 -1997 191.6513 -1998 193.5638 -1999 200.8742 -2000 231.7167 -2001 189.0581 -2002 162.3769 -2003 
150.2455 -2004 192.4838 -2005 207.5929 -2006 231.5599 -2007 245.3487 -2008 219.9228 diff --git a/storage/tokudb/scripts/atc.ontime/q7.sql b/storage/tokudb/scripts/atc.ontime/q7.sql deleted file mode 100644 index d0313c06dee..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q7.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT t.Year, c1/c2 FROM (select Year,count(*)*1000 as c1 from ontime WHERE DepDelay>10 GROUP BY Year) t JOIN (select Year,count(*) as c2 from ontime GROUP BY Year) t2 ON (t.Year=t2.Year); diff --git a/storage/tokudb/scripts/atc.ontime/q8.10y.destcityname.result b/storage/tokudb/scripts/atc.ontime/q8.10y.destcityname.result deleted file mode 100644 index e98b44c790b..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.10y.destcityname.result +++ /dev/null @@ -1,11 +0,0 @@ -DestCityName COUNT( DISTINCT OriginCityName) -Atlanta, GA 190 -Chicago, IL 159 -Dallas/Ft.Worth, TX 151 -Cincinnati, OH 139 -Minneapolis, MN 131 -Houston, TX 127 -Detroit, MI 121 -Denver, CO 120 -Salt Lake City, UT 116 -New York, NY 111 diff --git a/storage/tokudb/scripts/atc.ontime/q8.10y.destcityname.sql b/storage/tokudb/scripts/atc.ontime/q8.10y.destcityname.sql deleted file mode 100644 index 6ae5b91a54c..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.10y.destcityname.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DestCityName, COUNT( DISTINCT OriginCityName) FROM ontime FORCE INDEX(DestCityName) WHERE Year BETWEEN 1999 and 2009 GROUP BY DestCityName ORDER BY 2 DESC LIMIT 10; diff --git a/storage/tokudb/scripts/atc.ontime/q8.10y.result b/storage/tokudb/scripts/atc.ontime/q8.10y.result deleted file mode 100644 index e98b44c790b..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.10y.result +++ /dev/null @@ -1,11 +0,0 @@ -DestCityName COUNT( DISTINCT OriginCityName) -Atlanta, GA 190 -Chicago, IL 159 -Dallas/Ft.Worth, TX 151 -Cincinnati, OH 139 -Minneapolis, MN 131 -Houston, TX 127 -Detroit, MI 121 -Denver, CO 120 -Salt Lake City, UT 116 -New York, NY 111 diff --git a/storage/tokudb/scripts/atc.ontime/q8.10y.sql b/storage/tokudb/scripts/atc.ontime/q8.10y.sql deleted file mode 100644 index f70b4f6f220..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.10y.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DestCityName, COUNT( DISTINCT OriginCityName) FROM ontime WHERE Year BETWEEN 1999 and 2009 GROUP BY DestCityName ORDER BY 2 DESC LIMIT 10; diff --git a/storage/tokudb/scripts/atc.ontime/q8.1y.sql b/storage/tokudb/scripts/atc.ontime/q8.1y.sql deleted file mode 100644 index 40b87b644b1..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.1y.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DestCityName, COUNT( DISTINCT OriginCityName) FROM ontime WHERE Year BETWEEN 1999 and 1999 GROUP BY DestCityName ORDER BY 2 DESC LIMIT 10; diff --git a/storage/tokudb/scripts/atc.ontime/q8.1y.year5.sql b/storage/tokudb/scripts/atc.ontime/q8.1y.year5.sql deleted file mode 100644 index ef56cc3786f..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.1y.year5.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DestCityName, COUNT( DISTINCT OriginCityName) FROM ontime USE INDEX(year_5) WHERE Year BETWEEN 1999 and 1999 GROUP BY DestCityName ORDER BY 2 DESC LIMIT 10; diff --git a/storage/tokudb/scripts/atc.ontime/q8.2y.sql b/storage/tokudb/scripts/atc.ontime/q8.2y.sql deleted file mode 100644 index 7ab2d10080f..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.2y.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DestCityName, COUNT( DISTINCT OriginCityName) FROM ontime WHERE Year BETWEEN 1999 and 2000 GROUP BY DestCityName ORDER BY 2 DESC LIMIT 10; diff 
--git a/storage/tokudb/scripts/atc.ontime/q8.3y.sql b/storage/tokudb/scripts/atc.ontime/q8.3y.sql deleted file mode 100644 index 340dc1aea4e..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.3y.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DestCityName, COUNT( DISTINCT OriginCityName) FROM ontime WHERE Year BETWEEN 1999 and 2001 GROUP BY DestCityName ORDER BY 2 DESC LIMIT 10; diff --git a/storage/tokudb/scripts/atc.ontime/q8.4y.sql b/storage/tokudb/scripts/atc.ontime/q8.4y.sql deleted file mode 100644 index c271654ad8e..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.4y.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DestCityName, COUNT( DISTINCT OriginCityName) FROM ontime WHERE Year BETWEEN 1999 and 2002 GROUP BY DestCityName ORDER BY 2 DESC LIMIT 10; diff --git a/storage/tokudb/scripts/atc.ontime/q8.result b/storage/tokudb/scripts/atc.ontime/q8.result deleted file mode 100644 index 35ed3e3f2c2..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.result +++ /dev/null @@ -1,11 +0,0 @@ -DestCityName COUNT( DISTINCT OriginCityName) -Atlanta, GA 183 -Chicago, IL 147 -Dallas/Ft.Worth, TX 133 -Cincinnati, OH 129 -Minneapolis, MN 128 -Houston, TX 114 -Detroit, MI 112 -Denver, CO 111 -Salt Lake City, UT 108 -New York, NY 101 diff --git a/storage/tokudb/scripts/atc.ontime/q8.sql b/storage/tokudb/scripts/atc.ontime/q8.sql deleted file mode 100644 index bd5312a4b76..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DestCityName, COUNT( DISTINCT OriginCityName) FROM ontime WHERE Year BETWEEN 2006 and 2007 GROUP BY DestCityName ORDER BY 2 DESC LIMIT 10; diff --git a/storage/tokudb/scripts/atc.ontime/q8.year5.sql b/storage/tokudb/scripts/atc.ontime/q8.year5.sql deleted file mode 100644 index f6944cfccb9..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q8.year5.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DestCityName, COUNT( DISTINCT OriginCityName) FROM ontime use index(year_5) WHERE Year BETWEEN 2006 and 2007 GROUP BY DestCityName ORDER BY 2 DESC LIMIT 10; diff --git a/storage/tokudb/scripts/atc.ontime/q9.result b/storage/tokudb/scripts/atc.ontime/q9.result deleted file mode 100644 index 4790afb414b..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q9.result +++ /dev/null @@ -1,22 +0,0 @@ -year c1 -1988 5202096 -1989 5041200 -1990 5270893 -1991 5076925 -1992 5092157 -1993 5070501 -1994 5180048 -1995 5327435 -1996 5351983 -1997 5411843 -1998 5384721 -1999 5527884 -2000 5683047 -2001 5967780 -2002 5271359 -2003 6488540 -2004 7129270 -2005 7140596 -2006 7141922 -2007 7455458 -2008 7009728 diff --git a/storage/tokudb/scripts/atc.ontime/q9.sql b/storage/tokudb/scripts/atc.ontime/q9.sql deleted file mode 100644 index 13ac1150449..00000000000 --- a/storage/tokudb/scripts/atc.ontime/q9.sql +++ /dev/null @@ -1 +0,0 @@ -select year,count(*) as c1 from ontime group by YEAR; diff --git a/storage/tokudb/scripts/atc.ontime/qcount.main.sql b/storage/tokudb/scripts/atc.ontime/qcount.main.sql deleted file mode 100644 index 0d27b611f44..00000000000 --- a/storage/tokudb/scripts/atc.ontime/qcount.main.sql +++ /dev/null @@ -1,2 +0,0 @@ -select count(*) from ontime use index(); - diff --git a/storage/tokudb/scripts/atc.ontime/qcount.result b/storage/tokudb/scripts/atc.ontime/qcount.result deleted file mode 100644 index 59130c40662..00000000000 --- a/storage/tokudb/scripts/atc.ontime/qcount.result +++ /dev/null @@ -1,2 +0,0 @@ -count(*) -122225386 diff --git a/storage/tokudb/scripts/atc.ontime/qcount.sql b/storage/tokudb/scripts/atc.ontime/qcount.sql deleted file mode 
100644 index b3428c5781b..00000000000 --- a/storage/tokudb/scripts/atc.ontime/qcount.sql +++ /dev/null @@ -1 +0,0 @@ -select count(*) from ontime; diff --git a/storage/tokudb/scripts/atc.readme b/storage/tokudb/scripts/atc.readme deleted file mode 100644 index 856dc532b13..00000000000 --- a/storage/tokudb/scripts/atc.readme +++ /dev/null @@ -1,19 +0,0 @@ -The script to run the load the air traffic ontime database and run queries against it -is called run.atc.ontime.bas. - -The queries are in the tokudb-engine/scripts/atc.ontime directory. - -The data for the ontime database is in the amazon s3 bucket called tokutek-mysql-data. - -$ s3ls -l tokutek-mysql-data --prefix=atc_On_Time_Performance -2010-06-15T13:07:09.000Z 1073741824 atc_On_Time_Performance.mysql.csv.gz.aa -2010-06-15T13:08:19.000Z 1073741824 atc_On_Time_Performance.mysql.csv.gz.ab -2010-06-15T13:09:38.000Z 1073741824 atc_On_Time_Performance.mysql.csv.gz.ac -2010-06-15T13:10:54.000Z 446709742 atc_On_Time_Performance.mysql.csv.gz.ad -2010-06-15T13:11:26.000Z 503 atc_On_Time_Performance.mysql.csv.gz.xml - -The raw data is also stored in the amazon s3 bucket called tokutek-mysql-data. - -$ s3ls -l tokutek-mysql-data --prefix=atc - - diff --git a/storage/tokudb/scripts/make.mysql.bash b/storage/tokudb/scripts/make.mysql.bash index a0e5db48a47..1bf258c5c3b 100755 --- a/storage/tokudb/scripts/make.mysql.bash +++ b/storage/tokudb/scripts/make.mysql.bash @@ -52,7 +52,7 @@ cmake_build_type=RelWithDebInfo mysql_tree= tokudbengine_tree= ftindex_tree= -jemalloc_version=3.3.0 +jemalloc_version=3.6.0 jemalloc_tree= backup_tree= diff --git a/storage/tokudb/scripts/make.mysql.debug.env.bash b/storage/tokudb/scripts/make.mysql.debug.env.bash index b2bfaef1b71..b7c270cfbd7 100755 --- a/storage/tokudb/scripts/make.mysql.debug.env.bash +++ b/storage/tokudb/scripts/make.mysql.debug.env.bash @@ -57,7 +57,7 @@ git_tag= mysql=mysql-5.5 mysql_tree=mysql-5.5.35 jemalloc=jemalloc -jemalloc_tree=3.3.1 +jemalloc_tree=3.6.0 tokudbengine=tokudb-engine tokudbengine_tree=master ftindex=ft-index diff --git a/storage/tokudb/scripts/nightly.mysql.build.and.test.bash b/storage/tokudb/scripts/nightly.mysql.build.and.test.bash deleted file mode 100755 index b9d1b6aca7f..00000000000 --- a/storage/tokudb/scripts/nightly.mysql.build.and.test.bash +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bash - -function usage() { - echo "run nightly mysql and fractal tree regressions" - echo "uses gearman to schedule jobs onto test machines" -} - -# generate a script that makes a mysql release and run tests on it -function make_and_test_mysql() { - echo $(date) $* >>$nightlytrace 2>&1 - echo "bash -x \$HOME/github/tokudb-engine/scripts/tokutek.make.mysql.bash $* >>$mysqltrace 2>&1; \ - buildexitcode=\$?; \ - echo \$(date) \$HOME/github/tokudb-engine/scripts/tokutek.make.mysql.bash -$* \$buildexitcode >>$mysqltrace; \ - if [ \$buildexitcode -eq 0 ] ; then \$HOME/bin/test.mysql.bash $* >>/tmp/mysql.test.trace 2>&1; fi" \ - | $gearmandir/bin/gearman -b -f mysql-build-$system-$arch -h $gearmandhost -p 4730 >>$nightlytrace 2>&1 -} - -# make a mysql release -function make_mysql() { - echo $(date) $* >>$nightlytrace 2>&1 - echo "\$HOME/github/tokudb-engine/scripts/tokutek.make.mysql.bash $* >>$mysqltrace 2>&1" | $gearmandir/bin/gearman -b -f mysql-build-$system-$arch -h $gearmandhost -p 4730 >>$nightlytrace 2>&1 -} - -# setup the PATH since cron gives us a minimal PATH -PATH=$HOME/bin:$HOME/usr/local/bin:/usr/local/bin:$PATH -source /etc/profile - -github_token= -gearmandhost=localhost 
-gearmandir=/usr/local/gearmand-1.1.6 -system=$(uname -s | tr '[:upper:]' '[:lower:]') -arch=$(uname -m | tr '[:upper:]' '[:lower:]') -now_ts=$(date +%s) -cc=gcc -cxx=g++ - -while [ $# -gt 0 ] ; do - arg=$1; shift - if [[ $arg =~ --(.*)=(.*) ]] ; then - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - usage; exit 1; - fi -done - -nightlytrace=/tmp/$(whoami).nightly.trace -mysqltrace=/tmp/$(whoami).mysql.build.trace.$now_ts - -make_and_test_mysql --mysqlbuild=mysql-5.6.16-tokudb-${now_ts}-debug-e-${system}-${arch} --cc=$cc --cxx=$cxx --github_token=$github_token -make_and_test_mysql --mysqlbuild=mysql-5.6.16-tokudb-${now_ts}-e-${system}-${arch} --cc=$cc --cxx=$cxx --github_token=$github_token --tests=run.mysql.tests.bash:run.sql.bench.bash - -make_and_test_mysql --mysqlbuild=mysql-5.5.36-tokudb-${now_ts}-debug-e-${system}-${arch} --cc=$cc --cxx=$cxx --github_token=$github_token -make_and_test_mysql --mysqlbuild=mysql-5.5.36-tokudb-${now_ts}-e-${system}-${arch} --cc=$cc --cxx=$cxx --github_token=$github_token --tests=run.mysql.tests.bash:run.sql.bench.bash - -make_and_test_mysql --mysqlbuild=mariadb-5.5.35-tokudb-${now_ts}-debug-e-${system}-${arch} --cc=$cc --cxx=$cxx --github_token=$github_token -make_and_test_mysql --mysqlbuild=mariadb-5.5.35-tokudb-${now_ts}-e-${system}-${arch} --cc=$cc --cxx=$cxx --github_token=$github_token --tests=run.mysql.tests.bash:run.sql.bench.bash - -exit 0 diff --git a/storage/tokudb/scripts/nightly.mysql.build.and.test.my.cnf b/storage/tokudb/scripts/nightly.mysql.build.and.test.my.cnf deleted file mode 100644 index a837340924e..00000000000 --- a/storage/tokudb/scripts/nightly.mysql.build.and.test.my.cnf +++ /dev/null @@ -1,7 +0,0 @@ -[mysqld] -tmpdir=/data/mysql/tmp -max_connections=1024 -table_open_cache=1024 -loose_tokudb_cache_size=8G -loose_tokudb_directio=1 - diff --git a/storage/tokudb/scripts/run.atc.ontime.bash b/storage/tokudb/scripts/run.atc.ontime.bash deleted file mode 100755 index dddab8bb1fe..00000000000 --- a/storage/tokudb/scripts/run.atc.ontime.bash +++ /dev/null @@ -1,267 +0,0 @@ -#!/usr/bin/env bash - -function usage() { - echo "run the atc ontime load and run" - echo "--mysqlbuild=$mysqlbuild" - echo "[--commit=$commit]" - echo "[--dbname=$dbname]" - echo "[--load=$load] [--check=$check] [--run=$run]" - echo "[--engine=$engine]" - echo "[--tokudb_load_save_space=$tokudb_load_save_space] [--tokudb_row_format=$tokudb_row_format] [--tokudb_loader_memory_size=$tokudb_loader_memory_size]" -} - -function retry() { - local cmd - local retries - local exitcode - cmd=$* - let retries=0 - while [ $retries -le 10 ] ; do - echo `date` $cmd - bash -c "$cmd" - exitcode=$? - echo `date` $cmd $exitcode $retries - let retries=retries+1 - if [ $exitcode -eq 0 ] ; then break; fi - sleep 10 - done - test $exitcode = 0 -} - -mysqlbuild= -commit=0 -mysqlserver=`hostname` -mysqluser=`whoami` -mysqlsocket=/tmp/mysql.sock -svnserver=https://svn.tokutek.com/tokudb -basedir=$HOME/svn.build -builddir=$basedir/mysql.build -dbname=atc -tblname=ontime -load=1 -check=1 -run=1 -engine=tokudb -tokudb_load_save_space=0 -tokudb_row_format= -tokudb_loader_memory_size= -verbose=0 -svn_server=https://svn.tokutek.com/tokudb -svn_branch=. 
-svn_revision=HEAD - -# parse the command line -while [ $# -gt 0 ] ; do - arg=$1; shift - if [[ $arg =~ --(.*)=(.*) ]] ; then - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - usage; exit 1 - fi -done - -if [[ $mysqlbuild =~ (.*)-(tokudb\-.*)-(linux)-(x86_64) ]] ; then - mysql=${BASH_REMATCH[1]} - tokudb=${BASH_REMATCH[2]} - system=${BASH_REMATCH[3]} - arch=${BASH_REMATCH[4]} -else - exit 1 -fi - -if [ -d /usr/local/mysql/bin ] ; then - export PATH=/usr/local/mysql/bin:$PATH -fi - -if [ -d /usr/local/mysql/lib/mysql ] ; then - export LD_LIBRARY_PATH=/usr/local/mysql/lib/mysql:$PATH -fi - -# goto the base directory -if [ ! -d $basedir ] ; then mkdir $basedir; fi - -pushd $basedir - -# update the build directory -if [ ! -d $builddir ] ; then mkdir $builddir; fi - -date=`date +%Y%m%d` -testresultsdir=$builddir/$date -pushd $builddir - while [ ! -d $date ] ; do - svn mkdir $svn_server/mysql.build/$date -m "" - svn checkout $svn_server/mysql.build/$date - if [ $? -ne 0 ] ; then rm -rf $date; fi - done -popd - -if [ $dbname = "atc" -a $engine != "tokudb" ] ; then dbname="atc_$engine"; fi - -runfile=$testresultsdir/$dbname-$tblname-$mysqlbuild-$mysqlserver -if [ $tokudb_load_save_space != 0 ] ; then runfile=$runfile-compress; fi -if [ "$tokudb_row_format" != "" ] ; then runfile=$runfile-$tokudb_row_format; fi -if [ "$tokudb_loader_memory_size" != "" ] ; then runfile=$runfile-$tokudb_loader_memory_size; fi -rm -rf $runfile - -testresult="PASS" - -# maybe get the atc data from s3 -if [ $testresult = "PASS" ] ; then - f=atc_On_Time_Performance.mysql.csv - if [ ! -f $f ] ; then - f=$f.gz - if [ ! -f $f ] ; then - echo `date` s3get --bundle tokutek-mysql-data $f >>$runfile 2>&1 - s3get --verbose --bundle tokutek-mysql-data $f >>$runfile 2>&1 - exitcode=$? - echo `date` s3get --bundle tokutek-mysql-data $f $exitcode >>$runfile 2>&1 - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi - if [ $testresult = "PASS" ] ; then - echo `date` gunzip $f >>$runfile 2>&1 - gunzip $f - exitcode=$? - echo `date` gunzip $f $exitcode >>$runfile 2>&1 - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi - fi - fi - fi -fi - -# checkout the atc test from svn -atc=atc-$mysqlbuild -if [ $testresult = "PASS" ] ; then - if [ -d atc-$mysqlbuild ] ; then rm -rf atc-$mysqlbuild; fi - - retry svn export -r $svn_revision $svn_server/$svn_branch/mysql/tests/atc atc-$mysqlbuild - exitcode=$? - echo `date` svn export -r $svn_revision $svn_server/$svn_branch/mysql/tests/atc $exitcode >>$runfile 2>&1 - if [ $exitcode != 0 ] ; then - retry svn export -r $svn_revision $svn_server/mysql/tests/atc atc-$mysqlbuild - exitcode=$? - echo `date` svn export -r $svn_revision $svn_server/mysql/tests/atc $exitcode >>$runfile 2>&1 - fi - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi -fi - -# create the database -if [ $load -ne 0 -a $testresult = "PASS" ] ; then - echo `date` drop database if exists $dbname >>$runfile - mysql -S $mysqlsocket -u $mysqluser -e "drop database if exists $dbname" >>$runfile 2>&1 - exitcode=$? - echo `date` drop database if exists $dbname $exitcode>>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi - echo `date` create database $dbname >>$runfile - mysql -S $mysqlsocket -u $mysqluser -e "create database $dbname" >>$runfile 2>&1 - exitcode=$? 
- echo `date` create database $dbname $exitcode >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi -fi - -# create the table -if [ $load -ne 0 -a $testresult = "PASS" ] ; then - echo `date` create table $dbname.$tblname >>$runfile - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "source $atc/atc_ontime_create_covered.sql" >>$runfile 2>&1 - exitcode=$? - echo `date` create table $exitcode >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi -fi - -if [ $load -ne 0 -a $testresult = "PASS" -a "$tokudb_row_format" != "" ] ; then - echo `date` create table $dbname.$tblname >>$runfile - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "alter table $tblname row_format=$tokudb_row_format" >>$runfile 2>&1 - exitcode=$? - echo `date` create table $exitcode >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi -fi - -if [ $load -ne 0 -a $testresult = "PASS" -a $engine != "tokudb" ] ; then - echo `date` alter table $engine >>$runfile - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "alter table $tblname engine=$engine" >>$runfile 2>&1 - exitcode=$? - echo `date` alter table $engine $exitcode >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi -fi - -if [ $testresult = "PASS" ] ; then - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "show create table $tblname" >>$runfile 2>&1 -fi - -if [ $testresult = "PASS" ] ; then - let default_loader_memory_size="$(mysql -S $mysqlsocket -u $mysqluser -e'select @@tokudb_loader_memory_size' --silent --skip-column-names)" - exitcode=$? - echo `date` get tokudb_loader_memory_size $exitcode >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi - if [ "$tokudb_loader_memory_size" = "" ] ; then tokudb_loader_memory_size=$default_loader_memory_size; fi -fi - -# load the data -if [ $load -ne 0 -a $testresult = "PASS" ] ; then - echo `date` load data >>$runfile - start=$(date +%s) - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "set tokudb_loader_memory_size=$tokudb_loader_memory_size;\ - set tokudb_load_save_space=$tokudb_load_save_space; load data infile '$basedir/atc_On_Time_Performance.mysql.csv' into table $tblname" >>$runfile 2>&1 - exitcode=$? - let loadtime=$(date +%s)-$start - echo `date` load data loadtime=$loadtime $exitcode >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi -fi - -# check the tables -if [ $check -ne 0 -a $testresult = "PASS" ] ; then - echo `date` check table $tblname >> $runfile - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "check table $tblname" >>$runfile 2>&1 - exitcode=$? - echo `date` check table $tblname $exitcode >> $runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi -fi - -# run the queries -if [ $run -ne 0 -a $testresult = "PASS" ] ; then - pushd $atc - for qfile in q*.sql ; do - if [[ $qfile =~ q(.*)\.sql ]] ; then - qname=${BASH_REMATCH[1]} - q=`cat $qfile` - qrun=q${qname}.run - - echo `date` explain $qfile >>$runfile - if [ $verbose -ne 0 ] ; then echo explain $q >>$runfile; fi - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "explain $q" >$qrun - exitcode=$? - echo `date` explain $qfile $exitcode >>$runfile - if [ $verbose -ne 0 ] ; then cat $qrun >>$runfile; fi - - echo `date` $qfile >>$runfile - start=$(date +%s) - if [ $verbose -ne 0 ] ; then echo $q >>$runfile; fi - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "$q" >$qrun - exitcode=$? 
- let qtime=$(date +%s)-$start - echo `date` $qfile qtime=$qtime $exitcode >>$runfile - if [ $verbose -ne 0 ] ; then cat $qrun >>$runfile; fi - if [ $exitcode -ne 0 ] ; then - testresult="FAIL" - else - if [ -f q${qname}.result ] ; then - diff $qrun q${qname}.result >>$runfile - exitcode=$? - if [ $exitcode -ne 0 ] ; then - testresult="FAIL" - fi - fi - fi - fi - done - popd -fi - -# commit results -if [ $commit != 0 ] ; then - svn add $runfile - retry svn commit -m \"$testresult $dbname $tblname $mysqlbuild $mysqlserver\" $runfile -fi - -popd - -if [ $testresult = "PASS" ] ; then exitcode=0; else exitcode=1; fi -exit $exitcode diff --git a/storage/tokudb/scripts/run.iibench.bash b/storage/tokudb/scripts/run.iibench.bash deleted file mode 100755 index e6e57e7f512..00000000000 --- a/storage/tokudb/scripts/run.iibench.bash +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env bash - -function usage() { - echo "run iibench" - echo "--mysqlbuild=$mysqlbuild" - echo "[--max_row=$max_rows] [--rows_per_report=$rows_per_report] [--insert_only=$insert_only] [ --check=$check]" - echo "[--commit=$commit]" -} - -function retry() { - local cmd=$* - local retries - local exitcode - let retries=0 - while [ $retries -le 10 ] ; do - echo `date` $cmd - bash -c "$cmd" - exitcode=$? - echo `date` $cmd $exitcode $retries - let retries=retries+1 - if [ $exitcode -eq 0 ] ; then break; fi - sleep 10 - done - test $exitcode = 0 -} - -mysqlbuild= -commit=0 -check=1 -mysqlserver=`hostname` -mysqluser=`whoami` -mysqlsocket=/tmp/mysql.sock -svn_server=https://svn.tokutek.com/tokudb -svn_branch=. -svn_revision=HEAD -basedir=$HOME/svn.build -builddir=$basedir/mysql.build -system=`uname -s | tr [:upper:] [:lower:]` -instancetype= -testinstance= -arch=`uname -m | tr [:upper:] [:lower:]` -tracefile=/tmp/run.iibench.trace -cmd=iibench -dbname=$cmd -engine=tokudb -tblname=testit -max_rows=50000000 -rows_per_report=1000000 -insert_only=1 - -# parse the command line -while [ $# -gt 0 ] ; do - arg=$1; shift - if [ $arg = "--replace_into" ] ; then - cmd=replace_into - elif [ $arg = "--insert_ignore" ] ; then - cmd=insert_ignore - elif [[ $arg =~ --(.*)=(.*) ]] ; then - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - usage; exit 1 - fi -done - -if [[ $mysqlbuild =~ (.*)-(tokudb-.*)-(linux)-(x86_64) ]] ; then - mysql=${BASH_REMATCH[1]} - tokudb=${BASH_REMATCH[2]} - system=${BASH_REMATCH[3]} - arch=${BASH_REMATCH[4]} -else - exit 1 -fi - -# setup the dbname -if [ $dbname = "iibench" ] ; then dbname=${cmd}_${engine}; fi -if [ "$testinstance" != "" ] ; then dbname=${dbname}_${testinstance}; fi - -if [ -d /usr/local/mysql ] ; then - export PATH=/usr/local/mysql/bin:$PATH -fi - -if [ -d /usr/local/mysql/lib/mysql ] ; then - export LD_LIBRARY_PATH=/usr/local/mysql/lib/mysql:$PATH -fi - -# goto the base directory -if [ ! -d $basedir ] ; then mkdir $basedir; fi -pushd $basedir - -# update the build directory -if [ $commit != 0 ] ; then - if [ ! -d $builddir ] ; then mkdir $builddir; fi - - date=`date +%Y%m%d` - testresultsdir=$builddir/$date - pushd $builddir - while [ ! -d $date ] ; do - svn mkdir $svn_server/mysql.build/$date -m "" - svn checkout -q $svn_server/mysql.build/$date - if [ $? -ne 0 ] ; then rm -rf $date; fi - done - popd -else - testresultsdir=$PWD -fi - -# checkout the code -testdir=iibench-$mysqlbuild-$mysqlserver -if [ "$testinstance" != "" ] ; then testdir=$testdir-$testinstance; fi -rm -rf $testdir -retry svn export -q -r $svn_revision $svn_server/$svn_branch/iibench $testdir -exitcode=$? 
-if [ $exitcode != 0 ] ; then - retry svn export -q -r $svn_revision $svn_server/iibench $testdir - exitcode=$? -fi -if [ $exitcode != 0 ] ; then exit 1; fi - -# create the iibench database -mysql -S $mysqlsocket -u root -e "grant all on *.* to '$mysqluser'@'$mysqlserver'" -exitcode=$? -if [ $exitcode != 0 ] ; then exit 1; fi - -mysql -S $mysqlsocket -u $mysqluser -e "drop database if exists $dbname" -exitcode=$? -if [ $exitcode != 0 ] ; then exit 1; fi - -mysql -S $mysqlsocket -u $mysqluser -e "create database $dbname" -exitcode=$? -if [ $exitcode != 0 ] ; then exit 1; fi - -# run -if [ $cmd = "iibench" -a $insert_only != 0 ] ; then - runfile=$testresultsdir/$dbname-insert_only-$max_rows-$mysqlbuild-$mysqlserver -else - runfile=$testresultsdir/$dbname-$max_rows-$mysqlbuild-$mysqlserver -fi -if [ "$instancetype" != "" ] ; then runfile=$runfile-$instancetype; fi -testresult="PASS" - -pushd $testdir/py - echo `date` $cmd start $mysql $svn_branch $svn_revision $max_rows $rows_per_report >>$runfile - runcmd=$cmd.py - args="--db_user=$mysqluser --db_name=$dbname --db_socket=$mysqlsocket --engine=$engine --setup --max_rows=$max_rows --rows_per_report=$rows_per_report --table_name=$tblname" - if [ $cmd = "iibench" -a $insert_only != 0 ] ; then runcmd="$runcmd --insert_only"; fi - if [ $cmd = "replace_into" ] ; then runcmd="replace_into.py --use_replace_into"; fi - if [ $cmd = "insert_ignore" ] ; then runcmd="replace_into.py"; fi - ./$runcmd $args >>$runfile 2>&1 - exitcode=$? - echo `date` $cmd complete $exitcode >>$runfile - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi -popd - -if [ $check != 0 -a $testresult = "PASS" ] ; then - echo `date` check table $tblname >>$runfile - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "check table $tblname" >>$runfile 2>&1 - exitcode=$? - echo `date` check table $tblname $exitcode >>$runfile - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi -fi - -# commit results -if [ $commit != 0 ] ; then - if [ $cmd = "iibench" -a $insert_only != 0 ] ; then cmd="$cmd insert_only"; fi - svn add $runfile - retry svn commit -m \"$testresult $cmd $max_rows $dbname $mysqlbuild $mysqlserver `hostname`\" $runfile -fi - -popd - -if [ $testresult = "PASS" ] ; then exitcode=0; else exitcode=1; fi -exit $exitcode diff --git a/storage/tokudb/scripts/run.mysql.tests.bash b/storage/tokudb/scripts/run.mysql.tests.bash deleted file mode 100755 index ce0fe88d99d..00000000000 --- a/storage/tokudb/scripts/run.mysql.tests.bash +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env bash -# ident 4, no tabs - -function usage() { - echo "run the tokudb mysql tests" - echo "--mysqlbuild=$mysqlbuild" - echo "--commit=$commit" - echo "--tests=$tests --engine=$engine" -} - -function retry() { - local cmd - local retries - local exitcode - cmd=$* - let retries=0 - while [ $retries -le 10 ] ; do - echo `date` $cmd - bash -c "$cmd" - exitcode=$? 
- echo `date` $cmd $exitcode $retries - let retries=retries+1 - if [ $exitcode -eq 0 ] ; then break; fi - sleep 10 - done - test $exitcode = 0 -} - -svnserver=https://svn.tokutek.com/tokudb -basedir=$HOME/svn.build -builddir=$basedir/mysql.build -mysqlbuild= -mysql_basedir=/usr/local/mysql -mysqlserver=`hostname` -commit=0 -tests="*" -engine="" -parallel=auto - -while [ $# -gt 0 ] ; do - arg=$1; shift - if [[ $arg =~ --(.*)=(.*) ]] ; then - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - usage; exit 1 - fi -done - -if [[ $mysqlbuild =~ (.*)-(tokudb\-.*)-(linux)-(x86_64) ]] ; then - mysql=${BASH_REMATCH[1]} - tokudb=${BASH_REMATCH[2]} - system=${BASH_REMATCH[3]} - arch=${BASH_REMATCH[4]} -else - echo $mysqlbuild is not a tokudb build -fi - -if [ -d $mysql_basedir/lib/mysql ] ; then - export LD_LIBRARY_PATH=$mysql_basedir/lib/mysql -fi - -# update the build directory -if [ ! -d $basedir ] ; then mkdir $basedir ; fi - -pushd $basedir -if [ $? != 0 ] ; then exit 1; fi - -if [ ! -d $builddir ] ; then mkdir $builddir; fi - -# make the subversion directory that will hold the test results -date=`date +%Y%m%d` -testresultsdir=$builddir/$date -pushd $builddir -if [ $? = 0 ] ; then - while [ ! -d $date ] ; do - svn mkdir $svnserver/mysql.build/$date -m "" - svn checkout -q $svnserver/mysql.build/$date - if [ $? -ne 0 ] ; then rm -rf $date; fi - done - popd -fi - -# generate a trace file name -if [ -z $engine ] ; then - tracefile=mysql-test-$mysqlbuild-$mysqlserver -else - tracefile=mysql-engine-$engine-$mysqlbuild-$mysqlserver -fi -echo >$testresultsdir/$tracefile - -if [ -z $engine ] ; then - - # run all test suites including main - teststorun_original="main" - teststorun_tokudb="" - pushd $mysql_basedir/mysql-test/suite - if [ $? = 0 ] ; then - for t in $tests ; do - if [[ $t =~ .*\.xfail$ ]] ; then continue; fi - if [ $t = "perfschema_stress" ] ; then continue; fi - if [ $t = "large_tests" ] ; then continue; fi - if [ $t = "pbxt" ] ; then continue; fi - if [ -d $t/t ] ; then - if [[ $t =~ tokudb* ]] ; then - if [ -z $teststorun_tokudb ] ; then teststorun_tokudb="$t" ; else teststorun_tokudb="$teststorun_tokudb,$t"; fi - else - teststorun_original="$teststorun_original,$t"; - fi - fi - done - popd - fi - - # run the tests - pushd $mysql_basedir/mysql-test - if [ $? = 0 ] ; then - if [[ $mysqlbuild =~ tokudb ]] ; then - # run standard tests - if [[ $mysqlbuild =~ 5\\.5 ]] ; then - ./mysql-test-run.pl --suite=$teststorun_original --big-test --max-test-fail=0 --force --retry=1 --testcase-timeout=60 \ - --mysqld=--default-storage-engine=myisam --mysqld=--sql-mode="" \ - --mysqld=--loose-tokudb_debug=3072 \ - --parallel=$parallel >>$testresultsdir/$tracefile 2>&1 - else - ./mysql-test-run.pl --suite=$teststorun_original --big-test --max-test-fail=0 --force --retry=1 --testcase-timeout=60 \ - --mysqld=--loose-tokudb_debug=3072 \ - --parallel=$parallel >>$testresultsdir/$tracefile 2>&1 - fi - - # run tokudb tests - ./mysql-test-run.pl --suite=$teststorun_tokudb --big-test --max-test-fail=0 --force --retry=1 --testcase-timeout=60 \ - --mysqld=--loose-tokudb_debug=3072 \ - --parallel=$parallel >>$testresultsdir/$tracefile 2>&1 - # setup for engines tests - engine="tokudb" - else - ./mysql-test-run.pl --suite=$teststorun_original --big-test --max-test-fail=0 --force --retry=1 --testcase-timeout=60 \ - --parallel=$parallel >>$testresultsdir/$tracefile 2>&1 - fi - popd - fi -fi - -if [ ! -z $engine ] ; then - teststorun="engines/funcs,engines/iuds" - pushd $mysql_basedir/mysql-test - if [ $? 
= 0 ] ; then - if [[ $mysqlbuild =~ 5\\.6 ]] ; then - ./mysql-test-run.pl --suite=$teststorun --force --retry-failure=0 --max-test-fail=0 --nowarnings --testcase-timeout=60 \ - --mysqld=--default-storage-engine=$engine --mysqld=--default-tmp-storage-engine=$engine \ - --parallel=$parallel >>$testresultsdir/$tracefile 2>&1 - else - ./mysql-test-run.pl --suite=$teststorun --force --retry-failure=0 --max-test-fail=0 --nowarnings --testcase-timeout=60 \ - --mysqld=--default-storage-engine=$engine \ - --parallel=$parallel >>$testresultsdir/$tracefile 2>&1 - fi - popd - fi -fi - -# summarize the results -let tests_failed=0 -let tests_passed=0 -while read line ; do - if [[ "$line" =~ (Completed|Timeout):\ Failed\ ([0-9]+)\/([0-9]+) ]] ; then - # failed[2]/total[3] - let tests_failed=tests_failed+${BASH_REMATCH[2]} - let tests_passed=tests_passed+${BASH_REMATCH[3]}-${BASH_REMATCH[2]} - elif [[ "$line" =~ Completed:\ All\ ([0-9]+)\ tests ]] ; then - # passed[1] - let tests_passed=tests_passed+${BASH_REMATCH[1]} - fi -done <$testresultsdir/$tracefile - -# commit the results -if [ $tests_failed = 0 ] ; then - testresult="PASS=$tests_passed" -else - testresult="FAIL=$tests_failed PASS=$tests_passed" -fi -pushd $testresultsdir -if [ $? = 0 ] ; then - if [ $commit != 0 ] ; then - svn add $tracefile - if [[ $tracefile =~ "mysql-test" ]] ; then test=mysql-test; else test=mysql-engine-$engine; fi - retry svn commit -m \"$testresult $test $mysqlbuild $mysqlserver\" $tracefile - fi - popd -fi - -popd # $basedir - -if [[ $testresult =~ "PASS" ]] ; then exitcode=0; else exitcode=1; fi -exit $exitcode - - diff --git a/storage/tokudb/scripts/run.sql.bench.bash b/storage/tokudb/scripts/run.sql.bench.bash deleted file mode 100755 index 2e24c9c5c89..00000000000 --- a/storage/tokudb/scripts/run.sql.bench.bash +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/env bash - -function usage() { - echo "run the sql bench tests" - echo "--mysqlbuild=$mysqlbuild" - echo "--commit=$commit" -} - -function retry() { - local cmd - local retries - local exitcode - cmd=$* - let retries=0 - while [ $retries -le 10 ] ; do - echo `date` $cmd - bash -c "$cmd" - exitcode=$? - echo `date` $cmd $exitcode $retries - let retries=retries+1 - if [ $exitcode -eq 0 ] ; then break; fi - sleep 10 - done - test $exitcode = 0 -} - -svnserver=https://svn.tokutek.com/tokudb -basedir=$HOME/svn.build -builddir=$basedir/mysql.build -mysqlbuild= -mysqlserver=`hostname` -commit=0 -engine=tokudb -socket=/tmp/mysql.sock -system=`uname -s | tr [:upper:] [:lower:]` -arch=`uname -m | tr [:upper:] [:lower:]` - -# parse the command line -while [ $# -gt 0 ] ; do - arg=$1; shift - if [[ $arg =~ --(.*)=(.*) ]] ; then - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - usage; exit 1 - fi -done - -if [[ $mysqlbuild =~ (.*)-(tokudb-.*)-(linux)-(x86_64) ]] ; then - mysql=${BASH_REMATCH[1]} - tokudb=${BASH_REMATCH[2]} - system=${BASH_REMATCH[3]} - arch=${BASH_REMATCH[4]} -else - echo $mysqlbuild is not a tokudb build -fi - -# goto the base directory -if [ ! -d $basedir ] ; then mkdir $basedir; fi -pushd $basedir - -# update the build directory -if [ ! -d $builddir ] ; then mkdir $builddir; fi - -date=`date +%Y%m%d` -testresultsdir=$builddir/$date -pushd $builddir -while [ ! -d $date ] ; do - svn mkdir $svnserver/mysql.build/$date -m "" - svn checkout -q $svnserver/mysql.build/$date - if [ $? 
-ne 0 ] ; then rm -rf $date; fi -done -popd - -# run the tests -pushd /usr/local/mysql/sql-bench - -tracefile=sql-bench-$engine-$mysqlbuild-$mysqlserver.trace -summaryfile=sql-bench-$engine-$mysqlbuild-$mysqlserver.summary - -function mydate() { - date +"%Y%m%d %H:%M:%S" -} - -function runtests() { - testargs=$* - for testname in test-* ; do - chmod +x ./$testname - echo `mydate` $testname $testargs - ./$testname $testargs - exitcode=$? - echo `mydate` - if [ $exitcode != 0 ] ; then - # assume that the test failure due to a crash. allow mysqld to restart. - sleep 60 - fi - done -} - ->$testresultsdir/$tracefile - -runtests --create-options=engine=$engine --socket=$socket --verbose --small-test >>$testresultsdir/$tracefile 2>&1 -runtests --create-options=engine=$engine --socket=$socket --verbose --small-test --fast >>$testresultsdir/$tracefile 2>&1 -runtests --create-options=engine=$engine --socket=$socket --verbose >>$testresultsdir/$tracefile 2>&1 -runtests --create-options=engine=$engine --socket=$socket --verbose --fast >>$testresultsdir/$tracefile 2>&1 -runtests --create-options=engine=$engine --socket=$socket --verbose --fast --fast-insert >>$testresultsdir/$tracefile 2>&1 -runtests --create-options=engine=$engine --socket=$socket --verbose --fast --lock-tables >>$testresultsdir/$tracefile 2>&1 - -popd - -# summarize the results -while read l ; do - if [[ $l =~ ^([0-9]{8}\ [0-9]{2}:[0-9]{2}:[0-9]{2})(.*)$ ]] ; then - t=${BASH_REMATCH[1]} - cmd=${BASH_REMATCH[2]} - if [ -z "$cmd" ] ; then - let duration=$(date -d "$t" +%s)-$(date -d "$tlast" +%s) - printf "%4s %s %8d %s\n" "$status" "$tlast" "$duration" "$cmdlast" - else - cmdlast=$cmd - tlast=$t - status=PASS - fi - else - if [[ $l =~ Got\ error|Died ]] ; then - status=FAIL - fi - fi -done <$testresultsdir/$tracefile >$testresultsdir/$summaryfile - -testresult="" -pf=`mktemp` -egrep "^PASS" $testresultsdir/$summaryfile >$pf 2>&1 -if [ $? -eq 0 ] ; then testresult="PASS=`cat $pf | wc -l` $testresult"; fi -egrep "^FAIL" $testresultsdir/$summaryfile >$pf 2>&1 -if [ $? -eq 0 ] ; then testresult="FAIL=`cat $pf | wc -l` $testresult"; fi -rm $pf -if [ "$testresult" = "" ] ; then testresult="?"; fi - -# commit the results -pushd $testresultsdir -if [ $commit != 0 ] ; then - svn add $tracefile $summaryfile - retry svn commit -m \"$testresult sql-bench $mysqlbuild $mysqlserver\" $tracefile $summaryfile -fi -popd - -popd - -if [[ $testresult =~ "PASS" ]] ; then exitcode=0; else exitcode=1; fi -exit $exitcode - - - diff --git a/storage/tokudb/scripts/run.tpch.bash b/storage/tokudb/scripts/run.tpch.bash deleted file mode 100755 index efc37d25d2e..00000000000 --- a/storage/tokudb/scripts/run.tpch.bash +++ /dev/null @@ -1,342 +0,0 @@ -#!/usr/bin/env bash - -function usage() { - echo "run the TPCH load and compare test" - echo "[--SCALE=$SCALE] [--ENGINE=$ENGINE]" - echo "[--dbgen=$dbgen] [--load=$load] [--check=$check] [--compare=$compare] [--query=$query]" - echo "[--mysqlbuild=$mysqlbuild] [--commit=$commit]" - echo "[--testinstance=$testinstance]" - echo "[--tokudb_load_save_space=$tokudb_load_save_space] [--tokudb_row_format=$tokudb_row_format] [--tokudb_loader_memory_size=$tokudb_loader_memory_size]" -} - -function retry() { - local cmd - local retries - local exitcode - cmd=$* - let retries=0 - while [ $retries -le 10 ] ; do - echo `date` $cmd - bash -c "$cmd" - exitcode=$? 
- echo `date` $cmd $exitcode $retries - let retries=retries+1 - if [ $exitcode -eq 0 ] ; then break; fi - sleep 1 - done - test $exitcode = 0 -} - -SCALE=1 -ENGINE=tokudb -TABLES="part partsupp customer lineitem nation orders region supplier" -dbgen=1 -load=1 -compare=1 -query=0 -check=1 -datadir=/usr/local/mysql/data -mysqlbuild= -commit=0 -mysqlserver=`hostname` -mysqluser=`whoami` -mysqlsocket=/tmp/mysql.sock -basedir=$HOME/svn.build -builddir=$basedir/mysql.build -system=`uname -s | tr [:upper:] [:lower:]` -arch=`uname -m | tr [:upper:] [:lower:]` -testinstance= -tokudb_load_save_space=0 -tokudb_row_format= -tokudb_loader_memory_size= -svn_server=https://svn.tokutek.com/tokudb -svn_branch=. -svn_revision=HEAD - -# parse the command line -while [ $# -gt 0 ] ; do - arg=$1; shift - if [[ $arg =~ --(.*)=(.*) ]] ; then - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - usage; exit 1 - fi -done - -if [[ $mysqlbuild =~ (.*)-(tokudb\-.*)-(linux)-(x86_64) ]] ; then - mysql=${BASH_REMATCH[1]} - tokudb=${BASH_REMATCH[2]} - system=${BASH_REMATCH[3]} - arch=${BASH_REMATCH[4]} -else - exit 1 -fi - -dbname=tpch${SCALE}G_${ENGINE} -if [ "$testinstance" != "" ] ; then dbname=${dbname}_${testinstance}; fi -tpchdir=$basedir/tpch${SCALE}G - -if [ -d /usr/local/mysql ] ; then - export PATH=/usr/local/mysql/bin:$PATH -fi - -if [ -d /usr/local/mysql/lib/mysql ] ; then - export LD_LIBRARY_PATH=/usr/local/mysql/lib/mysql:$PATH -fi - -# goto the base directory -if [ ! -d $basedir ] ; then mkdir $basedir; fi - -pushd $basedir - -# update the build directory -if [ $commit != 0 ] ; then - if [ ! -d $builddir ] ; then mkdir $builddir; fi - - date=`date +%Y%m%d` - testresultsdir=$builddir/$date - pushd $builddir - while [ ! -d $date ] ; do - svn mkdir $svn_server/mysql.build/$date -m "" - svn checkout -q $svn_server/mysql.build/$date - if [ $? -ne 0 ] ; then rm -rf $date; fi - done - popd -else - testresultsdir=$PWD -fi - -runfile=$testresultsdir/$dbname -if [ $tokudb_load_save_space != 0 ] ; then runfile=$runfile-compress; fi -if [ "$tokudb_row_format" != "" ] ; then runfile=$runfile-$tokudb_row_format; fi -if [ "$tokudb_loader_memory_size" != "" ] ; then runfile=$runfile-$tokudb_loader_memory_size; fi -runfile=$runfile-$mysqlbuild-$mysqlserver -rm -rf $runfile - -testresult="PASS" - -# maybe get the tpch data from AWS S3 -if [ $compare != 0 ] && [ ! -d $tpchdir ] ; then - tpchtarball=tpch${SCALE}G_data_dump.tar - if [ ! -f $tpchtarball ] ; then - echo `date` s3get --bundle tokutek-mysql-data $tpchtarball >>$runfile 2>&1 - s3get --verbose --bundle tokutek-mysql-data $tpchtarball >>$runfile 2>&1 - exitcode=$? - echo `date` s3get --bundle tokutek-mysql-data $tpchtarball $exitcode >>$runfile 2>&1 - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi - fi - if [ $testresult = "PASS" ] ; then - tar xf $tpchtarball - exitcode=$? - echo `date` tar xf $tpchtarball $exitcode >>$runfile 2>&1 - if [ $exitcode -ne 0 ] ; then - testresult="FAIL" - else - # gunzip the data files - pushd tpch${SCALE}G/data/tpch${SCALE}G - for f in *.gz ; do - echo `date` gunzip $f >>$runfile 2>&1 - gunzip $f - done - ls -l >>$runfile 2>&1 - popd - fi - fi -fi - -# checkout the tpch scripts -tpchtestdir=tpch-$mysqlbuild -if [ "$testinstance" != "" ] ; then tpchtestdir=${tpchtestdir}_${testinstance}; fi -if [ $testresult = "PASS" ] ; then - rm -rf $tpchtestdir - retry svn export -q -r $svn_revision $svn_server/$svn_branch/tpch $tpchtestdir - exitcode=$? 
- echo `date` export $svn_server/$svn_branch/tpch $exitcode >>$runfile 2>&1 - if [ $exitcode != 0 ] ; then - retry svn export -q -r $svn_revision $svn_server/tpch $tpchtestdir - exitcode=$? - echo `date` export $svn_server/tpch $exitcode >>$runfile 2>&1 - fi - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi -fi - -# generate the tpch data -if [ $dbgen != 0 -a $testresult = "PASS" ] ; then - pushd $tpchtestdir/dbgen - make - exitcode=$? - echo `date` make dbgen $exitcode >>$runfile 2>&1 - if [ $exitcode != 0 ] ; then testresult="FAIL"; fi - popd - if [ $testresult = "PASS" ] ; then - dbgen=0 - mkdir -p tpch${SCALE}G/data/tpch${SCALE}G - pushd tpch${SCALE}G/data/tpch${SCALE}G - if [ ! -f lineitem.tbl ] ; then dbgen=1; fi - popd - if [ $dbgen != 0 ] ; then - pushd $tpchtestdir/dbgen - ./dbgen -fF -s $SCALE - exitcode=$? - echo `date` dbgen -fF -s $SCALE $exitcode >>$runfile 2>&1 - if [ $exitcode != 0 ] ; then - testresult="FAIL" - else - ls -l *.tbl >>$runfile - chmod 0644 *.tbl - ls -l *.tbl >>$runfile - mv *.tbl $basedir/tpch${SCALE}G/data/tpch${SCALE}G - fi - popd - fi - fi -fi - -# create the tpch database -if [ $load != 0 -a $testresult = "PASS" ] ; then - echo `date` drop database if exists $dbname >>$runfile - mysql -S $mysqlsocket -u $mysqluser -e "drop database if exists $dbname" >>$runfile 2>&1 - exitcode=$? - echo `date` drop database if exists $dbname $exitcode>>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi - echo `date` create database $dbname >>$runfile - mysql -S $mysqlsocket -u $mysqluser -e "create database $dbname" >>$runfile 2>&1 - exitcode=$? - echo `date` create database $dbname $exitcode >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi -fi - -# create the tpch tables -if [ $load != 0 -a $testresult = "PASS" ] ; then - echo `date` create table >>$runfile - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "source $basedir/tpch-$mysqlbuild/scripts/${ENGINE}_tpch_create_table.sql" >>$runfile 2>&1 - exitcode=$? - echo `date` create table $exitcode >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi -fi - -# get the current loader memory size -if [ $load != 0 -a $testresult = "PASS" ] ; then - let default_loader_memory_size="$(mysql -S $mysqlsocket -u $mysqluser -e'select @@tokudb_loader_memory_size' --silent --skip-column-names)" - exitcode=$? - echo `date` get tokudb_loader_memory_size $exitcode >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi - if [ "$tokudb_loader_memory_size" = "" ] ; then tokudb_loader_memory_size=$default_loader_memory_size; fi -fi - -# load the data -if [ $load != 0 -a $testresult = "PASS" ] ; then - for tblname in $TABLES ; do - echo `date` load table $tblname >>$runfile - ls -l $tpchdir/data/tpch${SCALE}G/$tblname.tbl >>$runfile - start=$(date +%s) - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "set tokudb_loader_memory_size=$tokudb_loader_memory_size;\ - set tokudb_load_save_space=$tokudb_load_save_space;\ - load data infile '$tpchdir/data/tpch${SCALE}G/$tblname.tbl' into table $tblname fields terminated by '|';" >>$runfile 2>&1 - exitcode=$? 
- let loadtime=$(date +%s)-$start - echo `date` load table $tblname $exitcode loadtime=$loadtime>>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi - done -fi - -if [ $check != 0 -a $testresult = "PASS" ] ; then - for tblname in lineitem ; do - echo `date` add clustering index $tblname >>$runfile - start=$(date +%s) - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "set tokudb_loader_memory_size=$tokudb_loader_memory_size;\ - set tokudb_load_save_space=$tokudb_load_save_space;\ - set tokudb_create_index_online=0;\ - create clustering index i_shipdate on lineitem (l_shipdate);" >>$runfile 2>&1 - exitcode=$? - let loadtime=$(date +%s)-$start - echo `date` add clustering index $tblname $exitcode loadtime=$loadtime >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi - done -fi - -# check the tables -if [ $check != 0 -a $testresult = "PASS" ] ; then - for tblname in $TABLES ; do - echo `date` check table $tblname >>$runfile - start=$(date +%s) - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "check table $tblname" >>$runfile 2>&1 - exitcode=$? - let checktime=$(date +%s)-$start - echo `date` check table $tblname $exitcode checktime=$checktime >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi - done -fi - -if [ $check != 0 -a $testresult = "PASS" ] ; then - for tblname in lineitem ; do - echo `date` drop index $tblname >>$runfile - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "drop index i_shipdate on lineitem" >>$runfile 2>&1 - exitcode=$? - echo `date` drop index $tblname $exitcode >>$runfile - if [ $exitcode -ne 0 ] ; then testresult="FAIL"; fi - done -fi - -# compare the data -if [ $compare != 0 -a $testresult = "PASS" ] ; then - if [ -d $tpchdir/dump/tpch${SCALE}G ] ; then - mysql -S $mysqlsocket -u $mysqluser -D $dbname -e "source $basedir/tpch-$mysqlbuild/scripts/dumptpch.sql" >>$runfile 2>&1 - exitcode=$? - echo `date` dump data $exitcode >>$runfile - if [ $exitcode -ne 0 ] ; then - testresult="FAIL" - else - # force the permissions on the dumpdir open - pushd $datadir/$dbname - exitcode=$? - if [ $exitcode != 0 ] ; then - sudo chmod g+rwx $datadir - sudo chmod g+rwx $datadir/$dbname - pushd $datadir/$dbname - exitcode=$? - fi - if [ $exitcode = 0 ] ; then - popd - fi - - # compare the dump files - dumpdir=$datadir/$dbname - comparedir=$tpchdir/dump/tpch${SCALE}G - for f in $dumpdir/dump* ; do - d=`basename $f` - if [ ! -f $comparedir/$d ] && [ -f $comparedir/$d.gz ] ; then - pushd $comparedir; gunzip $d.gz; popd - fi - if [ -f $comparedir/$d ] ; then - diff -q $dumpdir/$d $comparedir/$d - if [ $? 
= 0 ] ; then - result="PASS" - else - result="FAIL" - testresult="FAIL" - fi - else - result="MISSING" - testresult="FAIL" - fi - echo `date` $d $result >>$runfile - done - if [ $testresult = "PASS" ] ; then - # remove the dump files - rm -f $datadir/$dbname/dump* - fi - fi - fi -fi - -# commit results -if [ $commit != 0 ] ; then - svn add $runfile - retry svn commit -m \"$testresult $dbname $mysqlbuild $mysqlserver\" $runfile -fi - -popd - -if [ $testresult = "PASS" ] ; then exitcode=0; else exitcode=1; fi -exit $exitcode diff --git a/storage/tokudb/scripts/setup.mysql.bash b/storage/tokudb/scripts/setup.mysql.bash deleted file mode 100755 index 6ae604e34c1..00000000000 --- a/storage/tokudb/scripts/setup.mysql.bash +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/env bash - -function usage() { - echo "setup.mysql.bash" - echo "--mysqlbuild=$mysqlbuild --shutdown=$shutdown --install=$install --startup=$startup" -} - -mysqlbuild= -shutdown=1 -install=1 -startup=1 -s3bucket=tokutek-mysql-build -sleeptime=60 -builtins="mysqlbuild shutdown install startup s3bucket sleeptime" -mysqld_args="--user=mysql --core-file --core-file-size=unlimited" -sudo=/usr/bin/sudo -defaultsfile="" -if [ -f /etc/$(whoami).my.cnf ] ; then - defaultsfile=/etc/$(whoami).my.cnf -fi - -function is_builtin() { - local v=$1; shift - local x - for x in $* ; do - if [ $v = $x ] ; then echo 1; return; fi - done - echo 0 -} - -while [ $# -gt 0 ] ; do - arg=$1; shift - if [ $arg = "--help" -o $arg = "-h" -o $arg = "-?" ] ; then - usage; exit 1 - elif [[ $arg =~ --(.*)=(.*) ]] ; then - r=$(is_builtin ${BASH_REMATCH[1]} $builtins) - if [ $r = 1 ] ; then - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - mysqld_args="$mysqld_args $arg" - fi - else - mysqld_args="$mysqld_args $arg" - fi -done - -if [ -d /data/mysql/tmp ] ; then mysqld_args="$mysqld_args --tmpdir=/data/mysql/tmp"; fi - -if [[ $mysqlbuild =~ (.*)-(tokudb\-.*)-(linux)-(x86_64) ]] ; then - mysql=${BASH_REMATCH[1]} - tokudb=${BASH_REMATCH[2]} - system=${BASH_REMATCH[3]} - arch=${BASH_REMATCH[4]} -else - echo $mysqlbuild is not a tokudb build -fi - -if [ ! -d downloads ] ; then mkdir downloads; fi - -pushd downloads -if [ $? != 0 ] ; then exit 1; fi - -basedir=$PWD - -mysqltarball=$mysqlbuild.tar.gz - -if [ -f $mysqlbuild.tar.gz ] ; then - compression=-z - mysqltarball=$mysqlbuild.tar.gz -elif [ -f $mysqlbuild.tar.bz2 ] ; then - compression=-j - mysqltarball=$mysqlbuild.tar.bz2 -fi - -# get the release -if [ ! -f $mysqltarball ] ; then - s3get $s3bucket $mysqltarball $mysqltarball - if [ $? -ne 0 ] ; then exit 1; fi -fi -if [ ! -f $mysqltarball.md5 ] ; then - s3get $s3bucket $mysqltarball.md5 $mysqltarball.md5 - if [ $? -ne 0 ] ; then exit 1; fi -fi - -# check the md5 sum -md5sum --check $mysqltarball.md5 -if [ $? -ne 0 ] ; then - # support jacksum md5 output which is almost the same as md5sum - diff -b <(cat $mysqltarball.md5) <(md5sum $mysqltarball) - if [ $? -ne 0 ] ; then exit 1; fi -fi - -# set ldpath -ldpath="" -if [ -d /usr/local/gcc-4.7/lib64 ] ; then - echo skip ldpath="export LD_LIBRARY_PATH=/usr/local/gcc-4.7/lib64:\$LD_LIBRARY_PATH;" -fi - -# shutdown mysql -if [ $shutdown -ne 0 ] ; then - if [ -x /etc/init.d/mysql ] ; then - $sudo setsid /etc/init.d/mysql stop - else - /usr/local/mysql/bin/mysqladmin shutdown - fi - sleep $sleeptime -fi - -pushd /usr/local -if [ $? = 0 ] ; then - rm mysql - popd -fi - -# install the release -pushd /usr/local/mysqls 2>/dev/null -if [ $? = 0 ] ; then - mysqldir=mysqls/$mysqlbuild -else - pushd /usr/local - if [ $? 
-ne 0 ] ; then exit 1; fi - mysqldir=$mysqlbuild -fi - -if [ ! -d $mysqlbuild ] || [ $install -ne 0 ] ; then - rm mysql - if [ -d $mysqlbuild ] ; then $sudo rm -rf $mysqlbuild; fi - - tar -x $compression -f $basedir/$mysqltarball - if [ $? -ne 0 ] ; then exit 1; fi - ln -s $mysqldir /usr/local/mysql - if [ $? -ne 0 ] ; then exit 1; fi - ln -s $mysqldir /usr/local/$mysqlbuild - if [ $? -ne 0 ] ; then exit 1; fi - - installdb=$mysqlbuild/bin/mysql_install_db - if [ ! -f $installdb ] ; then - installdb=$mysqlbuild/scripts/mysql_install_db - fi - - $sudo chown -R mysql $mysqlbuild/data - $sudo chgrp -R mysql $mysqlbuild/data - - # 5.6 debug build needs this - if [ ! -f $mysqlbuild/bin/mysqld ] && [ -f $mysqlbuild/bin/mysqld-debug ] ; then - ln $mysqlbuild/bin/mysqld-debug $mysqlbuild/bin/mysqld - fi - - if [ -z "$defaultsfile" ] ; then - default_arg="" - else - default_arg="--defaults-file=$defaultsfile" - fi - $sudo bash -c "$ldpath $installdb $default_arg --user=mysql --basedir=$PWD/$mysqlbuild --datadir=$PWD/$mysqlbuild/data" - if [ $? -ne 0 ] ; then exit 1; fi -else - # create link - rm /usr/local/mysql - ln -s $mysqldir /usr/local/mysql - if [ $? -ne 0 ] ; then exit 1; fi - rm /usr/local/$mysqlbuild - ln -s $mysqldir /usr/local/$mysqlbuild - if [ $? -ne 0 ] ; then exit 1; fi -fi -popd - -# start mysql -if [ $startup -ne 0 ] ; then - ulimit -a - # increase the open file limit - ulimit -n 10240 - exitcode=$? - echo ulimit -n 10240 exitcode $exitcode - - if [ -x /etc/init.d/mysql ] ; then - $sudo setsid /etc/init.d/mysql start - else - if [ -z "$defaultsfile" ] ; then - default_arg="" - else - default_arg="--defaults-file=$defaultsfile" - fi - $sudo -b bash -c "$ldpath /usr/local/mysql/bin/mysqld_safe $default_arg $mysqld_args" >/dev/null 2>&1 & - fi - sleep $sleeptime - - # add mysql grants - /usr/local/mysql/bin/mysql -u root -e "grant all on *.* to tokubuild@localhost" - /usr/local/mysql/bin/mysql -u root -e "grant all on *.* to 'ec2-user'@localhost" -fi - -popd - -exit 0 diff --git a/storage/tokudb/scripts/test.mysql.bash b/storage/tokudb/scripts/test.mysql.bash deleted file mode 100755 index 5c389e05f3f..00000000000 --- a/storage/tokudb/scripts/test.mysql.bash +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash - -function usage() { - echo "run the mysql tests" - echo "--mysqlbuild=$mysqlbuild --tests=$tests" -} - -function expand() { - echo $* | tr ,: " " -} - -mysqlbuild= -mysqlsocket=/tmp/mysql.sock -gearmandir=/usr/local/gearmand-1.1.6 -gearmandhost=localhost -system=$(uname -s | tr [:upper:] [:lower:]) -arch=$(uname -m | tr [:upper:] [:lower:]) -tests=run.mysql.tests.bash - -while [ $# -gt 0 ] ; do - arg=$1; shift - if [[ $arg =~ --(.*)=(.*) ]] ; then - eval ${BASH_REMATCH[1]}=${BASH_REMATCH[2]} - else - usage; exit 1; - fi -done - -if [ -z $mysqlbuild ] ; then exit 1; fi - -for testname in $(expand $tests) ; do - if [ $testname = "run.mysql.tests.bash" ] ; then - run_mysqld=0 - else - run_mysqld=1 - fi - if [ $run_mysqld = 0 ] ; then - setupextra="--shutdown=1 --install=1 --startup=0" - else - setupextra="--shutdown=1 --install=1 --startup=1" - fi - echo "echo \$(date) $mysqlbuild >>/tmp/$(whoami).$testname.trace 2>&1; \ - \$HOME/bin/setup.mysql.bash --mysqlbuild=$mysqlbuild $setupextra >>/tmp/$(whoami).$testname.trace 2>&1; \ - testexitcode=\$?; \ - echo \$(date) $mysqlbuild \$testexitcode >>/tmp/$(whoami).$testname.trace 2>&1; \ - if [ \$testexitcode -ne 0 ] ; then exit 1; fi; \ - \$HOME/bin/$testname --mysqlbuild=$mysqlbuild --commit=1 >>/tmp/$(whoami).$testname.trace 2>&1; \ - 
if [ $run_mysqld != 0 ] ; then mysqladmin -S$mysqlsocket shutdown; fi" | $gearmandir/bin/gearman -b -f mysql-test-$system-$arch -h $gearmandhost -p 4730 -done - -exit 0 diff --git a/storage/tokudb/scripts/testbuildfromsrc.bash b/storage/tokudb/scripts/testbuildfromsrc.bash deleted file mode 100644 index 136841ea284..00000000000 --- a/storage/tokudb/scripts/testbuildfromsrc.bash +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash - -# for all source tarballs and their coresponding md5 files, build a binary release tarball - -system=$(uname -s|tr [:upper:] [:lower:]) -arch=$(uname -m) - -function expand() { - echo $* | tr ,: " " -} - -for f in *.md5; do - if [[ $f =~ (.*).tar.gz.md5 ]] ; then - mysqlsrc=${BASH_REMATCH[1]} - else - exit 1 - fi - if [ -d $mysqlsrc ] ; then continue; fi - md5sum --check $mysqlsrc.tar.gz.md5 - if [ $? != 0 ] ; then exit 1; fi - tar xzf $mysqlsrc.tar.gz - if [ $? != 0 ] ; then exit 1; fi - mkdir $mysqlsrc/build.RelWithDebInfo - pushd $mysqlsrc/build.RelWithDebInfo - if [ $? != 0 ] ; then exit 1; fi - cmake -D BUILD_CONFIG=mysql_release -D CMAKE_BUILD_TYPE=RelWithDebInfo -D BUILD_TESTING=OFF .. - if [ $? != 0 ] ; then exit 1; fi - make -j4 package - if [ $? != 0 ] ; then exit 1; fi - if [ ! -f $mysqlsrc-$system-$arch.tar.gz ] ; then exit 1; fi - popd -done diff --git a/storage/tokudb/scripts/testsandbox.bash b/storage/tokudb/scripts/testsandbox.bash deleted file mode 100644 index c348db6fd4a..00000000000 --- a/storage/tokudb/scripts/testsandbox.bash +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash - -# for all tokudb binary tarballs, verify that we can create and run the tarball using the MySQL sandbox. - -function expand() { - echo $* | tr ,: " " -} - -let n=0 -for f in *.md5; do - if [[ $f =~ (.*).tar.gz.md5 ]] ; then - mysqlbuild=${BASH_REMATCH[1]} - else - exit 1 - fi - - md5sum --check $f - if [ $? != 0 ] ; then exit 1; fi - make_sandbox --add_prefix=test$n- $mysqlbuild.tar.gz -- --sandbox_directory=test$n - if [ $? != 0 ] ; then exit 1; fi - pushd $HOME/sandboxes - if [ $? 
= 0 ] ; then - ./use_all 'show engines' - ./use_all 'create table test.t (a int primary key, b bigint, c varchar(256), d blob(500000), clustering key(b))' - ./use_all 'show create table test.t' - ./stop_all - popd - fi - let n=n+1 -done - - diff --git a/storage/tokudb/scripts/tokustat.py b/storage/tokudb/scripts/tokustat.py index 8e9233e5943..3ecac68e769 100755 --- a/storage/tokudb/scripts/tokustat.py +++ b/storage/tokudb/scripts/tokustat.py @@ -9,10 +9,10 @@ def usage(): print "diff the tokudb engine status" print "--host=HOSTNAME (default: localhost)" print "--port=PORT" - print "--sleeptime=SLEEPTIME (default: 10 seconds)" + print "--iterations=MAX_ITERATIONS (default: forever)" + print "--interval=TIME_BETWEEN_SAMPLES (default: 10 seconds)" print "--q='show engine tokudb status'" print "--q='select * from information_schema.global_status'" - return 1 def convert(v): @@ -23,14 +23,11 @@ def convert(v): v = float(v) return v -def printit(stats, rs, sleeptime): - # print rs +def printit(stats, rs, interval): for t in rs: l = len(t) # grab the last 2 fields in t k = t[l-2] v = t[l-1] - # print k, v # debug - # try to convert v try: v = convert(v) except: @@ -41,11 +38,11 @@ def printit(stats, rs, sleeptime): print k, "|", oldv, "|", v, try: d = v - oldv - if sleeptime != 1: - if d >= sleeptime: - e = d / sleeptime + if interval != 1: + if d >= interval: + e = d / interval else: - e = float(d) / sleeptime + e = float(d) / interval print "|", d, "|", e else: print "|", d @@ -59,7 +56,9 @@ def main(): port = None user = None passwd = None - sleeptime = 10 + interval = 10 + iterations = 0 + q = 'show engine tokudb status' for a in sys.argv[1:]: @@ -71,6 +70,9 @@ def main(): continue return usage() + iterations = int(iterations) + interval = int(interval) + connect_parameters = {} if host is not None: if host[0] == '/': @@ -93,7 +95,9 @@ def main(): print "connected" stats = {} - while 1: + i = 0 + while iterations == 0 or i <= iterations: + i += 1 try: c = db.cursor() n = c.execute(q) @@ -105,8 +109,8 @@ def main(): return 2 try: - printit(stats, rs, int(sleeptime)) - time.sleep(int(sleeptime)) + printit(stats, rs, interval) + time.sleep(interval) except: print "printit", sys.exc_info() return 3 diff --git a/storage/tokudb/scripts/tpch.readme b/storage/tokudb/scripts/tpch.readme deleted file mode 100644 index b5128045ade..00000000000 --- a/storage/tokudb/scripts/tpch.readme +++ /dev/null @@ -1,34 +0,0 @@ -TPCH is an industry standard data warehouse benchmark. We use TPCH databases to test the TokuDB loader. - -The run.tpch.bash script loads a TPCH database at a given scale factor into TokuDB. It then uses the SQL -CHECK TABLE statement to verify the fractal tree invariants. Finally, it dumps the database and compares -with a precomputed dump of the database from InnoDB. - -Here are some TPCH databases dumped from InnoDB. These dumps are used to compare with TPCH data loaded -into TokuDB. 
- -$ s3ls tokutek-mysql-data -l --prefix=tpch -2010-08-16T21:21:10.000Z 1073741824 tpch10G_data_dump.tar.0 -2010-08-16T21:38:45.000Z 1073741824 tpch10G_data_dump.tar.1 -2010-08-16T21:56:43.000Z 1073741824 tpch10G_data_dump.tar.2 -2010-08-16T22:14:49.000Z 1073741824 tpch10G_data_dump.tar.3 -2010-08-16T22:32:38.000Z 1073741824 tpch10G_data_dump.tar.4 -2010-08-16T22:51:04.000Z 1073741824 tpch10G_data_dump.tar.5 -2010-08-16T23:08:51.000Z 91262976 tpch10G_data_dump.tar.6 -2010-08-16T23:10:21.000Z 654 tpch10G_data_dump.tar.xml -2010-08-12T17:45:09.000Z 633579520 tpch1G_data_dump.tar -2010-08-12T17:56:30.000Z 160 tpch1G_data_dump.tar.xml -2010-08-06T13:57:51.000Z 633610240 tpch1G_data_dump_innodb.tar -2010-08-06T14:07:09.000Z 174 tpch1G_data_dump_innodb.tar.xml -2010-11-28T12:20:58.000Z 886 tpch30G_data_dump.tar.xml -2010-09-14T19:16:30.000Z 1073741824 tpch30G_dump_data.tar.0 -2010-09-14T19:40:02.000Z 1073741824 tpch30G_dump_data.tar.1 -2010-09-14T20:12:22.000Z 1073741824 tpch30G_dump_data.tar.2 -2010-09-14T20:45:23.000Z 1073741824 tpch30G_dump_data.tar.3 -2010-09-14T21:14:07.000Z 1073741824 tpch30G_dump_data.tar.4 -2010-09-14T21:37:54.000Z 1073741824 tpch30G_dump_data.tar.5 -2010-09-14T21:57:02.000Z 1073741824 tpch30G_dump_data.tar.6 -2010-09-14T22:16:59.000Z 1073741824 tpch30G_dump_data.tar.7 -2010-09-14T22:36:22.000Z 1073741824 tpch30G_dump_data.tar.8 -2010-09-14T22:55:25.000Z 382511104 tpch30G_dump_data.tar.9 -2010-09-14T23:02:04.000Z 886 tpch30G_dump_data.tar.xml diff --git a/storage/xtradb/CMakeLists.txt b/storage/xtradb/CMakeLists.txt index e34add61886..093f8f64e20 100644 --- a/storage/xtradb/CMakeLists.txt +++ b/storage/xtradb/CMakeLists.txt @@ -88,7 +88,6 @@ IF(NOT CMAKE_CROSSCOMPILING) long x; long y; long res; - char c; x = 10; y = 123; @@ -109,6 +108,16 @@ IF(NOT CMAKE_CROSSCOMPILING) if (res != 123 + 10 || x != 123 + 10) { return(1); } + return(0); + }" + HAVE_IB_GCC_ATOMIC_BUILTINS + ) + CHECK_C_SOURCE_RUNS( + " + int main() + { + long res; + char c; c = 10; res = __sync_lock_test_and_set(&c, 123); @@ -117,7 +126,7 @@ IF(NOT CMAKE_CROSSCOMPILING) } return(0); }" - HAVE_IB_GCC_ATOMIC_BUILTINS + HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE ) CHECK_C_SOURCE_RUNS( "#include<stdint.h> @@ -139,6 +148,25 @@ IF(NOT CMAKE_CROSSCOMPILING) }" HAVE_IB_GCC_ATOMIC_BUILTINS_64 ) + CHECK_C_SOURCE_RUNS( + "#include<stdint.h> + int main() + { + __sync_synchronize(); + return(0); + }" + HAVE_IB_GCC_SYNC_SYNCHRONISE + ) + CHECK_C_SOURCE_RUNS( + "#include<stdint.h> + int main() + { + __atomic_thread_fence(__ATOMIC_ACQUIRE); + __atomic_thread_fence(__ATOMIC_RELEASE); + return(0); + }" + HAVE_IB_GCC_ATOMIC_THREAD_FENCE + ) ENDIF() IF(HAVE_IB_GCC_ATOMIC_BUILTINS) @@ -146,10 +174,22 @@ IF(HAVE_IB_GCC_ATOMIC_BUILTINS) SET(XTRADB_OK 1) ENDIF() +IF(HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE) + ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_BYTE=1) +ENDIF() + IF(HAVE_IB_GCC_ATOMIC_BUILTINS_64) ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_64=1) ENDIF() +IF(HAVE_IB_GCC_SYNC_SYNCHRONISE) + ADD_DEFINITIONS(-DHAVE_IB_GCC_SYNC_SYNCHRONISE=1) +ENDIF() + +IF(HAVE_IB_GCC_ATOMIC_THREAD_FENCE) + ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_THREAD_FENCE=1) +ENDIF() + # either define HAVE_IB_ATOMIC_PTHREAD_T_GCC or not IF(NOT CMAKE_CROSSCOMPILING) CHECK_C_SOURCE_RUNS( @@ -232,10 +272,21 @@ IF(CMAKE_SYSTEM_NAME STREQUAL "SunOS") return(0); } " HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS) + CHECK_C_SOURCE_COMPILES( + "#include <mbarrier.h> + int main() { + __machine_r_barrier(); + __machine_w_barrier(); + return(0); + }" + HAVE_IB_MACHINE_BARRIER_SOLARIS) ENDIF() 
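Each of the probes above only records a HAVE_IB_* result; the IF(...) blocks in this file then turn a successful probe into a -D compile definition. Note how the single original probe is split so that byte-wide __sync_lock_test_and_set support (HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE) is detected separately from the word-wide builtins. As a rough illustration of how such symbols are typically consumed, here is a sketch of selecting a memory-barrier primitive; the macro names ib_example_rmb/ib_example_wmb are invented for this sketch and are not the actual XtraDB definitions:

    #if defined(HAVE_IB_GCC_ATOMIC_THREAD_FENCE)
    # define ib_example_rmb()  __atomic_thread_fence(__ATOMIC_ACQUIRE)
    # define ib_example_wmb()  __atomic_thread_fence(__ATOMIC_RELEASE)
    #elif defined(HAVE_IB_GCC_SYNC_SYNCHRONISE)
    /* __sync_synchronize() is a full barrier, so it serves both roles. */
    # define ib_example_rmb()  __sync_synchronize()
    # define ib_example_wmb()  __sync_synchronize()
    #elif defined(HAVE_IB_MACHINE_BARRIER_SOLARIS)
    # include <mbarrier.h>
    # define ib_example_rmb()  __machine_r_barrier()
    # define ib_example_wmb()  __machine_w_barrier()
    #else
    /* Nothing detected: fall back to no-ops. */
    # define ib_example_rmb()  do {} while (0)
    # define ib_example_wmb()  do {} while (0)
    #endif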
IF(HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS) ADD_DEFINITIONS(-DHAVE_IB_ATOMIC_PTHREAD_T_SOLARIS=1) ENDIF() + IF(HAVE_IB_MACHINE_BARRIER_SOLARIS) + ADD_DEFINITIONS(-DHAVE_IB_MACHINE_BARRIER_SOLARIS=1) + ENDIF() ENDIF() @@ -253,14 +304,8 @@ ENDIF() IF(MSVC) ADD_DEFINITIONS(-DHAVE_WINDOWS_ATOMICS) + ADD_DEFINITIONS(-DHAVE_WINDOWS_MM_FENCE) SET(XTRADB_OK 1) - - # Avoid "unreferenced label" warning in generated file - GET_FILENAME_COMPONENT(_SRC_DIR ${CMAKE_CURRENT_LIST_FILE} PATH) - SET_SOURCE_FILES_PROPERTIES(${_SRC_DIR}/pars/pars0grm.c - PROPERTIES COMPILE_FLAGS "/wd4102") - SET_SOURCE_FILES_PROPERTIES(${_SRC_DIR}/pars/lexyy.c - PROPERTIES COMPILE_FLAGS "/wd4003") ENDIF() diff --git a/storage/xtradb/api/api0api.cc b/storage/xtradb/api/api0api.cc index bb65dd82216..a060cbc7270 100644 --- a/storage/xtradb/api/api0api.cc +++ b/storage/xtradb/api/api0api.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2008, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2008, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2044,6 +2044,8 @@ ib_cursor_delete_row( const rec_t* rec; ib_bool_t page_format; mtr_t mtr; + rec_t* copy = NULL; + byte ptr[UNIV_PAGE_SIZE_MAX]; page_format = static_cast<ib_bool_t>( dict_table_is_comp(index->table)); @@ -2052,16 +2054,27 @@ ib_cursor_delete_row( if (btr_pcur_restore_position( BTR_SEARCH_LEAF, pcur, &mtr)) { + mem_heap_t* heap = NULL; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + + rec_offs_init(offsets_); rec = btr_pcur_get_rec(pcur); - } else { - rec = NULL; + + /* Since mtr will be commited, the rec + will not be protected. Make a copy of + the rec. */ + offsets = rec_get_offsets( + rec, index, offsets, ULINT_UNDEFINED, &heap); + ut_ad(rec_offs_size(offsets) < UNIV_PAGE_SIZE_MAX); + copy = rec_copy(ptr, rec, offsets); } mtr_commit(&mtr); - if (rec && !rec_get_deleted_flag(rec, page_format)) { - err = ib_delete_row(cursor, pcur, rec); + if (copy && !rec_get_deleted_flag(copy, page_format)) { + err = ib_delete_row(cursor, pcur, copy); } else { err = DB_RECORD_NOT_FOUND; } diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc index 5e0473daa85..5d8b5c04d68 100644 --- a/storage/xtradb/btr/btr0cur.cc +++ b/storage/xtradb/btr/btr0cur.cc @@ -202,15 +202,6 @@ btr_rec_free_externally_stored_fields( mtr_t* mtr); /*!< in: mini-transaction handle which contains an X-latch to record page and to the index tree */ -/***********************************************************//** -Gets the externally stored size of a record, in units of a database page. 
-@return externally stored part, in units of a database page */ -static -ulint -btr_rec_get_externally_stored_len( -/*==============================*/ - const rec_t* rec, /*!< in: record */ - const ulint* offsets);/*!< in: array returned by rec_get_offsets() */ #endif /* !UNIV_HOTBACKUP */ /******************************************************//** @@ -2743,6 +2734,31 @@ make_external: goto return_after_reservations; } + if (big_rec_vec) { + const ulint redo_10p = srv_log_file_size * UNIV_PAGE_SIZE / 10; + ulint total_blob_len = 0; + + /* Calculate the total number of bytes for blob data */ + for (ulint i = 0; i < big_rec_vec->n_fields; i++) { + total_blob_len += big_rec_vec->fields[i].len; + } + + if (total_blob_len > redo_10p) { + ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data" + " length (" ULINTPF ") is greater than" + " 10%% of the redo log file size (" UINT64PF + "). Please increase innodb_log_file_size.", + total_blob_len, srv_log_file_size); + if (n_reserved > 0) { + fil_space_release_free_extents( + index->space, n_reserved); + } + + err = DB_TOO_BIG_RECORD; + goto err_exit; + } + } + /* Store state of explicit locks on rec on the page infimum record, before deleting rec. The page infimum acts as a dummy carrier of the locks, taking care also of lock releases, before we can move the locks @@ -4238,15 +4254,15 @@ btr_rec_get_field_ref_offs( #define btr_rec_get_field_ref(rec, offsets, n) \ ((rec) + btr_rec_get_field_ref_offs(offsets, n)) -/***********************************************************//** -Gets the externally stored size of a record, in units of a database page. +/** Gets the externally stored size of a record, in units of a database page. +@param[in] rec record +@param[in] offsets array returned by rec_get_offsets() @return externally stored part, in units of a database page */ -static + ulint btr_rec_get_externally_stored_len( -/*==============================*/ - const rec_t* rec, /*!< in: record */ - const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ + const rec_t* rec, + const ulint* offsets) { ulint n_fields; ulint total_extern_len = 0; @@ -4593,6 +4609,7 @@ btr_store_big_rec_extern_fields( buf_block_t** freed_pages = NULL; ulint n_freed_pages = 0; dberr_t error = DB_SUCCESS; + ulint total_blob_len = 0; ut_ad(rec_offs_validate(rec, index, offsets)); ut_ad(rec_offs_any_extern(offsets)); @@ -4612,6 +4629,23 @@ btr_store_big_rec_extern_fields( rec_page_no = buf_block_get_page_no(rec_block); ut_a(fil_page_get_type(page_align(rec)) == FIL_PAGE_INDEX); + const ulint redo_10p = (srv_log_file_size * UNIV_PAGE_SIZE / 10); + + /* Calculate the total number of bytes for blob data */ + for (ulint i = 0; i < big_rec_vec->n_fields; i++) { + total_blob_len += big_rec_vec->fields[i].len; + } + + if (total_blob_len > redo_10p) { + ut_ad(op == BTR_STORE_INSERT); + ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data length" + " (" ULINTPF ") is greater than 10%% of the" + " redo log file size (" UINT64PF "). 
Please" + " increase innodb_log_file_size.", + total_blob_len, srv_log_file_size); + return(DB_TOO_BIG_RECORD); + } + if (page_zip) { int err; diff --git a/storage/xtradb/btr/btr0sea.cc b/storage/xtradb/btr/btr0sea.cc index eec3c0b29aa..ac5e9aec67b 100644 --- a/storage/xtradb/btr/btr0sea.cc +++ b/storage/xtradb/btr/btr0sea.cc @@ -1944,7 +1944,10 @@ btr_search_validate_one_table( buf_pool_t* buf_pool; index_id_t page_index_id; - buf_pool = buf_pool_from_bpage((buf_page_t*) block); + buf_pool = buf_pool_from_bpage((buf_page_t *) block); + /* Prevent BUF_BLOCK_FILE_PAGE -> BUF_BLOCK_REMOVE_HASH + transition until we lock the block mutex */ + mutex_enter(&buf_pool->LRU_list_mutex); if (UNIV_LIKELY(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE)) { @@ -1980,6 +1983,7 @@ btr_search_validate_one_table( } mutex_enter(&block->mutex); + mutex_exit(&buf_pool->LRU_list_mutex); ut_a(!dict_index_is_ibuf(block->index)); diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 359b15f4a6b..b27178fa8c8 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -380,6 +380,48 @@ buf_pool_get_oldest_modification(void) } /********************************************************************//** +Gets the smallest oldest_modification lsn for any page in the pool. Returns +zero if all modified pages have been flushed to disk. +@return oldest modification in pool, zero if none */ +UNIV_INTERN +lsn_t +buf_pool_get_oldest_modification_peek(void) +/*=======================================*/ +{ + ulint i; + buf_page_t* bpage; + lsn_t lsn = 0; + lsn_t oldest_lsn = 0; + + /* Dirsty read to buffer pool array */ + for (i = 0; i < srv_buf_pool_instances; i++) { + buf_pool_t* buf_pool; + + buf_pool = buf_pool_from_array(i); + + buf_flush_list_mutex_enter(buf_pool); + + bpage = UT_LIST_GET_LAST(buf_pool->flush_list); + + if (bpage != NULL) { + ut_ad(bpage->in_flush_list); + lsn = bpage->oldest_modification; + } + + buf_flush_list_mutex_exit(buf_pool); + + if (!oldest_lsn || oldest_lsn > lsn) { + oldest_lsn = lsn; + } + } + + /* The returned answer may be out of date: the flush_list can + change after the mutex has been released. */ + + return(oldest_lsn); +} + +/********************************************************************//** Get total buffer pool statistics. */ UNIV_INTERN void @@ -2996,12 +3038,6 @@ got_block: ut_ad(buf_block_get_state(fix_block) == BUF_BLOCK_FILE_PAGE); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no padding in buf_page_t. On - other systems, Valgrind could complain about uninitialized pad - bytes. */ - UNIV_MEM_ASSERT_RW(&fix_block->page, sizeof(fix_block->page)); -#endif #if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG if ((mode == BUF_GET_IF_IN_POOL || mode == BUF_GET_IF_IN_POOL_OR_WATCH) @@ -5635,7 +5671,7 @@ buf_get_free_list_len(void) #else /* !UNIV_HOTBACKUP */ /********************************************************************//** -Inits a page to the buffer buf_pool, for use in ibbackup --restore. */ +Inits a page to the buffer buf_pool, for use in mysqlbackup --restore. 
*/ UNIV_INTERN void buf_page_init_for_backup_restore( diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc index 57db9fd367c..ac614823bc1 100644 --- a/storage/xtradb/buf/buf0flu.cc +++ b/storage/xtradb/buf/buf0flu.cc @@ -542,7 +542,7 @@ buf_flush_ready_for_flush( ut_ad(flush_type < BUF_FLUSH_N_TYPES); ut_ad(mutex_own(buf_page_get_mutex(bpage)) || flush_type == BUF_FLUSH_LIST); - ut_a(buf_page_in_file(bpage)); + ut_a(buf_page_in_file(bpage) || buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH); if (bpage->oldest_modification == 0 || buf_page_get_io_fix_unlocked(bpage) != BUF_IO_NONE) { @@ -553,6 +553,7 @@ buf_flush_ready_for_flush( switch (flush_type) { case BUF_FLUSH_LIST: + return(buf_page_get_state(bpage) != BUF_BLOCK_REMOVE_HASH); case BUF_FLUSH_LRU: case BUF_FLUSH_SINGLE_PAGE: return(true); @@ -1377,7 +1378,8 @@ buf_flush_page_and_try_neighbors( } ut_a(buf_page_in_file(bpage) - || buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH); + || (buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH + )); if (buf_flush_ready_for_flush(bpage, flush_type)) { buf_pool_t* buf_pool; @@ -1663,7 +1665,7 @@ buf_do_LRU_batch( { if (buf_LRU_evict_from_unzip_LRU(buf_pool)) { n->unzip_LRU_evicted - += buf_free_from_unzip_LRU_list_batch(buf_pool, max); + = buf_free_from_unzip_LRU_list_batch(buf_pool, max); } else { n->unzip_LRU_evicted = 0; } @@ -1981,6 +1983,7 @@ buf_flush_LRU( if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) { n->flushed = 0; n->evicted = 0; + n->unzip_LRU_evicted = 0; return(false); } @@ -2407,6 +2410,10 @@ af_get_pct_for_dirty() { ulint dirty_pct = buf_get_modified_ratio_pct(); + if (dirty_pct > 0 && srv_max_buf_pool_modified_pct == 0) { + return(100); + } + ut_a(srv_max_dirty_pages_pct_lwm <= srv_max_buf_pool_modified_pct); diff --git a/storage/xtradb/buf/buf0lru.cc b/storage/xtradb/buf/buf0lru.cc index d0904f4b8ad..3b0319e4e79 100644 --- a/storage/xtradb/buf/buf0lru.cc +++ b/storage/xtradb/buf/buf0lru.cc @@ -595,6 +595,8 @@ buf_flush_or_remove_pages( buf_page_t* bpage; ulint processed = 0; + ut_ad(mutex_own(&buf_pool->LRU_list_mutex)); + buf_flush_list_mutex_enter(buf_pool); rescan: @@ -1971,13 +1973,6 @@ buf_LRU_free_page( ut_ad(buf_page_in_file(bpage)); ut_ad(bpage->in_LRU_list); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no padding in buf_page_t. On - other systems, Valgrind could complain about uninitialized pad - bytes. */ - UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage); -#endif - if (!buf_page_can_relocate(bpage)) { /* Do not free buffer fixed or I/O-fixed blocks. */ @@ -2010,12 +2005,6 @@ buf_LRU_free_page( ut_ad(buf_page_in_file(bpage)); ut_ad(bpage->in_LRU_list); ut_ad(!bpage->in_flush_list == !bpage->oldest_modification); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no padding in buf_page_t. On - other systems, Valgrind could complain about uninitialized pad - bytes. */ - UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage); -#endif #ifdef UNIV_DEBUG if (buf_debug_prints) { @@ -2121,13 +2110,6 @@ not_freed: ut_ad(prev_b->in_LRU_list); ut_ad(buf_page_in_file(prev_b)); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no - padding in buf_page_t. On other - systems, Valgrind could complain about - uninitialized pad bytes. 
*/ - UNIV_MEM_ASSERT_RW(prev_b, sizeof *prev_b); -#endif UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, prev_b, b); @@ -2338,13 +2320,6 @@ buf_LRU_block_remove_hashed( ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE); ut_a(bpage->buf_fix_count == 0); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no padding in - buf_page_t. On other systems, Valgrind could complain - about uninitialized pad bytes. */ - UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage); -#endif - buf_LRU_remove_block(bpage); buf_pool->freed_page_clock += 1; @@ -2429,6 +2404,25 @@ buf_LRU_block_remove_hashed( " in the hash table\n", (ulong) bpage->space, (ulong) bpage->offset); + +#ifdef UNIV_DEBUG + fprintf(stderr, + "InnoDB: in_page_hash %lu in_zip_hash %lu\n" + " in_free_list %lu in_flush_list %lu in_LRU_list %lu\n" + " zip.data %p zip_size %lu page_state %d\n", + bpage->in_page_hash, bpage->in_zip_hash, + bpage->in_free_list, bpage->in_flush_list, + bpage->in_LRU_list, bpage->zip.data, + buf_page_get_zip_size(bpage), + buf_page_get_state(bpage)); +#else + fprintf(stderr, + "InnoDB: zip.data %p zip_size %lu page_state %d\n", + bpage->zip.data, + buf_page_get_zip_size(bpage), + buf_page_get_state(bpage)); +#endif + if (hashed_bpage) { fprintf(stderr, "InnoDB: In hash table we find block" @@ -2439,6 +2433,9 @@ buf_LRU_block_remove_hashed( (const void*) bpage); } + ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE); + ut_a(bpage->buf_fix_count == 0); + #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG mutex_exit(buf_page_get_mutex(bpage)); rw_lock_x_unlock(hash_lock); diff --git a/storage/xtradb/dict/dict0crea.cc b/storage/xtradb/dict/dict0crea.cc index ff892749d4f..30523ff2af4 100644 --- a/storage/xtradb/dict/dict0crea.cc +++ b/storage/xtradb/dict/dict0crea.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1611,26 +1611,25 @@ dict_create_add_foreign_to_dictionary( return(error); } -/********************************************************************//** -Adds foreign key definitions to data dictionary tables in the database. -@return error code or DB_SUCCESS */ +/** Adds the given set of foreign key objects to the dictionary tables +in the database. This function does not modify the dictionary cache. The +caller must ensure that all foreign key objects contain a valid constraint +name in foreign->id. 
+@param[in] local_fk_set set of foreign key objects, to be added to +the dictionary tables +@param[in] table table to which the foreign key objects in +local_fk_set belong to +@param[in,out] trx transaction +@return error code or DB_SUCCESS */ UNIV_INTERN dberr_t dict_create_add_foreigns_to_dictionary( /*===================================*/ - ulint start_id,/*!< in: if we are actually doing ALTER TABLE - ADD CONSTRAINT, we want to generate constraint - numbers which are bigger than in the table so - far; we number the constraints from - start_id + 1 up; start_id should be set to 0 if - we are creating a new table, or if the table - so far has no constraints for which the name - was generated here */ - dict_table_t* table, /*!< in: table */ - trx_t* trx) /*!< in: transaction */ + const dict_foreign_set& local_fk_set, + const dict_table_t* table, + trx_t* trx) { dict_foreign_t* foreign; - ulint number = start_id + 1; dberr_t error; ut_ad(mutex_own(&(dict_sys->mutex))); @@ -1643,17 +1642,12 @@ dict_create_add_foreigns_to_dictionary( return(DB_ERROR); } - for (foreign = UT_LIST_GET_FIRST(table->foreign_list); - foreign; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::const_iterator it = local_fk_set.begin(); + it != local_fk_set.end(); + ++it) { - error = dict_create_add_foreign_id(&number, table->name, - foreign); - - if (error != DB_SUCCESS) { - - return(error); - } + foreign = *it; + ut_ad(foreign->id != NULL); error = dict_create_add_foreign_to_dictionary(table->name, foreign, trx); diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc index 931f14af312..515a40dff14 100644 --- a/storage/xtradb/dict/dict0dict.cc +++ b/storage/xtradb/dict/dict0dict.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. Copyright (c) 2013, SkySQL Ab. All Rights Reserved. @@ -28,6 +28,7 @@ Created 1/8/1996 Heikki Tuuri #include "dict0dict.h" #include "fts0fts.h" #include "fil0fil.h" +#include <algorithm> #ifdef UNIV_NONINL #include "dict0dict.ic" @@ -51,6 +52,7 @@ UNIV_INTERN dict_index_t* dict_ind_compact; #include "btr0btr.h" #include "btr0cur.h" #include "btr0sea.h" +#include "os0once.h" #include "page0zip.h" #include "page0page.h" #include "pars0pars.h" @@ -103,7 +105,7 @@ UNIV_INTERN ulong zip_pad_max = 50; UNIV_INTERN mysql_pfs_key_t dict_operation_lock_key; UNIV_INTERN mysql_pfs_key_t index_tree_rw_lock_key; UNIV_INTERN mysql_pfs_key_t index_online_log_key; -UNIV_INTERN mysql_pfs_key_t dict_table_stats_latch_key; +UNIV_INTERN mysql_pfs_key_t dict_table_stats_key; #endif /* UNIV_PFS_RWLOCK */ #ifdef UNIV_PFS_MUTEX @@ -122,6 +124,11 @@ UNIV_INTERN mysql_pfs_key_t dict_foreign_err_mutex_key; /** Identifies generated InnoDB foreign key names */ static char dict_ibfk[] = "_ibfk_"; +bool innodb_table_stats_not_found = false; +bool innodb_index_stats_not_found = false; +static bool innodb_table_stats_not_found_reported = false; +static bool innodb_index_stats_not_found_reported = false; + /*******************************************************************//** Tries to find column names for the index and sets the col field of the index. @@ -320,6 +327,82 @@ dict_mutex_exit_for_mysql(void) mutex_exit(&(dict_sys->mutex)); } +/** Allocate and init a dict_table_t's stats latch. 
+This function must not be called concurrently on the same table object. +@param[in,out] table_void table whose stats latch to create */ +static +void +dict_table_stats_latch_alloc( + void* table_void) +{ + dict_table_t* table = static_cast<dict_table_t*>(table_void); + + table->stats_latch = new(std::nothrow) rw_lock_t; + + ut_a(table->stats_latch != NULL); + + rw_lock_create(dict_table_stats_key, table->stats_latch, + SYNC_INDEX_TREE); +} + +/** Deinit and free a dict_table_t's stats latch. +This function must not be called concurrently on the same table object. +@param[in,out] table table whose stats latch to free */ +static +void +dict_table_stats_latch_free( + dict_table_t* table) +{ + rw_lock_free(table->stats_latch); + delete table->stats_latch; +} + +/** Create a dict_table_t's stats latch or delay for lazy creation. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to create +@param[in] enabled if false then the latch is disabled +and dict_table_stats_lock()/unlock() become noop on this table. */ + +void +dict_table_stats_latch_create( + dict_table_t* table, + bool enabled) +{ + if (!enabled) { + table->stats_latch = NULL; + table->stats_latch_created = os_once::DONE; + return; + } + +#ifdef HAVE_ATOMIC_BUILTINS + /* We create this lazily the first time it is used. */ + table->stats_latch = NULL; + table->stats_latch_created = os_once::NEVER_DONE; +#else /* HAVE_ATOMIC_BUILTINS */ + + dict_table_stats_latch_alloc(table); + + table->stats_latch_created = os_once::DONE; +#endif /* HAVE_ATOMIC_BUILTINS */ +} + +/** Destroy a dict_table_t's stats latch. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to destroy */ + +void +dict_table_stats_latch_destroy( + dict_table_t* table) +{ + if (table->stats_latch_created == os_once::DONE + && table->stats_latch != NULL) { + + dict_table_stats_latch_free(table); + } +} + /**********************************************************************//** Lock the appropriate latch to protect a given table's statistics. */ UNIV_INTERN @@ -332,6 +415,14 @@ dict_table_stats_lock( ut_ad(table != NULL); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); +#ifdef HAVE_ATOMIC_BUILTINS + os_once::do_or_wait_for_done( + &table->stats_latch_created, + dict_table_stats_latch_alloc, table); +#else /* HAVE_ATOMIC_BUILTINS */ + ut_ad(table->stats_latch_created == os_once::DONE); +#endif /* HAVE_ATOMIC_BUILTINS */ + if (table->stats_latch == NULL) { /* This is a dummy table object that is private in the current thread and is not shared between multiple threads, thus we @@ -1163,8 +1254,8 @@ dict_table_can_be_evicted( #endif /* UNIV_SYNC_DEBUG */ ut_a(table->can_be_evicted); - ut_a(UT_LIST_GET_LEN(table->foreign_list) == 0); - ut_a(UT_LIST_GET_LEN(table->referenced_list) == 0); + ut_a(table->foreign_set.empty()); + ut_a(table->referenced_set.empty()); if (table->n_ref_count == 0) { dict_index_t* index; @@ -1380,6 +1471,22 @@ dict_index_find_on_id_low( return(NULL); } +/** Function object to remove a foreign key constraint from the +referenced_set of the referenced table. The foreign key object is +also removed from the dictionary cache. The foreign key constraint +is not removed from the foreign_set of the table containing the +constraint. 
*/ +struct dict_foreign_remove_partial +{ + void operator()(dict_foreign_t* foreign) { + dict_table_t* table = foreign->referenced_table; + if (table != NULL) { + table->referenced_set.erase(foreign); + } + dict_foreign_free(foreign); + } +}; + /**********************************************************************//** Renames a table object. @return TRUE if success */ @@ -1554,27 +1661,25 @@ dict_table_rename_in_cache( system tables through a call of dict_load_foreigns. */ /* Remove the foreign constraints from the cache */ - foreign = UT_LIST_GET_LAST(table->foreign_list); - - while (foreign != NULL) { - dict_foreign_remove_from_cache(foreign); - foreign = UT_LIST_GET_LAST(table->foreign_list); - } + std::for_each(table->foreign_set.begin(), + table->foreign_set.end(), + dict_foreign_remove_partial()); + table->foreign_set.clear(); /* Reset table field in referencing constraints */ + for (dict_foreign_set::iterator it + = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { - foreign = UT_LIST_GET_FIRST(table->referenced_list); - - while (foreign != NULL) { + foreign = *it; foreign->referenced_table = NULL; foreign->referenced_index = NULL; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); } - /* Make the list of referencing constraints empty */ - - UT_LIST_INIT(table->referenced_list); + /* Make the set of referencing constraints empty */ + table->referenced_set.clear(); return(DB_SUCCESS); } @@ -1583,9 +1688,19 @@ dict_table_rename_in_cache( the constraint id of new format >= 4.0.18 constraints. Note that at this point we have already changed table->name to the new name. */ - foreign = UT_LIST_GET_FIRST(table->foreign_list); + dict_foreign_set fk_set; + + for (;;) { + + dict_foreign_set::iterator it + = table->foreign_set.begin(); + + if (it == table->foreign_set.end()) { + break; + } + + foreign = *it; - while (foreign != NULL) { if (ut_strlen(foreign->foreign_table_name) < ut_strlen(table->name)) { /* Allocate a longer name buffer; @@ -1735,12 +1850,18 @@ dict_table_rename_in_cache( mem_free(old_id); } - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); + table->foreign_set.erase(it); + fk_set.insert(foreign); } - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + ut_a(table->foreign_set.empty()); + table->foreign_set.swap(fk_set); + + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + + foreign = *it; if (ut_strlen(foreign->referenced_table_name) < ut_strlen(table->name)) { @@ -1810,27 +1931,17 @@ dict_table_remove_from_cache_low( ut_ad(mutex_own(&(dict_sys->mutex))); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); -#if 0 - fputs("Removing table ", stderr); - ut_print_name(stderr, table->name, ULINT_UNDEFINED); - fputs(" from dictionary cache\n", stderr); -#endif - /* Remove the foreign constraints from the cache */ - - for (foreign = UT_LIST_GET_LAST(table->foreign_list); - foreign != NULL; - foreign = UT_LIST_GET_LAST(table->foreign_list)) { - - dict_foreign_remove_from_cache(foreign); - } + std::for_each(table->foreign_set.begin(), table->foreign_set.end(), + dict_foreign_remove_partial()); + table->foreign_set.clear(); /* Reset table field in referencing constraints */ + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign != NULL; - foreign = 
UT_LIST_GET_NEXT(referenced_list, foreign)) { - + foreign = *it; foreign->referenced_table = NULL; foreign->referenced_index = NULL; } @@ -3057,7 +3168,7 @@ dict_table_is_referenced_by_foreign_key( /*====================================*/ const dict_table_t* table) /*!< in: InnoDB table */ { - return(UT_LIST_GET_LEN(table->referenced_list) > 0); + return(!table->referenced_set.empty()); } /*********************************************************************//** @@ -3077,9 +3188,11 @@ dict_table_get_referenced_constraint( ut_ad(index != NULL); ut_ad(table != NULL); - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + + foreign = *it; if (foreign->referenced_index == index) { @@ -3108,9 +3221,11 @@ dict_table_get_foreign_constraint( ut_ad(index != NULL); ut_ad(table != NULL); - for (foreign = UT_LIST_GET_FIRST(table->foreign_list); - foreign; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + + foreign = *it; if (foreign->foreign_index == index) { @@ -3121,17 +3236,6 @@ dict_table_get_foreign_constraint( return(NULL); } -/*********************************************************************//** -Frees a foreign key struct. */ -UNIV_INTERN -void -dict_foreign_free( -/*==============*/ - dict_foreign_t* foreign) /*!< in, own: foreign key struct */ -{ - mem_heap_free(foreign->heap); -} - /**********************************************************************//** Removes a foreign constraint struct from the dictionary cache. */ UNIV_INTERN @@ -3143,16 +3247,12 @@ dict_foreign_remove_from_cache( ut_ad(mutex_own(&(dict_sys->mutex))); ut_a(foreign); - if (foreign->referenced_table) { - UT_LIST_REMOVE(referenced_list, - foreign->referenced_table->referenced_list, - foreign); + if (foreign->referenced_table != NULL) { + foreign->referenced_table->referenced_set.erase(foreign); } - if (foreign->foreign_table) { - UT_LIST_REMOVE(foreign_list, - foreign->foreign_table->foreign_list, - foreign); + if (foreign->foreign_table != NULL) { + foreign->foreign_table->foreign_set.erase(foreign); } dict_foreign_free(foreign); @@ -3166,33 +3266,21 @@ static dict_foreign_t* dict_foreign_find( /*==============*/ - dict_table_t* table, /*!< in: table object */ - const char* id) /*!< in: foreign constraint id */ + dict_table_t* table, /*!< in: table object */ + dict_foreign_t* foreign) /*!< in: foreign constraint */ { - dict_foreign_t* foreign; - ut_ad(mutex_own(&(dict_sys->mutex))); - foreign = UT_LIST_GET_FIRST(table->foreign_list); - - while (foreign) { - if (ut_strcmp(id, foreign->id) == 0) { - - return(foreign); - } + dict_foreign_set::iterator it = table->foreign_set.find(foreign); - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); + if (it != table->foreign_set.end()) { + return(*it); } - foreign = UT_LIST_GET_FIRST(table->referenced_list); - - while (foreign) { - if (ut_strcmp(id, foreign->id) == 0) { - - return(foreign); - } + it = table->referenced_set.find(foreign); - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); + if (it != table->referenced_set.end()) { + return(*it); } return(NULL); @@ -3352,11 +3440,11 @@ dict_foreign_add_to_cache( ut_a(for_table || ref_table); if (for_table) { - for_in_cache = dict_foreign_find(for_table, foreign->id); + for_in_cache = dict_foreign_find(for_table, 
foreign); } if (!for_in_cache && ref_table) { - for_in_cache = dict_foreign_find(ref_table, foreign->id); + for_in_cache = dict_foreign_find(ref_table, foreign); } if (for_in_cache) { @@ -3393,9 +3481,12 @@ dict_foreign_add_to_cache( for_in_cache->referenced_table = ref_table; for_in_cache->referenced_index = index; - UT_LIST_ADD_LAST(referenced_list, - ref_table->referenced_list, - for_in_cache); + + std::pair<dict_foreign_set::iterator, bool> ret + = ref_table->referenced_set.insert(for_in_cache); + + ut_a(ret.second); /* second is true if the insertion + took place */ added_to_referenced_list = TRUE; } @@ -3424,10 +3515,13 @@ dict_foreign_add_to_cache( if (for_in_cache == foreign) { if (added_to_referenced_list) { - UT_LIST_REMOVE( - referenced_list, - ref_table->referenced_list, - for_in_cache); + const dict_foreign_set::size_type n + = ref_table->referenced_set + .erase(for_in_cache); + + ut_a(n == 1); /* the number of + elements removed must + be one */ } mem_heap_free(foreign->heap); @@ -3438,9 +3532,11 @@ dict_foreign_add_to_cache( for_in_cache->foreign_table = for_table; for_in_cache->foreign_index = index; - UT_LIST_ADD_LAST(foreign_list, - for_table->foreign_list, - for_in_cache); + std::pair<dict_foreign_set::iterator, bool> ret + = for_table->foreign_set.insert(for_in_cache); + + ut_a(ret.second); /* second is true if the insertion + took place */ } /* We need to move the table to the non-LRU end of the table LRU @@ -4018,9 +4114,12 @@ dict_table_get_highest_foreign_id( ut_a(table); len = ut_strlen(table->name); - foreign = UT_LIST_GET_FIRST(table->foreign_list); - while (foreign) { + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + foreign = *it; + if (ut_strlen(foreign->id) > ((sizeof dict_ibfk) - 1) + len && 0 == ut_memcmp(foreign->id, table->name, len) && 0 == ut_memcmp(foreign->id + len, @@ -4039,8 +4138,6 @@ dict_table_get_highest_foreign_id( } } } - - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); } return(biggest_id); @@ -4101,6 +4198,7 @@ dict_create_foreign_constraints_low( dict_table_t* referenced_table; dict_table_t* table_to_alter; ulint highest_id_so_far = 0; + ulint number = 1; dict_index_t* index; dict_foreign_t* foreign; const char* ptr = sql_string; @@ -4119,6 +4217,8 @@ dict_create_foreign_constraints_low( const dict_col_t*columns[500]; const char* column_names[500]; const char* referenced_table_name; + dict_foreign_set local_fk_set; + dict_foreign_set_free local_fk_set_free(local_fk_set); ut_ad(!srv_read_only_mode); ut_ad(mutex_own(&(dict_sys->mutex))); @@ -4183,6 +4283,7 @@ dict_create_foreign_constraints_low( table_to_alter); } + number = highest_id_so_far + 1; /* Scan for foreign key declarations in a loop */ loop: /* Scan either to "CONSTRAINT" or "FOREIGN", whichever is closer */ @@ -4227,7 +4328,7 @@ loop: command, determine if there are any foreign keys, and if so, immediately reject the command if the table is a temporary one. For now, this kludge will work. 
*/ - if (reject_fks && (UT_LIST_GET_LEN(table->foreign_list) > 0)) { + if (reject_fks && !local_fk_set.empty()) { return(DB_CANNOT_ADD_CONSTRAINT); } @@ -4237,7 +4338,17 @@ loop: to the data dictionary system tables on disk */ error = dict_create_add_foreigns_to_dictionary( - highest_id_so_far, table, trx); + local_fk_set, table, trx); + + if (error == DB_SUCCESS) { + + table->foreign_set.insert(local_fk_set.begin(), + local_fk_set.end()); + std::for_each(local_fk_set.begin(), + local_fk_set.end(), + dict_foreign_add_to_referenced_table()); + local_fk_set.clear(); + } return(error); } @@ -4396,6 +4507,24 @@ col_loop1: strcpy(foreign->id + db_len + 1, constraint_name); } + if (foreign->id == NULL) { + error = dict_create_add_foreign_id(&number, + table->name, foreign); + if (error != DB_SUCCESS) { + dict_foreign_free(foreign); + return(error); + } + } + + std::pair<dict_foreign_set::iterator, bool> ret + = local_fk_set.insert(foreign); + + if (!ret.second) { + /* A duplicate foreign key name has been found */ + dict_foreign_free(foreign); + return(DB_CANNOT_ADD_CONSTRAINT); + } + foreign->foreign_table = table; foreign->foreign_table_name = mem_heap_strdup( foreign->heap, table->name); @@ -4421,8 +4550,6 @@ col_loop1: checking of foreign key constraints! */ if (!success || (!referenced_table && trx->check_foreigns)) { - dict_foreign_free(foreign); - mutex_enter(&dict_foreign_err_mutex); dict_foreign_error_report_low(ef, name); fprintf(ef, "%s:\nCannot resolve table name close to:\n" @@ -4436,7 +4563,6 @@ col_loop1: ptr = dict_accept(cs, ptr, "(", &success); if (!success) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err(name, start_of_latest_foreign, ptr); return(DB_CANNOT_ADD_CONSTRAINT); @@ -4451,7 +4577,6 @@ col_loop2: i++; if (!success) { - dict_foreign_free(foreign); mutex_enter(&dict_foreign_err_mutex); dict_foreign_error_report_low(ef, name); @@ -4472,7 +4597,6 @@ col_loop2: ptr = dict_accept(cs, ptr, ")", &success); if (!success || foreign->n_fields != i) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err(name, start_of_latest_foreign, ptr); @@ -4498,7 +4622,6 @@ scan_on_conditions: ptr = dict_accept(cs, ptr, "UPDATE", &success); if (!success) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err( name, start_of_latest_foreign, ptr); @@ -4536,7 +4659,6 @@ scan_on_conditions: ptr = dict_accept(cs, ptr, "ACTION", &success); if (!success) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err( name, start_of_latest_foreign, ptr); @@ -4555,7 +4677,6 @@ scan_on_conditions: ptr = dict_accept(cs, ptr, "SET", &success); if (!success) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err(name, start_of_latest_foreign, ptr); return(DB_CANNOT_ADD_CONSTRAINT); @@ -4564,7 +4685,6 @@ scan_on_conditions: ptr = dict_accept(cs, ptr, "NULL", &success); if (!success) { - dict_foreign_free(foreign); dict_foreign_report_syntax_err(name, start_of_latest_foreign, ptr); return(DB_CANNOT_ADD_CONSTRAINT); @@ -4577,8 +4697,6 @@ scan_on_conditions: /* It is not sensible to define SET NULL if the column is not allowed to be NULL! 
*/ - dict_foreign_free(foreign); - mutex_enter(&dict_foreign_err_mutex); dict_foreign_error_report_low(ef, name); fprintf(ef, "%s:\n" @@ -4604,8 +4722,6 @@ try_find_index: if (n_on_deletes > 1 || n_on_updates > 1) { /* It is an error to define more than 1 action */ - dict_foreign_free(foreign); - mutex_enter(&dict_foreign_err_mutex); dict_foreign_error_report_low(ef, name); fprintf(ef, "%s:\n" @@ -4627,7 +4743,6 @@ try_find_index: foreign->foreign_index, TRUE, FALSE); if (!index) { - dict_foreign_free(foreign); mutex_enter(&dict_foreign_err_mutex); dict_foreign_error_report_low(ef, name); fprintf(ef, "%s:\n" @@ -4671,16 +4786,6 @@ try_find_index: = mem_heap_strdup(foreign->heap, column_names[i]); } - /* We found an ok constraint definition: add to the lists */ - - UT_LIST_ADD_LAST(foreign_list, table->foreign_list, foreign); - - if (referenced_table) { - UT_LIST_ADD_LAST(referenced_list, - referenced_table->referenced_list, - foreign); - } - goto loop; } /************************************************************************** @@ -4766,7 +4871,6 @@ dict_foreign_parse_drop_constraints( const char*** constraints_to_drop) /*!< out: id's of the constraints to drop */ { - dict_foreign_t* foreign; ibool success; char* str; size_t len; @@ -4843,25 +4947,10 @@ loop: (*constraints_to_drop)[*n] = id; (*n)++; - /* Look for the given constraint id */ - - foreign = UT_LIST_GET_FIRST(table->foreign_list); - - while (foreign != NULL) { - if (0 == innobase_strcasecmp(foreign->id, id) - || (strchr(foreign->id, '/') - && 0 == innobase_strcasecmp( - id, - dict_remove_db_name(foreign->id)))) { - /* Found */ - break; - } - - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); - } - - - if (foreign == NULL) { + if (std::find_if(table->foreign_set.begin(), + table->foreign_set.end(), + dict_foreign_matches_id(id)) + == table->foreign_set.end()) { if (!srv_read_only_mode) { FILE* ef = dict_foreign_err_file; @@ -5188,7 +5277,6 @@ dict_table_print( dict_table_t* table) /*!< in: table */ { dict_index_t* index; - dict_foreign_t* foreign; ulint i; ut_ad(mutex_own(&(dict_sys->mutex))); @@ -5225,23 +5313,15 @@ dict_table_print( index = UT_LIST_GET_NEXT(indexes, index); } - table->stat_initialized = FALSE; - dict_table_stats_unlock(table, RW_X_LATCH); - foreign = UT_LIST_GET_FIRST(table->foreign_list); - - while (foreign != NULL) { - dict_foreign_print_low(foreign); - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); - } - - foreign = UT_LIST_GET_FIRST(table->referenced_list); + std::for_each(table->foreign_set.begin(), + table->foreign_set.end(), + dict_foreign_print_low); - while (foreign != NULL) { - dict_foreign_print_low(foreign); - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); - } + std::for_each(table->referenced_set.begin(), + table->referenced_set.end(), + dict_foreign_print_low); } /**********************************************************************//** @@ -5449,15 +5529,12 @@ dict_print_info_on_foreign_keys( mutex_enter(&(dict_sys->mutex)); - foreign = UT_LIST_GET_FIRST(table->foreign_list); - - if (foreign == NULL) { - mutex_exit(&(dict_sys->mutex)); + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { - return; - } + foreign = *it; - while (foreign != NULL) { if (create_table_format) { dict_print_info_on_foreign_key_in_create_format( file, trx, foreign, TRUE); @@ -5514,8 +5591,6 @@ dict_print_info_on_foreign_keys( fputs(" ON UPDATE NO ACTION", file); } } - - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); } 
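The conversions above replace hand-written UT_LIST walks over foreign_list/referenced_list with a std::set of constraint pointers (dict_foreign_set) processed through small function objects and the <algorithm> helpers std::for_each and std::find_if. A minimal, self-contained sketch of that pattern follows; fk_t, fk_compare, fk_set, fk_matches_id and contains_id are simplified stand-ins invented for this illustration, not the real dictionary types:

    #include <algorithm>
    #include <cstring>
    #include <set>

    struct fk_t { const char* id; };        /* stand-in for dict_foreign_t */

    /* Order the set by constraint id, i.e. a name-keyed set. */
    struct fk_compare {
        bool operator()(const fk_t* a, const fk_t* b) const {
            return std::strcmp(a->id, b->id) < 0;
        }
    };

    typedef std::set<fk_t*, fk_compare> fk_set;

    /* Predicate functor, usable with std::find_if. */
    struct fk_matches_id {
        explicit fk_matches_id(const char* id) : m_id(id) {}
        bool operator()(const fk_t* fk) const {
            return std::strcmp(fk->id, m_id) == 0;
        }
        const char* m_id;
    };

    bool contains_id(const fk_set& s, const char* id) {
        /* Replaces the old explicit UT_LIST_GET_FIRST/GET_NEXT loop. */
        return std::find_if(s.begin(), s.end(), fk_matches_id(id)) != s.end();
    }

Set insertion also gives the duplicate-name check for free: insert() returns a pair whose second member is false when a constraint with the same id already exists, which is how the reworked dict_create_foreign_constraints_low() above rejects duplicate foreign key names.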
mutex_exit(&(dict_sys->mutex)); @@ -5886,10 +5961,11 @@ dict_foreign_replace_index( ut_ad(index->to_be_dropped); ut_ad(index->table == table); - for (foreign = UT_LIST_GET_FIRST(table->foreign_list); - foreign; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + foreign = *it; if (foreign->foreign_index == index) { ut_ad(foreign->foreign_table == index->table); @@ -5909,10 +5985,11 @@ dict_foreign_replace_index( } } - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + foreign = *it; if (foreign->referenced_index == index) { ut_ad(foreign->referenced_table == index->table); @@ -6068,14 +6145,34 @@ dict_table_schema_check( table = dict_table_get_low(req_schema->table_name); if (table == NULL) { + bool should_print=true; /* no such table */ - ut_snprintf(errstr, errstr_sz, - "Table %s not found.", - ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf))); + if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_table_stats") == 0) { + if (innodb_table_stats_not_found_reported == false) { + innodb_table_stats_not_found = true; + innodb_table_stats_not_found_reported = true; + } else { + should_print = false; + } + } else if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_index_stats") == 0 ) { + if (innodb_index_stats_not_found_reported == false) { + innodb_index_stats_not_found = true; + innodb_index_stats_not_found_reported = true; + } else { + should_print = false; + } + } - return(DB_TABLE_NOT_FOUND); + if (should_print) { + ut_snprintf(errstr, errstr_sz, + "Table %s not found.", + ut_format_name(req_schema->table_name, + TRUE, buf, sizeof(buf))); + return(DB_TABLE_NOT_FOUND); + } else { + return(DB_STATS_DO_NOT_EXIST); + } } if (table->ibd_file_missing) { @@ -6204,24 +6301,24 @@ dict_table_schema_check( } } - if (req_schema->n_foreign != UT_LIST_GET_LEN(table->foreign_list)) { + if (req_schema->n_foreign != table->foreign_set.size()) { ut_snprintf( errstr, errstr_sz, - "Table %s has %lu foreign key(s) pointing to other " - "tables, but it must have %lu.", + "Table %s has " ULINTPF " foreign key(s) pointing" + " to other tables, but it must have %lu.", ut_format_name(req_schema->table_name, TRUE, buf, sizeof(buf)), - UT_LIST_GET_LEN(table->foreign_list), + static_cast<ulint>(table->foreign_set.size()), req_schema->n_foreign); return(DB_ERROR); } - if (req_schema->n_referenced != UT_LIST_GET_LEN(table->referenced_list)) { + if (req_schema->n_referenced != table->referenced_set.size()) { ut_snprintf( errstr, errstr_sz, - "There are %lu foreign key(s) pointing to %s, " + "There are " ULINTPF " foreign key(s) pointing to %s, " "but there must be %lu.", - UT_LIST_GET_LEN(table->referenced_list), + static_cast<ulint>(table->referenced_set.size()), ut_format_name(req_schema->table_name, TRUE, buf, sizeof(buf)), req_schema->n_referenced); diff --git a/storage/xtradb/dict/dict0mem.cc b/storage/xtradb/dict/dict0mem.cc index 7ce42fa8efc..5e0ffab4bf7 100644 --- a/storage/xtradb/dict/dict0mem.cc +++ b/storage/xtradb/dict/dict0mem.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. 
All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -98,13 +98,9 @@ dict_mem_table_create( ut_d(table->magic_n = DICT_TABLE_MAGIC_N); - if (!nonshared) { - table->stats_latch = new rw_lock_t; - rw_lock_create(dict_table_stats_latch_key, table->stats_latch, - SYNC_INDEX_TREE); - } else { - table->stats_latch = NULL; - } + /* true means that the stats latch will be enabled - + dict_table_stats_lock() will not be noop. */ + dict_table_stats_latch_create(table, true); #ifndef UNIV_HOTBACKUP @@ -141,6 +137,9 @@ dict_mem_table_create( #endif /* !UNIV_HOTBACKUP */ + new(&table->foreign_set) dict_foreign_set(); + new(&table->referenced_set) dict_foreign_set(); + return(table); } @@ -168,17 +167,16 @@ dict_mem_table_free( } } #ifndef UNIV_HOTBACKUP - if (table->stats_latch) { + if (table->autoinc_lock) { mutex_free(&(table->autoinc_mutex)); } #endif /* UNIV_HOTBACKUP */ - if (table->stats_latch) { + dict_table_stats_latch_destroy(table); - rw_lock_free(table->stats_latch); - delete table->stats_latch; - } + table->foreign_set.~dict_foreign_set(); + table->referenced_set.~dict_foreign_set(); ut_free(table->name); mem_heap_free(table->heap); @@ -350,10 +348,15 @@ dict_mem_table_col_rename_low( table->col_names = col_names; } + dict_foreign_t* foreign; + /* Replace the field names in every foreign key constraint. */ - for (dict_foreign_t* foreign = UT_LIST_GET_FIRST(table->foreign_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + + foreign = *it; + for (unsigned f = 0; f < foreign->n_fields; f++) { /* These can point straight to table->col_names, because the foreign key @@ -365,10 +368,12 @@ dict_mem_table_col_rename_low( } } - for (dict_foreign_t* foreign = UT_LIST_GET_FIRST( - table->referenced_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + + foreign = *it; + for (unsigned f = 0; f < foreign->n_fields; f++) { /* foreign->referenced_col_names[] need to be copies, because the constraint may become diff --git a/storage/xtradb/dict/dict0stats.cc b/storage/xtradb/dict/dict0stats.cc index bec0079942b..e0a2880e214 100644 --- a/storage/xtradb/dict/dict0stats.cc +++ b/storage/xtradb/dict/dict0stats.cc @@ -46,6 +46,7 @@ Created Jan 06, 2010 Vasil Dimov #include "ut0rnd.h" /* ut_rnd_interval() */ #include "ut0ut.h" /* ut_format_name(), ut_time() */ +#include <algorithm> #include <map> #include <vector> @@ -127,10 +128,11 @@ where n=1..n_uniq. #endif /* UNIV_STATS_DEBUG */ /* Gets the number of leaf pages to sample in persistent stats estimation */ -#define N_SAMPLE_PAGES(index) \ - ((index)->table->stats_sample_pages != 0 ? \ - (index)->table->stats_sample_pages : \ - srv_stats_persistent_sample_pages) +#define N_SAMPLE_PAGES(index) \ + static_cast<ib_uint64_t>( \ + (index)->table->stats_sample_pages != 0 \ + ? 
(index)->table->stats_sample_pages \ + : srv_stats_persistent_sample_pages) /* number of distinct records on a given level that are required to stop descending to lower levels and fetch N_SAMPLE_PAGES(index) records @@ -268,10 +270,12 @@ dict_stats_persistent_storage_check( mutex_exit(&(dict_sys->mutex)); } - if (ret != DB_SUCCESS) { + if (ret != DB_SUCCESS && ret != DB_STATS_DO_NOT_EXIST) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: %s\n", errstr); return(false); + } else if (ret == DB_STATS_DO_NOT_EXIST) { + return false; } /* else */ @@ -430,9 +434,9 @@ dict_stats_table_clone_create( t->corrupted = table->corrupted; /* This private object "t" is not shared with other threads, so - we do not need the stats_latch. The lock/unlock routines will do - nothing if stats_latch is NULL. */ - t->stats_latch = NULL; + we do not need the stats_latch (thus we pass false below). The + dict_table_stats_lock()/unlock() routines will do nothing. */ + dict_table_stats_latch_create(t, false); UT_LIST_INIT(t->indexes); @@ -511,6 +515,7 @@ dict_stats_table_clone_free( /*========================*/ dict_table_t* t) /*!< in: dummy table object to free */ { + dict_table_stats_latch_destroy(t); mem_heap_free(t->heap); } @@ -1330,35 +1335,40 @@ enum page_scan_method_t { }; /* @} */ -/*********************************************************************//** -Scan a page, reading records from left to right and counting the number -of distinct records on that page (looking only at the first n_prefix -columns). If scan_method is QUIT_ON_FIRST_NON_BORING then the function +/** Scan a page, reading records from left to right and counting the number +of distinct records (looking only at the first n_prefix +columns) and the number of external pages pointed by records from this page. +If scan_method is QUIT_ON_FIRST_NON_BORING then the function will return as soon as it finds a record that does not match its neighbor to the right, which means that in the case of QUIT_ON_FIRST_NON_BORING the returned n_diff can either be 0 (empty page), 1 (the whole page has all keys equal) or 2 (the function found a non-boring record and returned). +@param[out] out_rec record, or NULL +@param[out] offsets1 rec_get_offsets() working space (must +be big enough) +@param[out] offsets2 rec_get_offsets() working space (must +be big enough) +@param[in] index index of the page +@param[in] page the page to scan +@param[in] n_prefix look at the first n_prefix columns +@param[in] scan_method scan to the end of the page or not +@param[out] n_diff number of distinct records encountered +@param[out] n_external_pages if this is non-NULL then it will be set +to the number of externally stored pages which were encountered @return offsets1 or offsets2 (the offsets of *out_rec), or NULL if the page is empty and does not contain user records. 
*/ -UNIV_INLINE __attribute__((nonnull)) +UNIV_INLINE ulint* dict_stats_scan_page( -/*=================*/ - const rec_t** out_rec, /*!< out: record, or NULL */ - ulint* offsets1, /*!< out: rec_get_offsets() - working space (must be big - enough) */ - ulint* offsets2, /*!< out: rec_get_offsets() - working space (must be big - enough) */ - dict_index_t* index, /*!< in: index of the page */ - const page_t* page, /*!< in: the page to scan */ - ulint n_prefix, /*!< in: look at the first - n_prefix columns */ - page_scan_method_t scan_method, /*!< in: scan to the end of - the page or not */ - ib_uint64_t* n_diff) /*!< out: number of distinct - records encountered */ + const rec_t** out_rec, + ulint* offsets1, + ulint* offsets2, + dict_index_t* index, + const page_t* page, + ulint n_prefix, + page_scan_method_t scan_method, + ib_uint64_t* n_diff, + ib_uint64_t* n_external_pages) { ulint* offsets_rec = offsets1; ulint* offsets_next_rec = offsets2; @@ -1376,6 +1386,12 @@ dict_stats_scan_page( get_next = page_rec_get_next_const; } + const bool should_count_external_pages = n_external_pages != NULL; + + if (should_count_external_pages) { + *n_external_pages = 0; + } + rec = get_next(page_get_infimum_rec(page)); if (page_rec_is_supremum(rec)) { @@ -1388,6 +1404,11 @@ dict_stats_scan_page( offsets_rec = rec_get_offsets(rec, index, offsets_rec, ULINT_UNDEFINED, &heap); + if (should_count_external_pages) { + *n_external_pages += btr_rec_get_externally_stored_len( + rec, offsets_rec); + } + next_rec = get_next(rec); *n_diff = 1; @@ -1438,6 +1459,11 @@ dict_stats_scan_page( offsets_next_rec = offsets_tmp; } + if (should_count_external_pages) { + *n_external_pages += btr_rec_get_externally_stored_len( + rec, offsets_rec); + } + next_rec = get_next(next_rec); } @@ -1448,19 +1474,25 @@ func_exit: return(offsets_rec); } -/*********************************************************************//** -Dive below the current position of a cursor and calculate the number of +/** Dive below the current position of a cursor and calculate the number of distinct records on the leaf page, when looking at the fist n_prefix -columns. +columns. Also calculate the number of external pages pointed by records +on the leaf page. 
+@param[in] cur cursor +@param[in] n_prefix look at the first n_prefix columns +when comparing records +@param[out] n_diff number of distinct records +@param[out] n_external_pages number of external pages +@param[in,out] mtr mini-transaction @return number of distinct records on the leaf page */ static -ib_uint64_t +void dict_stats_analyze_index_below_cur( -/*===============================*/ - const btr_cur_t*cur, /*!< in: cursor */ - ulint n_prefix, /*!< in: look at the first n_prefix - columns when comparing records */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + const btr_cur_t* cur, + ulint n_prefix, + ib_uint64_t* n_diff, + ib_uint64_t* n_external_pages, + mtr_t* mtr) { dict_index_t* index; ulint space; @@ -1473,7 +1505,6 @@ dict_stats_analyze_index_below_cur( ulint* offsets1; ulint* offsets2; ulint* offsets_rec; - ib_uint64_t n_diff; /* the result */ ulint size; index = btr_cur_get_index(cur); @@ -1509,6 +1540,10 @@ dict_stats_analyze_index_below_cur( page_no = btr_node_ptr_get_child_page_no(rec, offsets_rec); + /* assume no external pages by default - in case we quit from this + function without analyzing any leaf pages */ + *n_external_pages = 0; + /* descend to the leaf level on the B-tree */ for (;;) { @@ -1527,20 +1562,24 @@ dict_stats_analyze_index_below_cur( /* search for the first non-boring record on the page */ offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - QUIT_ON_FIRST_NON_BORING, &n_diff); + QUIT_ON_FIRST_NON_BORING, n_diff, NULL); /* pages on level > 0 are not allowed to be empty */ ut_a(offsets_rec != NULL); /* if page is not empty (offsets_rec != NULL) then n_diff must be > 0, otherwise there is a bug in dict_stats_scan_page() */ - ut_a(n_diff > 0); + ut_a(*n_diff > 0); - if (n_diff == 1) { + if (*n_diff == 1) { /* page has all keys equal and the end of the page was reached by dict_stats_scan_page(), no need to descend to the leaf level */ mem_heap_free(heap); - return(1); + /* can't get an estimate for n_external_pages here + because we do not dive to the leaf level, assume no + external pages (*n_external_pages was assigned to 0 + above). 
*/ + return; } /* else */ @@ -1548,7 +1587,7 @@ dict_stats_analyze_index_below_cur( first non-boring record it finds, then the returned n_diff can either be 0 (empty page), 1 (page has all keys equal) or 2 (non-boring record was found) */ - ut_a(n_diff == 2); + ut_a(*n_diff == 2); /* we have a non-boring record in rec, descend below it */ @@ -1559,11 +1598,14 @@ dict_stats_analyze_index_below_cur( ut_ad(btr_page_get_level(page, mtr) == 0); /* scan the leaf page and find the number of distinct keys, - when looking only at the first n_prefix columns */ + when looking only at the first n_prefix columns; also estimate + the number of externally stored pages pointed by records on this + page */ offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, &n_diff); + COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, n_diff, + n_external_pages); #if 0 DEBUG_PRINTF(" %s(): n_diff below page_no=%lu: " UINT64PF "\n", @@ -1571,133 +1613,146 @@ dict_stats_analyze_index_below_cur( #endif mem_heap_free(heap); - - return(n_diff); } -/*********************************************************************//** -For a given level in an index select N_SAMPLE_PAGES(index) -(or less) records from that level and dive below them to the corresponding -leaf pages, then scan those leaf pages and save the sampling results in -index->stat_n_diff_key_vals[n_prefix - 1] and the number of pages scanned in -index->stat_n_sample_sizes[n_prefix - 1]. */ +/** Input data that is used to calculate dict_index_t::stat_n_diff_key_vals[] +for each n-columns prefix (n from 1 to n_uniq). */ +struct n_diff_data_t { + /** Index of the level on which the descent through the btree + stopped. level 0 is the leaf level. This is >= 1 because we + avoid scanning the leaf level because it may contain too many + pages and doing so is useless when combined with the random dives - + if we are to scan the leaf level, this means a full scan and we can + simply do that instead of fiddling with picking random records higher + in the tree and to dive below them. At the start of the analyzing + we may decide to do full scan of the leaf level, but then this + structure is not used in that code path. */ + ulint level; + + /** Number of records on the level where the descend through the btree + stopped. When we scan the btree from the root, we stop at some mid + level, choose some records from it and dive below them towards a leaf + page to analyze. */ + ib_uint64_t n_recs_on_level; + + /** Number of different key values that were found on the mid level. */ + ib_uint64_t n_diff_on_level; + + /** Number of leaf pages that are analyzed. This is also the same as + the number of records that we pick from the mid level and dive below + them. */ + ib_uint64_t n_leaf_pages_to_analyze; + + /** Cumulative sum of the number of different key values that were + found on all analyzed pages. */ + ib_uint64_t n_diff_all_analyzed_pages; + + /** Cumulative sum of the number of external pages (stored outside of + the btree but in the same file segment). */ + ib_uint64_t n_external_pages_sum; +}; + +/** Estimate the number of different key values in an index when looking at +the first n_prefix columns. For a given level in an index select +n_diff_data->n_leaf_pages_to_analyze records from that level and dive below +them to the corresponding leaf pages, then scan those leaf pages and save the +sampling results in n_diff_data->n_diff_all_analyzed_pages. 
+@param[in] index index +@param[in] n_prefix look at first 'n_prefix' columns when +comparing records +@param[in] boundaries a vector that contains +n_diff_data->n_diff_on_level integers each of which represents the index (on +level 'level', counting from left/smallest to right/biggest from 0) of the +last record from each group of distinct keys +@param[in,out] n_diff_data n_diff_all_analyzed_pages and +n_external_pages_sum in this structure will be set by this function. The +members level, n_diff_on_level and n_leaf_pages_to_analyze must be set by the +caller in advance - they are used by some calculations inside this function +@param[in,out] mtr mini-transaction */ static void dict_stats_analyze_index_for_n_prefix( -/*==================================*/ - dict_index_t* index, /*!< in/out: index */ - ulint level, /*!< in: level, must be >= 1 */ - ib_uint64_t total_recs_on_level, - /*!< in: total number of - records on the given level */ - ulint n_prefix, /*!< in: look at first - n_prefix columns when - comparing records */ - ib_uint64_t n_diff_for_this_prefix, - /*!< in: number of distinct - records on the given level, - when looking at the first - n_prefix columns */ - boundaries_t* boundaries, /*!< in: array that contains - n_diff_for_this_prefix - integers each of which - represents the index (on the - level, counting from - left/smallest to right/biggest - from 0) of the last record - from each group of distinct - keys */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + dict_index_t* index, + ulint n_prefix, + const boundaries_t* boundaries, + n_diff_data_t* n_diff_data, + mtr_t* mtr) { btr_pcur_t pcur; const page_t* page; ib_uint64_t rec_idx; - ib_uint64_t last_idx_on_level; - ib_uint64_t n_recs_to_dive_below; - ib_uint64_t n_diff_sum_of_all_analyzed_pages; ib_uint64_t i; #if 0 DEBUG_PRINTF(" %s(table=%s, index=%s, level=%lu, n_prefix=%lu, " - "n_diff_for_this_prefix=" UINT64PF ")\n", + "n_diff_on_level=" UINT64PF ")\n", __func__, index->table->name, index->name, level, - n_prefix, n_diff_for_this_prefix); + n_prefix, n_diff_data->n_diff_on_level); #endif ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), MTR_MEMO_S_LOCK)); - /* if some of those is 0 then this means that there is exactly one - page in the B-tree and it is empty and we should have done full scan - and should not be here */ - ut_ad(total_recs_on_level > 0); - ut_ad(n_diff_for_this_prefix > 0); - - /* this must be at least 1 */ - ut_ad(N_SAMPLE_PAGES(index) > 0); - /* Position pcur on the leftmost record on the leftmost page on the desired level. */ btr_pcur_open_at_index_side( true, index, BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED, - &pcur, true, level, mtr); + &pcur, true, n_diff_data->level, mtr); btr_pcur_move_to_next_on_page(&pcur); page = btr_pcur_get_page(&pcur); + const rec_t* first_rec = btr_pcur_get_rec(&pcur); + + /* We shouldn't be scanning the leaf level. The caller of this function + should have stopped the descend on level 1 or higher. */ + ut_ad(n_diff_data->level > 0); + ut_ad(!page_is_leaf(page)); + /* The page must not be empty, except when it is the root page (and the whole index is empty). 
*/ - ut_ad(btr_pcur_is_on_user_rec(&pcur) || page_is_leaf(page)); - ut_ad(btr_pcur_get_rec(&pcur) - == page_rec_get_next_const(page_get_infimum_rec(page))); + ut_ad(btr_pcur_is_on_user_rec(&pcur)); + ut_ad(first_rec == page_rec_get_next_const(page_get_infimum_rec(page))); /* check that we are indeed on the desired level */ - ut_a(btr_page_get_level(page, mtr) == level); + ut_a(btr_page_get_level(page, mtr) == n_diff_data->level); /* there should not be any pages on the left */ ut_a(btr_page_get_prev(page, mtr) == FIL_NULL); /* check whether the first record on the leftmost page is marked - as such, if we are on a non-leaf level */ - ut_a((level == 0) - == !(REC_INFO_MIN_REC_FLAG & rec_get_info_bits( - btr_pcur_get_rec(&pcur), page_is_comp(page)))); + as such; we are on a non-leaf level */ + ut_a(rec_get_info_bits(first_rec, page_is_comp(page)) + & REC_INFO_MIN_REC_FLAG); - last_idx_on_level = boundaries->at( - static_cast<unsigned int>(n_diff_for_this_prefix - 1)); + const ib_uint64_t last_idx_on_level = boundaries->at( + static_cast<unsigned>(n_diff_data->n_diff_on_level - 1)); rec_idx = 0; - n_diff_sum_of_all_analyzed_pages = 0; - - n_recs_to_dive_below = ut_min(N_SAMPLE_PAGES(index), - n_diff_for_this_prefix); - - for (i = 0; i < n_recs_to_dive_below; i++) { - ib_uint64_t left; - ib_uint64_t right; - ib_uint64_t rnd; - ib_uint64_t dive_below_idx; + n_diff_data->n_diff_all_analyzed_pages = 0; + n_diff_data->n_external_pages_sum = 0; - /* there are n_diff_for_this_prefix elements + for (i = 0; i < n_diff_data->n_leaf_pages_to_analyze; i++) { + /* there are n_diff_on_level elements in 'boundaries' and we divide those elements - into n_recs_to_dive_below segments, for example: + into n_leaf_pages_to_analyze segments, for example: - let n_diff_for_this_prefix=100, n_recs_to_dive_below=4, then: + let n_diff_on_level=100, n_leaf_pages_to_analyze=4, then: segment i=0: [0, 24] segment i=1: [25, 49] segment i=2: [50, 74] segment i=3: [75, 99] or - let n_diff_for_this_prefix=1, n_recs_to_dive_below=1, then: + let n_diff_on_level=1, n_leaf_pages_to_analyze=1, then: segment i=0: [0, 0] or - let n_diff_for_this_prefix=2, n_recs_to_dive_below=2, then: + let n_diff_on_level=2, n_leaf_pages_to_analyze=2, then: segment i=0: [0, 0] segment i=1: [1, 1] or - let n_diff_for_this_prefix=13, n_recs_to_dive_below=7, then: + let n_diff_on_level=13, n_leaf_pages_to_analyze=7, then: segment i=0: [0, 0] segment i=1: [1, 2] segment i=2: [3, 4] @@ -1708,9 +1763,12 @@ dict_stats_analyze_index_for_n_prefix( then we select a random record from each segment and dive below it */ - left = n_diff_for_this_prefix * i / n_recs_to_dive_below; - right = n_diff_for_this_prefix * (i + 1) - / n_recs_to_dive_below - 1; + const ib_uint64_t n_diff = n_diff_data->n_diff_on_level; + const ib_uint64_t n_pick + = n_diff_data->n_leaf_pages_to_analyze; + + const ib_uint64_t left = n_diff * i / n_pick; + const ib_uint64_t right = n_diff * (i + 1) / n_pick - 1; ut_a(left <= right); ut_a(right <= last_idx_on_level); @@ -1718,11 +1776,11 @@ dict_stats_analyze_index_for_n_prefix( /* we do not pass (left, right) because we do not want to ask ut_rnd_interval() to work with too big numbers since ib_uint64_t could be bigger than ulint */ - rnd = static_cast<ib_uint64_t>( - ut_rnd_interval(0, static_cast<ulint>(right - left))); + const ulint rnd = ut_rnd_interval( + 0, static_cast<ulint>(right - left)); - dive_below_idx = boundaries->at( - static_cast<unsigned int>(left + rnd)); + const ib_uint64_t dive_below_idx + = 
boundaries->at(static_cast<unsigned>(left + rnd)); #if 0 DEBUG_PRINTF(" %s(): dive below record with index=" @@ -1758,9 +1816,13 @@ dict_stats_analyze_index_for_n_prefix( ut_a(rec_idx == dive_below_idx); ib_uint64_t n_diff_on_leaf_page; + ib_uint64_t n_external_pages; - n_diff_on_leaf_page = dict_stats_analyze_index_below_cur( - btr_pcur_get_btr_cur(&pcur), n_prefix, mtr); + dict_stats_analyze_index_below_cur(btr_pcur_get_btr_cur(&pcur), + n_prefix, + &n_diff_on_leaf_page, + &n_external_pages, + mtr); /* We adjust n_diff_on_leaf_page here to avoid counting one record twice - once as the last on some page and once @@ -1780,37 +1842,86 @@ dict_stats_analyze_index_for_n_prefix( n_diff_on_leaf_page--; } - n_diff_sum_of_all_analyzed_pages += n_diff_on_leaf_page; - } - - /* n_diff_sum_of_all_analyzed_pages can be 0 here if all the leaf - pages sampled contained only delete-marked records. In this case - we should assign 0 to index->stat_n_diff_key_vals[n_prefix - 1], which - the formula below does. */ + n_diff_data->n_diff_all_analyzed_pages += n_diff_on_leaf_page; - /* See REF01 for an explanation of the algorithm */ - index->stat_n_diff_key_vals[n_prefix - 1] - = index->stat_n_leaf_pages - - * n_diff_for_this_prefix - / total_recs_on_level - - * n_diff_sum_of_all_analyzed_pages - / n_recs_to_dive_below; + n_diff_data->n_external_pages_sum += n_external_pages; + } - index->stat_n_sample_sizes[n_prefix - 1] = n_recs_to_dive_below; + btr_pcur_close(&pcur); +} - DEBUG_PRINTF(" %s(): n_diff=" UINT64PF " for n_prefix=%lu " - "(%lu" - " * " UINT64PF " / " UINT64PF - " * " UINT64PF " / " UINT64PF ")\n", - __func__, index->stat_n_diff_key_vals[n_prefix - 1], - n_prefix, - index->stat_n_leaf_pages, - n_diff_for_this_prefix, total_recs_on_level, - n_diff_sum_of_all_analyzed_pages, n_recs_to_dive_below); +/** Set dict_index_t::stat_n_diff_key_vals[] and stat_n_sample_sizes[]. +@param[in] n_diff_data input data to use to derive the results +@param[in,out] index index whose stat_n_diff_key_vals[] to set */ +UNIV_INLINE +void +dict_stats_index_set_n_diff( + const n_diff_data_t* n_diff_data, + dict_index_t* index) +{ + for (ulint n_prefix = dict_index_get_n_unique(index); + n_prefix >= 1; + n_prefix--) { + /* n_diff_all_analyzed_pages can be 0 here if + all the leaf pages sampled contained only + delete-marked records. In this case we should assign + 0 to index->stat_n_diff_key_vals[n_prefix - 1], which + the formula below does. */ + + const n_diff_data_t* data = &n_diff_data[n_prefix - 1]; + + ut_ad(data->n_leaf_pages_to_analyze > 0); + ut_ad(data->n_recs_on_level > 0); + + ulint n_ordinary_leaf_pages; + + if (data->level == 1) { + /* If we know the number of records on level 1, then + this number is the same as the number of pages on + level 0 (leaf). */ + n_ordinary_leaf_pages = data->n_recs_on_level; + } else { + /* If we analyzed D ordinary leaf pages and found E + external pages in total linked from those D ordinary + leaf pages, then this means that the ratio + ordinary/external is D/E. Then the ratio ordinary/total + is D / (D + E). Knowing that the total number of pages + is T (including ordinary and external) then we estimate + that the total number of ordinary leaf pages is + T * D / (D + E). 
*/ + n_ordinary_leaf_pages + = index->stat_n_leaf_pages + * data->n_leaf_pages_to_analyze + / (data->n_leaf_pages_to_analyze + + data->n_external_pages_sum); + } - btr_pcur_close(&pcur); + /* See REF01 for an explanation of the algorithm */ + index->stat_n_diff_key_vals[n_prefix - 1] + = n_ordinary_leaf_pages + + * data->n_diff_on_level + / data->n_recs_on_level + + * data->n_diff_all_analyzed_pages + / data->n_leaf_pages_to_analyze; + + index->stat_n_sample_sizes[n_prefix - 1] + = data->n_leaf_pages_to_analyze; + + DEBUG_PRINTF(" %s(): n_diff=" UINT64PF " for n_prefix=%lu" + " (%lu" + " * " UINT64PF " / " UINT64PF + " * " UINT64PF " / " UINT64PF ")\n", + __func__, + index->stat_n_diff_key_vals[n_prefix - 1], + n_prefix, + index->stat_n_leaf_pages, + data->n_diff_on_level, + data->n_recs_on_level, + data->n_diff_all_analyzed_pages, + data->n_leaf_pages_to_analyze); + } } /*********************************************************************//** @@ -1828,10 +1939,8 @@ dict_stats_analyze_index( bool level_is_analyzed; ulint n_uniq; ulint n_prefix; - ib_uint64_t* n_diff_on_level; ib_uint64_t total_recs; ib_uint64_t total_pages; - boundaries_t* n_diff_boundaries; mtr_t mtr; ulint size; DBUG_ENTER("dict_stats_analyze_index"); @@ -1917,11 +2026,18 @@ dict_stats_analyze_index( DBUG_VOID_RETURN; } - /* set to zero */ - n_diff_on_level = reinterpret_cast<ib_uint64_t*> - (mem_zalloc(n_uniq * sizeof(ib_uint64_t))); + /* For each level that is being scanned in the btree, this contains the + number of different key values for all possible n-column prefixes. */ + ib_uint64_t* n_diff_on_level = new ib_uint64_t[n_uniq]; - n_diff_boundaries = new boundaries_t[n_uniq]; + /* For each level that is being scanned in the btree, this contains the + index of the last record from each group of equal records (when + comparing only the first n columns, n=1..n_uniq). */ + boundaries_t* n_diff_boundaries = new boundaries_t[n_uniq]; + + /* For each n-column prefix this array contains the input data that is + used to calculate dict_index_t::stat_n_diff_key_vals[]. 
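As a rough worked example of the estimate that dict_stats_index_set_n_diff() performs above (all numbers invented purely for illustration): suppose the scan stopped above level 1, the index has stat_n_leaf_pages T = 10000, D = 20 leaf pages were analyzed and they pointed to E = 5 external pages, the stop level held 2000 records with 500 distinct prefixes, and the 20 analyzed leaf pages contained 60 distinct prefixes in total. A self-contained snippet checking the arithmetic, using the same operand order as the code:

#include <cstdint>
#include <cstdio>

int main()
{
        /* Illustrative inputs only - see the lead-in text. */
        const uint64_t T = 10000;       /* index->stat_n_leaf_pages */
        const uint64_t D = 20;          /* n_leaf_pages_to_analyze */
        const uint64_t E = 5;           /* n_external_pages_sum */
        const uint64_t n_recs_on_level = 2000;
        const uint64_t n_diff_on_level = 500;
        const uint64_t n_diff_all_analyzed_pages = 60;

        /* Ordinary (non-external) leaf pages: T * D / (D + E) = 8000. */
        const uint64_t n_ordinary_leaf_pages = T * D / (D + E);

        /* REF01-style estimate, same operand order as above:
           8000 * 500 / 2000 * 60 / 20 = 6000 distinct key values. */
        const uint64_t n_diff = n_ordinary_leaf_pages
                * n_diff_on_level / n_recs_on_level
                * n_diff_all_analyzed_pages / D;

        printf("%llu %llu\n",
               (unsigned long long) n_ordinary_leaf_pages,
               (unsigned long long) n_diff);
        return 0;
}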
*/ + n_diff_data_t* n_diff_data = new n_diff_data_t[n_uniq]; /* total_recs is also used to estimate the number of pages on one level below, so at the start we have 1 page (the root) */ @@ -2033,12 +2149,12 @@ dict_stats_analyze_index( level_is_analyzed = true; - if (n_diff_on_level[n_prefix - 1] - >= N_DIFF_REQUIRED(index) - || level == 1) { - /* we found a good level with many distinct - records or we have reached the last level we - could scan */ + if (level == 1 + || n_diff_on_level[n_prefix - 1] + >= N_DIFF_REQUIRED(index)) { + /* we have reached the last level we could scan + or we found a good level with many distinct + records */ break; } @@ -2051,7 +2167,6 @@ found_level: " distinct records for n_prefix=%lu\n", __func__, level, n_diff_on_level[n_prefix - 1], n_prefix); - /* here we are either on level 1 or the level that we are on contains >= N_DIFF_REQUIRED distinct keys or we did not scan deeper levels because they would contain too many pages */ @@ -2060,20 +2175,47 @@ found_level: ut_ad(level_is_analyzed); + /* if any of these is 0 then there is exactly one page in the + B-tree and it is empty and we should have done full scan and + should not be here */ + ut_ad(total_recs > 0); + ut_ad(n_diff_on_level[n_prefix - 1] > 0); + + ut_ad(N_SAMPLE_PAGES(index) > 0); + + n_diff_data_t* data = &n_diff_data[n_prefix - 1]; + + data->level = level; + + data->n_recs_on_level = total_recs; + + data->n_diff_on_level = n_diff_on_level[n_prefix - 1]; + + data->n_leaf_pages_to_analyze = std::min( + N_SAMPLE_PAGES(index), + n_diff_on_level[n_prefix - 1]); + /* pick some records from this level and dive below them for the given n_prefix */ dict_stats_analyze_index_for_n_prefix( - index, level, total_recs, n_prefix, - n_diff_on_level[n_prefix - 1], - &n_diff_boundaries[n_prefix - 1], &mtr); + index, n_prefix, &n_diff_boundaries[n_prefix - 1], + data, &mtr); } mtr_commit(&mtr); delete[] n_diff_boundaries; - mem_free(n_diff_on_level); + delete[] n_diff_on_level; + + /* n_prefix == 0 means that the above loop did not end up prematurely + due to tree being changed and so n_diff_data[] is set up. 
*/ + if (n_prefix == 0) { + dict_stats_index_set_n_diff(n_diff_data, index); + } + + delete[] n_diff_data; dict_stats_assert_initialized_index(index); DBUG_VOID_RETURN; @@ -2248,17 +2390,21 @@ dict_stats_save_index_stat( "END;", trx); if (ret != DB_SUCCESS) { - char buf_table[MAX_FULL_NAME_LEN]; - char buf_index[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Cannot save index statistics for table " - "%s, index %s, stat name \"%s\": %s\n", - ut_format_name(index->table->name, TRUE, - buf_table, sizeof(buf_table)), - ut_format_name(index->name, FALSE, - buf_index, sizeof(buf_index)), - stat_name, ut_strerr(ret)); + if (innodb_index_stats_not_found == false && + index->stats_error_printed == false) { + char buf_table[MAX_FULL_NAME_LEN]; + char buf_index[MAX_FULL_NAME_LEN]; + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Cannot save index statistics for table " + "%s, index %s, stat name \"%s\": %s\n", + ut_format_name(index->table->name, TRUE, + buf_table, sizeof(buf_table)), + ut_format_name(index->name, FALSE, + buf_index, sizeof(buf_index)), + stat_name, ut_strerr(ret)); + index->stats_error_printed = true; + } } return(ret); @@ -2973,20 +3119,24 @@ dict_stats_update_for_index( } /* else */ - /* Fall back to transient stats since the persistent - storage is not present or is corrupted */ - char buf_table[MAX_FULL_NAME_LEN]; - char buf_index[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Recalculation of persistent statistics " - "requested for table %s index %s but the required " - "persistent statistics storage is not present or is " - "corrupted. Using transient stats instead.\n", - ut_format_name(index->table->name, TRUE, - buf_table, sizeof(buf_table)), - ut_format_name(index->name, FALSE, - buf_index, sizeof(buf_index))); + if (innodb_index_stats_not_found == false && + index->stats_error_printed == false) { + /* Fall back to transient stats since the persistent + storage is not present or is corrupted */ + char buf_table[MAX_FULL_NAME_LEN]; + char buf_index[MAX_FULL_NAME_LEN]; + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Recalculation of persistent statistics " + "requested for table %s index %s but the required " + "persistent statistics storage is not present or is " + "corrupted. Using transient stats instead.\n", + ut_format_name(index->table->name, TRUE, + buf_table, sizeof(buf_table)), + ut_format_name(index->name, FALSE, + buf_index, sizeof(buf_index))); + index->stats_error_printed = false; + } } dict_table_stats_lock(index->table, RW_X_LATCH); @@ -3071,13 +3221,17 @@ dict_stats_update( /* Fall back to transient stats since the persistent storage is not present or is corrupted */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Recalculation of persistent statistics " - "requested for table %s but the required persistent " - "statistics storage is not present or is corrupted. " - "Using transient stats instead.\n", - ut_format_name(table->name, TRUE, buf, sizeof(buf))); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Recalculation of persistent statistics " + "requested for table %s but the required persistent " + "statistics storage is not present or is corrupted. 
" + "Using transient stats instead.\n", + ut_format_name(table->name, TRUE, buf, sizeof(buf))); + table->stats_error_printed = true; + } goto transient; @@ -3121,17 +3275,21 @@ dict_stats_update( /* persistent statistics storage does not exist or is corrupted, calculate the transient stats */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: Fetch of persistent " - "statistics requested for table %s but the " - "required system tables %s and %s are not " - "present or have unexpected structure. " - "Using transient stats instead.\n", - ut_format_name(table->name, TRUE, - buf, sizeof(buf)), - TABLE_STATS_NAME_PRINT, - INDEX_STATS_NAME_PRINT); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Error: Fetch of persistent " + "statistics requested for table %s but the " + "required system tables %s and %s are not " + "present or have unexpected structure. " + "Using transient stats instead.\n", + ut_format_name(table->name, TRUE, + buf, sizeof(buf)), + TABLE_STATS_NAME_PRINT, + INDEX_STATS_NAME_PRINT); + table->stats_error_printed = true; + } goto transient; } @@ -3202,16 +3360,19 @@ dict_stats_update( dict_stats_table_clone_free(t); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error fetching persistent statistics " - "for table %s from %s and %s: %s. " - "Using transient stats method instead.\n", - ut_format_name(table->name, TRUE, buf, - sizeof(buf)), - TABLE_STATS_NAME, - INDEX_STATS_NAME, - ut_strerr(err)); + if (innodb_table_stats_not_found == false && + table->stats_error_printed == false) { + ut_print_timestamp(stderr); + fprintf(stderr, + " InnoDB: Error fetching persistent statistics " + "for table %s from %s and %s: %s. " + "Using transient stats method instead.\n", + ut_format_name(table->name, TRUE, buf, + sizeof(buf)), + TABLE_STATS_NAME, + INDEX_STATS_NAME, + ut_strerr(err)); + } goto transient; } diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index 017e96e6111..8e7758a8a07 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -121,7 +121,7 @@ completes, we decrement the count and return the file node to the LRU-list if the count drops to zero. */ /** When mysqld is run, the default directory "." 
is the mysqld datadir, -but in the MySQL Embedded Server Library and ibbackup it is not the default +but in the MySQL Embedded Server Library and mysqlbackup it is not the default directory, and we must set the base file path explicitly */ UNIV_INTERN const char* fil_path_to_mysql_datadir = "."; @@ -793,7 +793,7 @@ fil_node_open_file( fprintf(stderr, "InnoDB: Error: the size of single-table" " tablespace file %s\n" - "InnoDB: is only "UINT64PF"," + "InnoDB: is only " UINT64PF "," " should be at least %lu!\n", node->name, size_bytes, @@ -1866,6 +1866,9 @@ fil_close_all_files(void) { fil_space_t* space; + if (srv_track_changed_pages && srv_redo_log_thread_started) + os_event_wait(srv_redo_log_tracked_event); + mutex_enter(&fil_system->mutex); space = UT_LIST_GET_FIRST(fil_system->space_list); @@ -1902,6 +1905,9 @@ fil_close_log_files( { fil_space_t* space; + if (srv_track_changed_pages && srv_redo_log_thread_started) + os_event_wait(srv_redo_log_tracked_event); + mutex_enter(&fil_system->mutex); space = UT_LIST_GET_FIRST(fil_system->space_list); @@ -2109,8 +2115,8 @@ fil_check_first_page( } /*******************************************************************//** -Reads the flushed lsn, arch no, and tablespace flag fields from a data -file at database startup. +Reads the flushed lsn, arch no, space_id and tablespace flag fields from +the first page of a data file at database startup. @retval NULL on success, or if innodb_force_recovery is set @return pointer to an error message string */ UNIV_INTERN @@ -2146,7 +2152,13 @@ fil_read_first_page( fil_space_is_page_compressed(orig_space_id) : FALSE); - *flags = fsp_header_get_flags(page); + /* The FSP_HEADER on page 0 is only valid for the first file + in a tablespace. So if this is not the first datafile, leave + *flags and *space_id as they were read from the first file and + do not validate the first page. */ + if (!one_read_already) { + *flags = fsp_header_get_flags(page); + } /* Page is page compressed page, need to decompress, before continue. */ @@ -2155,14 +2167,14 @@ fil_read_first_page( fil_decompress_page(NULL, page, UNIV_PAGE_SIZE, &write_size); } - *space_id = fsp_header_get_space_id(page); - - flushed_lsn = mach_read_from_8(page + FIL_PAGE_FILE_FLUSH_LSN); - if (!one_read_already) { + *space_id = fsp_header_get_space_id(page); + check_msg = fil_check_first_page(page); } + flushed_lsn = mach_read_from_8(page + FIL_PAGE_FILE_FLUSH_LSN); + ut_free(buf); if (check_msg) { @@ -2359,13 +2371,13 @@ exists and the space id in it matches. Replays the create operation if a file at that path does not exist yet. If the database directory for the file to be created does not exist, then we create the directory, too. -Note that ibbackup --apply-log sets fil_path_to_mysql_datadir to point to the -datadir that we should use in replaying the file operations. +Note that mysqlbackup --apply-log sets fil_path_to_mysql_datadir to point to +the datadir that we should use in replaying the file operations. InnoDB recovery does not replay these fully since it always sets the space id -to zero. But ibbackup does replay them. TODO: If remote tablespaces are used, -ibbackup will only create tables in the default directory since MLOG_FILE_CREATE -and MLOG_FILE_CREATE2 only know the tablename, not the path. +to zero. But mysqlbackup does replay them. TODO: If remote tablespaces are +used, mysqlbackup will only create tables in the default directory since +MLOG_FILE_CREATE and MLOG_FILE_CREATE2 only know the tablename, not the path. 
@return end of log record, or NULL if the record was not completely contained between ptr and end_ptr */ @@ -2457,11 +2469,11 @@ fil_op_log_parse_or_replay( } /* Let us try to perform the file operation, if sensible. Note that - ibbackup has at this stage already read in all space id info to the + mysqlbackup has at this stage already read in all space id info to the fil0fil.cc data structures. NOTE that our algorithm is not guaranteed to work correctly if there - were renames of tables during the backup. See ibbackup code for more + were renames of tables during the backup. See mysqlbackup code for more on the problem. */ switch (type) { @@ -2876,12 +2888,12 @@ fil_delete_tablespace( if (err == DB_SUCCESS) { #ifndef UNIV_HOTBACKUP /* Write a log record about the deletion of the .ibd - file, so that ibbackup can replay it in the + file, so that mysqlbackup can replay it in the --apply-log phase. We use a dummy mtr and the familiar log write mechanism. */ mtr_t mtr; - /* When replaying the operation in ibbackup, do not try + /* When replaying the operation in mysqlbackup, do not try to write any log record */ mtr_start(&mtr); @@ -4563,7 +4575,7 @@ will_not_choose: " (< 4 pages 16 kB each),\n" "InnoDB: or the space id in the file header" " is not sensible.\n" - "InnoDB: This can happen in an ibbackup run," + "InnoDB: This can happen in an mysqlbackup run," " and is not dangerous.\n", fsp->filepath, fsp->id, fsp->filepath, size); os_file_close(fsp->file); @@ -4600,7 +4612,7 @@ will_not_choose: "InnoDB: because space %s with the same id\n" "InnoDB: was scanned earlier. This can happen" " if you have renamed tables\n" - "InnoDB: during an ibbackup run.\n", + "InnoDB: during an mysqlbackup run.\n", fsp->filepath, fsp->id, fsp->filepath, space->name); os_file_close(fsp->file); @@ -5332,9 +5344,9 @@ file_extended: #ifdef UNIV_HOTBACKUP /********************************************************************//** Extends all tablespaces to the size stored in the space header. During the -ibbackup --apply-log phase we extended the spaces on-demand so that log records -could be applied, but that may have left spaces still too small compared to -the size stored in the space header. */ +mysqlbackup --apply-log phase we extended the spaces on-demand so that log +records could be applied, but that may have left spaces still too small +compared to the size stored in the space header. 
*/ UNIV_INTERN void fil_extend_tablespaces_to_stored_len(void) @@ -5653,7 +5665,7 @@ _fil_io( ulint mode; fil_space_t* space; fil_node_t* node; - ibool ret; + ibool ret=TRUE; ulint is_log; ulint wake_later; os_offset_t offset; @@ -5878,7 +5890,7 @@ _fil_io( page_compressed, page_compression_level, write_size); #else - /* In ibbackup do normal i/o, not aio */ + /* In mysqlbackup do normal i/o, not aio */ if (type == OS_FILE_READ) { ret = os_file_read(node->handle, buf, offset, len); } else { @@ -5887,7 +5899,6 @@ _fil_io( offset, len); } #endif /* !UNIV_HOTBACKUP */ - ut_a(ret); if (mode == OS_AIO_SYNC) { /* The i/o operation is already completed when we return from @@ -5902,7 +5913,11 @@ _fil_io( ut_ad(fil_validate_skip()); } - return(DB_SUCCESS); + if (!ret) { + return(DB_OUT_OF_FILE_SPACE); + } else { + return(DB_SUCCESS); + } } #ifndef UNIV_HOTBACKUP diff --git a/storage/xtradb/fts/fts0ast.cc b/storage/xtradb/fts/fts0ast.cc index d6c19c0050a..dd48ffee14d 100644 --- a/storage/xtradb/fts/fts0ast.cc +++ b/storage/xtradb/fts/fts0ast.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -83,11 +83,11 @@ UNIV_INTERN fts_ast_node_t* fts_ast_create_node_term( /*=====================*/ - void* arg, /*!< in: ast state instance */ - const char* ptr) /*!< in: ast term string */ + void* arg, /*!< in: ast state instance */ + const fts_ast_string_t* ptr) /*!< in: ast term string */ { fts_ast_state_t* state = static_cast<fts_ast_state_t*>(arg); - ulint len = strlen(ptr); + ulint len = ptr->len; ulint cur_pos = 0; fts_ast_node_t* node = NULL; fts_ast_node_t* node_list = NULL; @@ -101,8 +101,9 @@ fts_ast_create_node_term( cur_len = innobase_mysql_fts_get_token( state->charset, - reinterpret_cast<const byte*>(ptr) + cur_pos, - reinterpret_cast<const byte*>(ptr) + len, &str, &offset); + reinterpret_cast<const byte*>(ptr->str) + cur_pos, + reinterpret_cast<const byte*>(ptr->str) + len, + &str, &offset); if (cur_len == 0) { break; @@ -124,10 +125,8 @@ fts_ast_create_node_term( node->type = FTS_AST_TERM; - node->term.ptr = static_cast<byte*>(ut_malloc( - str.f_len + 1)); - memcpy(node->term.ptr, str.f_str, str.f_len); - node->term.ptr[str.f_len] = '\0'; + node->term.ptr = fts_ast_string_create( + str.f_str, str.f_len); fts_ast_state_add_node( static_cast<fts_ast_state_t*>(arg), node); @@ -160,25 +159,21 @@ UNIV_INTERN fts_ast_node_t* fts_ast_create_node_text( /*=====================*/ - void* arg, /*!< in: ast state instance */ - const char* ptr) /*!< in: ast text string */ + void* arg, /*!< in: ast state instance */ + const fts_ast_string_t* ptr) /*!< in: ast text string */ { - ulint len = strlen(ptr); + ulint len = ptr->len; fts_ast_node_t* node = NULL; + /* Once we come here, the string must have at least 2 quotes "" + around the query string, which could be empty. Also the query + string may contain 0x00 in it, we don't treat it as null-terminated. 
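The reason this patch replaces strdup()/strlen() with an explicit (pointer, length) pair is exactly the point made in the comment above: a length-tracked buffer stays correct even when the payload contains embedded 0x00 bytes. A minimal standalone analogue of that design (plain C++, illustrative names, not the InnoDB fts_ast_string_t API):

#include <cstdlib>
#include <cstring>

/* Length-tracked byte string; the extra NUL terminator is only a
   convenience for printing, not a length marker. */
struct ast_string {
        unsigned char*  str;
        size_t          len;
};

static ast_string*
ast_string_create(const unsigned char* src, size_t len)
{
        ast_string* s = static_cast<ast_string*>(malloc(sizeof(*s)));

        s->str = static_cast<unsigned char*>(malloc(len + 1));
        s->len = len;
        memcpy(s->str, src, len);       /* copies embedded 0x00 bytes too */
        s->str[len] = '\0';

        return(s);
}

static void
ast_string_free(ast_string* s)
{
        if (s != NULL) {
                free(s->str);
                free(s);
        }
}

Calling strlen() on such a buffer would stop at the first embedded zero, which is precisely the truncation the original strdup()-based token handling was exposed to.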
*/ + ut_ad(len >= 2); + ut_ad(ptr->str[0] == '\"' && ptr->str[len - 1] == '\"'); - ut_ad(len >= 1); - - if (len <= 2) { - /* There is a way to directly supply null terminator - in the query string (by using 0x220022) and get here, - and certainly it would not make a valid query text */ - ut_ad(ptr[0] == '\"'); - - if (len == 2) { - ut_ad(ptr[1] == '\"'); - } - + if (len == 2) { + /* If the query string contains nothing except quotes, + it's obviously an invalid query. */ return(NULL); } @@ -188,11 +183,9 @@ fts_ast_create_node_text( len -= 2; node->type = FTS_AST_TEXT; - node->text.ptr = static_cast<byte*>(ut_malloc(len + 1)); - /*!< Skip copying the first quote */ - memcpy(node->text.ptr, ptr + 1, len); - node->text.ptr[len] = 0; + node->text.ptr = fts_ast_string_create( + reinterpret_cast<const byte*>(ptr->str + 1), len); node->text.distance = ULINT_UNDEFINED; fts_ast_state_add_node((fts_ast_state_t*) arg, node); @@ -275,14 +268,14 @@ fts_ast_free_node( switch (node->type) { case FTS_AST_TEXT: if (node->text.ptr) { - ut_free(node->text.ptr); + fts_ast_string_free(node->text.ptr); node->text.ptr = NULL; } break; case FTS_AST_TERM: if (node->term.ptr) { - ut_free(node->term.ptr); + fts_ast_string_free(node->term.ptr); node->term.ptr = NULL; } break; @@ -421,10 +414,10 @@ fts_ast_state_free( fts_ast_node_t* next = node->next_alloc; if (node->type == FTS_AST_TEXT && node->text.ptr) { - ut_free(node->text.ptr); + fts_ast_string_free(node->text.ptr); node->text.ptr = NULL; } else if (node->type == FTS_AST_TERM && node->term.ptr) { - ut_free(node->term.ptr); + fts_ast_string_free(node->term.ptr); node->term.ptr = NULL; } @@ -445,11 +438,13 @@ fts_ast_node_print( { switch (node->type) { case FTS_AST_TEXT: - printf("TEXT: %s\n", node->text.ptr); + printf("TEXT: "); + fts_ast_string_print(node->text.ptr); break; case FTS_AST_TERM: - printf("TERM: %s\n", node->term.ptr); + printf("TERM: "); + fts_ast_string_print(node->term.ptr); break; case FTS_AST_LIST: @@ -628,3 +623,74 @@ fts_ast_visit( return(error); } + +/** +Create an ast string object, with NUL-terminator, so the string +has one more byte than len +@param[in] str pointer to string +@param[in] len length of the string +@return ast string with NUL-terminator */ +UNIV_INTERN +fts_ast_string_t* +fts_ast_string_create( + const byte* str, + ulint len) +{ + fts_ast_string_t* ast_str; + + ut_ad(len > 0); + + ast_str = static_cast<fts_ast_string_t*> + (ut_malloc(sizeof(fts_ast_string_t))); + ast_str->str = static_cast<byte*>(ut_malloc(len + 1)); + + ast_str->len = len; + memcpy(ast_str->str, str, len); + ast_str->str[len] = '\0'; + + return(ast_str); +} + +/** +Free an ast string instance +@param[in,out] ast_str string to free */ +UNIV_INTERN +void +fts_ast_string_free( + fts_ast_string_t* ast_str) +{ + if (ast_str != NULL) { + ut_free(ast_str->str); + ut_free(ast_str); + } +} + +/** +Translate ast string of type FTS_AST_NUMB to unsigned long by strtoul +@param[in] str string to translate +@param[in] base the base +@return translated number */ +UNIV_INTERN +ulint +fts_ast_string_to_ul( + const fts_ast_string_t* ast_str, + int base) +{ + return(strtoul(reinterpret_cast<const char*>(ast_str->str), + NULL, base)); +} + +/** +Print the ast string +@param[in] str string to print */ +UNIV_INTERN +void +fts_ast_string_print( + const fts_ast_string_t* ast_str) +{ + for (ulint i = 0; i < ast_str->len; ++i) { + printf("%c", ast_str->str[i]); + } + + printf("\n"); +} diff --git a/storage/xtradb/fts/fts0blex.cc b/storage/xtradb/fts/fts0blex.cc index 
f83523825d2..7d0acb00a3b 100644 --- a/storage/xtradb/fts/fts0blex.cc +++ b/storage/xtradb/fts/fts0blex.cc @@ -451,7 +451,7 @@ static yyconst flex_int16_t yy_chk[32] = #line 1 "fts0blex.l" /***************************************************************************** -Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -806,7 +806,7 @@ case 3: YY_RULE_SETUP #line 53 "fts0blex.l" { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_NUMB); } @@ -815,7 +815,7 @@ case 4: YY_RULE_SETUP #line 59 "fts0blex.l" { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_TERM); } @@ -824,7 +824,7 @@ case 5: YY_RULE_SETUP #line 65 "fts0blex.l" { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_TEXT); } diff --git a/storage/xtradb/fts/fts0blex.l b/storage/xtradb/fts/fts0blex.l index 6193f0df187..ae6e8ffaa48 100644 --- a/storage/xtradb/fts/fts0blex.l +++ b/storage/xtradb/fts/fts0blex.l @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -51,19 +51,19 @@ this program; if not, write to the Free Software Foundation, Inc., } [0-9]+ { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_NUMB); } [^" \n*()+\-<>~@%]* { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_TERM); } \"[^\"\n]*\" { - val->token = strdup(fts0bget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); return(FTS_TEXT); } diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc index 795f08da966..8884e944dfd 100644 --- a/storage/xtradb/fts/fts0fts.cc +++ b/storage/xtradb/fts/fts0fts.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2014, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -44,6 +44,13 @@ Full Text Search interface /** Column name from the FTS config table */ #define FTS_MAX_CACHE_SIZE_IN_MB "cache_size_in_mb" +/** Verify if a aux table name is a obsolete table +by looking up the key word in the obsolete table names */ +#define FTS_IS_OBSOLETE_AUX_TABLE(table_name) \ + (strstr((table_name), "DOC_ID") != NULL \ + || strstr((table_name), "ADDED") != NULL \ + || strstr((table_name), "STOPWORDS") != NULL) + /** This is maximum FTS cache for each table and would be a configurable variable */ UNIV_INTERN ulong fts_max_cache_size; @@ -601,8 +608,10 @@ fts_cache_init( cache->total_size = 0; + mutex_enter((ib_mutex_t*) &cache->deleted_lock); cache->deleted_doc_ids = ib_vector_create( cache->sync_heap, sizeof(fts_update_t), 4); + mutex_exit((ib_mutex_t*) &cache->deleted_lock); /* Reset the cache data for all the FTS indexes. */ for (i = 0; i < ib_vector_size(cache->indexes); ++i) { @@ -1130,7 +1139,10 @@ fts_cache_clear( cache->sync_heap->arg = NULL; cache->total_size = 0; + + mutex_enter((ib_mutex_t*) &cache->deleted_lock); cache->deleted_doc_ids = NULL; + mutex_exit((ib_mutex_t*) &cache->deleted_lock); } /*********************************************************************//** @@ -1947,10 +1959,15 @@ fts_create_one_index_table( char* table_name = fts_get_table_name(fts_table); dberr_t error; CHARSET_INFO* charset; + ulint flags2 = 0; ut_ad(index->type & DICT_FTS); - new_table = dict_mem_table_create(table_name, 0, 5, 1, 0, false); + if (srv_file_per_table) { + flags2 = DICT_TF2_USE_TABLESPACE; + } + + new_table = dict_mem_table_create(table_name, 0, 5, 1, flags2, false); field = dict_index_get_nth_field(index, 0); charset = innobase_get_fts_charset( @@ -1979,7 +1996,7 @@ fts_create_one_index_table( dict_mem_table_add_col(new_table, heap, "ilist", DATA_BLOB, 4130048, 0); - error = row_create_table_for_mysql(new_table, trx, true); + error = row_create_table_for_mysql(new_table, trx, false); if (error != DB_SUCCESS) { trx->error_state = error; @@ -2244,11 +2261,15 @@ static fts_trx_t* fts_trx_create( /*===========*/ - trx_t* trx) /*!< in: InnoDB transaction */ + trx_t* trx) /*!< in/out: InnoDB + transaction */ { - fts_trx_t* ftt; - ib_alloc_t* heap_alloc; - mem_heap_t* heap = mem_heap_create(1024); + fts_trx_t* ftt; + ib_alloc_t* heap_alloc; + mem_heap_t* heap = mem_heap_create(1024); + trx_named_savept_t* savep; + + ut_a(trx->fts_trx == NULL); ftt = static_cast<fts_trx_t*>(mem_heap_alloc(heap, sizeof(fts_trx_t))); ftt->trx = trx; @@ -2266,6 +2287,14 @@ fts_trx_create( fts_savepoint_create(ftt->savepoints, NULL, NULL); fts_savepoint_create(ftt->last_stmt, NULL, NULL); + /* Copy savepoints that already set before. */ + for (savep = UT_LIST_GET_FIRST(trx->trx_savepoints); + savep != NULL; + savep = UT_LIST_GET_NEXT(trx_savepoints, savep)) { + + fts_savepoint_take(trx, ftt, savep->name); + } + return(ftt); } @@ -4359,6 +4388,7 @@ fts_sync_commit( /* We need to do this within the deleted lock since fts_delete() can attempt to add a deleted doc id to the cache deleted id array. 
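The pattern these hunks establish - take deleted_lock before touching cache->deleted_doc_ids, and treat a NULL vector as "nothing to append" - can be sketched in isolation like this (std::mutex and std::vector stand in for ib_mutex_t and ib_vector_t; the names are illustrative, not the InnoDB API):

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <vector>

struct fts_cache_sketch {
        std::mutex              deleted_lock;
        std::vector<uint64_t>*  deleted_doc_ids;        /* may be NULL */
};

/* Append the cached deleted doc ids to 'out', skipping the copy when
   another thread (e.g. a sync that just did the equivalent of
   fts_cache_clear()) has left the vector unset. */
static void
append_deleted_doc_ids(fts_cache_sketch* cache, std::vector<uint64_t>* out)
{
        std::lock_guard<std::mutex> guard(cache->deleted_lock);

        if (cache->deleted_doc_ids == NULL) {
                return;         /* cache is being cleared/rebuilt */
        }

        out->insert(out->end(),
                    cache->deleted_doc_ids->begin(),
                    cache->deleted_doc_ids->end());
}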
*/ fts_cache_clear(cache); + DEBUG_SYNC_C("fts_deleted_doc_ids_clear"); fts_cache_init(cache); rw_lock_x_unlock(&cache->lock); @@ -5160,6 +5190,12 @@ fts_cache_append_deleted_doc_ids( mutex_enter((ib_mutex_t*) &cache->deleted_lock); + if (cache->deleted_doc_ids == NULL) { + mutex_exit((ib_mutex_t*) &cache->deleted_lock); + return; + } + + for (i = 0; i < ib_vector_size(cache->deleted_doc_ids); ++i) { fts_update_t* update; @@ -5445,16 +5481,15 @@ void fts_savepoint_take( /*===============*/ trx_t* trx, /*!< in: transaction */ + fts_trx_t* fts_trx, /*!< in: fts transaction */ const char* name) /*!< in: savepoint name */ { mem_heap_t* heap; - fts_trx_t* fts_trx; fts_savepoint_t* savepoint; fts_savepoint_t* last_savepoint; ut_a(name != NULL); - fts_trx = trx->fts_trx; heap = fts_trx->heap; /* The implied savepoint must exist. */ @@ -5771,7 +5806,7 @@ fts_savepoint_rollback( ut_a(ib_vector_size(savepoints) > 0); /* Restore the savepoint. */ - fts_savepoint_take(trx, name); + fts_savepoint_take(trx, trx->fts_trx, name); } } @@ -5837,6 +5872,12 @@ fts_is_aux_table_name( } } + /* Could be obsolete common tables. */ + if (strncmp(ptr, "ADDED", len) == 0 + || strncmp(ptr, "STOPWORDS", len) == 0) { + return(true); + } + /* Try and read the index id. */ if (!fts_read_object_id(&table->index_id, ptr)) { return(FALSE); @@ -6433,6 +6474,56 @@ fts_check_and_drop_orphaned_tables( mem_free(path); } + } else { + if (FTS_IS_OBSOLETE_AUX_TABLE(aux_table->name)) { + + /* Current table could be one of the three + obsolete tables, in this case, we should + always try to drop it but not rename it. + This could happen when we try to upgrade + from older server to later one, which doesn't + contain these obsolete tables. */ + drop = true; + + dberr_t err; + trx_t* trx_drop = + trx_allocate_for_background(); + + trx_drop->op_info = "Drop obsolete aux tables"; + trx_drop->dict_operation_lock_mode = RW_X_LATCH; + + trx_start_for_ddl(trx_drop, TRX_DICT_OP_TABLE); + + err = row_drop_table_for_mysql( + aux_table->name, trx_drop, false, true); + + trx_drop->dict_operation_lock_mode = 0; + + if (err != DB_SUCCESS) { + /* We don't need to worry about the + failure, since server would try to + drop it on next restart, even if + the table was broken. */ + + ib_logf(IB_LOG_LEVEL_WARN, + "Fail to drop obsolete aux" + " table '%s', which is" + " harmless. will try to drop" + " it on next restart.", + aux_table->name); + + fts_sql_rollback(trx_drop); + } else { + ib_logf(IB_LOG_LEVEL_INFO, + "Dropped obsolete aux" + " table '%s'.", + aux_table->name); + + fts_sql_commit(trx_drop); + } + + trx_free_for_background(trx_drop); + } } #ifdef _WIN32 if (!drop && rename) { diff --git a/storage/xtradb/fts/fts0opt.cc b/storage/xtradb/fts/fts0opt.cc index a9f3a25530d..910a00cd521 100644 --- a/storage/xtradb/fts/fts0opt.cc +++ b/storage/xtradb/fts/fts0opt.cc @@ -95,7 +95,7 @@ enum fts_msg_type_t { /** Compressed list of words that have been read from FTS INDEX that needs to be optimized. 
*/ struct fts_zip_t { - ulint status; /*!< Status of (un)/zip operation */ + lint status; /*!< Status of (un)/zip operation */ ulint n_words; /*!< Number of words compressed */ diff --git a/storage/xtradb/fts/fts0pars.cc b/storage/xtradb/fts/fts0pars.cc index 83d465b0988..7f0ba4e0c1b 100644 --- a/storage/xtradb/fts/fts0pars.cc +++ b/storage/xtradb/fts/fts0pars.cc @@ -100,6 +100,8 @@ extern int ftserror(const char* p); #define YYPARSE_PARAM state #define YYLEX_PARAM ((fts_ast_state_t*) state)->lexer +#define YYTOKENFREE(token) fts_ast_string_free((token)) + typedef int (*fts_scanner_alt)(YYSTYPE* val, yyscan_t yyscanner); typedef int (*fts_scanner)(); @@ -154,9 +156,9 @@ typedef union YYSTYPE /* Line 293 of yacc.c */ #line 61 "fts0pars.y" - int oper; - char* token; - fts_ast_node_t* node; + int oper; + fts_ast_string_t* token; + fts_ast_node_t* node; @@ -632,6 +634,19 @@ while (YYID (0)) #define YYTERROR 1 #define YYERRCODE 256 +#define YYERRCLEANUP \ +do \ + switch (yylastchar) \ + { \ + case FTS_NUMB: \ + case FTS_TEXT: \ + case FTS_TERM: \ + YYTOKENFREE(yylval.token); \ + break; \ + default: \ + break; \ + } \ +while (YYID (0)) /* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N]. If N is 0, then set CURRENT to the empty location which ends @@ -1169,6 +1184,8 @@ yyparse () { /* The lookahead symbol. */ int yychar; +/* The backup of yychar when there is an error and we're in yyerrlab. */ +int yylastchar; /* The semantic value of the lookahead symbol. */ YYSTYPE yylval; @@ -1524,8 +1541,8 @@ yyreduce: /* Line 1806 of yacc.c */ #line 141 "fts0pars.y" { - fts_ast_term_set_distance((yyvsp[(1) - (3)].node), strtoul((yyvsp[(3) - (3)].token), NULL, 10)); - free((yyvsp[(3) - (3)].token)); + fts_ast_term_set_distance((yyvsp[(1) - (3)].node), fts_ast_string_to_ul((yyvsp[(3) - (3)].token), 10)); + fts_ast_string_free((yyvsp[(3) - (3)].token)); } break; @@ -1557,8 +1574,8 @@ yyreduce: { (yyval.node) = fts_ast_create_node_list(state, (yyvsp[(1) - (4)].node)); fts_ast_add_node((yyval.node), (yyvsp[(2) - (4)].node)); - fts_ast_term_set_distance((yyvsp[(2) - (4)].node), strtoul((yyvsp[(4) - (4)].token), NULL, 10)); - free((yyvsp[(4) - (4)].token)); + fts_ast_term_set_distance((yyvsp[(2) - (4)].node), fts_ast_string_to_ul((yyvsp[(4) - (4)].token), 10)); + fts_ast_string_free((yyvsp[(4) - (4)].token)); } break; @@ -1623,7 +1640,7 @@ yyreduce: #line 191 "fts0pars.y" { (yyval.node) = fts_ast_create_node_term(state, (yyvsp[(1) - (1)].token)); - free((yyvsp[(1) - (1)].token)); + fts_ast_string_free((yyvsp[(1) - (1)].token)); } break; @@ -1633,7 +1650,7 @@ yyreduce: #line 196 "fts0pars.y" { (yyval.node) = fts_ast_create_node_term(state, (yyvsp[(1) - (1)].token)); - free((yyvsp[(1) - (1)].token)); + fts_ast_string_free((yyvsp[(1) - (1)].token)); } break; @@ -1652,7 +1669,7 @@ yyreduce: #line 207 "fts0pars.y" { (yyval.node) = fts_ast_create_node_text(state, (yyvsp[(1) - (1)].token)); - free((yyvsp[(1) - (1)].token)); + fts_ast_string_free((yyvsp[(1) - (1)].token)); } break; @@ -1700,6 +1717,8 @@ yyreduce: | yyerrlab -- here on detecting error | `------------------------------------*/ yyerrlab: + /* Backup yychar, in case we would change it. */ + yylastchar = yychar; /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); @@ -1755,7 +1774,11 @@ yyerrlab: { /* Return failure if at end of input. 
*/ if (yychar == YYEOF) - YYABORT; + { + /* Since we don't need the token, we have to free it first. */ + YYERRCLEANUP; + YYABORT; + } } else { @@ -1812,7 +1835,11 @@ yyerrlab1: /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) - YYABORT; + { + /* Since we don't need the error token, we have to free it first. */ + YYERRCLEANUP; + YYABORT; + } yydestruct ("Error: popping", diff --git a/storage/xtradb/fts/fts0pars.y b/storage/xtradb/fts/fts0pars.y index ff22e9a9873..e48036e82fe 100644 --- a/storage/xtradb/fts/fts0pars.y +++ b/storage/xtradb/fts/fts0pars.y @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -59,9 +59,9 @@ struct fts_lexer_struct { %} %union { - int oper; - char* token; - fts_ast_node_t* node; + int oper; + fts_ast_string_t* token; + fts_ast_node_t* node; }; /* Enable re-entrant parser */ @@ -139,8 +139,8 @@ expr : term { } | text '@' FTS_NUMB { - fts_ast_term_set_distance($1, strtoul($3, NULL, 10)); - free($3); + fts_ast_term_set_distance($1, fts_ast_string_to_ul($3, 10)); + fts_ast_string_free($3); } | prefix term '*' { @@ -157,8 +157,8 @@ expr : term { | prefix text '@' FTS_NUMB { $$ = fts_ast_create_node_list(state, $1); fts_ast_add_node($$, $2); - fts_ast_term_set_distance($2, strtoul($4, NULL, 10)); - free($4); + fts_ast_term_set_distance($2, fts_ast_string_to_ul($4, 10)); + fts_ast_string_free($4); } | prefix text { @@ -190,12 +190,12 @@ prefix : '-' { term : FTS_TERM { $$ = fts_ast_create_node_term(state, $1); - free($1); + fts_ast_string_free($1); } | FTS_NUMB { $$ = fts_ast_create_node_term(state, $1); - free($1); + fts_ast_string_free($1); } /* Ignore leading '*' */ @@ -206,7 +206,7 @@ term : FTS_TERM { text : FTS_TEXT { $$ = fts_ast_create_node_text(state, $1); - free($1); + fts_ast_string_free($1); } ; %% diff --git a/storage/xtradb/fts/fts0que.cc b/storage/xtradb/fts/fts0que.cc index c5c5f954789..beeb31abb9e 100644 --- a/storage/xtradb/fts/fts0que.cc +++ b/storage/xtradb/fts/fts0que.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2800,20 +2800,19 @@ fts_query_get_token( ulint str_len; byte* new_ptr = NULL; - str_len = ut_strlen((char*) node->term.ptr); + str_len = node->term.ptr->len; ut_a(node->type == FTS_AST_TERM); token->f_len = str_len; - token->f_str = node->term.ptr; + token->f_str = node->term.ptr->str; if (node->term.wildcard) { token->f_str = static_cast<byte*>(ut_malloc(str_len + 2)); token->f_len = str_len + 1; - /* Need to copy the NUL character too. 
*/ - memcpy(token->f_str, node->term.ptr, str_len + 1); + memcpy(token->f_str, node->term.ptr->str, str_len); token->f_str[str_len] = '%'; token->f_str[token->f_len] = 0; @@ -2848,8 +2847,8 @@ fts_query_visitor( switch (node->type) { case FTS_AST_TEXT: - token.f_str = node->text.ptr; - token.f_len = ut_strlen((char*) token.f_str); + token.f_str = node->text.ptr->str; + token.f_len = node->text.ptr->len; if (query->oper == FTS_EXIST) { ut_ad(query->intersection == NULL); @@ -2878,8 +2877,8 @@ fts_query_visitor( break; case FTS_AST_TERM: - token.f_str = node->term.ptr; - token.f_len = ut_strlen(reinterpret_cast<char*>(token.f_str)); + token.f_str = node->term.ptr->str; + token.f_len = node->term.ptr->len; /* Add the word to our RB tree that will be used to calculate this terms per document frequency. */ @@ -3191,13 +3190,9 @@ fts_query_read_node( to assign the frequency on search string behalf. */ if (query->cur_node->type == FTS_AST_TERM && query->cur_node->term.wildcard) { - - /* These cast are safe since we only care about the - terminating NUL character as an end of string marker. */ - term.f_len = ut_strlen(reinterpret_cast<char*> - (query->cur_node->term.ptr)); + term.f_len = query->cur_node->term.ptr->len; ut_ad(FTS_MAX_WORD_LEN >= term.f_len); - memcpy(term.f_str, query->cur_node->term.ptr, term.f_len); + memcpy(term.f_str, query->cur_node->term.ptr->str, term.f_len); } else { term.f_len = word->f_len; ut_ad(FTS_MAX_WORD_LEN >= word->f_len); @@ -3507,14 +3502,15 @@ fts_query_prepare_result( doc_freq = rbt_value(fts_doc_freq_t, node); /* Don't put deleted docs into result */ - if (fts_bsearch(array, 0, static_cast<int>(size), doc_freq->doc_id) - >= 0) { + if (fts_bsearch(array, 0, static_cast<int>(size), + doc_freq->doc_id) >= 0) { + /* one less matching doc count */ + --word_freq->doc_count; continue; } ranking.doc_id = doc_freq->doc_id; - ranking.rank = static_cast<fts_rank_t>( - doc_freq->freq * word_freq->idf * word_freq->idf); + ranking.rank = static_cast<fts_rank_t>(doc_freq->freq); ranking.words = NULL; fts_query_add_ranking(query, result->rankings_by_id, @@ -3527,6 +3523,25 @@ fts_query_prepare_result( } } + /* Calculate IDF only after we exclude the deleted items */ + fts_query_calculate_idf(query); + + node = rbt_first(query->word_freqs); + word_freq = rbt_value(fts_word_freq_t, node); + + /* Calculate the ranking for each doc */ + for (node = rbt_first(result->rankings_by_id); + node != NULL; + node = rbt_next(result->rankings_by_id, node)) { + + fts_ranking_t* ranking; + + ranking = rbt_value(fts_ranking_t, node); + + ranking->rank = static_cast<fts_rank_t>( + ranking->rank * word_freq->idf * word_freq->idf); + } + return(result); } @@ -3898,6 +3913,7 @@ fts_query( /* Get the deleted doc ids that are in the cache. */ fts_cache_append_deleted_doc_ids( index->table->fts->cache, query.deleted->doc_ids); + DEBUG_SYNC_C("fts_deleted_doc_ids_append"); /* Sort the vector so that we can do a binary search over the ids. */ ib_vector_sort(query.deleted->doc_ids, fts_update_doc_id_cmp); @@ -3954,7 +3970,8 @@ fts_query( } /* Calculate the inverse document frequency of the terms. 
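What the fts_query_prepare_result() hunk above changes is the order of operations: deleted documents are excluded first (and word_freq->doc_count corrected), IDF is computed only afterwards, and each ranking is then scaled by idf squared. A compressed standalone illustration of that ordering for a single word (the log10-based IDF here is an assumption made for the example, not necessarily InnoDB's exact formula):

#include <cmath>
#include <cstdint>
#include <map>
#include <set>

static std::map<uint64_t, double>
rank_docs(const std::map<uint64_t, double>&     freq_by_doc,    /* term freq per doc */
          const std::set<uint64_t>&             deleted_docs,
          uint64_t                              total_docs)
{
        std::map<uint64_t, double>      rank_by_doc;
        uint64_t                        doc_count = 0;

        /* Step 1: keep only live documents, so doc_count ends up
           counting just the matching documents that are not deleted. */
        for (std::map<uint64_t, double>::const_iterator it = freq_by_doc.begin();
             it != freq_by_doc.end(); ++it) {
                if (deleted_docs.count(it->first) == 0) {
                        rank_by_doc[it->first] = it->second;    /* raw freq */
                        doc_count++;
                }
        }

        /* Step 2: IDF from the corrected doc_count (illustrative formula). */
        const double idf = (doc_count == 0)
                ? 0.0
                : log10(static_cast<double>(total_docs) / doc_count);

        /* Step 3: scale each raw frequency by idf squared, as the patched
           code does with ranking->rank * idf * idf. */
        for (std::map<uint64_t, double>::iterator it = rank_by_doc.begin();
             it != rank_by_doc.end(); ++it) {
                it->second *= idf * idf;
        }

        return rank_by_doc;
}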
*/ - if (query.error == DB_SUCCESS) { + if (query.error == DB_SUCCESS + && query.flags != FTS_OPT_RANKING) { fts_query_calculate_idf(&query); } diff --git a/storage/xtradb/fts/fts0tlex.cc b/storage/xtradb/fts/fts0tlex.cc index ef17ab1acf2..b744fbf0763 100644 --- a/storage/xtradb/fts/fts0tlex.cc +++ b/storage/xtradb/fts/fts0tlex.cc @@ -447,7 +447,7 @@ static yyconst flex_int16_t yy_chk[29] = #line 1 "fts0tlex.l" /***************************************************************************** -Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -802,7 +802,7 @@ case 3: YY_RULE_SETUP #line 54 "fts0tlex.l" { - val->token = strdup(fts0tget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner)); return(FTS_TEXT); } @@ -811,7 +811,7 @@ case 4: YY_RULE_SETUP #line 60 "fts0tlex.l" { - val->token = strdup(fts0tget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner)); return(FTS_TERM); } diff --git a/storage/xtradb/fts/fts0tlex.l b/storage/xtradb/fts/fts0tlex.l index a18c2a55081..4f55a83afe5 100644 --- a/storage/xtradb/fts/fts0tlex.l +++ b/storage/xtradb/fts/fts0tlex.l @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -52,13 +52,13 @@ this program; if not, write to the Free Software Foundation, Inc., } \"[^\"\n]*\" { - val->token = strdup(fts0tget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner)); return(FTS_TEXT); } [^" \n\%]* { - val->token = strdup(fts0tget_text(yyscanner)); + val->token = fts_ast_string_create(reinterpret_cast<const byte*>(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner)); return(FTS_TERM); } diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 1355c8a22a1..e4cfad45838 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -99,6 +99,7 @@ this program; if not, write to the Free Software Foundation, Inc., #include "fts0types.h" #include "row0import.h" #include "row0quiesce.h" +#include "row0mysql.h" #ifdef UNIV_DEBUG #include "trx0purge.h" #endif /* UNIV_DEBUG */ @@ -488,7 +489,7 @@ static PSI_rwlock_info all_innodb_rwlocks[] = { {&trx_purge_latch_key, "trx_purge_latch", 0}, {&index_tree_rw_lock_key, "index_tree_rw_lock", 0}, {&index_online_log_key, "index_online_log", 0}, - {&dict_table_stats_latch_key, "dict_table_stats", 0}, + {&dict_table_stats_key, "dict_table_stats", 0}, {&hash_table_rw_lock_key, "hash_table_locks", 0} }; # endif /* UNIV_PFS_RWLOCK */ @@ -1298,6 +1299,22 @@ innobase_start_trx_and_assign_read_view( THD* thd); /* in: MySQL thread handle of the user for whom the transaction should be committed */ +/*****************************************************************//** +Creates an InnoDB transaction struct for the thd if it 
does not yet have one. +Starts a new InnoDB transaction if a transaction is not yet started. And +clones snapshot for a consistent read from another session, if it has one. +@return 0 */ +static +int +innobase_start_trx_and_clone_read_view( +/*====================================*/ + handlerton* hton, /* in: Innodb handlerton */ + THD* thd, /* in: MySQL thread handle of the + user for whom the transaction should + be committed */ + THD* from_thd); /* in: MySQL thread handle of the + user session from which the consistent + read should be cloned */ /****************************************************************//** Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes the logs, and the name of this function should be innobase_checkpoint. @@ -4034,6 +4051,14 @@ innobase_end( if (innodb_inited) { + THD *thd= current_thd; + if (thd) { // may be UNINSTALL PLUGIN statement + trx_t* trx = thd_to_trx(thd); + if (trx) { + trx_free_for_mysql(trx); + } + } + srv_fast_shutdown = (ulint) innobase_fast_shutdown; innodb_inited = 0; @@ -4224,7 +4249,7 @@ innobase_commit_ordered_2( { DBUG_ENTER("innobase_commit_ordered_2"); - /* We need current binlog position for ibbackup to work. */ + /* We need current binlog position for mysqlbackup to work. */ retry: if (innobase_commit_concurrency > 0) { mysql_mutex_lock(&commit_cond_m); @@ -4327,6 +4352,102 @@ innobase_commit_ordered( } /*****************************************************************//** +Creates an InnoDB transaction struct for the thd if it does not yet have one. +Starts a new InnoDB transaction if a transaction is not yet started. And +clones snapshot for a consistent read from another session, if it has one. +@return 0 */ +static +int +innobase_start_trx_and_clone_read_view( +/*====================================*/ + handlerton* hton, /* in: Innodb handlerton */ + THD* thd, /* in: MySQL thread handle of the + user for whom the transaction should + be committed */ + THD* from_thd) /* in: MySQL thread handle of the + user session from which the consistent + read should be cloned */ +{ + trx_t* trx; + trx_t* from_trx; + + DBUG_ENTER("innobase_start_trx_and_clone_read_view"); + DBUG_ASSERT(hton == innodb_hton_ptr); + + /* Get transaction handle from the donor session */ + + from_trx = thd_to_trx(from_thd); + + if (!from_trx) { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "InnoDB: WITH CONSISTENT SNAPSHOT " + "FROM SESSION was ignored because the " + "specified session does not have an open " + "transaction inside InnoDB."); + + DBUG_RETURN(0); + } + + /* Create a new trx struct for thd, if it does not yet have one */ + + trx = check_trx_exists(thd); + + /* This is just to play safe: release a possible FIFO ticket and + search latch. Since we can potentially reserve the trx_sys->mutex, + we have to release the search system latch first to obey the latching + order. */ + + trx_search_latch_release_if_reserved(trx); + + innobase_srv_conc_force_exit_innodb(trx); + + /* If the transaction is not started yet, start it */ + + trx_start_if_not_started_xa(trx); + + /* Clone the read view from the donor transaction. Do this only if + transaction is using REPEATABLE READ isolation level. 
*/ + trx->isolation_level = innobase_map_isolation_level( + thd_get_trx_isolation(thd)); + + if (trx->isolation_level != TRX_ISO_REPEATABLE_READ) { + + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "InnoDB: WITH CONSISTENT SNAPSHOT " + "was ignored because this phrase " + "can only be used with " + "REPEATABLE READ isolation level."); + } else { + + lock_mutex_enter(); + mutex_enter(&trx_sys->mutex); + trx_mutex_enter(from_trx); + + if (!trx_clone_read_view(trx, from_trx)) { + + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "InnoDB: WITH CONSISTENT SNAPSHOT " + "FROM SESSION was ignored because " + "the target transaction has not been " + "assigned a read view."); + } + + trx_mutex_exit(from_trx); + mutex_exit(&trx_sys->mutex); + lock_mutex_exit(); + } + + /* Set the MySQL flag to mark that there is an active transaction */ + + innobase_register_trx(hton, current_thd, trx); + + DBUG_RETURN(0); +} + +/*****************************************************************//** Commits a transaction in an InnoDB database or marks an SQL statement ended. @return 0 */ @@ -4760,6 +4881,7 @@ innobase_release_savepoint( DBUG_ASSERT(hton == innodb_hton_ptr); trx = check_trx_exists(thd); + trx_start_if_not_started(trx); /* TODO: use provided savepoint data area to store savepoint data */ @@ -4815,7 +4937,7 @@ innobase_savepoint( error = trx_savepoint_for_mysql(trx, name, (ib_int64_t)0); if (error == DB_SUCCESS && trx->fts_trx != NULL) { - fts_savepoint_take(trx, name); + fts_savepoint_take(trx, trx->fts_trx, name); } DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL)); @@ -4850,7 +4972,7 @@ innobase_close_connection( sql_print_warning( "MySQL is closing a connection that has an active " - "InnoDB transaction. "TRX_ID_FMT" row modifications " + "InnoDB transaction. " TRX_ID_FMT " row modifications " "will roll back.", trx->undo_no); } @@ -4943,16 +5065,24 @@ innobase_kill_connection( #endif /* WITH_WSREP */ trx = thd_to_trx(thd); - if (trx) - { - /* Cancel a pending lock request. */ - lock_mutex_enter(); - trx_mutex_enter(trx); - if (trx->lock.wait_lock) - lock_cancel_waiting_and_release(trx->lock.wait_lock); - trx_mutex_exit(trx); - lock_mutex_exit(); - } + if (trx) { + THD *cur = current_thd; + THD *owner = trx->current_lock_mutex_owner; + + if (owner != cur) { + lock_mutex_enter(); + } + trx_mutex_enter(trx); + + /* Cancel a pending lock request. */ + if (trx->lock.wait_lock) + lock_cancel_waiting_and_release(trx->lock.wait_lock); + + trx_mutex_exit(trx); + if (owner != cur) { + lock_mutex_exit(); + } + } DBUG_VOID_RETURN; } @@ -4967,14 +5097,11 @@ handler::Table_flags ha_innobase::table_flags() const /*============================*/ { - THD *thd = ha_thd(); /* Need to use tx_isolation here since table flags is (also) called before prebuilt is inited. 
*/ - ulong const tx_isolation = thd_tx_isolation(thd); + ulong const tx_isolation = thd_tx_isolation(ha_thd()); - if (tx_isolation <= ISO_READ_COMMITTED && - !(tx_isolation == ISO_READ_COMMITTED && - thd_rpl_is_parallel(thd))) { + if (tx_isolation <= ISO_READ_COMMITTED) { return(int_table_flags); } @@ -8528,7 +8655,7 @@ calc_row_difference( if (doc_id < prebuilt->table->fts->cache->next_doc_id) { fprintf(stderr, "InnoDB: FTS Doc ID must be larger than" - " "IB_ID_FMT" for table", + " " IB_ID_FMT " for table", innodb_table->fts->cache->next_doc_id - 1); ut_print_name(stderr, trx, @@ -8540,9 +8667,9 @@ calc_row_difference( - prebuilt->table->fts->cache->next_doc_id) >= FTS_DOC_ID_MAX_STEP) { fprintf(stderr, - "InnoDB: Doc ID "UINT64PF" is too" + "InnoDB: Doc ID " UINT64PF " is too" " big. Its difference with largest" - " Doc ID used "UINT64PF" cannot" + " Doc ID used " UINT64PF " cannot" " exceed or equal to %d\n", doc_id, prebuilt->table->fts->cache->next_doc_id - 1, @@ -9326,6 +9453,29 @@ ha_innobase::innobase_get_index( index = innobase_index_lookup(share, keynr); if (index) { + + if (!key || ut_strcmp(index->name, key->name) != 0) { + fprintf(stderr, "InnoDB: [Error] Index for key no %u" + " mysql name %s , InnoDB name %s for table %s\n", + keynr, key ? key->name : "NULL", + index->name, + prebuilt->table->name); + + for(ulint i=0; i < table->s->keys; i++) { + index = innobase_index_lookup(share, i); + key = table->key_info + keynr; + + if (index) { + + fprintf(stderr, "InnoDB: [Note] Index for key no %u" + " mysql name %s , InnoDB name %s for table %s\n", + keynr, key ? key->name : "NULL", + index->name, + prebuilt->table->name); + } + } + } + ut_a(ut_strcmp(index->name, key->name) == 0); } else { /* Can't find index with keynr in the translation @@ -12959,16 +13109,6 @@ ha_innobase::get_memory_buffer_size() const return(innobase_buffer_pool_size); } -UNIV_INTERN -bool -ha_innobase::is_corrupt() const -{ - if (share->ib_table) - return ((bool)share->ib_table->is_corrupt); - else - return (FALSE); -} - /*********************************************************************//** Calculates the key number used inside MySQL for an Innobase index. We will first check the "index translation table" for a match of the index to get @@ -13446,6 +13586,35 @@ ha_innobase::info_low( break; } + DBUG_EXECUTE_IF("ib_ha_innodb_stat_not_initialized", + index->table->stat_initialized = FALSE;); + + if (!ib_table->stat_initialized || + (index->table != ib_table || + !index->table->stat_initialized)) { + fprintf(stderr, + "InnoDB: Warning: Index %s points to table %s" + " and ib_table %s statistics is initialized %d " + " but index table %s initialized %d " + " mysql table is %s. Have you mixed " + "up .frm files from different " + "installations? 
" + "See " REFMAN + "innodb-troubleshooting.html\n", + index->name, + index->table->name, + ib_table->name, + ib_table->stat_initialized, + index->table->name, + index->table->stat_initialized, + table->s->table_name.str + ); + + /* This is better than + assert on below function */ + dict_stats_init(index->table); + } + rec_per_key = innodb_rec_per_key( index, j, stats.records); @@ -14139,9 +14308,13 @@ ha_innobase::get_foreign_key_list( mutex_enter(&(dict_sys->mutex)); - for (foreign = UT_LIST_GET_FIRST(prebuilt->table->foreign_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::iterator it + = prebuilt->table->foreign_set.begin(); + it != prebuilt->table->foreign_set.end(); + ++it) { + + foreign = *it; + pf_key_info = get_foreign_key_info(thd, foreign); if (pf_key_info) { f_key_list->push_back(pf_key_info); @@ -14177,9 +14350,13 @@ ha_innobase::get_parent_foreign_key_list( mutex_enter(&(dict_sys->mutex)); - for (foreign = UT_LIST_GET_FIRST(prebuilt->table->referenced_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it + = prebuilt->table->referenced_set.begin(); + it != prebuilt->table->referenced_set.end(); + ++it) { + + foreign = *it; + pf_key_info = get_foreign_key_info(thd, foreign); if (pf_key_info) { f_key_list->push_back(pf_key_info); @@ -14212,8 +14389,8 @@ ha_innobase::can_switch_engines(void) "determining if there are foreign key constraints"; row_mysql_freeze_data_dictionary(prebuilt->trx); - can_switch = !UT_LIST_GET_FIRST(prebuilt->table->referenced_list) - && !UT_LIST_GET_FIRST(prebuilt->table->foreign_list); + can_switch = prebuilt->table->referenced_set.empty() + && prebuilt->table->foreign_set.empty(); row_mysql_unfreeze_data_dictionary(prebuilt->trx); prebuilt->trx->op_info = ""; @@ -16041,7 +16218,7 @@ innobase_xa_prepare( || !thd_test_options( thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { - /* For ibbackup to work the order of transactions in binlog + /* For mysqlbackup to work the order of transactions in binlog and InnoDB must be the same. Consider the situation thread1> prepare; write to binlog; ... 
@@ -19816,8 +19993,14 @@ static MYSQL_SYSVAR_ULONG(saved_page_number_debug, srv_saved_page_number_debug, PLUGIN_VAR_OPCMDARG, "An InnoDB page number.", NULL, innodb_save_page_no, 0, 0, UINT_MAX32, 0); + #endif /* UNIV_DEBUG */ +static MYSQL_SYSVAR_UINT(simulate_comp_failures, srv_simulate_comp_failures, + PLUGIN_VAR_NOCMDARG, + "Simulate compression failures.", + NULL, NULL, 0, 0, 99, 0); + static MYSQL_SYSVAR_BOOL(force_primary_key, srv_force_primary_key, PLUGIN_VAR_OPCMDARG, @@ -20097,6 +20280,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(fil_make_page_dirty_debug), MYSQL_SYSVAR(saved_page_number_debug), #endif /* UNIV_DEBUG */ + MYSQL_SYSVAR(simulate_comp_failures), MYSQL_SYSVAR(corrupt_table_action), MYSQL_SYSVAR(fake_changes), MYSQL_SYSVAR(locking_fake_changes), @@ -20106,7 +20290,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(compression_algorithm), MYSQL_SYSVAR(mtflush_threads), MYSQL_SYSVAR(use_mtflush), - NULL }; @@ -20385,7 +20568,7 @@ ib_senderrf( va_start(args, code); - myf l; + myf l=0; switch(level) { case IB_LOG_LEVEL_INFO: diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h index 0c76c286030..2d70c67d3bf 100644 --- a/storage/xtradb/handler/ha_innodb.h +++ b/storage/xtradb/handler/ha_innodb.h @@ -151,7 +151,6 @@ class ha_innobase: public handler double read_time(uint index, uint ranges, ha_rows rows); longlong get_memory_buffer_size() const; my_bool is_fake_change_enabled(THD *thd); - bool is_corrupt() const; int write_row(uchar * buf); int update_row(const uchar * old_data, uchar * new_data); diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index 8097fd01e3f..2d7fd259cb1 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -612,15 +612,9 @@ innobase_init_foreign( /* Check if any existing foreign key has the same id, this is needed only if user supplies the constraint name */ - for (const dict_foreign_t* existing_foreign - = UT_LIST_GET_FIRST(table->foreign_list); - existing_foreign != 0; - existing_foreign = UT_LIST_GET_NEXT( - foreign_list, existing_foreign)) { - - if (ut_strcmp(existing_foreign->id, foreign->id) == 0) { - return(false); - } + if (table->foreign_set.find(foreign) + != table->foreign_set.end()) { + return(false); } } @@ -2258,14 +2252,18 @@ innobase_check_foreigns_low( const char* col_name, bool drop) { + dict_foreign_t* foreign; ut_ad(mutex_own(&dict_sys->mutex)); /* Check if any FOREIGN KEY constraints are defined on this column. */ - for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST( - user_table->foreign_list); - foreign; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + + for (dict_foreign_set::iterator it = user_table->foreign_set.begin(); + it != user_table->foreign_set.end(); + ++it) { + + foreign = *it; + if (!drop && !(foreign->type & (DICT_FOREIGN_ON_DELETE_SET_NULL | DICT_FOREIGN_ON_UPDATE_SET_NULL))) { @@ -2297,10 +2295,13 @@ innobase_check_foreigns_low( /* Check if any FOREIGN KEY constraints in other tables are referring to the column that is being dropped. 
*/ - for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST( - user_table->referenced_list); - foreign; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it + = user_table->referenced_set.begin(); + it != user_table->referenced_set.end(); + ++it) { + + foreign = *it; + if (innobase_dropping_foreign(foreign, drop_fk, n_drop_fk)) { continue; } @@ -3188,6 +3189,9 @@ error_handling: case DB_DUPLICATE_KEY: my_error(ER_DUP_KEY, MYF(0), "SYS_INDEXES"); break; + case DB_OUT_OF_FILE_SPACE: + my_error_innodb(error, table_name, user_table->flags); + break; default: my_error_innodb(error, table_name, user_table->flags); } @@ -3648,11 +3652,12 @@ check_if_ok_to_rename: continue; } - for (dict_foreign_t* foreign = UT_LIST_GET_FIRST( - prebuilt->table->foreign_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT( - foreign_list, foreign)) { + for (dict_foreign_set::iterator it + = prebuilt->table->foreign_set.begin(); + it != prebuilt->table->foreign_set.end(); + ++it) { + + dict_foreign_t* foreign = *it; const char* fid = strchr(foreign->id, '/'); DBUG_ASSERT(fid); @@ -4498,10 +4503,12 @@ err_exit: rename_foreign: trx->op_info = "renaming column in SYS_FOREIGN_COLS"; - for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST( - user_table->foreign_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) { + for (dict_foreign_set::iterator it = user_table->foreign_set.begin(); + it != user_table->foreign_set.end(); + ++it) { + + dict_foreign_t* foreign = *it; + for (unsigned i = 0; i < foreign->n_fields; i++) { if (strcmp(foreign->foreign_col_names[i], from)) { continue; @@ -4531,10 +4538,12 @@ rename_foreign: } } - for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST( - user_table->referenced_list); - foreign != NULL; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { + for (dict_foreign_set::iterator it + = user_table->referenced_set.begin(); + it != user_table->referenced_set.end(); + ++it) { + + dict_foreign_t* foreign = *it; for (unsigned i = 0; i < foreign->n_fields; i++) { if (strcmp(foreign->referenced_col_names[i], from)) { continue; @@ -4858,8 +4867,8 @@ innobase_update_foreign_cache( column names. No need to pass col_names or to drop constraints from the data dictionary cache. */ DBUG_ASSERT(!ctx->col_names); - DBUG_ASSERT(UT_LIST_GET_LEN(user_table->foreign_list) == 0); - DBUG_ASSERT(UT_LIST_GET_LEN(user_table->referenced_list) == 0); + DBUG_ASSERT(user_table->foreign_set.empty()); + DBUG_ASSERT(user_table->referenced_set.empty()); user_table = ctx->new_table; } else { /* Drop the foreign key constraints if the diff --git a/storage/xtradb/handler/i_s.cc b/storage/xtradb/handler/i_s.cc index b53c3ad536e..a0ba8c5f1a1 100644 --- a/storage/xtradb/handler/i_s.cc +++ b/storage/xtradb/handler/i_s.cc @@ -8287,6 +8287,15 @@ i_s_innodb_changed_pages_fill( limit_lsn_range_from_condition(table, cond, &min_lsn, &max_lsn); } + + /* If the log tracker is running and our max_lsn > current tracked LSN, + cap the max lsn so that we don't try to read any partial runs as the + tracked LSN advances. 
*/ + if (srv_track_changed_pages) { + ib_uint64_t tracked_lsn = log_get_tracked_lsn(); + if (max_lsn > tracked_lsn) + max_lsn = tracked_lsn; + } if (!log_online_bitmap_iterator_init(&i, min_lsn, max_lsn)) { my_error(ER_CANT_FIND_SYSTEM_REC, MYF(0)); diff --git a/storage/xtradb/include/btr0cur.h b/storage/xtradb/include/btr0cur.h index 8a35cb1a3da..4ed66e76fe0 100644 --- a/storage/xtradb/include/btr0cur.h +++ b/storage/xtradb/include/btr0cur.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -582,6 +582,17 @@ void btr_estimate_number_of_different_key_vals( /*======================================*/ dict_index_t* index); /*!< in: index */ + +/** Gets the externally stored size of a record, in units of a database page. +@param[in] rec record +@param[in] offsets array returned by rec_get_offsets() +@return externally stored part, in units of a database page */ + +ulint +btr_rec_get_externally_stored_len( + const rec_t* rec, + const ulint* offsets); + /*******************************************************************//** Marks non-updated off-page fields as disowned by this record. The ownership must be transferred to the updated record which is inserted elsewhere in the diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h index d5cc03d8c86..84db9cfcf2b 100644 --- a/storage/xtradb/include/buf0buf.h +++ b/storage/xtradb/include/buf0buf.h @@ -273,6 +273,15 @@ buf_pool_get_oldest_modification(void); /*==================================*/ /********************************************************************//** +Gets the smallest oldest_modification lsn for any page in the pool. Returns +zero if all modified pages have been flushed to disk. +@return oldest modification in pool, zero if none */ +UNIV_INTERN +lsn_t +buf_pool_get_oldest_modification_peek(void); +/*=======================================*/ + +/********************************************************************//** Allocates a buf_page_t descriptor. This function must succeed. In case of failure we assert in this function. */ UNIV_INLINE @@ -437,7 +446,7 @@ buf_page_create( mtr_t* mtr); /*!< in: mini-transaction handle */ #else /* !UNIV_HOTBACKUP */ /********************************************************************//** -Inits a page to the buffer buf_pool, for use in ibbackup --restore. */ +Inits a page to the buffer buf_pool, for use in mysqlbackup --restore. 
*/ UNIV_INTERN void buf_page_init_for_backup_restore( diff --git a/storage/xtradb/include/buf0buf.ic b/storage/xtradb/include/buf0buf.ic index c49061621f3..10f0e02cb8f 100644 --- a/storage/xtradb/include/buf0buf.ic +++ b/storage/xtradb/include/buf0buf.ic @@ -662,6 +662,11 @@ buf_page_get_block( buf_page_t* bpage) /*!< in: control block, or NULL */ { if (bpage != NULL) { +#ifdef UNIV_DEBUG + buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); + ut_ad(buf_page_hash_lock_held_s_or_x(buf_pool, bpage) + || mutex_own(&buf_pool->LRU_list_mutex)); +#endif ut_ad(buf_page_in_file(bpage)); if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) { @@ -1176,12 +1181,6 @@ buf_page_hash_get_low( ut_a(buf_page_in_file(bpage)); ut_ad(bpage->in_page_hash); ut_ad(!bpage->in_zip_hash); -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no padding in - buf_page_t. On other systems, Valgrind could complain - about uninitialized pad bytes. */ - UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage); -#endif } return(bpage); diff --git a/storage/xtradb/include/dict0crea.h b/storage/xtradb/include/dict0crea.h index 6ec1079957b..67eab9058da 100644 --- a/storage/xtradb/include/dict0crea.h +++ b/storage/xtradb/include/dict0crea.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -124,28 +124,24 @@ dict_create_add_foreign_id( const char* name, /*!< in: table name */ dict_foreign_t* foreign)/*!< in/out: foreign key */ __attribute__((nonnull)); -/********************************************************************//** -Adds foreign key definitions to data dictionary tables in the database. We -look at table->foreign_list, and also generate names to constraints that were -not named by the user. A generated constraint has a name of the format -databasename/tablename_ibfk_NUMBER, where the numbers start from 1, and are -given locally for this table, that is, the number is not global, as in the -old format constraints < 4.0.18 it used to be. -@return error code or DB_SUCCESS */ + +/** Adds the given set of foreign key objects to the dictionary tables +in the database. This function does not modify the dictionary cache. The +caller must ensure that all foreign key objects contain a valid constraint +name in foreign->id. 
+@param[in] local_fk_set set of foreign key objects, to be added to +the dictionary tables +@param[in] table table to which the foreign key objects in +local_fk_set belong to +@param[in,out] trx transaction +@return error code or DB_SUCCESS */ UNIV_INTERN dberr_t dict_create_add_foreigns_to_dictionary( /*===================================*/ - ulint start_id,/*!< in: if we are actually doing ALTER TABLE - ADD CONSTRAINT, we want to generate constraint - numbers which are bigger than in the table so - far; we number the constraints from - start_id + 1 up; start_id should be set to 0 if - we are creating a new table, or if the table - so far has no constraints for which the name - was generated here */ - dict_table_t* table, /*!< in: table */ - trx_t* trx) /*!< in: transaction */ + const dict_foreign_set& local_fk_set, + const dict_table_t* table, + trx_t* trx) __attribute__((nonnull, warn_unused_result)); /****************************************************************//** Creates the tablespaces and datafiles system tables inside InnoDB diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h index 52ac5eee86b..78503d954ba 100644 --- a/storage/xtradb/include/dict0dict.h +++ b/storage/xtradb/include/dict0dict.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. Copyright (c) 2013, SkySQL Ab. All Rights Reserved. @@ -46,6 +46,9 @@ Created 1/8/1996 Heikki Tuuri #include "fsp0fsp.h" #include "dict0pagecompress.h" +extern bool innodb_table_stats_not_found; +extern bool innodb_index_stats_not_found; + #ifndef UNIV_HOTBACKUP # include "sync0sync.h" # include "sync0rw.h" @@ -1447,6 +1450,28 @@ UNIV_INTERN void dict_mutex_exit_for_mysql(void); /*===========================*/ + +/** Create a dict_table_t's stats latch or delay for lazy creation. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to create +@param[in] enabled if false then the latch is disabled +and dict_table_stats_lock()/unlock() become noop on this table. */ + +void +dict_table_stats_latch_create( + dict_table_t* table, + bool enabled); + +/** Destroy a dict_table_t's stats latch. +This function is only called from either single threaded environment +or from a thread that has not shared the table object with other threads. +@param[in,out] table table whose stats latch to destroy */ + +void +dict_table_stats_latch_destroy( + dict_table_t* table); + /**********************************************************************//** Lock the appropriate latch to protect a given table's statistics. table->id is used to pick the corresponding latch from a global array of diff --git a/storage/xtradb/include/dict0mem.h b/storage/xtradb/include/dict0mem.h index 8de9206cb81..5bea2334131 100644 --- a/storage/xtradb/include/dict0mem.h +++ b/storage/xtradb/include/dict0mem.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. Copyright (c) 2013, SkySQL Ab. All Rights Reserved. 
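dict_table_stats_latch_create() and dict_table_stats_latch_destroy(), declared above, let the per-table statistics latch be created lazily and, when created with enabled=false, turn dict_table_stats_lock()/unlock() into no-ops for that table. Below is a minimal sketch of that contract only; the names (table_t, table_stats_lock, ...) are illustrative, a plain std::mutex stands in for the rw-lock, and the real code additionally tracks the creation state with the os_once helper added elsewhere in this commit:

#include <cstddef>
#include <mutex>

struct table_t {			/* stand-in for dict_table_t */
	std::mutex*	stats_latch;	/* may legitimately stay NULL */
};

/* Counterpart of dict_table_stats_latch_create(); per the comments above
it is only called while the table object is still private to one thread. */
static void table_stats_latch_create(table_t* table, bool enabled)
{
	table->stats_latch = enabled ? new std::mutex() : NULL;
}

/* Counterpart of dict_table_stats_latch_destroy(). */
static void table_stats_latch_destroy(table_t* table)
{
	delete table->stats_latch;
	table->stats_latch = NULL;
}

/* With a disabled latch the lock/unlock pair degenerates to a no-op,
which is what the new 'enabled' flag buys callers that never contend
on the statistics. */
static void table_stats_lock(table_t* table)
{
	if (table->stats_latch != NULL) {
		table->stats_latch->lock();
	}
}

static void table_stats_unlock(table_t* table)
{
	if (table->stats_latch != NULL) {
		table->stats_latch->unlock();
	}
}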
@@ -50,6 +50,9 @@ Created 1/8/1996 Heikki Tuuri #include "hash0hash.h" #include "trx0types.h" #include "fts0fts.h" +#include "os0once.h" +#include <set> +#include <algorithm> /* Forward declaration. */ struct ib_rbt_t; @@ -695,6 +698,9 @@ struct dict_index_t{ ulint stat_n_leaf_pages; /*!< approximate number of leaf pages in the index tree */ + bool stats_error_printed; + /*!< has persistent statistics error printed + for this index ? */ /* @} */ /** Statistics for defragmentation, these numbers are estimations and could be very inaccurate at certain times, e.g. right after restart, @@ -790,12 +796,106 @@ struct dict_foreign_t{ does not generate new indexes implicitly */ dict_index_t* referenced_index;/*!< referenced index */ - UT_LIST_NODE_T(dict_foreign_t) - foreign_list; /*!< list node for foreign keys of the - table */ - UT_LIST_NODE_T(dict_foreign_t) - referenced_list;/*!< list node for referenced - keys of the table */ +}; + +/** Compare two dict_foreign_t objects using their ids. Used in the ordering +of dict_table_t::foreign_set and dict_table_t::referenced_set. It returns +true if the first argument is considered to go before the second in the +strict weak ordering it defines, and false otherwise. */ +struct dict_foreign_compare { + + bool operator()( + const dict_foreign_t* lhs, + const dict_foreign_t* rhs) const + { + return(ut_strcmp(lhs->id, rhs->id) < 0); + } +}; + +/** A function object to find a foreign key with the given index as the +referenced index. Return the foreign key with matching criteria or NULL */ +struct dict_foreign_with_index { + + dict_foreign_with_index(const dict_index_t* index) + : m_index(index) + {} + + bool operator()(const dict_foreign_t* foreign) const + { + return(foreign->referenced_index == m_index); + } + + const dict_index_t* m_index; +}; + +/* A function object to check if the foreign constraint is between different +tables. Returns true if foreign key constraint is between different tables, +false otherwise. */ +struct dict_foreign_different_tables { + + bool operator()(const dict_foreign_t* foreign) const + { + return(foreign->foreign_table != foreign->referenced_table); + } +}; + +/** A function object to check if the foreign key constraint has the same +name as given. If the full name of the foreign key constraint doesn't match, +then, check if removing the database name from the foreign key constraint +matches. Return true if it matches, false otherwise. */ +struct dict_foreign_matches_id { + + dict_foreign_matches_id(const char* id) + : m_id(id) + {} + + bool operator()(const dict_foreign_t* foreign) const + { + if (0 == innobase_strcasecmp(foreign->id, m_id)) { + return(true); + } + if (const char* pos = strchr(foreign->id, '/')) { + if (0 == innobase_strcasecmp(m_id, pos + 1)) { + return(true); + } + } + return(false); + } + + const char* m_id; +}; + +typedef std::set<dict_foreign_t*, dict_foreign_compare> dict_foreign_set; + +/*********************************************************************//** +Frees a foreign key struct. */ +inline +void +dict_foreign_free( +/*==============*/ + dict_foreign_t* foreign) /*!< in, own: foreign key struct */ +{ + mem_heap_free(foreign->heap); +} + +/** The destructor will free all the foreign key constraints in the set +by calling dict_foreign_free() on each of the foreign key constraints. +This is used to free the allocated memory when a local set goes out +of scope. 
*/ +struct dict_foreign_set_free { + + dict_foreign_set_free(const dict_foreign_set& foreign_set) + : m_foreign_set(foreign_set) + {} + + ~dict_foreign_set_free() + { + std::for_each(m_foreign_set.begin(), + m_foreign_set.end(), + dict_foreign_free); + } + + const dict_foreign_set& m_foreign_set; }; /** The flags for ON_UPDATE and ON_DELETE can be ORed; the default is that @@ -817,6 +917,8 @@ the table, DML from memcached will be blocked. */ /** Data structure for a database table. Most fields will be initialized to 0, NULL or FALSE in dict_mem_table_create(). */ struct dict_table_t{ + + table_id_t id; /*!< id of the table */ mem_heap_t* heap; /*!< memory heap */ char* name; /*!< table name */ @@ -871,13 +973,16 @@ struct dict_table_t{ hash_node_t id_hash; /*!< hash chain node */ UT_LIST_BASE_NODE_T(dict_index_t) indexes; /*!< list of indexes of the table */ - UT_LIST_BASE_NODE_T(dict_foreign_t) - foreign_list;/*!< list of foreign key constraints + + dict_foreign_set foreign_set; + /*!< set of foreign key constraints in the table; these refer to columns in other tables */ - UT_LIST_BASE_NODE_T(dict_foreign_t) - referenced_list;/*!< list of foreign key constraints + + dict_foreign_set referenced_set; + /*!< list of foreign key constraints which refer to this table */ + UT_LIST_NODE_T(dict_table_t) table_LRU; /*!< node of the LRU list of tables */ unsigned fk_max_recusive_level:8; @@ -927,6 +1032,10 @@ struct dict_table_t{ initialized in dict_table_add_to_cache() */ /** Statistics for query optimization */ /* @{ */ + + volatile os_once::state_t stats_latch_created; + /*!< Creation state of 'stats_latch'. */ + rw_lock_t* stats_latch; /*!< this latch protects: dict_table_t::stat_initialized dict_table_t::stat_n_rows (*) @@ -1036,6 +1145,9 @@ struct dict_table_t{ /*!< see BG_STAT_* above. Writes are covered by dict_sys->mutex. Dirty reads are possible. */ + bool stats_error_printed; + /*!< Has persistent stats error beein + already printed for this table ? */ /* @} */ /*----------------------*/ /**!< The following fields are used by the @@ -1116,6 +1228,19 @@ struct dict_table_t{ #endif /* UNIV_DEBUG */ }; +/** A function object to add the foreign key constraint to the referenced set +of the referenced table, if it exists in the dictionary cache. */ +struct dict_foreign_add_to_referenced_table { + void operator()(dict_foreign_t* foreign) const + { + if (dict_table_t* table = foreign->referenced_table) { + std::pair<dict_foreign_set::iterator, bool> ret + = table->referenced_set.insert(foreign); + ut_a(ret.second); + } + } +}; + #ifndef UNIV_NONINL #include "dict0mem.ic" #endif diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index 3960eef5d7e..c2d113bdc1f 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -50,7 +50,7 @@ struct fil_space_t; typedef std::list<const char*> space_name_list_t; /** When mysqld is run, the default directory "." is the mysqld datadir, -but in the MySQL Embedded Server Library and ibbackup it is not the default +but in the MySQL Embedded Server Library and mysqlbackup it is not the default directory, and we must set the base file path explicitly */ extern const char* fil_path_to_mysql_datadir; @@ -456,8 +456,8 @@ exists and the space id in it matches. Replays the create operation if a file at that path does not exist yet. If the database directory for the file to be created does not exist, then we create the directory, too. 
-Note that ibbackup --apply-log sets fil_path_to_mysql_datadir to point to the -datadir that we should use in replaying the file operations. +Note that mysqlbackup --apply-log sets fil_path_to_mysql_datadir to point to +the datadir that we should use in replaying the file operations. @return end of log record, or NULL if the record was not completely contained between ptr and end_ptr */ UNIV_INTERN @@ -710,9 +710,9 @@ fil_space_for_table_exists_in_mem( #else /* !UNIV_HOTBACKUP */ /********************************************************************//** Extends all tablespaces to the size stored in the space header. During the -ibbackup --apply-log phase we extended the spaces on-demand so that log records -could be appllied, but that may have left spaces still too small compared to -the size stored in the space header. */ +mysqlbackup --apply-log phase we extended the spaces on-demand so that log +records could be appllied, but that may have left spaces still too small +compared to the size stored in the space header. */ UNIV_INTERN void fil_extend_tablespaces_to_stored_len(void); diff --git a/storage/xtradb/include/fts0ast.h b/storage/xtradb/include/fts0ast.h index c0aac6d8e4c..50ee587e282 100644 --- a/storage/xtradb/include/fts0ast.h +++ b/storage/xtradb/include/fts0ast.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -76,6 +76,7 @@ enum fts_ast_oper_t { struct fts_lexer_t; struct fts_ast_node_t; struct fts_ast_state_t; +struct fts_ast_string_t; typedef dberr_t (*fts_ast_callback)(fts_ast_oper_t, fts_ast_node_t*, void*); @@ -101,16 +102,16 @@ extern fts_ast_node_t* fts_ast_create_node_term( /*=====================*/ - void* arg, /*!< in: ast state */ - const char* ptr); /*!< in: term string */ + void* arg, /*!< in: ast state */ + const fts_ast_string_t* ptr); /*!< in: term string */ /******************************************************************** Create an AST text node */ extern fts_ast_node_t* fts_ast_create_node_text( /*=====================*/ - void* arg, /*!< in: ast state */ - const char* ptr); /*!< in: text string */ + void* arg, /*!< in: ast state */ + const fts_ast_string_t* ptr); /*!< in: text string */ /******************************************************************** Create an AST expr list node */ extern @@ -233,16 +234,66 @@ fts_lexer_free( free */ __attribute__((nonnull)); +/** +Create an ast string object, with NUL-terminator, so the string +has one more byte than len +@param[in] str pointer to string +@param[in] len length of the string +@return ast string with NUL-terminator */ +UNIV_INTERN +fts_ast_string_t* +fts_ast_string_create( + const byte* str, + ulint len); + +/** +Free an ast string instance +@param[in,out] ast_str string to free */ +UNIV_INTERN +void +fts_ast_string_free( + fts_ast_string_t* ast_str); + +/** +Translate ast string of type FTS_AST_NUMB to unsigned long by strtoul +@param[in] str string to translate +@param[in] base the base +@return translated number */ +UNIV_INTERN +ulint +fts_ast_string_to_ul( + const fts_ast_string_t* ast_str, + int base); + +/** +Print the ast string +@param[in] str string to print */ +UNIV_INTERN +void +fts_ast_string_print( + const fts_ast_string_t* ast_str); 
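The fts_ast_string_t API declared above replaces the strdup()/strlen() handling of parsed full-text tokens seen earlier in the fts0pars.y and fts0tlex.l hunks: a token now carries an explicit length next to its bytes, and the trailing NUL is kept only for convenience. A small stand-alone sketch of the idea, with illustrative names rather than the InnoDB functions:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ast_string_t {			/* stand-in for fts_ast_string_t */
	unsigned char*	str;		/* bytes, NUL-terminated for convenience */
	size_t		len;		/* explicit length, independent of strlen() */
};

/* Allocate len + 1 bytes and append a NUL, like fts_ast_string_create(). */
static ast_string_t* ast_string_create(const unsigned char* str, size_t len)
{
	ast_string_t*	s = static_cast<ast_string_t*>(malloc(sizeof(*s)));

	s->str = static_cast<unsigned char*>(malloc(len + 1));
	memcpy(s->str, str, len);
	s->str[len] = '\0';
	s->len = len;

	return(s);
}

static void ast_string_free(ast_string_t* s)
{
	free(s->str);
	free(s);
}

int main()
{
	/* A token containing an embedded 0x00 byte: strlen() sees only
	3 bytes, while the explicit length preserves all 7. */
	const unsigned char	token[] = {'a', 'b', 'c', 0x00, 'd', 'e', 'f'};
	ast_string_t*		s = ast_string_create(token, sizeof(token));

	printf("strlen=%zu len=%zu\n", strlen((const char*) s->str), s->len);

	ast_string_free(s);
	return(0);
}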
+ +/* String of length len. +We always store the string of length len with a terminating '\0', +regardless of there is any 0x00 in the string itself */ +struct fts_ast_string_t { + /*!< Pointer to string. */ + byte* str; + + /*!< Length of the string. */ + ulint len; +}; + /* Query term type */ struct fts_ast_term_t { - byte* ptr; /*!< Pointer to term string.*/ - ibool wildcard; /*!< TRUE if wild card set.*/ + fts_ast_string_t* ptr; /*!< Pointer to term string.*/ + ibool wildcard; /*!< TRUE if wild card set.*/ }; /* Query text type */ struct fts_ast_text_t { - byte* ptr; /*!< Pointer to term string.*/ - ulint distance; /*!< > 0 if proximity distance + fts_ast_string_t* ptr; /*!< Pointer to text string.*/ + ulint distance; /*!< > 0 if proximity distance set */ }; diff --git a/storage/xtradb/include/fts0fts.h b/storage/xtradb/include/fts0fts.h index 5bea5bc0e97..a2996ecacc8 100644 --- a/storage/xtradb/include/fts0fts.h +++ b/storage/xtradb/include/fts0fts.h @@ -745,6 +745,7 @@ void fts_savepoint_take( /*===============*/ trx_t* trx, /*!< in: transaction */ + fts_trx_t* fts_trx, /*!< in: fts transaction */ const char* name) /*!< in: savepoint name */ __attribute__((nonnull)); /**********************************************************************//** diff --git a/storage/xtradb/include/fts0pars.h b/storage/xtradb/include/fts0pars.h index 50f636944e5..8108e811599 100644 --- a/storage/xtradb/include/fts0pars.h +++ b/storage/xtradb/include/fts0pars.h @@ -53,9 +53,9 @@ typedef union YYSTYPE /* Line 2068 of yacc.c */ #line 61 "fts0pars.y" - int oper; - char* token; - fts_ast_node_t* node; + int oper; + fts_ast_string_t* token; + fts_ast_node_t* node; diff --git a/storage/xtradb/include/lock0lock.h b/storage/xtradb/include/lock0lock.h index 8d5515b5eb5..235b2373c25 100644 --- a/storage/xtradb/include/lock0lock.h +++ b/storage/xtradb/include/lock0lock.h @@ -301,7 +301,7 @@ lock_rec_insert_check_and_lock( inserted record maybe should inherit LOCK_GAP type locks from the successor record */ - __attribute__((nonnull, warn_unused_result)); + __attribute__((nonnull(2,3,4,6,7), warn_unused_result)); /*********************************************************************//** Checks if locks of other transactions prevent an immediate modify (update, delete mark, or delete unmark) of a clustered index record. If they do, diff --git a/storage/xtradb/include/log0log.h b/storage/xtradb/include/log0log.h index b9e0c2ef516..f130c8de423 100644 --- a/storage/xtradb/include/log0log.h +++ b/storage/xtradb/include/log0log.h @@ -168,6 +168,13 @@ lsn_t log_get_lsn(void); /*=============*/ /************************************************************//** +Gets the current lsn. +@return current lsn */ +UNIV_INLINE +lsn_t +log_get_lsn_nowait(void); +/*=============*/ +/************************************************************//** Gets the last lsn that is fully flushed to disk. @return last flushed lsn */ UNIV_INLINE @@ -615,6 +622,27 @@ void log_mem_free(void); /*==============*/ +/****************************************************************//** +Safely reads the log_sys->tracked_lsn value. Uses atomic operations +if available, otherwise this field is protected with the log system +mutex. The writer counterpart function is log_set_tracked_lsn() in +log0online.c. + +@return log_sys->tracked_lsn value. */ +UNIV_INLINE +lsn_t +log_get_tracked_lsn(void); +/*=====================*/ +/****************************************************************//** +Unsafely reads the log_sys->tracked_lsn value. 
Uses atomic operations +if available, or use dirty read. Use for printing only. + +@return log_sys->tracked_lsn value. */ +UNIV_INLINE +lsn_t +log_get_tracked_lsn_peek(void); +/*==========================*/ + extern log_t* log_sys; /* Values used as flags */ @@ -696,13 +724,13 @@ extern log_t* log_sys; megabyte. This information might have been used - since ibbackup version 0.35 but + since mysqlbackup version 0.35 but before 1.41 to decide if unused ends of non-auto-extending data files in space 0 can be truncated. This information was made obsolete - by ibbackup --compress. */ + by mysqlbackup --compress. */ #define LOG_CHECKPOINT_FSP_MAGIC_N (12 + LOG_CHECKPOINT_ARRAY_END) /*!< Not used (0); This magic number tells if the @@ -731,7 +759,7 @@ extern log_t* log_sys; /* a 32-byte field which contains the string 'ibbackup' and the creation time if the log file was - created by ibbackup --restore; + created by mysqlbackup --restore; when mysqld is first time started on the restored database, it can print helpful info for the user */ diff --git a/storage/xtradb/include/log0log.ic b/storage/xtradb/include/log0log.ic index 7724d94b51a..853027daa7e 100644 --- a/storage/xtradb/include/log0log.ic +++ b/storage/xtradb/include/log0log.ic @@ -486,6 +486,26 @@ log_get_flush_lsn(void) return(lsn); } +/************************************************************//** +Gets the current lsn with a trylock +@return current lsn or 0 if false*/ +UNIV_INLINE +lsn_t +log_get_lsn_nowait(void) +/*=============*/ +{ + lsn_t lsn; + + if (mutex_enter_nowait(&(log_sys->mutex))) + return 0; + + lsn = log_sys->lsn; + + mutex_exit(&(log_sys->mutex)); + + return(lsn); +} + /**************************************************************** Gets the log group capacity. It is OK to read the value without holding log_sys->mutex because it is constant. @@ -531,3 +551,39 @@ log_free_check(void) } } #endif /* !UNIV_HOTBACKUP */ + +/****************************************************************//** +Unsafely reads the log_sys->tracked_lsn value. Uses atomic operations +if available, or use dirty read. Use for printing only. + +@return log_sys->tracked_lsn value. */ +UNIV_INLINE +lsn_t +log_get_tracked_lsn_peek(void) +/*==========================*/ +{ +#ifdef HAVE_ATOMIC_BUILTINS_64 + return os_atomic_increment_uint64(&log_sys->tracked_lsn, 0); +#else + return log_sys->tracked_lsn; +#endif +} + +/****************************************************************//** +Safely reads the log_sys->tracked_lsn value. Uses atomic operations +if available, otherwise this field is protected with the log system +mutex. The writer counterpart function is log_set_tracked_lsn() in +log0online.c. +@return log_sys->tracked_lsn value. 
*/ +UNIV_INLINE +lsn_t +log_get_tracked_lsn(void) +/*=====================*/ +{ +#ifdef HAVE_ATOMIC_BUILTINS_64 + return os_atomic_increment_uint64(&log_sys->tracked_lsn, 0); +#else + ut_ad(mutex_own(&(log_sys->mutex))); + return log_sys->tracked_lsn; +#endif +} diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h index 76e77799b43..ba362a0e458 100644 --- a/storage/xtradb/include/os0file.h +++ b/storage/xtradb/include/os0file.h @@ -132,7 +132,7 @@ enum os_file_create_t { #define OS_FILE_READ_ONLY 333 #define OS_FILE_READ_WRITE 444 -#define OS_FILE_READ_ALLOW_DELETE 555 /* for ibbackup */ +#define OS_FILE_READ_ALLOW_DELETE 555 /* for mysqlbackup */ /* Options for file_create */ #define OS_FILE_AIO 61 @@ -168,8 +168,8 @@ enum os_file_create_t { #define OS_FILE_LOG 256 /* This can be ORed to type */ /* @} */ -#define OS_AIO_N_PENDING_IOS_PER_THREAD 256 /*!< Windows might be able to handle -more */ +#define OS_AIO_N_PENDING_IOS_PER_THREAD 32 /*!< Win NT does not allow more + than 64 */ /** Modes for aio operations @{ */ #define OS_AIO_NORMAL 21 /*!< Normal asynchronous i/o not for ibuf diff --git a/storage/xtradb/include/os0once.h b/storage/xtradb/include/os0once.h new file mode 100644 index 00000000000..a8bbaf1d2d4 --- /dev/null +++ b/storage/xtradb/include/os0once.h @@ -0,0 +1,125 @@ +/***************************************************************************** + +Copyright (c) 2014, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file include/os0once.h +A class that aids executing a given function exactly once in a multi-threaded +environment. + +Created Feb 20, 2014 Vasil Dimov +*******************************************************/ + +#ifndef os0once_h +#define os0once_h + +#include "univ.i" + +#include "os0sync.h" +#include "ut0ut.h" + +/** Execute a given function exactly once in a multi-threaded environment +or wait for the function to be executed by another thread. + +Example usage: +First the user must create a control variable of type os_once::state_t and +assign it os_once::NEVER_DONE. +Then the user must pass this variable, together with a function to be +executed to os_once::do_or_wait_for_done(). + +Multiple threads can call os_once::do_or_wait_for_done() simultaneously with +the same (os_once::state_t) control variable. The provided function will be +called exactly once and when os_once::do_or_wait_for_done() returns then this +function has completed execution, by this or another thread. In other words +os_once::do_or_wait_for_done() will either execute the provided function or +will wait for its execution to complete if it is already called by another +thread or will do nothing if the function has already completed its execution +earlier. 
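In practice the usage described above amounts to one call site per lazily initialized object. A hedged sketch of such a call site follows; my_object_t, init_object() and my_object_use() are hypothetical names, and the code assumes the InnoDB build environment (univ.i, os0sync.h) with HAVE_ATOMIC_BUILTINS defined, as required by the class body below:

#include "os0once.h"	/* the header added by this commit */

struct my_object_t {
	/* Control variable; must start out as os_once::NEVER_DONE. */
	volatile os_once::state_t	init_state;

	int				value;	/* lazily initialized payload */
};

/* Executed exactly once, by whichever thread gets there first; every
other caller of do_or_wait_for_done() waits until it has completed. */
static void init_object(void* arg)
{
	my_object_t*	obj = static_cast<my_object_t*>(arg);

	obj->value = 42;	/* stand-in for the real, expensive work */
}

void my_object_use(my_object_t* obj)
{
	os_once::do_or_wait_for_done(&obj->init_state, init_object, obj);

	/* obj->value is now safe to read from any thread. */
}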
+ +This mimics pthread_once(3), but unfortunatelly pthread_once(3) does not +support passing arguments to the init_routine() function. We should use +std::call_once() when we start compiling with C++11 enabled. */ +class os_once { +public: + /** Control variables' state type */ + typedef ib_uint32_t state_t; + + /** Not yet executed. */ + static const state_t NEVER_DONE = 0; + + /** Currently being executed by this or another thread. */ + static const state_t IN_PROGRESS = 1; + + /** Finished execution. */ + static const state_t DONE = 2; + +#ifdef HAVE_ATOMIC_BUILTINS + /** Call a given function or wait its execution to complete if it is + already called by another thread. + @param[in,out] state control variable + @param[in] do_func function to call + @param[in,out] do_func_arg an argument to pass to do_func(). */ + static + void + do_or_wait_for_done( + volatile state_t* state, + void (*do_func)(void*), + void* do_func_arg) + { + /* Avoid calling os_compare_and_swap_uint32() in the most + common case. */ + if (*state == DONE) { + return; + } + + if (os_compare_and_swap_uint32(state, + NEVER_DONE, IN_PROGRESS)) { + /* We are the first. Call the function. */ + + do_func(do_func_arg); + + const bool swapped = os_compare_and_swap_uint32( + state, IN_PROGRESS, DONE); + + ut_a(swapped); + } else { + /* The state is not NEVER_DONE, so either it is + IN_PROGRESS (somebody is calling the function right + now or DONE (it has already been called and completed). + Wait for it to become DONE. */ + for (;;) { + const state_t s = *state; + + switch (s) { + case DONE: + return; + case IN_PROGRESS: + break; + case NEVER_DONE: + /* fall through */ + default: + ut_error; + } + + UT_RELAX_CPU(); + } + } + } +#endif /* HAVE_ATOMIC_BUILTINS */ +}; + +#endif /* os0once_h */ diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h index ea5d09ec535..066fd34d668 100644 --- a/storage/xtradb/include/os0sync.h +++ b/storage/xtradb/include/os0sync.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -357,6 +357,10 @@ Atomic compare-and-swap and increment for InnoDB. 
*/ # define HAVE_ATOMIC_BUILTINS +# ifdef HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE +# define HAVE_ATOMIC_BUILTINS_BYTE +# endif + # ifdef HAVE_IB_GCC_ATOMIC_BUILTINS_64 # define HAVE_ATOMIC_BUILTINS_64 # endif @@ -434,9 +438,13 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */ # define os_atomic_test_and_set_ulint(ptr, new_val) \ __sync_lock_test_and_set(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + __sync_lock_release(ptr) + #elif defined(HAVE_IB_SOLARIS_ATOMICS) # define HAVE_ATOMIC_BUILTINS +# define HAVE_ATOMIC_BUILTINS_BYTE # define HAVE_ATOMIC_BUILTINS_64 /* If not compiling with GCC or GCC doesn't support the atomic @@ -515,9 +523,13 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */ # define os_atomic_test_and_set_ulint(ptr, new_val) \ atomic_swap_ulong(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + (void) atomic_swap_uchar(ptr, 0) + #elif defined(HAVE_WINDOWS_ATOMICS) # define HAVE_ATOMIC_BUILTINS +# define HAVE_ATOMIC_BUILTINS_BYTE # ifndef _WIN32 # define HAVE_ATOMIC_BUILTINS_64 @@ -574,7 +586,8 @@ Returns true if swapped, ptr is pointer to target, old_val is value to compare to, new_val is the value to swap in. */ # define os_compare_and_swap_uint32(ptr, old_val, new_val) \ - (win_cmp_and_xchg_dword(ptr, new_val, old_val) == old_val) + (InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), \ + new_val, old_val) == old_val) # define os_compare_and_swap_ulint(ptr, old_val, new_val) \ (win_cmp_and_xchg_ulint(ptr, new_val, old_val) == old_val) @@ -637,6 +650,9 @@ clobbered */ # define os_atomic_test_and_set_ulong(ptr, new_val) \ InterlockedExchange(ptr, new_val) +# define os_atomic_lock_release_byte(ptr) \ + (void) InterlockedExchange(ptr, 0) + #else # define IB_ATOMICS_STARTUP_MSG \ "Mutexes and rw_locks use InnoDB's own implementation" @@ -684,6 +700,65 @@ for synchronization */ os_decrement_counter_by_amount(mutex, counter, 1);\ } while (0); +/** barrier definitions for memory ordering */ +#if defined __i386__ || defined __x86_64__ || defined _M_IX86 || defined _M_X64 || defined __WIN__ +/* Performance regression was observed at some conditions for Intel +architecture. Disable memory barrier for Intel architecture for now. 
*/ +# define os_rmb do { } while(0) +# define os_wmb do { } while(0) +# define os_isync do { } while(0) +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "Memory barrier is not used" +#elif defined(HAVE_IB_GCC_ATOMIC_THREAD_FENCE) +# define HAVE_MEMORY_BARRIER +# define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE) +# define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE) +#ifdef __powerpc__ +# define os_isync __asm __volatile ("isync":::"memory") +#else +#define os_isync do { } while(0) +#endif + +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "GCC builtin __atomic_thread_fence() is used for memory barrier" + +#elif defined(HAVE_IB_GCC_SYNC_SYNCHRONISE) +# define HAVE_MEMORY_BARRIER +# define os_rmb __sync_synchronize() +# define os_wmb __sync_synchronize() +# define os_isync __sync_synchronize() +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "GCC builtin __sync_synchronize() is used for memory barrier" + +#elif defined(HAVE_IB_MACHINE_BARRIER_SOLARIS) +# define HAVE_MEMORY_BARRIER +# include <mbarrier.h> +# define os_rmb __machine_r_barrier() +# define os_wmb __machine_w_barrier() +# define os_isync os_rmb; os_wmb +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "Solaris memory ordering functions are used for memory barrier" + +#elif defined(HAVE_WINDOWS_MM_FENCE) +# define HAVE_MEMORY_BARRIER +# include <intrin.h> +# define os_rmb _mm_lfence() +# define os_wmb _mm_sfence() +# define os_isync os_rmb; os_wmb +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "_mm_lfence() and _mm_sfence() are used for memory barrier" + +# define os_atomic_lock_release_byte(ptr) \ + (void) InterlockedExchange(ptr, 0) + +#else +# define os_rmb do { } while(0) +# define os_wmb do { } while(0) +# define os_isync do { } while(0) +# define IB_MEMORY_BARRIER_STARTUP_MSG \ + "Memory barrier is not used" +#endif + #ifndef UNIV_NONINL #include "os0sync.ic" #endif diff --git a/storage/xtradb/include/read0read.h b/storage/xtradb/include/read0read.h index e17d49b1321..0352f129c30 100644 --- a/storage/xtradb/include/read0read.h +++ b/storage/xtradb/include/read0read.h @@ -50,6 +50,27 @@ read_view_open_now( NULL if a new one needs to be created */ /*********************************************************************//** +Clones a read view object. This function will allocate space for two read +views contiguously, one identical in size and content as @param view (starting +at returned pointer) and another view immediately following the trx_ids array. +The second view will have space for an extra trx_id_t element. +@return read view struct */ +UNIV_INTERN +read_view_t* +read_view_clone( +/*============*/ + const read_view_t* view, /*!< in: view to clone */ + read_view_t*& prebuilt_clone);/*!< in,out: prebuilt view or + NULL */ +/*********************************************************************//** +Insert the view in the proper order into the trx_sys->view_list. The +read view list is ordered by read_view_t::low_limit_no in descending order. */ +UNIV_INTERN +void +read_view_add( +/*==========*/ + read_view_t* view); /*!< in: view to add to */ +/*********************************************************************//** Makes a copy of the oldest existing read view, or opens a new. The view must be closed with ..._close. 
@return own: read view struct */ diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h index aef04d003d5..3ed3ba71698 100644 --- a/storage/xtradb/include/srv0srv.h +++ b/storage/xtradb/include/srv0srv.h @@ -1,9 +1,9 @@ /***************************************************************************** -Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 1995, 2013, Oracle and/or its affiliates. Copyright (c) 2008, 2009, Google Inc. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, SkySQL Ab. All Rights Reserved. +Copyright (c) 2013, 2014, SkySQL Ab. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -183,9 +183,12 @@ extern char srv_disable_sort_file_cache; thread */ extern os_event_t srv_checkpoint_completed_event; -/* This event is set on the online redo log following thread exit to signal -that the (slow) shutdown may proceed */ -extern os_event_t srv_redo_log_thread_finished_event; +/* This event is set on the online redo log following thread after a successful +log tracking iteration */ +extern os_event_t srv_redo_log_tracked_event; + +/** srv_redo_log_follow_thread spawn flag */ +extern bool srv_redo_log_thread_started; /* If the last data file is auto-extended, we add this many pages to it at a time */ @@ -642,6 +645,8 @@ extern srv_stats_t srv_stats; When FALSE, row locks are not taken at all. */ extern my_bool srv_fake_changes_locks; +/** Simulate compression failures. */ +extern uint srv_simulate_comp_failures; # ifdef UNIV_PFS_THREAD /* Keys to register InnoDB threads with performance schema */ diff --git a/storage/xtradb/include/sync0rw.h b/storage/xtradb/include/sync0rw.h index 95bb7e16b26..0ac6b0f3f69 100644 --- a/storage/xtradb/include/sync0rw.h +++ b/storage/xtradb/include/sync0rw.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -109,14 +109,8 @@ extern ib_mutex_t rw_lock_list_mutex; #ifdef UNIV_SYNC_DEBUG /* The global mutex which protects debug info lists of all rw-locks. To modify the debug info list of an rw-lock, this mutex has to be - acquired in addition to the mutex protecting the lock. */ -extern ib_mutex_t rw_lock_debug_mutex; -extern os_event_t rw_lock_debug_event; /*!< If deadlock detection does - not get immediately the mutex it - may wait for this event */ -extern ibool rw_lock_debug_waiters; /*!< This is set to TRUE, if - there may be waiters for the event */ +extern os_fast_mutex_t rw_lock_debug_mutex; #endif /* UNIV_SYNC_DEBUG */ /** Counters for RW locks. 
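The replacement of srv_redo_log_thread_finished_event with srv_redo_log_tracked_event relies on InnoDB's manual-reset os_event_t: the checkpoint/shutdown code resets the "tracked" event before signalling srv_checkpoint_completed_event, and the log following thread sets it again after a successful tracking iteration, so a waiter cannot consume a left-over signal from a previous iteration. A bare-bones sketch of such an event on top of pthreads; this is an assumption-level model (the real os_event_t also carries a signal counter to avoid missed wakeups) and toy_event_t is an illustrative name:

#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
    bool            is_set;
} toy_event_t;

static void toy_event_init(toy_event_t* ev)
{
    pthread_mutex_init(&ev->mutex, NULL);
    pthread_cond_init(&ev->cond, NULL);
    ev->is_set = false;
}

static void toy_event_set(toy_event_t* ev)      /* like os_event_set() */
{
    pthread_mutex_lock(&ev->mutex);
    ev->is_set = true;
    pthread_cond_broadcast(&ev->cond);
    pthread_mutex_unlock(&ev->mutex);
}

static void toy_event_reset(toy_event_t* ev)    /* like os_event_reset() */
{
    pthread_mutex_lock(&ev->mutex);
    ev->is_set = false;
    pthread_mutex_unlock(&ev->mutex);
}

static void toy_event_wait(toy_event_t* ev)     /* like os_event_wait() */
{
    pthread_mutex_lock(&ev->mutex);
    while (!ev->is_set) {
        pthread_cond_wait(&ev->cond, &ev->mutex);
    }
    pthread_mutex_unlock(&ev->mutex);
}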
*/ @@ -142,7 +136,7 @@ extern mysql_pfs_key_t trx_i_s_cache_lock_key; extern mysql_pfs_key_t trx_purge_latch_key; extern mysql_pfs_key_t index_tree_rw_lock_key; extern mysql_pfs_key_t index_online_log_key; -extern mysql_pfs_key_t dict_table_stats_latch_key; +extern mysql_pfs_key_t dict_table_stats_key; extern mysql_pfs_key_t trx_sys_rw_lock_key; extern mysql_pfs_key_t hash_table_rw_lock_key; #endif /* UNIV_PFS_RWLOCK */ diff --git a/storage/xtradb/include/sync0rw.ic b/storage/xtradb/include/sync0rw.ic index 3511987dbb0..8aadc406132 100644 --- a/storage/xtradb/include/sync0rw.ic +++ b/storage/xtradb/include/sync0rw.ic @@ -112,6 +112,7 @@ rw_lock_set_waiter_flag( (void) os_compare_and_swap_ulint(&lock->waiters, 0, 1); #else /* INNODB_RW_LOCKS_USE_ATOMICS */ lock->waiters = 1; + os_wmb; #endif /* INNODB_RW_LOCKS_USE_ATOMICS */ } @@ -129,6 +130,7 @@ rw_lock_reset_waiter_flag( (void) os_compare_and_swap_ulint(&lock->waiters, 1, 0); #else /* INNODB_RW_LOCKS_USE_ATOMICS */ lock->waiters = 0; + os_wmb; #endif /* INNODB_RW_LOCKS_USE_ATOMICS */ } @@ -256,7 +258,10 @@ rw_lock_lock_word_decr( ulint amount) /*!< in: amount to decrement */ { #ifdef INNODB_RW_LOCKS_USE_ATOMICS - lint local_lock_word = lock->lock_word; + lint local_lock_word; + + os_rmb; + local_lock_word = lock->lock_word; while (local_lock_word > 0) { if (os_compare_and_swap_lint(&lock->lock_word, local_lock_word, @@ -620,10 +625,6 @@ rw_lock_s_unlock_func( /* A waiting next-writer exists, either high priority or regular, sharing the same wait event. */ - if (lock->high_priority_wait_ex_waiter) { - - lock->high_priority_wait_ex_waiter = 0; - } os_event_set(lock->base_lock.wait_ex_event); sync_array_object_signalled(); @@ -916,8 +917,9 @@ pfs_rw_lock_x_lock_func( rw_lock_x_lock_func(lock, pass, file_name, line); - if (locker != NULL) + if (locker != NULL) { PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0); + } } else { @@ -1072,8 +1074,9 @@ pfs_rw_lock_s_lock_func( rw_lock_s_lock_func(lock, pass, file_name, line); - if (locker != NULL) + if (locker != NULL) { PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0); + } } else { diff --git a/storage/xtradb/include/sync0sync.h b/storage/xtradb/include/sync0sync.h index 72cfbf61dd8..5ba385ce75f 100644 --- a/storage/xtradb/include/sync0sync.h +++ b/storage/xtradb/include/sync0sync.h @@ -50,6 +50,8 @@ extern "C" my_bool timed_mutexes; #ifdef _WIN32 typedef LONG lock_word_t; /*!< On Windows, InterlockedExchange operates on LONG variable */ +#elif defined(HAVE_ATOMIC_BUILTINS) && !defined(HAVE_ATOMIC_BUILTINS_BYTE) +typedef ulint lock_word_t; #else typedef byte lock_word_t; #endif diff --git a/storage/xtradb/include/sync0sync.ic b/storage/xtradb/include/sync0sync.ic index a302e1473a5..0c4a8ace887 100644 --- a/storage/xtradb/include/sync0sync.ic +++ b/storage/xtradb/include/sync0sync.ic @@ -83,7 +83,11 @@ ib_mutex_test_and_set( ib_mutex_t* mutex) /*!< in: mutex */ { #if defined(HAVE_ATOMIC_BUILTINS) +# if defined(HAVE_ATOMIC_BUILTINS_BYTE) return(os_atomic_test_and_set_byte(&mutex->lock_word, 1)); +# else + return(os_atomic_test_and_set_ulint(&mutex->lock_word, 1)); +# endif #else ibool ret; @@ -95,6 +99,7 @@ ib_mutex_test_and_set( ut_a(mutex->lock_word == 0); mutex->lock_word = 1; + os_wmb; } return((byte) ret); @@ -111,10 +116,7 @@ mutex_reset_lock_word( ib_mutex_t* mutex) /*!< in: mutex */ { #if defined(HAVE_ATOMIC_BUILTINS) - /* In theory __sync_lock_release should be used to release the lock. - Unfortunately, it does not work properly alone. 
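rw_lock_lock_word_decr() above is a classic optimistic compare-and-swap loop, now preceded by an os_rmb so the first read of lock_word is not a stale cached value. A standalone sketch of the same loop, assuming GCC's __sync/__atomic builtins; toy_lock_word and toy_lock_word_decr() are illustrative names:

#include <stdbool.h>

static volatile long toy_lock_word = 0x20000000; /* some positive value; the
                                                    real code starts from
                                                    X_LOCK_DECR */

static bool toy_lock_word_decr(long amount)
{
    long local;

    __atomic_thread_fence(__ATOMIC_ACQUIRE);        /* the added os_rmb */
    local = toy_lock_word;

    while (local > 0) {
        /* Try to publish (local - amount); if another thread got there
           first, re-read and retry with the fresh value. */
        if (__sync_bool_compare_and_swap(&toy_lock_word,
                                         local, local - amount)) {
            return true;
        }
        local = toy_lock_word;
    }

    return false; /* lock word already 0 or negative: caller takes the
                     slow path */
}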
The workaround is - that more conservative __sync_lock_test_and_set is used instead. */ - os_atomic_test_and_set_byte(&mutex->lock_word, 0); + os_atomic_lock_release_byte(&mutex->lock_word); #else mutex->lock_word = 0; @@ -150,6 +152,7 @@ mutex_get_waiters( ptr = &(mutex->waiters); + os_rmb; return(*ptr); /* Here we assume that the read of a single word from memory is atomic */ } @@ -184,6 +187,7 @@ mutex_exit_func( to wake up possible hanging threads if they are missed in mutex_signal_object. */ + os_isync; if (mutex_get_waiters(mutex) != 0) { mutex_signal_object(mutex); diff --git a/storage/xtradb/include/trx0trx.h b/storage/xtradb/include/trx0trx.h index be13c48fdfc..75325d73f4d 100644 --- a/storage/xtradb/include/trx0trx.h +++ b/storage/xtradb/include/trx0trx.h @@ -275,6 +275,17 @@ read_view_t* trx_assign_read_view( /*=================*/ trx_t* trx); /*!< in: active transaction */ +/********************************************************************//** +Clones the read view from another transaction. All the consistent reads within +the receiver transaction will get the same read view as the donor transaction +@return read view clone */ +UNIV_INTERN +read_view_t* +trx_clone_read_view( +/*================*/ + trx_t* trx, /*!< in: receiver transaction */ + trx_t* from_trx) /*!< in: donor transaction */ + __attribute__((nonnull, warn_unused_result)); /****************************************************************//** Prepares a transaction for commit/rollback. */ UNIV_INTERN @@ -1019,6 +1030,11 @@ struct trx_t{ count of tables being flushed. */ /*------------------------------*/ + THD* current_lock_mutex_owner; + /*!< If this is equal to current_thd, + then in innobase_kill_query() we know we + already hold the lock_sys->mutex. */ + /*------------------------------*/ #ifdef UNIV_DEBUG ulint start_line; /*!< Track where it was started from */ const char* start_file; /*!< Filename where it was started */ diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i index 2a3a85c219f..8353b1dcf8a 100644 --- a/storage/xtradb/include/univ.i +++ b/storage/xtradb/include/univ.i @@ -45,10 +45,10 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 6 -#define INNODB_VERSION_BUGFIX 17 +#define INNODB_VERSION_BUGFIX 20 #ifndef PERCONA_INNODB_VERSION -#define PERCONA_INNODB_VERSION 65.0 +#define PERCONA_INNODB_VERSION 68.0 #endif /* Enable UNIV_LOG_ARCHIVE in XtraDB */ @@ -481,10 +481,10 @@ typedef unsigned __int64 ib_uint64_t; typedef unsigned __int32 ib_uint32_t; #else /* Use the integer types and formatting strings defined in the C99 standard. 
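The univ.i hunk just below only inserts a space before the PRIu32/PRId64/PRIx64 macros. The reason is a C++11 parsing rule: "%"PRIu32 with no space is treated as a string literal followed by a user-defined literal suffix, which newer compilers reject or warn about, whereas "%" PRIu32 is ordinary string-literal concatenation. A small illustration (TOY_UINT32PF is a made-up macro, not the project's UINT32PF):

#define __STDC_FORMAT_MACROS /* only needed by some older C++ compilers */
#include <inttypes.h>
#include <stdio.h>

#define TOY_UINT32PF "%" PRIu32 /* note the space before PRIu32 */

int main(void)
{
    uint32_t page_no = 42;

    /* After preprocessing this is plain string concatenation, e.g.
       printf("page %" "u" "\n", page_no) on a typical platform where
       uint32_t is unsigned int. */
    printf("page " TOY_UINT32PF "\n", page_no);
    return 0;
}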
*/ -# define UINT32PF "%"PRIu32 -# define INT64PF "%"PRId64 -# define UINT64PF "%"PRIu64 -# define UINT64PFx "%016"PRIx64 +# define UINT32PF "%" PRIu32 +# define INT64PF "%" PRId64 +# define UINT64PF "%" PRIu64 +# define UINT64PFx "%016" PRIx64 # define DBUG_LSN_PF UINT64PF typedef int64_t ib_int64_t; typedef uint64_t ib_uint64_t; diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index 018f6f9a69a..30ad3ee7922 100644 --- a/storage/xtradb/lock/lock0lock.cc +++ b/storage/xtradb/lock/lock0lock.cc @@ -49,6 +49,7 @@ Created 5/7/1996 Heikki Tuuri #include "btr0btr.h" #include "dict0boot.h" #include <set> +#include "mysql/plugin.h" #include <mysql/service_wsrep.h> @@ -375,6 +376,11 @@ struct lock_stack_t { ulint heap_no; /*!< heap number if rec lock */ }; +extern "C" void thd_report_wait_for(const MYSQL_THD thd, MYSQL_THD other_thd); +extern "C" int thd_need_wait_for(const MYSQL_THD thd); +extern "C" +int thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd); + /** Stack to use during DFS search. Currently only a single stack is required because there is no parallel deadlock check. This stack is protected by the lock_sys_t::mutex. */ @@ -390,6 +396,14 @@ UNIV_INTERN mysql_pfs_key_t lock_sys_mutex_key; UNIV_INTERN mysql_pfs_key_t lock_sys_wait_mutex_key; #endif /* UNIV_PFS_MUTEX */ +/* Buffer to collect THDs to report waits for. */ +struct thd_wait_reports { + struct thd_wait_reports *next; /*!< List link */ + ulint used; /*!< How many elements in waitees[] */ + trx_t *waitees[64]; /*!< Trxs for thd_report_wait_for() */ +}; + + #ifdef UNIV_DEBUG UNIV_INTERN ibool lock_print_waits = FALSE; @@ -1021,6 +1035,32 @@ lock_rec_has_to_wait( return(FALSE); } + if ((type_mode & LOCK_GAP || lock_rec_get_gap(lock2)) && + !thd_need_ordering_with(trx->mysql_thd, + lock2->trx->mysql_thd)) { + /* If the upper server layer has already decided on the + commit order between the transaction requesting the + lock and the transaction owning the lock, we do not + need to wait for gap locks. Such ordeering by the upper + server layer happens in parallel replication, where the + commit order is fixed to match the original order on the + master. + + Such gap locks are mainly needed to get serialisability + between transactions so that they will be binlogged in + the correct order so that statement-based replication + will give the correct results. Since the right order + was already determined on the master, we do not need + to enforce it again here. + + Skipping the locks is not essential for correctness, + since in case of deadlock we will just kill the later + transaction and retry it. But it can save some + unnecessary rollbacks and retries. */ + + return (FALSE); + } + #ifdef WITH_WSREP /* if BF thread is locking and has conflict with another BF thread, we need to look at trx ordering and lock types */ @@ -1065,6 +1105,7 @@ lock_rec_has_to_wait( } } #endif /* WITH_WSREP */ + return(TRUE); } @@ -4169,7 +4210,8 @@ static trx_id_t lock_deadlock_search( /*=================*/ - lock_deadlock_ctx_t* ctx) /*!< in/out: deadlock context */ + lock_deadlock_ctx_t* ctx, /*!< in/out: deadlock context */ + struct thd_wait_reports*waitee_ptr) /*!< in/out: list of waitees */ { const lock_t* lock; ulint heap_no; @@ -4250,38 +4292,65 @@ lock_deadlock_search( /* Select the joining transaction as the victim. */ return(ctx->start->id); - } else if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + } else { + /* We do not need to report autoinc locks to the upper + layer. 
These locks are released before commit, so they + can not cause deadlocks with binlog-fixed commit + order. */ + if (waitee_ptr && + (lock_get_type_low(lock) != LOCK_TABLE || + lock_get_mode(lock) != LOCK_AUTO_INC)) { + if (waitee_ptr->used == + sizeof(waitee_ptr->waitees) / + sizeof(waitee_ptr->waitees[0])) { + waitee_ptr->next = + (struct thd_wait_reports *) + mem_alloc(sizeof(*waitee_ptr)); + waitee_ptr = waitee_ptr->next; + if (!waitee_ptr) { + ctx->too_deep = TRUE; + return(ctx->start->id); + } + waitee_ptr->next = NULL; + waitee_ptr->used = 0; + } + waitee_ptr->waitees[waitee_ptr->used++] = lock->trx; + } + + if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { - /* Another trx ahead has requested a lock in an - incompatible mode, and is itself waiting for a lock. */ + /* Another trx ahead has requested a lock in an + incompatible mode, and is itself waiting for a lock. */ - ++ctx->cost; + ++ctx->cost; - /* Save current search state. */ - if (!lock_deadlock_push(ctx, lock, heap_no)) { + /* Save current search state. */ + if (!lock_deadlock_push(ctx, lock, heap_no)) { - /* Unable to save current search state, stack - size not big enough. */ + /* Unable to save current search state, stack + size not big enough. */ - ctx->too_deep = TRUE; + ctx->too_deep = TRUE; #ifdef WITH_WSREP if (wsrep_thd_is_BF(ctx->start->mysql_thd, TRUE)) return(lock->trx->id); else #endif /* WITH_WSREP */ + return(ctx->start->id); - } + } + + ctx->wait_lock = lock->trx->lock.wait_lock; + lock = lock_get_first_lock(ctx, &heap_no); - ctx->wait_lock = lock->trx->lock.wait_lock; - lock = lock_get_first_lock(ctx, &heap_no); + if (lock->trx->lock.deadlock_mark > ctx->mark_start) { + lock = lock_get_next_lock(ctx, lock, heap_no); + } - if (lock->trx->lock.deadlock_mark > ctx->mark_start) { + } else { lock = lock_get_next_lock(ctx, lock, heap_no); } - - } else { - lock = lock_get_next_lock(ctx, lock, heap_no); } } @@ -4346,6 +4415,48 @@ lock_deadlock_trx_rollback( trx_mutex_exit(trx); } +static +void +lock_report_waiters_to_mysql( +/*=======================*/ + struct thd_wait_reports* waitee_buf_ptr, /*!< in: set of trxs */ + THD* mysql_thd, /*!< in: THD */ + trx_id_t victim_trx_id) /*!< in: Trx selected + as deadlock victim, if + any */ +{ + struct thd_wait_reports* p; + struct thd_wait_reports* q; + ulint i; + + p = waitee_buf_ptr; + while (p) { + i = 0; + while (i < p->used) { + trx_t *w_trx = p->waitees[i]; + /* There is no need to report waits to a trx already + selected as a victim. */ + if (w_trx->id != victim_trx_id) { + /* If thd_report_wait_for() decides to kill the + transaction, then we will get a call back into + innobase_kill_query. We mark this by setting + current_lock_mutex_owner, so we can avoid trying + to recursively take lock_sys->mutex. */ + w_trx->current_lock_mutex_owner = mysql_thd; + thd_report_wait_for(mysql_thd, w_trx->mysql_thd); + w_trx->current_lock_mutex_owner = NULL; + } + ++i; + } + q = p->next; + if (p != waitee_buf_ptr) { + mem_free(p); + } + p = q; + } +} + + /********************************************************************//** Checks if a joining lock request results in a deadlock. 
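The waitee bookkeeping added to lock_deadlock_search() above uses a fixed 64-slot thd_wait_reports chunk (normally the caller's stack buffer) and spills into heap-allocated chunks chained through next when it fills up; an allocation failure simply falls back to the too_deep path. A standalone sketch of that pattern, with malloc()/free() standing in for mem_alloc()/mem_free() and toy_* names that are illustrative only:

#include <stdlib.h>

#define TOY_WAITEES 64

struct toy_wait_reports {
    struct toy_wait_reports* next;               /* list link */
    unsigned long            used;               /* slots used in waitees[] */
    void*                    waitees[TOY_WAITEES];
};

/* Appends one waitee and returns the chunk to keep appending to, or NULL
   if a fresh chunk could not be allocated (the caller then gives up, as
   the patch does by setting ctx->too_deep). */
static struct toy_wait_reports*
toy_report_append(struct toy_wait_reports* chunk, void* waitee)
{
    if (chunk->used == TOY_WAITEES) {
        chunk->next = malloc(sizeof(*chunk));
        if (chunk->next == NULL) {
            return NULL;
        }
        chunk = chunk->next;
        chunk->next = NULL;
        chunk->used = 0;
    }
    chunk->waitees[chunk->used++] = waitee;
    return chunk;
}

/* Walks every chunk, hands its entries to the upper layer, and frees all
   chunks except the first (stack-resident) one, mirroring
   lock_report_waiters_to_mysql(). */
static void toy_report_drain(struct toy_wait_reports* first)
{
    struct toy_wait_reports* p = first;

    while (p != NULL) {
        struct toy_wait_reports* q = p->next;
        /* ... report p->waitees[0 .. p->used - 1] here ... */
        if (p != first) {
            free(p);
        }
        p = q;
    }
}

Sizing the first chunk at 64 entries keeps the common case (few waitees) allocation-free while still coping with arbitrarily long wait queues.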
If a deadlock is found this function will resolve the dadlock by choosing a victim transaction @@ -4361,13 +4472,23 @@ lock_deadlock_check_and_resolve( const lock_t* lock, /*!< in: lock the transaction is requesting */ const trx_t* trx) /*!< in: transaction */ { - trx_id_t victim_trx_id; + trx_id_t victim_trx_id; + struct thd_wait_reports waitee_buf; + struct thd_wait_reports*waitee_buf_ptr; + THD* start_mysql_thd; ut_ad(trx != NULL); ut_ad(lock != NULL); ut_ad(lock_mutex_own()); assert_trx_in_list(trx); + start_mysql_thd = trx->mysql_thd; + if (start_mysql_thd && thd_need_wait_for(start_mysql_thd)) { + waitee_buf_ptr = &waitee_buf; + } else { + waitee_buf_ptr = NULL; + } + /* Try and resolve as many deadlocks as possible. */ do { lock_deadlock_ctx_t ctx; @@ -4380,7 +4501,19 @@ lock_deadlock_check_and_resolve( ctx.wait_lock = lock; ctx.mark_start = lock_mark_counter; - victim_trx_id = lock_deadlock_search(&ctx); + if (waitee_buf_ptr) { + waitee_buf_ptr->next = NULL; + waitee_buf_ptr->used = 0; + } + + victim_trx_id = lock_deadlock_search(&ctx, waitee_buf_ptr); + + /* Report waits to upper layer, as needed. */ + if (waitee_buf_ptr) { + lock_report_waiters_to_mysql(waitee_buf_ptr, + start_mysql_thd, + victim_trx_id); + } /* Search too deep, we rollback the joining transaction. */ if (ctx.too_deep) { diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc index ee39f5846ca..8c7f2b319c0 100644 --- a/storage/xtradb/log/log0log.cc +++ b/storage/xtradb/log/log0log.cc @@ -197,22 +197,24 @@ log_buf_pool_get_oldest_modification(void) } /****************************************************************//** -Safely reads the log_sys->tracked_lsn value. Uses atomic operations -if available, otherwise this field is protected with the log system -mutex. The writer counterpart function is log_set_tracked_lsn() in -log0online.c. - -@return log_sys->tracked_lsn value. */ -UNIV_INLINE +Returns the oldest modified block lsn in the pool, or log_sys->lsn if none +exists. 
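The current_lock_mutex_owner handshake above is a re-entrancy guard: thd_report_wait_for() may call straight back into innobase_kill_query(), which would otherwise try to take the lock_sys->mutex that the deadlock checker already holds. A hedged sketch of the idea with pthreads; toy_* names are illustrative, and in the real code the check compares the stored THD against current_thd inside innobase_kill_query():

#include <pthread.h>
#include <stddef.h>

struct toy_trx {
    const void* lock_mutex_owner; /* thd that already holds the lock mutex */
};

static pthread_mutex_t toy_lock_sys_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Re-entrant path, i.e. a kill request arriving from inside the report
   callback. Only lock the mutex if the reporting thread does not hold it. */
static void toy_kill_query(struct toy_trx* trx, const void* thd)
{
    int already_owned = (trx->lock_mutex_owner == thd);

    if (!already_owned) {
        pthread_mutex_lock(&toy_lock_sys_mutex);
    }
    /* ... cancel the victim's lock wait ... */
    if (!already_owned) {
        pthread_mutex_unlock(&toy_lock_sys_mutex);
    }
}

/* Caller already holds toy_lock_sys_mutex while reporting waits. */
static void toy_report_wait(struct toy_trx* waitee, const void* reporter_thd)
{
    waitee->lock_mutex_owner = reporter_thd;
    toy_kill_query(waitee, reporter_thd); /* stands in for the callback */
    waitee->lock_mutex_owner = NULL;
}

int main(void)
{
    struct toy_trx victim = { NULL };
    const void* reporter = (const void*) &victim; /* any unique token */

    pthread_mutex_lock(&toy_lock_sys_mutex);  /* deadlock checker holds it */
    toy_report_wait(&victim, reporter);       /* re-enters without deadlock */
    pthread_mutex_unlock(&toy_lock_sys_mutex);
    return 0;
}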
+@return LSN of oldest modification */ +static lsn_t -log_get_tracked_lsn() +log_buf_pool_get_oldest_modification_peek(void) +/*===========================================*/ { -#ifdef HAVE_ATOMIC_BUILTINS_64 - return os_atomic_increment_uint64(&log_sys->tracked_lsn, 0); -#else - ut_ad(mutex_own(&(log_sys->mutex))); - return log_sys->tracked_lsn; -#endif + lsn_t lsn; + + lsn = buf_pool_get_oldest_modification_peek(); + + if (!lsn) { + + lsn = log_sys->lsn; + } + + return(lsn); } /****************************************************************//** @@ -639,7 +641,7 @@ log_pad_current_log_block(void) byte b = MLOG_DUMMY_RECORD; ulint pad_length; ulint i; - ib_uint64_t lsn; + lsn_t lsn; /* We retrieve lsn only because otherwise gcc crashed on HP-UX */ lsn = log_reserve_and_open(OS_FILE_LOG_BLOCK_SIZE); @@ -647,6 +649,12 @@ log_pad_current_log_block(void) pad_length = OS_FILE_LOG_BLOCK_SIZE - (log_sys->buf_free % OS_FILE_LOG_BLOCK_SIZE) - LOG_BLOCK_TRL_SIZE; + if (pad_length + == (OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE + - LOG_BLOCK_TRL_SIZE)) { + + pad_length = 0; + } for (i = 0; i < pad_length; i++) { log_write_low(&b, 1); @@ -1347,7 +1355,7 @@ log_group_file_header_flush( mach_write_to_4(buf + LOG_GROUP_ID, group->id); mach_write_to_8(buf + LOG_FILE_START_LSN, start_lsn); - /* Wipe over possible label of ibbackup --restore */ + /* Wipe over possible label of mysqlbackup --restore */ memcpy(buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP, " ", 4); mach_write_to_4(buf + LOG_FILE_OS_FILE_LOG_BLOCK_SIZE, @@ -1950,6 +1958,7 @@ log_io_complete_checkpoint(void) /* Wake the redo log watching thread to parse the log up to this checkpoint. */ if (srv_track_changed_pages) { + os_event_reset(srv_redo_log_tracked_event); os_event_set(srv_checkpoint_completed_event); } } @@ -2121,7 +2130,7 @@ log_reset_first_header_and_checkpoint( lsn = start + LOG_BLOCK_HDR_SIZE; - /* Write the label of ibbackup --restore */ + /* Write the label of mysqlbackup --restore */ strcpy((char*) hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP, "ibbackup "); ut_sprintf_timestamp((char*) hdr_buf @@ -3152,8 +3161,7 @@ void log_archive_all(void) /*=================*/ { - ib_uint64_t present_lsn; - ulint dummy; + lsn_t present_lsn; mutex_enter(&(log_sys->mutex)); @@ -3170,6 +3178,9 @@ log_archive_all(void) log_pad_current_log_block(); for (;;) { + + ulint archived_bytes; + mutex_enter(&(log_sys->mutex)); if (present_lsn <= log_sys->archived_lsn) { @@ -3181,7 +3192,10 @@ log_archive_all(void) mutex_exit(&(log_sys->mutex)); - log_archive_do(TRUE, &dummy); + log_archive_do(TRUE, &archived_bytes); + + if (archived_bytes == 0) + return; } } @@ -3681,8 +3695,8 @@ loop: /* Wake the log tracking thread which will then immediatelly quit because of srv_shutdown_state value */ if (srv_track_changed_pages) { + os_event_reset(srv_redo_log_tracked_event); os_event_set(srv_checkpoint_completed_event); - os_event_wait(srv_redo_log_thread_finished_event); } fil_close_all_files(); @@ -3759,6 +3773,7 @@ loop: /* Signal the log following thread to quit */ if (srv_track_changed_pages) { + os_event_reset(srv_redo_log_tracked_event); os_event_set(srv_checkpoint_completed_event); } @@ -3786,10 +3801,6 @@ loop: fil_flush_file_spaces(FIL_TABLESPACE); } - if (srv_track_changed_pages) { - os_event_wait(srv_redo_log_thread_finished_event); - } - fil_close_all_files(); /* Make some checks that the server really is quiet */ @@ -3885,7 +3896,7 @@ log_print( double time_elapsed; time_t current_time; - mutex_enter(&(log_sys->mutex)); + // mutex_enter(&(log_sys->mutex)); 
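The new early-out in log_pad_current_log_block() above covers the case where the current log block holds nothing but its header, so padding it to the block boundary would only waste a whole block. A worked example with the usual InnoDB constants (512-byte log blocks, 12-byte block header, 4-byte trailer); the constants and buf_free values here are assumptions for illustration only:

#include <stdio.h>

#define BLOCK_SIZE 512UL
#define HDR_SIZE    12UL
#define TRL_SIZE     4UL

static unsigned long pad_length_for(unsigned long buf_free)
{
    unsigned long pad = BLOCK_SIZE - (buf_free % BLOCK_SIZE) - TRL_SIZE;

    /* Added special case: the block contains only its header, i.e. it is
       effectively empty, so no padding is needed at all. */
    if (pad == BLOCK_SIZE - HDR_SIZE - TRL_SIZE) {
        pad = 0;
    }
    return pad;
}

int main(void)
{
    printf("partly filled block: pad %lu bytes\n", pad_length_for(512 + 200)); /* 308 */
    printf("empty block:         pad %lu bytes\n", pad_length_for(512 + 12));  /* 0 (was 496) */
    return 0;
}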
fprintf(file, "Log sequence number " LSN_PF "\n" @@ -3894,7 +3905,7 @@ log_print( "Last checkpoint at " LSN_PF "\n", log_sys->lsn, log_sys->flushed_to_disk_lsn, - log_buf_pool_get_oldest_modification(), + log_buf_pool_get_oldest_modification_peek(), log_sys->last_checkpoint_lsn); fprintf(file, @@ -3904,7 +3915,7 @@ log_print( "Checkpoint age " LSN_PF "\n", log_sys->max_checkpoint_age, log_sys->max_checkpoint_age_async, - log_sys->lsn -log_buf_pool_get_oldest_modification(), + log_sys->lsn -log_buf_pool_get_oldest_modification_peek(), log_sys->lsn - log_sys->last_checkpoint_lsn); current_time = time(NULL); @@ -3933,14 +3944,14 @@ log_print( "Log tracking enabled\n" "Log tracked up to " LSN_PF "\n" "Max tracked LSN age " LSN_PF "\n", - log_get_tracked_lsn(), + log_get_tracked_lsn_peek(), log_sys->max_checkpoint_age); } log_sys->n_log_ios_old = log_sys->n_log_ios; log_sys->last_printout_time = current_time; - mutex_exit(&(log_sys->mutex)); + //mutex_exit(&(log_sys->mutex)); } /**********************************************************************//** diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc index bc930572c09..dbd8f46caaa 100644 --- a/storage/xtradb/log/log0online.cc +++ b/storage/xtradb/log/log0online.cc @@ -1188,6 +1188,9 @@ log_online_write_bitmap(void) bmp_tree_node = (ib_rbt_node_t*) rbt_next(log_bmp_sys->modified_pages, bmp_tree_node); + + DBUG_EXECUTE_IF("bitmap_page_2_write_error", + DBUG_SET("+d,bitmap_page_write_error");); } rbt_reset(log_bmp_sys->modified_pages); @@ -1253,6 +1256,7 @@ log_online_follow_redo_log(void) /*********************************************************************//** Diagnose a bitmap file range setup failure and free the partially-initialized bitmap file range. */ +UNIV_COLD static void log_online_diagnose_inconsistent_dir( @@ -1434,26 +1438,30 @@ log_online_setup_bitmap_file_range( return FALSE; } -#ifdef UNIV_DEBUG - if (!bitmap_files->files[0].seq_num) { + if (!bitmap_files->files[0].seq_num + || bitmap_files->files[0].seq_num != first_file_seq_num) { log_online_diagnose_inconsistent_dir(bitmap_files); return FALSE; } - ut_ad(bitmap_files->files[0].seq_num == first_file_seq_num); + { size_t i; for (i = 1; i < bitmap_files->count; i++) { if (!bitmap_files->files[i].seq_num) { break; } - ut_ad(bitmap_files->files[i].seq_num - > bitmap_files->files[i - 1].seq_num); - ut_ad(bitmap_files->files[i].start_lsn - >= bitmap_files->files[i - 1].start_lsn); + if ((bitmap_files->files[i].seq_num + <= bitmap_files->files[i - 1].seq_num) + || (bitmap_files->files[i].start_lsn + < bitmap_files->files[i - 1].start_lsn)) { + + log_online_diagnose_inconsistent_dir( + bitmap_files); + return FALSE; + } } } -#endif return TRUE; } @@ -1576,6 +1584,17 @@ log_online_bitmap_iterator_init( { ut_a(i); + if (UNIV_UNLIKELY(min_lsn > max_lsn)) { + + /* Empty range */ + i->in_files.count = 0; + i->in_files.files = NULL; + i->in.file = os_file_invalid; + i->page = NULL; + i->failed = FALSE; + return TRUE; + } + if (!log_online_setup_bitmap_file_range(&i->in_files, min_lsn, max_lsn)) { diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc index a7e3333c4fd..7e9a26ef962 100644 --- a/storage/xtradb/log/log0recv.cc +++ b/storage/xtradb/log/log0recv.cc @@ -67,7 +67,7 @@ Created 9/20/1997 Heikki Tuuri /** This is set to FALSE if the backup was originally taken with the -ibbackup --include regexp option: then we do not want to create tables in +mysqlbackup --include regexp option: then we do not want to create tables in directories 
which were not included */ UNIV_INTERN ibool recv_replay_file_ops = TRUE; #endif /* !UNIV_HOTBACKUP */ @@ -2128,7 +2128,7 @@ recv_apply_log_recs_for_backup(void) /* Extend the tablespace's last file if the page_no does not fall inside its bounds; we assume the last - file is auto-extending, and ibbackup copied the file + file is auto-extending, and mysqlbackup copied the file when it still was smaller */ success = fil_extend_space_to_desired_size( @@ -2499,10 +2499,10 @@ loop: #ifdef UNIV_HOTBACKUP if (recv_replay_file_ops) { - /* In ibbackup --apply-log, replay an .ibd file - operation, if possible; note that - fil_path_to_mysql_datadir is set in ibbackup to - point to the datadir we should use there */ + /* In mysqlbackup --apply-log, replay an .ibd + file operation, if possible; note that + fil_path_to_mysql_datadir is set in mysqlbackup + to point to the datadir we should use there */ if (NULL == fil_op_log_parse_or_replay( body, end_ptr, type, @@ -3167,17 +3167,17 @@ recv_recovery_from_checkpoint_start_func( if (srv_read_only_mode) { ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot restore from ibbackup, InnoDB running " - "in read-only mode!"); + "Cannot restore from mysqlbackup, InnoDB " + "running in read-only mode!"); return(DB_ERROR); } - /* This log file was created by ibbackup --restore: print + /* This log file was created by mysqlbackup --restore: print a note to the user about it */ ib_logf(IB_LOG_LEVEL_INFO, - "The log file was created by ibbackup --apply-log " + "The log file was created by mysqlbackup --apply-log " "at %s. The following crash recovery is part of a " "normal restore.", log_hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP); diff --git a/storage/xtradb/mysql-test/storage_engine/alter_tablespace.opt b/storage/xtradb/mysql-test/storage_engine/alter_tablespace.opt new file mode 100644 index 00000000000..cf4b117e1b1 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/alter_tablespace.opt @@ -0,0 +1,2 @@ +--innodb-file-per-table=1 + diff --git a/storage/xtradb/mysql-test/storage_engine/autoinc_secondary.rdiff b/storage/xtradb/mysql-test/storage_engine/autoinc_secondary.rdiff new file mode 100644 index 00000000000..00cda7c4435 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/autoinc_secondary.rdiff @@ -0,0 +1,30 @@ +--- suite/storage_engine/autoinc_secondary.result 2012-07-12 04:34:18.153885986 +0400 ++++ suite/storage_engine/autoinc_secondary.reject 2012-07-15 17:47:03.937703666 +0400 +@@ -13,18 +13,15 @@ + 5 a + DROP TABLE t1; + CREATE TABLE t1 (a <CHAR_COLUMN>, b <INT_COLUMN> AUTO_INCREMENT, PRIMARY KEY (a,b)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-INSERT INTO t1 (a) VALUES ('a'),('b'),('b'),('c'),('a'); +-SELECT LAST_INSERT_ID(); +-LAST_INSERT_ID() +-1 +-SELECT a,b FROM t1; +-a b +-a 1 +-a 2 +-b 1 +-b 2 +-c 1 +-DROP TABLE t1; ++ERROR 42000: Incorrect table definition; there can be only one auto column and it must be defined as a key ++# ERROR: Statement ended with errno 1075, errname ER_WRONG_AUTO_KEY (expected to succeed) ++# ------------ UNEXPECTED RESULT ------------ ++# The statement|command finished with ER_WRONG_AUTO_KEY. ++# Multi-part keys or PK or AUTO_INCREMENT (on a secondary column) or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. ++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. ++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. 
++# Also, this problem may cause a chain effect (more errors of different kinds in the test). ++# ------------------------------------------- + CREATE TABLE t1 (a <CHAR_COLUMN>, b <INT_COLUMN> AUTO_INCREMENT, PRIMARY KEY (a,b), <CUSTOM_INDEX>(b)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + INSERT INTO t1 (a) VALUES ('a'),('b'),('b'),('c'),('a'); + SELECT LAST_INSERT_ID(); diff --git a/storage/xtradb/mysql-test/storage_engine/cache_index.rdiff b/storage/xtradb/mysql-test/storage_engine/cache_index.rdiff new file mode 100644 index 00000000000..e04df87aa34 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/cache_index.rdiff @@ -0,0 +1,71 @@ +--- suite/storage_engine/cache_index.result 2012-07-15 00:22:19.822493731 +0400 ++++ suite/storage_engine/cache_index.reject 2012-07-15 17:47:18.321522834 +0400 +@@ -12,31 +12,31 @@ + SET GLOBAL <CACHE_NAME>.key_buffer_size=128*1024; + CACHE INDEX t1 INDEX (a), t2 IN <CACHE_NAME>; + Table Op Msg_type Msg_text +-test.t1 assign_to_keycache status OK +-test.t2 assign_to_keycache status OK ++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache ++test.t2 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache + LOAD INDEX INTO CACHE t1, t2; + Table Op Msg_type Msg_text +-test.t1 preload_keys status OK +-test.t2 preload_keys status OK ++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys ++test.t2 preload_keys note The storage engine for the table doesn't support preload_keys + INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d'); + SET GLOBAL <CACHE_NAME>.key_buffer_size=8*1024; + LOAD INDEX INTO CACHE t1, t2 IGNORE LEAVES; + Table Op Msg_type Msg_text +-test.t1 preload_keys status OK +-test.t2 preload_keys status OK ++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys ++test.t2 preload_keys note The storage engine for the table doesn't support preload_keys + SET GLOBAL <CACHE_NAME>.key_cache_age_threshold = 100, <CACHE_NAME>.key_cache_block_size = 512, <CACHE_NAME>.key_cache_division_limit = 1, <CACHE_NAME>.key_cache_segments=2; + INSERT INTO t1 (a,b) VALUES (5,'e'),(6,'f'); + LOAD INDEX INTO CACHE t1; + Table Op Msg_type Msg_text +-test.t1 preload_keys status OK ++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys + SET GLOBAL new_<CACHE_NAME>.key_buffer_size=128*1024; + CACHE INDEX t1 IN new_<CACHE_NAME>; + Table Op Msg_type Msg_text +-test.t1 assign_to_keycache status OK ++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache + INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h'); + LOAD INDEX INTO CACHE t1 IGNORE LEAVES; + Table Op Msg_type Msg_text +-test.t1 preload_keys status OK ++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys + INSERT INTO t1 (a,b) VALUES (9,'i'); + DROP TABLE t2; + DROP TABLE t1; +@@ -47,11 +47,11 @@ + ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + CACHE INDEX t1 IN <CACHE_NAME>; + Table Op Msg_type Msg_text +-test.t1 assign_to_keycache status OK ++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache + INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + LOAD INDEX INTO CACHE t1; + Table Op Msg_type Msg_text +-test.t1 preload_keys status OK ++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys + DROP TABLE t1; + CREATE TABLE t1 (a <INT_COLUMN>, + b <CHAR_COLUMN>, +@@ -59,11 +59,11 
@@ + ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + CACHE INDEX t1 IN <CACHE_NAME>; + Table Op Msg_type Msg_text +-test.t1 assign_to_keycache status OK ++test.t1 assign_to_keycache note The storage engine for the table doesn't support assign_to_keycache + INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + LOAD INDEX INTO CACHE t1; + Table Op Msg_type Msg_text +-test.t1 preload_keys status OK ++test.t1 preload_keys note The storage engine for the table doesn't support preload_keys + DROP TABLE t1; + SET GLOBAL <CACHE_NAME>.key_buffer_size=0; + SET GLOBAL new_<CACHE_NAME>.key_buffer_size=0; diff --git a/storage/xtradb/mysql-test/storage_engine/checksum_table_live.rdiff b/storage/xtradb/mysql-test/storage_engine/checksum_table_live.rdiff new file mode 100644 index 00000000000..71c782848a6 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/checksum_table_live.rdiff @@ -0,0 +1,13 @@ +--- suite/storage_engine/checksum_table_live.result 2012-07-12 21:05:44.497062968 +0400 ++++ suite/storage_engine/checksum_table_live.reject 2012-07-15 17:47:28.105399836 +0400 +@@ -11,8 +11,8 @@ + test.t1 4272806499 + CHECKSUM TABLE t1, t2 QUICK; + Table Checksum +-test.t1 4272806499 +-test.t2 0 ++test.t1 NULL ++test.t2 NULL + CHECKSUM TABLE t1, t2 EXTENDED; + Table Checksum + test.t1 4272806499 diff --git a/storage/xtradb/mysql-test/storage_engine/col_opt_not_null.opt b/storage/xtradb/mysql-test/storage_engine/col_opt_not_null.opt new file mode 100644 index 00000000000..40445305fc6 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/col_opt_not_null.opt @@ -0,0 +1 @@ +--innodb_log_file_size=100M diff --git a/storage/xtradb/mysql-test/storage_engine/col_opt_null.opt b/storage/xtradb/mysql-test/storage_engine/col_opt_null.opt new file mode 100644 index 00000000000..40445305fc6 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/col_opt_null.opt @@ -0,0 +1 @@ +--innodb_log_file_size=100M diff --git a/storage/xtradb/mysql-test/storage_engine/define_engine.inc b/storage/xtradb/mysql-test/storage_engine/define_engine.inc new file mode 100644 index 00000000000..77e384d2351 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/define_engine.inc @@ -0,0 +1,49 @@ +########################################### +# +# This is a template of the include file define_engine.inc which +# should be placed in storage/<engine>/mysql-test/storage_engine folder. +# +################################ +# +# The name of the engine under test must be defined in $ENGINE variable. +# You can set it either here (uncomment and edit) or in your environment. +# +let $ENGINE = InnoDB; +# +################################ +# +# The following three variables define specific options for columns and tables. +# Normally there should be none needed, but for some engines it can be different. +# If the engine requires specific column option for all or indexed columns, +# set them inside the comment, e.g. /*!NOT NULL*/. +# Do the same for table options if needed, e.g. 
/*!INSERT_METHOD=LAST*/ + +let $default_col_opts = /*!*/; +let $default_col_indexed_opts = /*!*/; +let $default_tbl_opts = /*!*/; + +# INDEX, UNIQUE INDEX, PRIMARY KEY, special index type - choose the fist that the engine allows, +# or set it to /*!*/ if none is supported + +let $default_index = /*!INDEX*/; + +# If the engine does not support the following types, replace them with the closest possible + +let $default_int_type = INT(11); +let $default_char_type = CHAR(8); + +################################ + +--disable_query_log +--disable_result_log + +# Here you can place your custom MTR code which needs to be executed before each test, +# e.g. creation of an additional schema or table, etc. +# The cleanup part should be defined in cleanup_engine.inc + +CALL mtr.add_suppression("InnoDB: Resizing redo log from .* to .* pages, LSN=.*"); +CALL mtr.add_suppression("InnoDB: Starting to delete and rewrite log files."); +CALL mtr.add_suppression("InnoDB: New log files created, LSN=.*"); + +--enable_query_log +--enable_result_log diff --git a/storage/xtradb/mysql-test/storage_engine/disabled.def b/storage/xtradb/mysql-test/storage_engine/disabled.def new file mode 100644 index 00000000000..2f3793047f4 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/disabled.def @@ -0,0 +1,8 @@ +autoinc_vars : MySQL:65225 (InnoDB miscalculates auto-increment) +tbl_opt_ai : MySQL:65901 (AUTO_INCREMENT option on InnoDB table is ignored if added before autoinc column) +delete_low_prio : InnoDB does not use table-level locking +insert_high_prio : InnoDB does not use table-level locking +insert_low_prio : InnoDB does not use table-level locking +select_high_prio : InnoDB does not use table-level locking +update_low_prio : InnoDB does not use table-level locking + diff --git a/storage/xtradb/mysql-test/storage_engine/fulltext_search.rdiff b/storage/xtradb/mysql-test/storage_engine/fulltext_search.rdiff new file mode 100644 index 00000000000..a68fe83070e --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/fulltext_search.rdiff @@ -0,0 +1,49 @@ +--- suite/storage_engine/fulltext_search.result 2013-11-27 18:50:16.000000000 +0400 ++++ suite/storage_engine/fulltext_search.reject 2014-02-05 15:33:26.000000000 +0400 +@@ -52,15 +52,14 @@ + INSERT INTO t1 (v0,v1,v2) VALUES ('text4','Contributing more...','...is a good idea'),('text5','test','test'); + SELECT v0, MATCH(v1) AGAINST('contributing') AS rating FROM t1 WHERE MATCH(v1) AGAINST ('contributing'); + v0 rating +-text4 1.3705332279205322 ++text4 0.4885590672492981 + SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-test1 +critical +Cook*' IN BOOLEAN MODE); +-v0 +-text1 ++ERROR HY000: Can't find FULLTEXT index matching the column list + SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-patch +critical +Cook*' IN BOOLEAN MODE); +-v0 ++ERROR HY000: Can't find FULLTEXT index matching the column list + SELECT v0, MATCH(v1) AGAINST('database' WITH QUERY EXPANSION) AS rating FROM t1 WHERE MATCH(v1) AGAINST ('database' WITH QUERY EXPANSION); + v0 rating +-text1 178.11756896972656 ++text1 151.4530487060547 + DROP TABLE t1; + CREATE TABLE t1 (v0 VARCHAR(64) <CUSTOM_COL_OPTIONS>, + v1 VARCHAR(16384) <CUSTOM_COL_OPTIONS>, +@@ -112,14 +111,15 @@ + ), ('text2','test1','test2'); + SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('contributing' IN NATURAL LANGUAGE MODE); + v0 ++text1 + INSERT INTO t1 (v0,v1,v2) VALUES ('text3','test','test'); + SELECT v0, MATCH(v1,v2) AGAINST('contributing' IN NATURAL LANGUAGE MODE) AS rating FROM t1 WHERE MATCH(v1,v2) AGAINST 
('contributing' IN NATURAL LANGUAGE MODE); + v0 rating +-text1 0.2809644043445587 ++text1 0.45528939366340637 + INSERT INTO t1 (v0,v1,v2) VALUES ('text4','Contributing more...','...is a good idea'),('text5','test','test'); + SELECT v0, MATCH(v1) AGAINST('contributing') AS rating FROM t1 WHERE MATCH(v1) AGAINST ('contributing'); + v0 rating +-text4 1.3705332279205322 ++text4 0.4885590672492981 + SELECT v0 FROM t1 WHERE MATCH(v1,v2) AGAINST ('-test1 +critical +Cook*' IN BOOLEAN MODE); + v0 + text1 +@@ -127,6 +127,6 @@ + v0 + SELECT v0, MATCH(v1,v2) AGAINST('database' WITH QUERY EXPANSION) AS rating FROM t1 WHERE MATCH(v1,v2) AGAINST ('database' WITH QUERY EXPANSION); + v0 rating +-text1 190.56150817871094 +-text4 1.1758291721343994 ++text1 229.60874938964844 ++text4 0.31671249866485596 + DROP TABLE t1; diff --git a/storage/xtradb/mysql-test/storage_engine/index_enable_disable.rdiff b/storage/xtradb/mysql-test/storage_engine/index_enable_disable.rdiff new file mode 100644 index 00000000000..f8e812e7edb --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/index_enable_disable.rdiff @@ -0,0 +1,33 @@ +--- suite/storage_engine/index_enable_disable.result 2012-07-15 00:30:05.296641931 +0400 ++++ suite/storage_engine/index_enable_disable.reject 2012-07-15 17:49:12.988081281 +0400 +@@ -11,15 +11,19 @@ + Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment + t1 1 a 1 a # # NULL NULL YES BTREE + ALTER TABLE t1 DISABLE KEYS; ++Warnings: ++Note 1031 Storage engine <STORAGE_ENGINE> of the table `test`.`t1` doesn't have this option + SHOW INDEX IN t1; + Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +-t1 1 a 1 a # # NULL NULL YES BTREE disabled ++t1 1 a 1 a # # NULL NULL YES BTREE + EXPLAIN SELECT a FROM t1 ORDER BY a; + id select_type table type possible_keys key key_len ref rows Extra +-1 SIMPLE t1 ALL NULL NULL NULL NULL 19 Using filesort ++1 SIMPLE t1 index NULL a 5 NULL 19 Using index + INSERT INTO t1 (a) VALUES + (11),(12),(13),(14),(15),(16),(17),(18),(19),(20); + ALTER TABLE t1 ENABLE KEYS; ++Warnings: ++Note 1031 Storage engine <STORAGE_ENGINE> of the table `test`.`t1` doesn't have this option + SHOW INDEX IN t1; + Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment + t1 1 a 1 a # # NULL NULL YES BTREE +@@ -32,6 +36,8 @@ + (1),(2),(3),(4),(5),(6),(7),(8),(9), + (21),(22),(23),(24),(25),(26),(27),(28),(29); + ALTER TABLE t1 DISABLE KEYS; ++Warnings: ++Note 1031 Storage engine <STORAGE_ENGINE> of the table `test`.`t1` doesn't have this option + INSERT INTO t1 (a) VALUES (29); + ERROR 23000: Duplicate entry '29' for key 'a' + # Statement ended with one of expected results (ER_DUP_ENTRY,ER_DUP_KEY). 
diff --git a/storage/xtradb/mysql-test/storage_engine/index_type_hash.rdiff b/storage/xtradb/mysql-test/storage_engine/index_type_hash.rdiff new file mode 100644 index 00000000000..02f9d93588f --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/index_type_hash.rdiff @@ -0,0 +1,60 @@ +--- suite/storage_engine/index_type_hash.result 2012-07-15 01:10:17.919128889 +0400 ++++ suite/storage_engine/index_type_hash.reject 2012-07-15 17:49:26.135915989 +0400 +@@ -4,7 +4,7 @@ + ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + SHOW KEYS IN t1; + Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +-t1 1 a 1 a # # NULL NULL # HASH ++t1 1 a 1 a # # NULL NULL # BTREE + DROP TABLE t1; + CREATE TABLE t1 (a <INT_COLUMN>, + b <CHAR_COLUMN>, +@@ -12,8 +12,8 @@ + ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + SHOW KEYS IN t1; + Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +-t1 1 a_b 1 a # # NULL NULL # HASH a_b index +-t1 1 a_b 2 b # # NULL NULL # HASH a_b index ++t1 1 a_b 1 a # # NULL NULL # BTREE a_b index ++t1 1 a_b 2 b # # NULL NULL # BTREE a_b index + DROP TABLE t1; + CREATE TABLE t1 (a <INT_COLUMN>, + b <CHAR_COLUMN>, +@@ -22,8 +22,8 @@ + ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + SHOW KEYS IN t1; + Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +-t1 1 a 1 a # # NULL NULL # HASH +-t1 1 b 1 b # # NULL NULL # HASH ++t1 1 a 1 a # # NULL NULL # BTREE ++t1 1 b 1 b # # NULL NULL # BTREE + DROP TABLE t1; + CREATE TABLE t1 (a <INT_COLUMN>, + b <CHAR_COLUMN>, +@@ -31,7 +31,7 @@ + ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + SHOW KEYS IN t1; + Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +-t1 0 a 1 a # # NULL NULL # HASH ++t1 0 a 1 a # # NULL NULL # BTREE + INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + INSERT INTO t1 (a,b) VALUES (1,'c'); + ERROR 23000: Duplicate entry '1' for key 'a' +@@ -43,7 +43,7 @@ + ALTER TABLE t1 ADD <CUSTOM_INDEX> (a) USING HASH COMMENT 'simple index on a'; + SHOW INDEX FROM t1; + Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +-t1 1 a 1 a # # NULL NULL # HASH simple index on a ++t1 1 a 1 a # # NULL NULL # BTREE simple index on a + ALTER TABLE t1 DROP KEY a; + DROP TABLE t1; + CREATE TABLE t1 (a <INT_COLUMN>, +@@ -52,7 +52,7 @@ + ) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + SHOW KEYS IN t1; + Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +-t1 0 a 1 a # # NULL NULL # HASH ++t1 0 a 1 a # # NULL NULL # BTREE + INSERT INTO t1 (a,b) VALUES (1,'a'),(2,'b'); + INSERT INTO t1 (a,b) VALUES (1,'c'); + ERROR 23000: Duplicate entry '1' for key 'a' diff --git a/storage/xtradb/mysql-test/storage_engine/insert_delayed.rdiff b/storage/xtradb/mysql-test/storage_engine/insert_delayed.rdiff new file mode 100644 index 00000000000..9e6cddf03f0 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/insert_delayed.rdiff @@ -0,0 +1,26 @@ +--- suite/storage_engine/insert_delayed.result 2013-01-23 01:23:49.461254916 +0400 ++++ suite/storage_engine/insert_delayed.reject 2013-01-23 01:47:05.975698364 +0400 +@@ -5,7 +5,16 @@ + connect con0,localhost,root,,; + SET lock_wait_timeout = 1; + INSERT 
DELAYED INTO t1 (a,b) VALUES (3,'c'); ++ERROR HY000: DELAYED option not supported for table 't1' ++# ------------ UNEXPECTED RESULT ------------ ++# The statement|command finished with ER_DELAYED_NOT_SUPPORTED. ++# INSERT DELAYED or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. ++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. ++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. ++# Also, this problem may cause a chain effect (more errors of different kinds in the test). ++# ------------------------------------------- + INSERT DELAYED INTO t1 SET a=4, b='d'; ++ERROR HY000: DELAYED option not supported for table 't1' + INSERT DELAYED INTO t1 (a,b) SELECT 5, 'e'; + ERROR HY000: Lock wait timeout exceeded; try restarting transaction + disconnect con0; +@@ -20,6 +29,4 @@ + a b + 1 f + 2 b +-3 c +-4 d + DROP TABLE t1; diff --git a/storage/xtradb/mysql-test/storage_engine/lock_concurrent.rdiff b/storage/xtradb/mysql-test/storage_engine/lock_concurrent.rdiff new file mode 100644 index 00000000000..fe4a0087fa9 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/lock_concurrent.rdiff @@ -0,0 +1,22 @@ +--- suite/storage_engine/lock_concurrent.result 2012-06-24 23:55:19.539380000 +0400 ++++ suite/storage_engine/lock_concurrent.reject 2012-07-15 17:50:21.279222746 +0400 +@@ -3,10 +3,19 @@ + LOCK TABLES t1 WRITE CONCURRENT, t1 AS t2 READ; + SET lock_wait_timeout = 1; + LOCK TABLES t1 READ LOCAL; ++ERROR HY000: Lock wait timeout exceeded; try restarting transaction ++# ------------ UNEXPECTED RESULT ------------ ++# The statement|command finished with ER_LOCK_WAIT_TIMEOUT. ++# LOCK .. WRITE CONCURRENT or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. ++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. ++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. ++# Also, this problem may cause a chain effect (more errors of different kinds in the test). 
++# ------------------------------------------- + UNLOCK TABLES; + UNLOCK TABLES; + LOCK TABLES t1 READ LOCAL; + LOCK TABLES t1 WRITE CONCURRENT, t1 AS t2 READ; ++ERROR HY000: Lock wait timeout exceeded; try restarting transaction + UNLOCK TABLES; + UNLOCK TABLES; + DROP TABLE t1; diff --git a/storage/xtradb/mysql-test/storage_engine/optimize_table.rdiff b/storage/xtradb/mysql-test/storage_engine/optimize_table.rdiff new file mode 100644 index 00000000000..54d1f600516 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/optimize_table.rdiff @@ -0,0 +1,37 @@ +--- suite/storage_engine/optimize_table.result 2012-07-12 19:13:53.741428591 +0400 ++++ suite/storage_engine/optimize_table.reject 2012-07-15 17:50:30.843102510 +0400 +@@ -5,25 +5,32 @@ + INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d'); + OPTIMIZE TABLE t1; + Table Op Msg_type Msg_text ++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead + test.t1 optimize status OK + INSERT INTO t2 (a,b) VALUES (4,'d'); + OPTIMIZE NO_WRITE_TO_BINLOG TABLE t2; + Table Op Msg_type Msg_text ++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead + test.t2 optimize status OK + INSERT INTO t2 (a,b) VALUES (5,'e'); + INSERT INTO t1 (a,b) VALUES (6,'f'); + OPTIMIZE LOCAL TABLE t1, t2; + Table Op Msg_type Msg_text ++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead + test.t1 optimize status OK ++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead + test.t2 optimize status OK + OPTIMIZE TABLE t1, t2; + Table Op Msg_type Msg_text +-test.t1 optimize status Table is already up to date +-test.t2 optimize status Table is already up to date ++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead ++test.t1 optimize status OK ++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead ++test.t2 optimize status OK + DROP TABLE t1, t2; + CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + INSERT INTO t1 (a,b) VALUES (1,'a'),(100,'b'),(2,'c'),(3,'d'); + OPTIMIZE TABLE t1; + Table Op Msg_type Msg_text ++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead + test.t1 optimize status OK + DROP TABLE t1; diff --git a/storage/xtradb/mysql-test/storage_engine/parts/checksum_table.rdiff b/storage/xtradb/mysql-test/storage_engine/parts/checksum_table.rdiff new file mode 100644 index 00000000000..c8aabb787e9 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/parts/checksum_table.rdiff @@ -0,0 +1,13 @@ +--- suite/storage_engine/parts/checksum_table.result 2013-11-08 22:30:34.000000000 +0400 ++++ suite/storage_engine/parts/checksum_table.reject 2013-11-08 22:32:30.000000000 +0400 +@@ -31,8 +31,8 @@ + test.t1 4272806499 + CHECKSUM TABLE t1, t2 QUICK; + Table Checksum +-test.t1 4272806499 +-test.t2 0 ++test.t1 NULL ++test.t2 NULL + CHECKSUM TABLE t1, t2 EXTENDED; + Table Checksum + test.t1 4272806499 diff --git a/storage/xtradb/mysql-test/storage_engine/parts/create_table.rdiff b/storage/xtradb/mysql-test/storage_engine/parts/create_table.rdiff new file mode 100644 index 00000000000..0df91c6fc6e --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/parts/create_table.rdiff @@ -0,0 +1,20 @@ +--- suite/storage_engine/parts/create_table.result 2012-07-12 21:56:38.618667460 +0400 ++++ suite/storage_engine/parts/create_table.reject 2012-07-15 20:06:43.496358345 
+0400 +@@ -65,7 +65,7 @@ + 1 SIMPLE t1 abc,def # # # # # # # + EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 100; + id select_type table partitions type possible_keys key key_len ref rows Extra +-1 SIMPLE NULL NULL # # # # # # # ++1 SIMPLE t1 def # # # # # # # + INSERT INTO t1 (a) VALUES (50); + ERROR HY000: Table has no partition for value 50 + DROP TABLE t1; +@@ -81,7 +81,7 @@ + 1 SIMPLE t1 abc_abcsp0,def_defsp0 # # # # # # # + EXPLAIN PARTITIONS SELECT a FROM t1 WHERE a = 100; + id select_type table partitions type possible_keys key key_len ref rows Extra +-1 SIMPLE NULL NULL # # # # # # # ++1 SIMPLE t1 def_defsp0 # # # # # # # + SELECT TABLE_SCHEMA, TABLE_NAME, PARTITION_NAME, SUBPARTITION_NAME, PARTITION_METHOD, SUBPARTITION_METHOD + FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 't1'; + TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_METHOD SUBPARTITION_METHOD diff --git a/storage/xtradb/mysql-test/storage_engine/parts/disabled.def b/storage/xtradb/mysql-test/storage_engine/parts/disabled.def new file mode 100644 index 00000000000..796bdfc751b --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/parts/disabled.def @@ -0,0 +1 @@ +repair_table : InnoDB of 5.6.10 does not support repair on partitioned tables (fixed by 5.6.14) diff --git a/storage/xtradb/mysql-test/storage_engine/parts/optimize_table.rdiff b/storage/xtradb/mysql-test/storage_engine/parts/optimize_table.rdiff new file mode 100644 index 00000000000..a35ba5167d9 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/parts/optimize_table.rdiff @@ -0,0 +1,58 @@ +--- suite/storage_engine/parts/optimize_table.result 2013-07-18 22:55:38.000000000 +0400 ++++ suite/storage_engine/parts/optimize_table.reject 2013-08-05 19:45:19.000000000 +0400 +@@ -9,18 +9,22 @@ + INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d'); + ALTER TABLE t1 OPTIMIZE PARTITION p1; + Table Op Msg_type Msg_text ++test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed. + test.t1 optimize status OK + INSERT INTO t2 (a,b) VALUES (4,'d'); + ALTER TABLE t2 OPTIMIZE PARTITION p0 NO_WRITE_TO_BINLOG; + Table Op Msg_type Msg_text ++test.t2 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed. + test.t2 optimize status OK + INSERT INTO t1 (a,b) VALUES (6,'f'); + ALTER TABLE t1 OPTIMIZE PARTITION ALL LOCAL; + Table Op Msg_type Msg_text ++test.t1 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed. + test.t1 optimize status OK + INSERT INTO t2 (a,b) VALUES (5,'e'); + ALTER TABLE t2 OPTIMIZE PARTITION p1,p0; + Table Op Msg_type Msg_text ++test.t2 optimize note Table does not support optimize on partitions. All partitions will be rebuilt and analyzed. 
+ test.t2 optimize status OK + DROP TABLE t1, t2; + DROP TABLE IF EXISTS t1,t2; +@@ -30,25 +34,32 @@ + INSERT INTO t1 (a,b) VALUES (3,'c'),(4,'d'); + OPTIMIZE TABLE t1; + Table Op Msg_type Msg_text ++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead + test.t1 optimize status OK + INSERT INTO t2 (a,b) VALUES (4,'d'); + OPTIMIZE NO_WRITE_TO_BINLOG TABLE t2; + Table Op Msg_type Msg_text ++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead + test.t2 optimize status OK + INSERT INTO t2 (a,b) VALUES (5,'e'); + INSERT INTO t1 (a,b) VALUES (6,'f'); + OPTIMIZE LOCAL TABLE t1, t2; + Table Op Msg_type Msg_text ++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead + test.t1 optimize status OK ++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead + test.t2 optimize status OK + OPTIMIZE TABLE t1, t2; + Table Op Msg_type Msg_text ++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead + test.t1 optimize status OK ++test.t2 optimize note Table does not support optimize, doing recreate + analyze instead + test.t2 optimize status OK + DROP TABLE t1, t2; + CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> PARTITION BY HASH(a) PARTITIONS 2; + INSERT INTO t1 (a,b) VALUES (1,'a'),(100,'b'),(2,'c'),(3,'d'); + OPTIMIZE TABLE t1; + Table Op Msg_type Msg_text ++test.t1 optimize note Table does not support optimize, doing recreate + analyze instead + test.t1 optimize status OK + DROP TABLE t1; diff --git a/storage/xtradb/mysql-test/storage_engine/parts/repair_table.rdiff b/storage/xtradb/mysql-test/storage_engine/parts/repair_table.rdiff new file mode 100644 index 00000000000..35b150e82d1 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/parts/repair_table.rdiff @@ -0,0 +1,158 @@ +--- suite/storage_engine/parts/repair_table.result 2013-07-18 22:55:38.000000000 +0400 ++++ suite/storage_engine/parts/repair_table.reject 2013-08-05 19:54:09.000000000 +0400 +@@ -25,7 +25,7 @@ + INSERT INTO t1 (a,b) VALUES (10,'j'); + ALTER TABLE t1 REPAIR PARTITION p1 QUICK USE_FRM; + Table Op Msg_type Msg_text +-test.t1 repair status OK ++test.t1 repair note The storage engine for the table doesn't support repair + INSERT INTO t2 (a,b) VALUES (12,'l'); + ALTER TABLE t2 REPAIR PARTITION NO_WRITE_TO_BINLOG ALL QUICK EXTENDED USE_FRM; + Table Op Msg_type Msg_text +@@ -58,8 +58,8 @@ + INSERT INTO t2 (a,b) VALUES (11,'k'); + REPAIR TABLE t1, t2 QUICK USE_FRM; + Table Op Msg_type Msg_text +-test.t1 repair status OK +-test.t2 repair status OK ++test.t1 repair note The storage engine for the table doesn't support repair ++test.t2 repair note The storage engine for the table doesn't support repair + INSERT INTO t1 (a,b) VALUES (12,'l'); + INSERT INTO t2 (a,b) VALUES (13,'m'); + REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM; +@@ -101,119 +101,13 @@ + INSERT INTO t1 (a,b) VALUES (10,'j'); + REPAIR TABLE t1 USE_FRM; + Table Op Msg_type Msg_text +-test.t1 repair status OK +-t1#P#p0.MYD +-t1#P#p0.MYI +-t1#P#p1.MYD +-t1#P#p1.MYI ++test.t1 repair note The storage engine for the table doesn't support repair + t1.frm + t1.par + INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); + # Statement ended with one of expected results (0,144). 
+ # If you got a difference in error message, just add it to rdiff file + FLUSH TABLE t1; +-Restoring <DATADIR>/test/t1#P#p0.MYD +-CHECK TABLE t1; +-Table Op Msg_type Msg_text +-test.t1 check error Size of datafile is: 26 Should be: 39 +-test.t1 check error Partition p0 returned error +-test.t1 check error Corrupt +-SELECT a,b FROM t1; +-a b +-8 h +-10 j +-7 g +-15 o +-Warnings: +-Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired +-Error 1194 Table 't1' is marked as crashed and should be repaired +-Error 1034 Number of rows changed from 3 to 2 +-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). +-# If you got a difference in error message, just add it to rdiff file +-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); +-# Statement ended with one of expected results (0,144). +-# If you got a difference in error message, just add it to rdiff file +-FLUSH TABLE t1; +-Restoring <DATADIR>/test/t1#P#p0.MYI +-CHECK TABLE t1; +-Table Op Msg_type Msg_text +-test.t1 check warning Size of datafile is: 39 Should be: 26 +-test.t1 check error Record-count is not ok; is 3 Should be: 2 +-test.t1 check warning Found 3 key parts. Should be: 2 +-test.t1 check error Partition p0 returned error +-test.t1 check error Corrupt +-SELECT a,b FROM t1; +-a b +-8 h +-10 j +-14 n +-7 g +-15 o +-15 o +-Warnings: +-Error 145 Table './test/t1#P#p0' is marked as crashed and should be repaired +-Error 1194 Table 't1' is marked as crashed and should be repaired +-Error 1034 Number of rows changed from 2 to 3 +-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). +-# If you got a difference in error message, just add it to rdiff file +-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); +-# Statement ended with one of expected results (0,144). +-# If you got a difference in error message, just add it to rdiff file +-FLUSH TABLE t1; +-Restoring <DATADIR>/test/t1#P#p1.MYD +-CHECK TABLE t1; +-Table Op Msg_type Msg_text +-test.t1 check error Size of datafile is: 39 Should be: 52 +-test.t1 check error Partition p1 returned error +-test.t1 check error Corrupt +-SELECT a,b FROM t1; +-a b +-8 h +-10 j +-14 n +-14 n +-7 g +-15 o +-15 o +-Warnings: +-Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired +-Error 1194 Table 't1' is marked as crashed and should be repaired +-Error 1034 Number of rows changed from 4 to 3 +-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). +-# If you got a difference in error message, just add it to rdiff file +-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); +-# Statement ended with one of expected results (0,144). +-# If you got a difference in error message, just add it to rdiff file +-FLUSH TABLE t1; +-Restoring <DATADIR>/test/t1#P#p1.MYI +-CHECK TABLE t1; +-Table Op Msg_type Msg_text +-test.t1 check warning Size of datafile is: 52 Should be: 39 +-test.t1 check error Record-count is not ok; is 4 Should be: 3 +-test.t1 check warning Found 4 key parts. Should be: 3 +-test.t1 check error Partition p1 returned error +-test.t1 check error Corrupt +-SELECT a,b FROM t1; +-a b +-8 h +-10 j +-14 n +-14 n +-14 n +-7 g +-15 o +-15 o +-15 o +-Warnings: +-Error 145 Table './test/t1#P#p1' is marked as crashed and should be repaired +-Error 1194 Table 't1' is marked as crashed and should be repaired +-Error 1034 Number of rows changed from 3 to 4 +-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). 
+-# If you got a difference in error message, just add it to rdiff file +-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); +-# Statement ended with one of expected results (0,144). +-# If you got a difference in error message, just add it to rdiff file +-FLUSH TABLE t1; + Restoring <DATADIR>/test/t1.par + CHECK TABLE t1; + Table Op Msg_type Msg_text +@@ -223,14 +117,8 @@ + 8 h + 10 j + 14 n +-14 n +-14 n +-14 n + 7 g + 15 o +-15 o +-15 o +-15 o + # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). + # If you got a difference in error message, just add it to rdiff file + DROP TABLE t1; diff --git a/storage/xtradb/mysql-test/storage_engine/parts/suite.opt b/storage/xtradb/mysql-test/storage_engine/parts/suite.opt new file mode 100644 index 00000000000..66f581b56d0 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/parts/suite.opt @@ -0,0 +1,2 @@ +--innodb + diff --git a/storage/xtradb/mysql-test/storage_engine/repair_table.rdiff b/storage/xtradb/mysql-test/storage_engine/repair_table.rdiff new file mode 100644 index 00000000000..be3709c5833 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/repair_table.rdiff @@ -0,0 +1,138 @@ +--- suite/storage_engine/repair_table.result 2013-10-03 20:35:06.000000000 +0400 ++++ suite/storage_engine/repair_table.reject 2013-11-08 22:04:22.000000000 +0400 +@@ -4,56 +4,57 @@ + CREATE TABLE t2 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + REPAIR TABLE t1; + Table Op Msg_type Msg_text +-test.t1 repair status OK ++test.t1 repair note The storage engine for the table doesn't support repair + INSERT INTO t1 (a,b) VALUES (3,'c'); + INSERT INTO t2 (a,b) VALUES (4,'d'); + REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2; + Table Op Msg_type Msg_text +-test.t1 repair status OK +-test.t2 repair status OK ++test.t1 repair note The storage engine for the table doesn't support repair ++test.t2 repair note The storage engine for the table doesn't support repair + INSERT INTO t2 (a,b) VALUES (5,'e'),(6,'f'); + REPAIR LOCAL TABLE t2; + Table Op Msg_type Msg_text +-test.t2 repair status OK ++test.t2 repair note The storage engine for the table doesn't support repair + INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h'); + INSERT INTO t2 (a,b) VALUES (9,'i'); + REPAIR LOCAL TABLE t2, t1 EXTENDED; + Table Op Msg_type Msg_text +-test.t2 repair status OK +-test.t1 repair status OK ++test.t2 repair note The storage engine for the table doesn't support repair ++test.t1 repair note The storage engine for the table doesn't support repair + INSERT INTO t1 (a,b) VALUES (10,'j'); + INSERT INTO t2 (a,b) VALUES (11,'k'); + REPAIR TABLE t1, t2 QUICK USE_FRM; + Table Op Msg_type Msg_text +-test.t1 repair warning Number of rows changed from 0 to 6 +-test.t1 repair status OK +-test.t2 repair warning Number of rows changed from 0 to 5 +-test.t2 repair status OK ++test.t1 repair note The storage engine for the table doesn't support repair ++test.t2 repair note The storage engine for the table doesn't support repair + INSERT INTO t1 (a,b) VALUES (12,'l'); + INSERT INTO t2 (a,b) VALUES (13,'m'); + REPAIR NO_WRITE_TO_BINLOG TABLE t1, t2 QUICK EXTENDED USE_FRM; + Table Op Msg_type Msg_text +-test.t1 repair warning Number of rows changed from 0 to 7 +-test.t1 repair status OK +-test.t2 repair warning Number of rows changed from 0 to 6 +-test.t2 repair status OK ++test.t1 repair note The storage engine for the table doesn't support repair ++test.t2 repair note The storage engine for the table doesn't support repair + FLUSH TABLE t1; + INSERT 
INTO t1 (a,b) VALUES (14,'n'); +-ERROR HY000: Incorrect file format 't1' + # Statement ended with one of expected results (0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY). + # If you got a difference in error message, just add it to rdiff file + CHECK TABLE t1; + Table Op Msg_type Msg_text +-test.t1 check Error Incorrect file format 't1' +-test.t1 check error Corrupt ++test.t1 check status OK + SELECT a,b FROM t1; +-ERROR HY000: Incorrect file format 't1' ++a b ++1 a ++2 b ++3 c ++7 g ++8 h ++10 j ++12 l ++14 n + # Statement ended with one of expected results (0,130,ER_FAILED_READ_FROM_PAR_FILE,ER_OPEN_AS_READONLY). + # If you got a difference in error message, just add it to rdiff file + REPAIR TABLE t1; + Table Op Msg_type Msg_text +-test.t1 repair Error Incorrect file format 't1' +-test.t1 repair error Corrupt ++test.t1 repair note The storage engine for the table doesn't support repair + DROP TABLE t1, t2; + call mtr.add_suppression("Got an error from thread_id=.*"); + call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table"); +@@ -62,45 +63,32 @@ + CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>, <CUSTOM_INDEX> (a)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + REPAIR TABLE t1; + Table Op Msg_type Msg_text +-test.t1 repair status OK ++test.t1 repair note The storage engine for the table doesn't support repair + INSERT INTO t1 (a,b) VALUES (7,'g'),(8,'h'); + REPAIR TABLE t1 EXTENDED; + Table Op Msg_type Msg_text +-test.t1 repair status OK ++test.t1 repair note The storage engine for the table doesn't support repair + INSERT INTO t1 (a,b) VALUES (10,'j'); + REPAIR TABLE t1 USE_FRM; + Table Op Msg_type Msg_text +-test.t1 repair warning Number of rows changed from 0 to 3 +-test.t1 repair status OK +-t1.MYD +-t1.MYI ++test.t1 repair note The storage engine for the table doesn't support repair + t1.frm ++t1.ibd + INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); + # Statement ended with one of expected results (0,144). + # If you got a difference in error message, just add it to rdiff file + FLUSH TABLE t1; +-Restoring <DATADIR>/test/t1.MYD ++Restoring <DATADIR>/test/t1.ibd + CHECK TABLE t1; + Table Op Msg_type Msg_text +-test.t1 check error Size of datafile is: 39 Should be: 65 +-test.t1 check error Corrupt ++test.t1 check status OK + SELECT a,b FROM t1; +-ERROR HY000: Incorrect key file for table 't1'; try to repair it +-# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). +-# If you got a difference in error message, just add it to rdiff file +-INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); +-ERROR HY000: Table './test/t1' is marked as crashed and last (automatic?) repair failed +-# Statement ended with one of expected results (0,144). +-# If you got a difference in error message, just add it to rdiff file +-FLUSH TABLE t1; +-Restoring <DATADIR>/test/t1.MYI +-CHECK TABLE t1; +-Table Op Msg_type Msg_text +-test.t1 check warning Table is marked as crashed and last repair failed +-test.t1 check error Size of datafile is: 39 Should be: 65 +-test.t1 check error Corrupt +-SELECT a,b FROM t1; +-ERROR HY000: Table './test/t1' is marked as crashed and last (automatic?) repair failed ++a b ++7 g ++8 h ++10 j ++14 n ++15 o + # Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). 
+ # If you got a difference in error message, just add it to rdiff file + DROP TABLE t1; diff --git a/storage/xtradb/mysql-test/storage_engine/suite.opt b/storage/xtradb/mysql-test/storage_engine/suite.opt new file mode 100644 index 00000000000..8c10cefc626 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/suite.opt @@ -0,0 +1 @@ +--innodb diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff new file mode 100644 index 00000000000..e09e50b17ec --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff @@ -0,0 +1,23 @@ +--- suite/storage_engine/tbl_opt_data_index_dir.result 2013-10-03 20:35:06.000000000 +0400 ++++ suite/storage_engine/tbl_opt_data_index_dir.reject 2013-11-08 22:06:54.000000000 +0400 +@@ -1,10 +1,12 @@ + DROP TABLE IF EXISTS t1; ++Warnings: ++Warning 1618 <INDEX DIRECTORY> option ignored + SHOW CREATE TABLE t1; + Table Create Table + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>' ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' + Warnings: + Warning 1618 <INDEX DIRECTORY> option ignored + SHOW CREATE TABLE t1; +@@ -12,5 +14,5 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' INDEX DIRECTORY='<INDEX_DIR>' ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 DATA DIRECTORY='<DATA_DIR>' + DROP TABLE t1; diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_insert_method.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_insert_method.rdiff new file mode 100644 index 00000000000..468b82926f0 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_insert_method.rdiff @@ -0,0 +1,11 @@ +--- suite/storage_engine/tbl_opt_insert_method.result 2012-06-24 23:55:19.539380000 +0400 ++++ suite/storage_engine/tbl_opt_insert_method.reject 2012-07-15 17:51:09.978610512 +0400 +@@ -5,7 +5,7 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 INSERT_METHOD=FIRST ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 + ALTER TABLE t1 INSERT_METHOD=NO; + SHOW CREATE TABLE t1; + Table Create Table diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_key_block_size.opt b/storage/xtradb/mysql-test/storage_engine/tbl_opt_key_block_size.opt new file mode 100644 index 00000000000..7cd737b2b87 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_key_block_size.opt @@ -0,0 +1,3 @@ +--innodb-file-per-table=1 +--innodb-file-format=Barracuda + diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.opt b/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.opt new file mode 100644 index 00000000000..7cd737b2b87 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.opt @@ -0,0 +1,3 @@ +--innodb-file-per-table=1 +--innodb-file-format=Barracuda + diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.rdiff new file mode 100644 index 00000000000..a6572ffa7f0 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_row_format.rdiff @@ -0,0 +1,10 @@ +--- suite/storage_engine/tbl_opt_row_format.result 2012-06-24 
23:55:19.539380000 +0400 ++++ suite/storage_engine/tbl_opt_row_format.reject 2012-07-15 19:26:02.235049157 +0400 +@@ -1,5 +1,7 @@ + DROP TABLE IF EXISTS t1; + CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> ROW_FORMAT=FIXED; ++Warnings: ++Warning 1478 <STORAGE_ENGINE>: assuming ROW_FORMAT=COMPACT. + SHOW CREATE TABLE t1; + Table Create Table + t1 CREATE TABLE `t1` ( diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_union.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_union.rdiff new file mode 100644 index 00000000000..cbdf5818022 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_union.rdiff @@ -0,0 +1,16 @@ +--- suite/storage_engine/tbl_opt_union.result 2012-06-24 23:55:19.539380000 +0400 ++++ suite/storage_engine/tbl_opt_union.reject 2012-07-15 17:51:31.014346053 +0400 +@@ -4,11 +4,11 @@ + Table Create Table + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 UNION=(`child1`) ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 + ALTER TABLE t1 UNION = (child1,child2); + SHOW CREATE TABLE t1; + Table Create Table + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL +-) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 UNION=(`child1`,`child2`) ++) ENGINE=<STORAGE_ENGINE> DEFAULT CHARSET=latin1 + DROP TABLE t1, child1, child2; diff --git a/storage/xtradb/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff b/storage/xtradb/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff new file mode 100644 index 00000000000..e6149be58dc --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff @@ -0,0 +1,18 @@ +--- suite/storage_engine/trx/cons_snapshot_serializable.result 2013-11-27 18:46:36.000000000 +0400 ++++ suite/storage_engine/trx/cons_snapshot_serializable.reject 2013-11-28 19:17:02.000000000 +0400 +@@ -5,12 +5,15 @@ + CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; + START TRANSACTION WITH CONSISTENT SNAPSHOT; ++Warnings: ++Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level. + connection con2; + INSERT INTO t1 (a) VALUES (1); + connection con1; + # If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1) + SELECT a FROM t1; + a ++1 + COMMIT; + connection default; + disconnect con1; diff --git a/storage/xtradb/mysql-test/storage_engine/trx/level_read_committed.rdiff b/storage/xtradb/mysql-test/storage_engine/trx/level_read_committed.rdiff new file mode 100644 index 00000000000..cb64d32138b --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/trx/level_read_committed.rdiff @@ -0,0 +1,11 @@ +--- suite/storage_engine/trx/level_read_committed.result 2013-11-28 19:18:48.000000000 +0400 ++++ suite/storage_engine/trx/level_read_committed.reject 2013-11-28 19:18:59.000000000 +0400 +@@ -77,6 +77,8 @@ + CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; + START TRANSACTION WITH CONSISTENT SNAPSHOT; ++Warnings: ++Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level. 
+ connection con2; + INSERT INTO t1 (a) VALUES (1); + connection con1; diff --git a/storage/xtradb/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff b/storage/xtradb/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff new file mode 100644 index 00000000000..6a79abe3ca5 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff @@ -0,0 +1,11 @@ +--- suite/storage_engine/trx/level_read_uncommitted.result 2013-11-28 19:18:48.000000000 +0400 ++++ suite/storage_engine/trx/level_read_uncommitted.reject 2013-11-28 19:19:50.000000000 +0400 +@@ -102,6 +102,8 @@ + CREATE TABLE t1 (a <INT_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; + SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; + START TRANSACTION WITH CONSISTENT SNAPSHOT; ++Warnings: ++Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level. + connection con2; + INSERT INTO t1 (a) VALUES (1); + connection con1; diff --git a/storage/xtradb/mysql-test/storage_engine/trx/suite.opt b/storage/xtradb/mysql-test/storage_engine/trx/suite.opt new file mode 100644 index 00000000000..64bbe8b554c --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/trx/suite.opt @@ -0,0 +1,3 @@ +--innodb +--innodb-lock-wait-timeout=1 + diff --git a/storage/xtradb/mysql-test/storage_engine/type_blob.opt b/storage/xtradb/mysql-test/storage_engine/type_blob.opt new file mode 100644 index 00000000000..40445305fc6 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/type_blob.opt @@ -0,0 +1 @@ +--innodb_log_file_size=100M diff --git a/storage/xtradb/mysql-test/storage_engine/type_char_indexes.rdiff b/storage/xtradb/mysql-test/storage_engine/type_char_indexes.rdiff new file mode 100644 index 00000000000..7a388552c57 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/type_char_indexes.rdiff @@ -0,0 +1,11 @@ +--- suite/storage_engine/type_char_indexes.result 2012-07-12 19:27:42.191013570 +0400 ++++ suite/storage_engine/type_char_indexes.reject 2012-07-15 17:51:55.810034331 +0400 +@@ -135,7 +135,7 @@ + r3a + EXPLAIN SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16; + id select_type table type possible_keys key key_len ref rows Extra +-# # # range # v16 # # # # ++# # # ALL # NULL # # # # + SELECT c,c20,v16,v128 FROM t1 WHERE v16 = 'varchar1a' OR v16 = 'varchar3a' ORDER BY v16; + c c20 v16 v128 + a char1 varchar1a varchar1b diff --git a/storage/xtradb/mysql-test/storage_engine/type_float_indexes.rdiff b/storage/xtradb/mysql-test/storage_engine/type_float_indexes.rdiff new file mode 100644 index 00000000000..6ebfd61d876 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/type_float_indexes.rdiff @@ -0,0 +1,11 @@ +--- suite/storage_engine/type_float_indexes.result 2012-07-12 19:37:27.031661128 +0400 ++++ suite/storage_engine/type_float_indexes.reject 2012-07-15 17:52:12.189828410 +0400 +@@ -60,7 +60,7 @@ + ALTER TABLE t1 ADD UNIQUE KEY(d); + EXPLAIN SELECT d FROM t1 WHERE r > 0 and d > 0 ORDER BY d; + id select_type table type possible_keys key key_len ref rows Extra +-# # # # # d # # # # ++# # # # # NULL # # # # + SELECT d FROM t1 WHERE r > 0 and d > 0 ORDER BY d; + d + 1.2345 diff --git a/storage/xtradb/mysql-test/storage_engine/type_spatial_indexes.rdiff b/storage/xtradb/mysql-test/storage_engine/type_spatial_indexes.rdiff new file mode 100644 index 00000000000..d3fb59e6ce3 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/type_spatial_indexes.rdiff @@ 
-0,0 +1,712 @@ +--- suite/storage_engine/type_spatial_indexes.result 2013-08-05 18:08:49.000000000 +0400 ++++ suite/storage_engine/type_spatial_indexes.reject 2013-08-05 18:25:24.000000000 +0400 +@@ -702,699 +702,15 @@ + DROP DATABASE IF EXISTS gis_ogs; + CREATE DATABASE gis_ogs; + CREATE TABLE gis_point (fid <INT_COLUMN>, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE gis_line (fid <INT_COLUMN>, g LINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE gis_polygon (fid <INT_COLUMN>, g POLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE gis_multi_point (fid <INT_COLUMN>, g MULTIPOINT NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE gis_multi_line (fid <INT_COLUMN>, g MULTILINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE gis_multi_polygon (fid <INT_COLUMN>, g MULTIPOLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE gis_geometrycollection (fid <INT_COLUMN>, g GEOMETRYCOLLECTION NOT NULL, SPATIAL INDEX(g)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE gis_geometry (fid <INT_COLUMN>, g GEOMETRY NOT NULL) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-USE gis_ogs; +-CREATE TABLE lakes (fid INT <CUSTOM_COL_OPTIONS>, +-name CHAR(64) <CUSTOM_COL_OPTIONS>, +-shore POLYGON NOT NULL, SPATIAL INDEX s(shore)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE road_segments (fid INT <CUSTOM_COL_OPTIONS>, +-name CHAR(64) <CUSTOM_COL_OPTIONS>, +-aliases CHAR(64) <CUSTOM_COL_OPTIONS>, +-num_lanes INT <CUSTOM_COL_OPTIONS>, +-centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE divided_routes (fid INT <CUSTOM_COL_OPTIONS>, +-name CHAR(64) <CUSTOM_COL_OPTIONS>, +-num_lanes INT <CUSTOM_COL_OPTIONS>, +-centerlines MULTILINESTRING NOT NULL, SPATIAL INDEX c(centerlines)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE forests (fid INT <CUSTOM_COL_OPTIONS>, +-name CHAR(64) <CUSTOM_COL_OPTIONS>, +-boundary MULTIPOLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE bridges (fid INT <CUSTOM_COL_OPTIONS>, +-name CHAR(64) <CUSTOM_COL_OPTIONS>, +-position POINT NOT NULL, SPATIAL INDEX p(position)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE streams (fid INT <CUSTOM_COL_OPTIONS>, +-name CHAR(64) <CUSTOM_COL_OPTIONS>, +-centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE buildings (fid INT <CUSTOM_COL_OPTIONS>, +-name CHAR(64) <CUSTOM_COL_OPTIONS>, +-position POINT NOT NULL, +-footprint POLYGON NOT NULL, SPATIAL INDEX p(position), SPATIAL INDEX f(footprint)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE ponds (fid INT <CUSTOM_COL_OPTIONS>, +-name CHAR(64) <CUSTOM_COL_OPTIONS>, +-type CHAR(64) <CUSTOM_COL_OPTIONS>, +-shores MULTIPOLYGON NOT NULL, SPATIAL INDEX s(shores)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE named_places (fid INT <CUSTOM_COL_OPTIONS>, +-name CHAR(64) <CUSTOM_COL_OPTIONS>, +-boundary POLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS>; +-CREATE TABLE map_neatlines (fid INT <CUSTOM_COL_OPTIONS>, +-neatline POLYGON NOT NULL, SPATIAL INDEX n(neatline)) ENGINE=<STORAGE_ENGINE> 
<CUSTOM_TABLE_OPTIONS>; +-USE test; +-SHOW FIELDS FROM gis_point; +-Field Type Null Key Default Extra +-fid int(11) YES NULL +-g point NO MUL NULL +-SHOW FIELDS FROM gis_line; +-Field Type Null Key Default Extra +-fid int(11) YES NULL +-g linestring NO MUL NULL +-SHOW FIELDS FROM gis_polygon; +-Field Type Null Key Default Extra +-fid int(11) YES NULL +-g polygon NO MUL NULL +-SHOW FIELDS FROM gis_multi_point; +-Field Type Null Key Default Extra +-fid int(11) YES NULL +-g multipoint NO MUL NULL +-SHOW FIELDS FROM gis_multi_line; +-Field Type Null Key Default Extra +-fid int(11) YES NULL +-g multilinestring NO MUL NULL +-SHOW FIELDS FROM gis_multi_polygon; +-Field Type Null Key Default Extra +-fid int(11) YES NULL +-g multipolygon NO MUL NULL +-SHOW FIELDS FROM gis_geometrycollection; +-Field Type Null Key Default Extra +-fid int(11) YES NULL +-g geometrycollection NO MUL NULL +-SHOW FIELDS FROM gis_geometry; +-Field Type Null Key Default Extra +-fid int(11) YES NULL +-g geometry NO NULL +-INSERT INTO gis_point (fid,g) VALUES +-(101, PointFromText('POINT(10 10)')), +-(102, PointFromText('POINT(20 10)')), +-(103, PointFromText('POINT(20 20)')), +-(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)')))); +-INSERT INTO gis_line (fid,g) VALUES +-(105, LineFromText('LINESTRING(0 0,0 10,10 0)')), +-(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')), +-(107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), Point(40, 10))))); +-INSERT INTO gis_polygon (fid,g) VALUES +-(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')), +-(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')), +-(110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0)))))); +-INSERT INTO gis_multi_point (fid,g) VALUES +-(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')), +-(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')), +-(113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10))))); +-INSERT INTO gis_multi_line (fid,g) VALUES +-(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')), +-(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')), +-(116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7)))))); +-INSERT INTO gis_multi_polygon (fid,g) VALUES +-(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')), +-(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')), +-(119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3))))))); +-INSERT INTO gis_geometrycollection (fid,g) VALUES +-(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')), +-(121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))), +-(122, GeomFromText('GeometryCollection()')), +-(123, GeomFromText('GeometryCollection EMPTY')); +-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_point; +-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_line; +-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_polygon; +-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_point; +-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_line; +-INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_polygon; +-INSERT into 
gis_geometry (fid,g) SELECT fid,g FROM gis_geometrycollection; +-SELECT fid, AsText(g) FROM gis_point; +-fid AsText(g) +-101 POINT(10 10) +-102 POINT(20 10) +-103 POINT(20 20) +-104 POINT(10 20) +-SELECT fid, AsText(g) FROM gis_line; +-fid AsText(g) +-105 LINESTRING(0 0,0 10,10 0) +-106 LINESTRING(10 10,20 10,20 20,10 20,10 10) +-107 LINESTRING(10 10,40 10) +-SELECT fid, AsText(g) FROM gis_polygon; +-fid AsText(g) +-108 POLYGON((10 10,20 10,20 20,10 20,10 10)) +-109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10)) +-110 POLYGON((0 0,30 0,30 30,0 0)) +-SELECT fid, AsText(g) FROM gis_multi_point; +-fid AsText(g) +-111 MULTIPOINT(0 0,10 10,10 20,20 20) +-112 MULTIPOINT(1 1,11 11,11 21,21 21) +-113 MULTIPOINT(3 6,4 10) +-SELECT fid, AsText(g) FROM gis_multi_line; +-fid AsText(g) +-114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48)) +-115 MULTILINESTRING((10 48,10 21,10 0)) +-116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7)) +-SELECT fid, AsText(g) FROM gis_multi_polygon; +-fid AsText(g) +-117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) +-118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) +-119 MULTIPOLYGON(((0 3,3 3,3 0,0 3))) +-SELECT fid, AsText(g) FROM gis_geometrycollection; +-fid AsText(g) +-120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10)) +-121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9)) +-122 GEOMETRYCOLLECTION EMPTY +-123 GEOMETRYCOLLECTION EMPTY +-SELECT fid, AsText(g) FROM gis_geometry; +-fid AsText(g) +-101 POINT(10 10) +-102 POINT(20 10) +-103 POINT(20 20) +-104 POINT(10 20) +-105 LINESTRING(0 0,0 10,10 0) +-106 LINESTRING(10 10,20 10,20 20,10 20,10 10) +-107 LINESTRING(10 10,40 10) +-108 POLYGON((10 10,20 10,20 20,10 20,10 10)) +-109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10)) +-110 POLYGON((0 0,30 0,30 30,0 0)) +-111 MULTIPOINT(0 0,10 10,10 20,20 20) +-112 MULTIPOINT(1 1,11 11,11 21,21 21) +-113 MULTIPOINT(3 6,4 10) +-114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48)) +-115 MULTILINESTRING((10 48,10 21,10 0)) +-116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7)) +-117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) +-118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) +-119 MULTIPOLYGON(((0 3,3 3,3 0,0 3))) +-120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10)) +-121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9)) +-122 GEOMETRYCOLLECTION EMPTY +-123 GEOMETRYCOLLECTION EMPTY +-SELECT fid, Dimension(g) FROM gis_geometry; +-fid Dimension(g) +-101 0 +-102 0 +-103 0 +-104 0 +-105 1 +-106 1 +-107 1 +-108 2 +-109 2 +-110 2 +-111 0 +-112 0 +-113 0 +-114 1 +-115 1 +-116 1 +-117 2 +-118 2 +-119 2 +-120 1 +-121 1 +-122 0 +-123 0 +-SELECT fid, GeometryType(g) FROM gis_geometry; +-fid GeometryType(g) +-101 POINT +-102 POINT +-103 POINT +-104 POINT +-105 LINESTRING +-106 LINESTRING +-107 LINESTRING +-108 POLYGON +-109 POLYGON +-110 POLYGON +-111 MULTIPOINT +-112 MULTIPOINT +-113 MULTIPOINT +-114 MULTILINESTRING +-115 MULTILINESTRING +-116 MULTILINESTRING +-117 MULTIPOLYGON +-118 MULTIPOLYGON +-119 MULTIPOLYGON +-120 GEOMETRYCOLLECTION +-121 GEOMETRYCOLLECTION +-122 GEOMETRYCOLLECTION +-123 GEOMETRYCOLLECTION +-SELECT fid, IsEmpty(g) FROM gis_geometry; +-fid IsEmpty(g) +-101 0 +-102 0 +-103 0 +-104 0 +-105 0 +-106 0 +-107 0 +-108 0 +-109 0 +-110 0 +-111 0 +-112 0 +-113 0 +-114 
0 +-115 0 +-116 0 +-117 0 +-118 0 +-119 0 +-120 0 +-121 0 +-122 0 +-123 0 +-SELECT fid, AsText(Envelope(g)) FROM gis_geometry; +-fid AsText(Envelope(g)) +-101 POLYGON((10 10,10 10,10 10,10 10,10 10)) +-102 POLYGON((20 10,20 10,20 10,20 10,20 10)) +-103 POLYGON((20 20,20 20,20 20,20 20,20 20)) +-104 POLYGON((10 20,10 20,10 20,10 20,10 20)) +-105 POLYGON((0 0,10 0,10 10,0 10,0 0)) +-106 POLYGON((10 10,20 10,20 20,10 20,10 10)) +-107 POLYGON((10 10,40 10,40 10,10 10,10 10)) +-108 POLYGON((10 10,20 10,20 20,10 20,10 10)) +-109 POLYGON((0 0,50 0,50 50,0 50,0 0)) +-110 POLYGON((0 0,30 0,30 30,0 30,0 0)) +-111 POLYGON((0 0,20 0,20 20,0 20,0 0)) +-112 POLYGON((1 1,21 1,21 21,1 21,1 1)) +-113 POLYGON((3 6,4 6,4 10,3 10,3 6)) +-114 POLYGON((10 0,16 0,16 48,10 48,10 0)) +-115 POLYGON((10 0,10 0,10 48,10 48,10 0)) +-116 POLYGON((1 2,21 2,21 8,1 8,1 2)) +-117 POLYGON((28 0,84 0,84 42,28 42,28 0)) +-118 POLYGON((28 0,84 0,84 42,28 42,28 0)) +-119 POLYGON((0 0,3 0,3 3,0 3,0 0)) +-120 POLYGON((0 0,10 0,10 10,0 10,0 0)) +-121 POLYGON((3 6,44 6,44 9,3 9,3 6)) +-122 GEOMETRYCOLLECTION EMPTY +-123 GEOMETRYCOLLECTION EMPTY +-SELECT fid, X(g) FROM gis_point; +-fid X(g) +-101 10 +-102 20 +-103 20 +-104 10 +-SELECT fid, Y(g) FROM gis_point; +-fid Y(g) +-101 10 +-102 10 +-103 20 +-104 20 +-SELECT fid, AsText(StartPoint(g)) FROM gis_line; +-fid AsText(StartPoint(g)) +-105 POINT(0 0) +-106 POINT(10 10) +-107 POINT(10 10) +-SELECT fid, AsText(EndPoint(g)) FROM gis_line; +-fid AsText(EndPoint(g)) +-105 POINT(10 0) +-106 POINT(10 10) +-107 POINT(40 10) +-SELECT fid, GLength(g) FROM gis_line; +-fid GLength(g) +-105 24.14213562373095 +-106 40 +-107 30 +-SELECT fid, NumPoints(g) FROM gis_line; +-fid NumPoints(g) +-105 3 +-106 5 +-107 2 +-SELECT fid, AsText(PointN(g, 2)) FROM gis_line; +-fid AsText(PointN(g, 2)) +-105 POINT(0 10) +-106 POINT(20 10) +-107 POINT(40 10) +-SELECT fid, IsClosed(g) FROM gis_line; +-fid IsClosed(g) +-105 0 +-106 1 +-107 0 +-SELECT fid, AsText(Centroid(g)) FROM gis_polygon; +-fid AsText(Centroid(g)) +-108 POINT(15 15) +-109 POINT(25.416666666666668 25.416666666666668) +-110 POINT(20 10) +-SELECT fid, Area(g) FROM gis_polygon; +-fid Area(g) +-108 100 +-109 2400 +-110 450 +-SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon; +-fid AsText(ExteriorRing(g)) +-108 LINESTRING(10 10,20 10,20 20,10 20,10 10) +-109 LINESTRING(0 0,50 0,50 50,0 50,0 0) +-110 LINESTRING(0 0,30 0,30 30,0 0) +-SELECT fid, NumInteriorRings(g) FROM gis_polygon; +-fid NumInteriorRings(g) +-108 0 +-109 1 +-110 0 +-SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon; +-fid AsText(InteriorRingN(g, 1)) +-108 NULL +-109 LINESTRING(10 10,20 10,20 20,10 20,10 10) +-110 NULL +-SELECT fid, IsClosed(g) FROM gis_multi_line; +-fid IsClosed(g) +-114 0 +-115 0 +-116 0 +-SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon; +-fid AsText(Centroid(g)) +-117 POINT(55.58852775304245 17.426536064113982) +-118 POINT(55.58852775304245 17.426536064113982) +-119 POINT(2 2) +-SELECT fid, Area(g) FROM gis_multi_polygon; +-fid Area(g) +-117 1684.5 +-118 1684.5 +-119 4.5 +-SELECT fid, NumGeometries(g) from gis_multi_point; +-fid NumGeometries(g) +-111 4 +-112 4 +-113 2 +-SELECT fid, NumGeometries(g) from gis_multi_line; +-fid NumGeometries(g) +-114 2 +-115 1 +-116 2 +-SELECT fid, NumGeometries(g) from gis_multi_polygon; +-fid NumGeometries(g) +-117 2 +-118 2 +-119 1 +-SELECT fid, NumGeometries(g) from gis_geometrycollection; +-fid NumGeometries(g) +-120 2 +-121 2 +-122 0 +-123 0 +-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point; +-fid 
AsText(GeometryN(g, 2)) +-111 POINT(10 10) +-112 POINT(11 11) +-113 POINT(4 10) +-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line; +-fid AsText(GeometryN(g, 2)) +-114 LINESTRING(16 0,16 23,16 48) +-115 NULL +-116 LINESTRING(2 5,5 8,21 7) +-SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon; +-fid AsText(GeometryN(g, 2)) +-117 POLYGON((59 18,67 18,67 13,59 13,59 18)) +-118 POLYGON((59 18,67 18,67 13,59 13,59 18)) +-119 NULL +-SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection; +-fid AsText(GeometryN(g, 2)) +-120 LINESTRING(0 0,10 10) +-121 LINESTRING(3 6,7 9) +-122 NULL +-123 NULL +-SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection; +-fid AsText(GeometryN(g, 1)) +-120 POINT(0 0) +-121 POINT(44 6) +-122 NULL +-123 NULL +-SELECT g1.fid as first, g2.fid as second, +-Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o, +-Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t, +-Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r +-FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second; +-first second w c o e d t i r +-120 120 1 1 0 1 0 1 1 0 +-120 121 0 0 1 0 0 0 1 0 +-120 122 NULL NULL NULL NULL NULL NULL NULL NULL +-120 123 NULL NULL NULL NULL NULL NULL NULL NULL +-121 120 0 0 1 0 0 0 1 0 +-121 121 1 1 0 1 0 1 1 0 +-121 122 NULL NULL NULL NULL NULL NULL NULL NULL +-121 123 NULL NULL NULL NULL NULL NULL NULL NULL +-122 120 NULL NULL NULL NULL NULL NULL NULL NULL +-122 121 NULL NULL NULL NULL NULL NULL NULL NULL +-122 122 NULL NULL NULL NULL NULL NULL NULL NULL +-122 123 NULL NULL NULL NULL NULL NULL NULL NULL +-123 120 NULL NULL NULL NULL NULL NULL NULL NULL +-123 121 NULL NULL NULL NULL NULL NULL NULL NULL +-123 122 NULL NULL NULL NULL NULL NULL NULL NULL +-123 123 NULL NULL NULL NULL NULL NULL NULL NULL +-DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry; +-USE gis_ogs; +-# Lakes +-INSERT INTO lakes (fid,name,shore) VALUES ( +-101, 'BLUE LAKE', +-PolyFromText( +-'POLYGON( +- (52 18,66 23,73 9,48 6,52 18), +- (59 18,67 18,67 13,59 13,59 18) +- )', +-101)); +-# Road Segments +-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(102, 'Route 5', NULL, 2, +-LineFromText( +-'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101)); +-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(103, 'Route 5', 'Main Street', 4, +-LineFromText( +-'LINESTRING( 44 31, 56 34, 70 38 )' ,101)); +-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(104, 'Route 5', NULL, 2, +-LineFromText( +-'LINESTRING( 70 38, 72 48 )' ,101)); +-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(105, 'Main Street', NULL, 4, +-LineFromText( +-'LINESTRING( 70 38, 84 42 )' ,101)); +-INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(106, 'Dirt Road by Green Forest', NULL, +-1, +-LineFromText( +-'LINESTRING( 28 26, 28 0 )',101)); +-# DividedRoutes +-INSERT INTO divided_routes (fid,name,num_lanes,centerlines) VALUES(119, 'Route 75', 4, +-MLineFromText( +-'MULTILINESTRING((10 48,10 21,10 0), +- (16 0,16 23,16 48))', 101)); +-# Forests +-INSERT INTO forests (fid,name,boundary) VALUES(109, 'Green Forest', +-MPolyFromText( +-'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26), +- (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))', +-101)); +-# Bridges +-INSERT INTO bridges (fid,name,position) VALUES(110, 'Cam Bridge', 
PointFromText( +-'POINT( 44 31 )', 101)); +-# Streams +-INSERT INTO streams (fid,name,centerline) VALUES(111, 'Cam Stream', +-LineFromText( +-'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101)); +-INSERT INTO streams (fid,name,centerline) VALUES(112, NULL, +-LineFromText( +-'LINESTRING( 76 0, 78 4, 73 9 )', 101)); +-# Buildings +-INSERT INTO buildings (fid,name,position,footprint) VALUES(113, '123 Main Street', +-PointFromText( +-'POINT( 52 30 )', 101), +-PolyFromText( +-'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101)); +-INSERT INTO buildings (fid,name,position,footprint) VALUES(114, '215 Main Street', +-PointFromText( +-'POINT( 64 33 )', 101), +-PolyFromText( +-'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101)); +-# Ponds +-INSERT INTO ponds (fid,name,type,shores) VALUES(120, NULL, 'Stock Pond', +-MPolyFromText( +-'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), +- ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101)); +-# Named Places +-INSERT INTO named_places (fid,name,boundary) VALUES(117, 'Ashton', +-PolyFromText( +-'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101)); +-INSERT INTO named_places (fid,name,boundary) VALUES(118, 'Goose Island', +-PolyFromText( +-'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101)); +-# Map Neatlines +-INSERT INTO map_neatlines (fid,neatline) VALUES(115, +-PolyFromText( +-'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101)); +-SELECT Dimension(shore) +-FROM lakes +-WHERE name = 'Blue Lake'; +-Dimension(shore) +-2 +-SELECT GeometryType(centerlines) +-FROM divided_routes +-WHERE name = 'Route 75'; +-GeometryType(centerlines) +-MULTILINESTRING +-SELECT AsText(boundary) +-FROM named_places +-WHERE name = 'Goose Island'; +-AsText(boundary) +-POLYGON((67 13,67 18,59 18,59 13,67 13)) +-SELECT AsText(PolyFromWKB(AsBinary(boundary),101)) +-FROM named_places +-WHERE name = 'Goose Island'; +-AsText(PolyFromWKB(AsBinary(boundary),101)) +-POLYGON((67 13,67 18,59 18,59 13,67 13)) +-SELECT SRID(boundary) +-FROM named_places +-WHERE name = 'Goose Island'; +-SRID(boundary) +-101 +-SELECT IsEmpty(centerline) +-FROM road_segments +-WHERE name = 'Route 5' +-AND aliases = 'Main Street'; +-IsEmpty(centerline) +-0 +-SELECT AsText(Envelope(boundary)) +-FROM named_places +-WHERE name = 'Goose Island'; +-AsText(Envelope(boundary)) +-POLYGON((59 13,67 13,67 18,59 18,59 13)) +-SELECT X(position) +-FROM bridges +-WHERE name = 'Cam Bridge'; +-X(position) +-44 +-SELECT Y(position) +-FROM bridges +-WHERE name = 'Cam Bridge'; +-Y(position) +-31 +-SELECT AsText(StartPoint(centerline)) +-FROM road_segments +-WHERE fid = 102; +-AsText(StartPoint(centerline)) +-POINT(0 18) +-SELECT AsText(EndPoint(centerline)) +-FROM road_segments +-WHERE fid = 102; +-AsText(EndPoint(centerline)) +-POINT(44 31) +-SELECT GLength(centerline) +-FROM road_segments +-WHERE fid = 106; +-GLength(centerline) +-26 +-SELECT NumPoints(centerline) +-FROM road_segments +-WHERE fid = 102; +-NumPoints(centerline) +-5 +-SELECT AsText(PointN(centerline, 1)) +-FROM road_segments +-WHERE fid = 102; +-AsText(PointN(centerline, 1)) +-POINT(0 18) +-SELECT AsText(Centroid(boundary)) +-FROM named_places +-WHERE name = 'Goose Island'; +-AsText(Centroid(boundary)) +-POINT(63 15.5) +-SELECT Area(boundary) +-FROM named_places +-WHERE name = 'Goose Island'; +-Area(boundary) +-40 +-SELECT AsText(ExteriorRing(shore)) +-FROM lakes +-WHERE name = 'Blue Lake'; +-AsText(ExteriorRing(shore)) +-LINESTRING(52 18,66 23,73 9,48 6,52 18) +-SELECT NumInteriorRings(shore) +-FROM lakes +-WHERE name = 'Blue Lake'; 
+-NumInteriorRings(shore) +-1 +-SELECT AsText(InteriorRingN(shore, 1)) +-FROM lakes +-WHERE name = 'Blue Lake'; +-AsText(InteriorRingN(shore, 1)) +-LINESTRING(59 18,67 18,67 13,59 13,59 18) +-SELECT NumGeometries(centerlines) +-FROM divided_routes +-WHERE name = 'Route 75'; +-NumGeometries(centerlines) +-2 +-SELECT AsText(GeometryN(centerlines, 2)) +-FROM divided_routes +-WHERE name = 'Route 75'; +-AsText(GeometryN(centerlines, 2)) +-LINESTRING(16 0,16 23,16 48) +-SELECT IsClosed(centerlines) +-FROM divided_routes +-WHERE name = 'Route 75'; +-IsClosed(centerlines) +-0 +-SELECT GLength(centerlines) +-FROM divided_routes +-WHERE name = 'Route 75'; +-GLength(centerlines) +-96 +-SELECT AsText(Centroid(shores)) +-FROM ponds +-WHERE fid = 120; +-AsText(Centroid(shores)) +-POINT(25 42) +-SELECT Area(shores) +-FROM ponds +-WHERE fid = 120; +-Area(shores) +-8 +-SELECT ST_Equals(boundary, +-PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1)) +-FROM named_places +-WHERE name = 'Goose Island'; +-ST_Equals(boundary, +-PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1)) +-1 +-SELECT ST_Disjoint(centerlines, boundary) +-FROM divided_routes, named_places +-WHERE divided_routes.name = 'Route 75' +-AND named_places.name = 'Ashton'; +-ST_Disjoint(centerlines, boundary) +-1 +-SELECT ST_Touches(centerline, shore) +-FROM streams, lakes +-WHERE streams.name = 'Cam Stream' +-AND lakes.name = 'Blue Lake'; +-ST_Touches(centerline, shore) +-1 +-SELECT Crosses(road_segments.centerline, divided_routes.centerlines) +-FROM road_segments, divided_routes +-WHERE road_segments.fid = 102 +-AND divided_routes.name = 'Route 75'; +-Crosses(road_segments.centerline, divided_routes.centerlines) +-1 +-SELECT ST_Intersects(road_segments.centerline, divided_routes.centerlines) +-FROM road_segments, divided_routes +-WHERE road_segments.fid = 102 +-AND divided_routes.name = 'Route 75'; +-ST_Intersects(road_segments.centerline, divided_routes.centerlines) +-1 +-SELECT ST_Contains(forests.boundary, named_places.boundary) +-FROM forests, named_places +-WHERE forests.name = 'Green Forest' +-AND named_places.name = 'Ashton'; +-ST_Contains(forests.boundary, named_places.boundary) +-0 +-SELECT ST_Distance(position, boundary) +-FROM bridges, named_places +-WHERE bridges.name = 'Cam Bridge' +-AND named_places.name = 'Ashton'; +-ST_Distance(position, boundary) +-12 +-SELECT AsText(ST_Difference(named_places.boundary, forests.boundary)) +-FROM named_places, forests +-WHERE named_places.name = 'Ashton' +-AND forests.name = 'Green Forest'; +-AsText(ST_Difference(named_places.boundary, forests.boundary)) +-POLYGON((56 34,62 48,84 48,84 42,56 34)) +-SELECT AsText(ST_Union(shore, boundary)) +-FROM lakes, named_places +-WHERE lakes.name = 'Blue Lake' +-AND named_places.name = 'Goose Island'; +-AsText(ST_Union(shore, boundary)) +-POLYGON((48 6,52 18,66 23,73 9,48 6)) +-SELECT AsText(ST_SymDifference(shore, boundary)) +-FROM lakes, named_places +-WHERE lakes.name = 'Blue Lake' +-AND named_places.name = 'Ashton'; +-AsText(ST_SymDifference(shore, boundary)) +-MULTIPOLYGON(((48 6,52 18,66 23,73 9,48 6),(59 13,59 18,67 18,67 13,59 13)),((56 30,56 34,62 48,84 48,84 30,56 30))) +-SELECT count(*) +-FROM buildings, bridges +-WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1; +-count(*) +-1 ++ERROR HY000: The storage engine <STORAGE_ENGINE> doesn't support SPATIAL indexes ++# ERROR: Statement ended with errno 1464, errname ER_TABLE_CANT_HANDLE_SPKEYS (expected to succeed) ++# ------------ UNEXPECTED 
RESULT ------------ ++# [ CREATE TABLE gis_point (fid INT(11) /*!*/ /*Custom column options*/, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=InnoDB /*!*/ /*Custom table options*/ ] ++# The statement|command finished with ER_TABLE_CANT_HANDLE_SPKEYS. ++# Geometry types or spatial indexes or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. ++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. ++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. ++# Also, this problem may cause a chain effect (more errors of different kinds in the test). ++# ------------------------------------------- + DROP DATABASE gis_ogs; + USE test; diff --git a/storage/xtradb/mysql-test/storage_engine/type_text.opt b/storage/xtradb/mysql-test/storage_engine/type_text.opt new file mode 100644 index 00000000000..40445305fc6 --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/type_text.opt @@ -0,0 +1 @@ +--innodb_log_file_size=100M diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc index a7a5d5f32c0..92e66ed7901 100644 --- a/storage/xtradb/os/os0file.cc +++ b/storage/xtradb/os/os0file.cc @@ -2195,7 +2195,7 @@ os_file_delete_if_exists_func( bool ret; ulint count = 0; loop: - /* In Windows, deleting an .ibd file may fail if ibbackup is copying + /* In Windows, deleting an .ibd file may fail if mysqlbackup is copying it */ ret = DeleteFile((LPCTSTR) name); @@ -2220,7 +2220,7 @@ loop: ib_logf(IB_LOG_LEVEL_WARN, "Delete of file %s failed.", name); } - os_thread_sleep(1000000); /* sleep for a second */ + os_thread_sleep(500000); /* sleep for 0.5 second */ if (count > 2000) { @@ -2258,7 +2258,7 @@ os_file_delete_func( BOOL ret; ulint count = 0; loop: - /* In Windows, deleting an .ibd file may fail if ibbackup is copying + /* In Windows, deleting an .ibd file may fail if mysqlbackup is copying it */ ret = DeleteFile((LPCTSTR) name); @@ -2281,7 +2281,7 @@ loop: fprintf(stderr, "InnoDB: Warning: cannot delete file %s\n" - "InnoDB: Are you running ibbackup" + "InnoDB: Are you running mysqlbackup" " to back up the file?\n", name); } @@ -3135,7 +3135,7 @@ try_again: } ib_logf(IB_LOG_LEVEL_ERROR, - "Tried to read "ULINTPF" bytes at offset " UINT64PF". " + "Tried to read " ULINTPF " bytes at offset " UINT64PF ". " "Was only able to read %ld.", n, offset, (lint) ret); #endif /* __WIN__ */ retry = os_file_handle_error(NULL, "read", __FILE__, __LINE__); @@ -3308,7 +3308,8 @@ os_file_write_func( DWORD len; ulint n_retries = 0; ulint err; - OVERLAPPED overlapped; + OVERLAPPED overlapped; + DWORD saved_error = 0; /* On 64-bit Windows, ulint is 64 bits. But offset and n should be no more than 32 bits. 
*/ @@ -3336,7 +3337,7 @@ retry: if (ret) { ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, FALSE); } - else if(GetLastError() == ERROR_IO_PENDING) { + else if ( GetLastError() == ERROR_IO_PENDING) { ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, TRUE); } @@ -3364,8 +3365,10 @@ retry: } if (!os_has_said_disk_full) { + char *winmsg = NULL; - err = (ulint) GetLastError(); + saved_error = GetLastError(); + err = (ulint) saved_error; ut_print_timestamp(stderr); @@ -3382,6 +3385,23 @@ retry: name, offset, (ulong) n, (ulong) len, (ulong) err); + /* Ask Windows to prepare a standard message for a + GetLastError() */ + + FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, saved_error, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPSTR)&winmsg, 0, NULL); + + if (winmsg) { + fprintf(stderr, + "InnoDB: FormatMessage: Error number %lu means '%s'.\n", + (ulong) saved_error, winmsg); + LocalFree(winmsg); + } + if (strerror((int) err) != NULL) { fprintf(stderr, "InnoDB: Error number %lu means '%s'.\n", @@ -3415,7 +3435,7 @@ retry: fprintf(stderr, " InnoDB: Error: Write to file %s failed" - " at offset "UINT64PF".\n" + " at offset " UINT64PF ".\n" "InnoDB: %lu bytes should have been written," " only %ld were written.\n" "InnoDB: Operating system error number %lu.\n" @@ -5065,8 +5085,10 @@ os_aio_func( wake_later = mode & OS_AIO_SIMULATED_WAKE_LATER; mode = mode & (~OS_AIO_SIMULATED_WAKE_LATER); - if (mode == OS_AIO_SYNC) - { + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + mode = OS_AIO_SYNC;); + + if (mode == OS_AIO_SYNC) { ibool ret; /* This is actually an ordinary synchronous read or write: no need to use an i/o-handler thread */ @@ -5081,7 +5103,18 @@ os_aio_func( ret = os_file_write(name, file, buf, offset, n); } - ut_a(ret); + + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + os_has_said_disk_full = FALSE;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + ret = 0;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", + errno = 28;); + + if (!ret) { + fprintf(stderr, "FAIL"); + } + return ret; } @@ -5978,7 +6011,13 @@ consecutive_loop: aio_slot->page_compression); } - ut_a(ret); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + os_has_said_disk_full = FALSE;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + ret = 0;); + DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28_2", + errno = 28;); + srv_set_io_thread_op_info(global_segment, "file i/o done"); if (aio_slot->type == OS_FILE_READ && n_consecutive > 1) { diff --git a/storage/xtradb/os/os0stacktrace.cc b/storage/xtradb/os/os0stacktrace.cc index 4d52e625057..c4c428e0db3 100644 --- a/storage/xtradb/os/os0stacktrace.cc +++ b/storage/xtradb/os/os0stacktrace.cc @@ -85,16 +85,16 @@ os_stacktrace_print( caller_address = (void*) uc->uc_mcontext.gregs[REG_RIP] ; #elif defined(__hppa__) ucontext_t* uc = (ucontext_t*) ucontext; - caller_address = (void*) uc->uc_mcontext.sc_iaoq[0] & ~0×3UL ; + caller_address = (void*) (uc->uc_mcontext.sc_iaoq[0] & ~0x3UL) ; #elif (defined (__ppc__)) || (defined (__powerpc__)) ucontext_t* uc = (ucontext_t*) ucontext; caller_address = (void*) uc->uc_mcontext.regs->nip ; #elif defined(__sparc__) struct sigcontext* sc = (struct sigcontext*) ucontext; #if __WORDSIZE == 64 - caller_address = (void*) scp->sigc_regs.tpc ; + caller_address = (void*) sc->sigc_regs.tpc ; #else - pnt = (void*) scp->si_regs.pc ; + caller_address = (void*) sc->si_regs.pc ; #endif #elif defined(__i386__) ucontext_t* uc = (ucontext_t*) ucontext; diff 
--git a/storage/xtradb/page/page0zip.cc b/storage/xtradb/page/page0zip.cc index 245eb6198a0..a12d30a8063 100644 --- a/storage/xtradb/page/page0zip.cc +++ b/storage/xtradb/page/page0zip.cc @@ -2,6 +2,7 @@ Copyright (c) 2005, 2014, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. +Copyright (c) 2014, SkySQL Ab. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1313,6 +1314,30 @@ page_zip_compress( MONITOR_INC(MONITOR_PAGE_COMPRESS); + /* Simulate a compression failure with a probability determined by + innodb_simulate_comp_failures, only if the page has 2 or more + records. */ + + if (srv_simulate_comp_failures + && !dict_index_is_ibuf(index) + && page_get_n_recs(page) >= 2 + && ((ulint)(rand() % 100) < srv_simulate_comp_failures) + && strcasecmp(index->table_name, "IBUF_DUMMY") != 0) { + +#ifdef UNIV_DEBUG + fprintf(stderr, + "InnoDB: Simulating a compression failure" + " for table %s, index %s, page %lu (%s)\n", + index->table_name, + index->name, + page_get_page_no(page), + page_is_leaf(page) ? "leaf" : "non-leaf"); + +#endif + + goto err_exit; + } + heap = mem_heap_create(page_zip_get_size(page_zip) + n_fields * (2 + sizeof(ulint)) + REC_OFFS_HEADER_SIZE @@ -3258,24 +3283,8 @@ page_zip_validate_low( temp_page_buf = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE)); temp_page = static_cast<byte*>(ut_align(temp_page_buf, UNIV_PAGE_SIZE)); -#ifdef UNIV_DEBUG_VALGRIND - /* Get detailed information on the valid bits in case the - UNIV_MEM_ASSERT_RW() checks fail. The v-bits of page[], - page_zip->data[] or page_zip could be viewed at temp_page[] or - temp_page_zip in a debugger when running valgrind --db-attach. */ - (void) VALGRIND_GET_VBITS(page, temp_page, UNIV_PAGE_SIZE); UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE); -# if UNIV_WORD_SIZE == 4 - VALGRIND_GET_VBITS(page_zip, &temp_page_zip, sizeof temp_page_zip); - /* On 32-bit systems, there is no padding in page_zip_des_t. - On other systems, Valgrind could complain about uninitialized - pad bytes. */ - UNIV_MEM_ASSERT_RW(page_zip, sizeof *page_zip); -# endif - (void) VALGRIND_GET_VBITS(page_zip->data, temp_page, - page_zip_get_size(page_zip)); UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip)); -#endif /* UNIV_DEBUG_VALGRIND */ temp_page_zip = *page_zip; valid = page_zip_decompress(&temp_page_zip, temp_page, TRUE); diff --git a/storage/xtradb/read/read0read.cc b/storage/xtradb/read/read0read.cc index 887e1717769..c350e24dbb0 100644 --- a/storage/xtradb/read/read0read.cc +++ b/storage/xtradb/read/read0read.cc @@ -221,7 +221,7 @@ views contiguously, one identical in size and content as @param view (starting at returned pointer) and another view immediately following the trx_ids array. The second view will have space for an extra trx_id_t element. @return read view struct */ -UNIV_INLINE +UNIV_INTERN read_view_t* read_view_clone( /*============*/ @@ -256,7 +256,7 @@ read_view_clone( /*********************************************************************//** Insert the view in the proper order into the trx_sys->view_list. The read view list is ordered by read_view_t::low_limit_no in descending order. 
*/ -static +UNIV_INTERN void read_view_add( /*==========*/ diff --git a/storage/xtradb/row/row0ins.cc b/storage/xtradb/row/row0ins.cc index 444fac87842..c0396a96cfc 100644 --- a/storage/xtradb/row/row0ins.cc +++ b/storage/xtradb/row/row0ins.cc @@ -151,35 +151,37 @@ row_ins_alloc_sys_fields( ut_ad(row && table && heap); ut_ad(dtuple_get_n_fields(row) == dict_table_get_n_cols(table)); - /* 1. Allocate buffer for row id */ + /* allocate buffer to hold the needed system created hidden columns. */ + uint len = DATA_ROW_ID_LEN + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN; + ptr = static_cast<byte*>(mem_heap_zalloc(heap, len)); + /* 1. Populate row-id */ col = dict_table_get_sys_col(table, DATA_ROW_ID); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_ROW_ID_LEN)); - dfield_set_data(dfield, ptr, DATA_ROW_ID_LEN); node->row_id_buf = ptr; - /* 3. Allocate buffer for trx id */ + ptr += DATA_ROW_ID_LEN; + /* 2. Populate trx id */ col = dict_table_get_sys_col(table, DATA_TRX_ID); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_TRX_ID_LEN)); dfield_set_data(dfield, ptr, DATA_TRX_ID_LEN); node->trx_id_buf = ptr; - /* 4. Allocate buffer for roll ptr */ + ptr += DATA_TRX_ID_LEN; + + /* 3. Populate roll ptr */ col = dict_table_get_sys_col(table, DATA_ROLL_PTR); dfield = dtuple_get_nth_field(row, dict_col_get_no(col)); - ptr = static_cast<byte*>(mem_heap_zalloc(heap, DATA_ROLL_PTR_LEN)); dfield_set_data(dfield, ptr, DATA_ROLL_PTR_LEN); } @@ -1743,12 +1745,11 @@ do_possible_lock_wait: table case (check_ref == 0), since MDL lock will prevent concurrent DDL and DML on the same table */ if (!check_ref) { - for (const dict_foreign_t* check_foreign - = UT_LIST_GET_FIRST( table->referenced_list); - check_foreign; - check_foreign = UT_LIST_GET_NEXT( - referenced_list, check_foreign)) { - if (check_foreign == foreign) { + for (dict_foreign_set::iterator it + = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + if (*it == foreign) { verified = true; break; } @@ -1801,12 +1802,15 @@ row_ins_check_foreign_constraints( trx = thr_get_trx(thr); - foreign = UT_LIST_GET_FIRST(table->foreign_list); - DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd, "foreign_constraint_check_for_ins"); - while (foreign) { + for (dict_foreign_set::iterator it = table->foreign_set.begin(); + it != table->foreign_set.end(); + ++it) { + + foreign = *it; + if (foreign->foreign_index == index) { dict_table_t* ref_table = NULL; dict_table_t* foreign_table = foreign->foreign_table; @@ -1862,8 +1866,6 @@ row_ins_check_foreign_constraints( return(err); } } - - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); } return(DB_SUCCESS); @@ -2913,7 +2915,7 @@ row_ins_clust_index_entry( dberr_t err; ulint n_uniq; - if (UT_LIST_GET_FIRST(index->table->foreign_list)) { + if (!index->table->foreign_set.empty()) { err = row_ins_check_foreign_constraints( index->table, index, entry, thr); if (err != DB_SUCCESS) { @@ -2971,7 +2973,7 @@ row_ins_sec_index_entry( mem_heap_t* offsets_heap; mem_heap_t* heap; - if (UT_LIST_GET_FIRST(index->table->foreign_list)) { + if (!index->table->foreign_set.empty()) { err = row_ins_check_foreign_constraints(index->table, index, entry, thr); if (err != DB_SUCCESS) { diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc index 84d6845363c..3dd1ac592fc 100644 --- a/storage/xtradb/row/row0merge.cc +++ b/storage/xtradb/row/row0merge.cc @@ -795,7 +795,7 @@ 
row_merge_read( if (UNIV_UNLIKELY(!success)) { ut_print_timestamp(stderr); fprintf(stderr, - " InnoDB: failed to read merge block at "UINT64PF"\n", + " InnoDB: failed to read merge block at " UINT64PF "\n", ofs); } diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc index 86de2eeb14c..8f270cfbfd0 100644 --- a/storage/xtradb/row/row0mysql.cc +++ b/storage/xtradb/row/row0mysql.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2000, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2000, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -63,6 +63,7 @@ Created 9/17/2000 Heikki Tuuri #include "m_string.h" #include "my_sys.h" #include "ha_prototypes.h" +#include <algorithm> /** Provide optional 4.x backwards compatibility for 5.0 and above */ UNIV_INTERN ibool row_rollback_on_timeout = FALSE; @@ -1359,7 +1360,7 @@ error_exit: if (doc_id < next_doc_id) { fprintf(stderr, "InnoDB: FTS Doc ID must be large than" - " "UINT64PF" for table", + " " UINT64PF " for table", next_doc_id - 1); ut_print_name(stderr, trx, TRUE, table->name); putc('\n', stderr); @@ -1374,9 +1375,9 @@ error_exit: if (doc_id - next_doc_id >= FTS_DOC_ID_MAX_STEP) { fprintf(stderr, - "InnoDB: Doc ID "UINT64PF" is too" + "InnoDB: Doc ID " UINT64PF " is too" " big. Its difference with largest" - " used Doc ID "UINT64PF" cannot" + " used Doc ID " UINT64PF " cannot" " exceed or equal to %d\n", doc_id, next_doc_id - 1, FTS_DOC_ID_MAX_STEP); @@ -1577,8 +1578,6 @@ init_fts_doc_id_for_ref( { dict_foreign_t* foreign; - foreign = UT_LIST_GET_FIRST(table->referenced_list); - table->fk_max_recusive_level = 0; (*depth)++; @@ -1590,17 +1589,25 @@ init_fts_doc_id_for_ref( /* Loop through this table's referenced list and also recursively traverse each table's foreign table list */ - while (foreign && foreign->foreign_table) { - if (foreign->foreign_table->fts) { - fts_init_doc_id(foreign->foreign_table); + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + + foreign = *it; + + if (foreign->foreign_table == NULL) { + break; } - if (UT_LIST_GET_LEN(foreign->foreign_table->referenced_list) - > 0 && foreign->foreign_table != table) { - init_fts_doc_id_for_ref(foreign->foreign_table, depth); + if (foreign->foreign_table->fts != NULL) { + fts_init_doc_id(foreign->foreign_table); } - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); + if (!foreign->foreign_table->referenced_set.empty() + && foreign->foreign_table != table) { + init_fts_doc_id_for_ref( + foreign->foreign_table, depth); + } } } @@ -2840,43 +2847,47 @@ row_discard_tablespace_foreign_key_checks( const trx_t* trx, /*!< in: transaction handle */ const dict_table_t* table) /*!< in: table to be discarded */ { - const dict_foreign_t* foreign; + + if (srv_read_only_mode || !trx->check_foreigns) { + return(DB_SUCCESS); + } /* Check if the table is referenced by foreign key constraints from some other table (not the table itself) */ + dict_foreign_set::iterator it + = std::find_if(table->referenced_set.begin(), + table->referenced_set.end(), + dict_foreign_different_tables()); - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign && foreign->foreign_table == table; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { - + if (it == 
table->referenced_set.end()) { + return(DB_SUCCESS); } - if (!srv_read_only_mode && foreign && trx->check_foreigns) { + const dict_foreign_t* foreign = *it; + FILE* ef = dict_foreign_err_file; - FILE* ef = dict_foreign_err_file; + ut_ad(foreign->foreign_table != table); + ut_ad(foreign->referenced_table == table); - /* We only allow discarding a referenced table if - FOREIGN_KEY_CHECKS is set to 0 */ + /* We only allow discarding a referenced table if + FOREIGN_KEY_CHECKS is set to 0 */ - mutex_enter(&dict_foreign_err_mutex); + mutex_enter(&dict_foreign_err_mutex); - rewind(ef); + rewind(ef); - ut_print_timestamp(ef); + ut_print_timestamp(ef); - fputs(" Cannot DISCARD table ", ef); - ut_print_name(stderr, trx, TRUE, table->name); - fputs("\n" - "because it is referenced by ", ef); - ut_print_name(stderr, trx, TRUE, foreign->foreign_table_name); - putc('\n', ef); + fputs(" Cannot DISCARD table ", ef); + ut_print_name(stderr, trx, TRUE, table->name); + fputs("\n" + "because it is referenced by ", ef); + ut_print_name(stderr, trx, TRUE, foreign->foreign_table_name); + putc('\n', ef); - mutex_exit(&dict_foreign_err_mutex); - - return(DB_CANNOT_DROP_CONSTRAINT); - } + mutex_exit(&dict_foreign_err_mutex); - return(DB_SUCCESS); + return(DB_CANNOT_DROP_CONSTRAINT); } /*********************************************************************//** @@ -3179,7 +3190,6 @@ row_truncate_table_for_mysql( dict_table_t* table, /*!< in: table handle */ trx_t* trx) /*!< in: transaction handle */ { - dict_foreign_t* foreign; dberr_t err; mem_heap_t* heap; byte* buf; @@ -3271,18 +3281,17 @@ row_truncate_table_for_mysql( /* Check if the table is referenced by foreign key constraints from some other table (not the table itself) */ - for (foreign = UT_LIST_GET_FIRST(table->referenced_list); - foreign != 0 && foreign->foreign_table == table; - foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) { - - /* Do nothing. 
*/ - } + dict_foreign_set::iterator it + = std::find_if(table->referenced_set.begin(), + table->referenced_set.end(), + dict_foreign_different_tables()); if (!srv_read_only_mode - && foreign + && it != table->referenced_set.end() && trx->check_foreigns) { - FILE* ef = dict_foreign_err_file; + FILE* ef = dict_foreign_err_file; + dict_foreign_t* foreign = *it; /* We only allow truncating a referenced table if FOREIGN_KEY_CHECKS is set to 0 */ @@ -3885,42 +3894,45 @@ row_drop_table_for_mysql( /* Check if the table is referenced by foreign key constraints from some other table (not the table itself) */ - foreign = UT_LIST_GET_FIRST(table->referenced_list); + if (!srv_read_only_mode && trx->check_foreigns) { - while (foreign && foreign->foreign_table == table) { -check_next_foreign: - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); - } + for (dict_foreign_set::iterator it + = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { - if (!srv_read_only_mode - && foreign - && trx->check_foreigns - && !(drop_db && dict_tables_have_same_db( - name, foreign->foreign_table_name_lookup))) { - FILE* ef = dict_foreign_err_file; + foreign = *it; - /* We only allow dropping a referenced table if - FOREIGN_KEY_CHECKS is set to 0 */ + const bool ref_ok = drop_db + && dict_tables_have_same_db( + name, + foreign->foreign_table_name_lookup); - err = DB_CANNOT_DROP_CONSTRAINT; + if (foreign->foreign_table != table && !ref_ok) { - mutex_enter(&dict_foreign_err_mutex); - rewind(ef); - ut_print_timestamp(ef); + FILE* ef = dict_foreign_err_file; - fputs(" Cannot drop table ", ef); - ut_print_name(ef, trx, TRUE, name); - fputs("\n" - "because it is referenced by ", ef); - ut_print_name(ef, trx, TRUE, foreign->foreign_table_name); - putc('\n', ef); - mutex_exit(&dict_foreign_err_mutex); + /* We only allow dropping a referenced table + if FOREIGN_KEY_CHECKS is set to 0 */ - goto funct_exit; - } + err = DB_CANNOT_DROP_CONSTRAINT; + + mutex_enter(&dict_foreign_err_mutex); + rewind(ef); + ut_print_timestamp(ef); - if (foreign && trx->check_foreigns) { - goto check_next_foreign; + fputs(" Cannot drop table ", ef); + ut_print_name(ef, trx, TRUE, name); + fputs("\n" + "because it is referenced by ", ef); + ut_print_name(ef, trx, TRUE, + foreign->foreign_table_name); + putc('\n', ef); + mutex_exit(&dict_foreign_err_mutex); + + goto funct_exit; + } + } } /* TODO: could we replace the counter n_foreign_key_checks_running diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc index 67107c34204..fd50e2240b5 100644 --- a/storage/xtradb/row/row0sel.cc +++ b/storage/xtradb/row/row0sel.cc @@ -878,16 +878,15 @@ row_sel_get_clust_rec( if (!node->read_view) { /* Try to place a lock on the index record */ - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation level - we lock only the record, i.e., next-key locking is - not used. */ ulint lock_type; trx_t* trx; trx = thr_get_trx(thr); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation level + we lock only the record, i.e., next-key locking is + not used. */ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { lock_type = LOCK_REC_NOT_GAP; @@ -1505,12 +1504,6 @@ rec_loop: search result set, resulting in the phantom problem. 
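The DISCARD and TRUNCATE checks above drop their hand-rolled UT_LIST scans in favour of std::find_if over the new dict_foreign_set, with a predicate that skips self-referencing constraints. A simplified sketch of the pattern, using stand-in types (the real element type is dict_foreign_t and the real predicate is dict_foreign_different_tables):

#include <algorithm>
#include <cstddef>
#include <set>

struct table_t;
struct foreign_t {
    table_t* foreign_table;      /* table owning the FOREIGN KEY   */
    table_t* referenced_table;   /* table the constraint points to */
};
typedef std::set<foreign_t*> foreign_set;

/* True for constraints that come from some *other* table. */
struct different_tables {
    bool operator()(const foreign_t* f) const
    { return f->foreign_table != f->referenced_table; }
};

/* Returns the first external reference, or NULL when only self-references
   (or no references at all) exist, in which case DISCARD/TRUNCATE/DROP may
   proceed. */
inline const foreign_t* first_external_reference(const foreign_set& refs)
{
    foreign_set::const_iterator it =
        std::find_if(refs.begin(), refs.end(), different_tables());
    return it == refs.end() ? NULL : *it;
}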
*/ if (!consistent_read) { - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation - level, we lock only the record, i.e., next-key - locking is not used. */ - rec_t* next_rec = page_rec_get_next(rec); ulint lock_type; trx_t* trx; @@ -1520,6 +1513,10 @@ rec_loop: offsets = rec_get_offsets(next_rec, index, offsets, ULINT_UNDEFINED, &heap); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation + level, we lock only the record, i.e., next-key + locking is not used. */ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { @@ -1568,12 +1565,6 @@ skip_lock: if (!consistent_read) { /* Try to place a lock on the index record */ - - /* If innodb_locks_unsafe_for_binlog option is used - or this session is using READ COMMITTED isolation level, - we lock only the record, i.e., next-key locking is - not used. */ - ulint lock_type; trx_t* trx; @@ -1582,6 +1573,10 @@ skip_lock: trx = thr_get_trx(thr); + /* If innodb_locks_unsafe_for_binlog option is used + or this session is using READ COMMITTED or lower isolation level, + we lock only the record, i.e., next-key locking is + not used. */ if (srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) { @@ -4228,7 +4223,7 @@ rec_loop: /* Try to place a lock on the index record */ /* If innodb_locks_unsafe_for_binlog option is used - or this session is using a READ COMMITTED isolation + or this session is using a READ COMMITTED or lower isolation level we do not lock gaps. Supremum record is really a gap and therefore we do not set locks there. */ @@ -4379,7 +4374,7 @@ wrong_offs: /* Try to place a gap lock on the index record only if innodb_locks_unsafe_for_binlog option is not set or this session is not - using a READ COMMITTED isolation level. */ + using a READ COMMITTED or lower isolation level. */ err = sel_set_rec_lock( btr_pcur_get_block(pcur), @@ -4428,7 +4423,7 @@ wrong_offs: /* Try to place a gap lock on the index record only if innodb_locks_unsafe_for_binlog option is not set or this session is not - using a READ COMMITTED isolation level. */ + using a READ COMMITTED or lower isolation level. 
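The comment shuffling in row0sel.cc keeps restating one rule: with innodb_locks_unsafe_for_binlog, or at READ COMMITTED isolation or lower, only the index record itself is locked and no gap/next-key lock is taken. A compact sketch of that decision (the enum values and names are illustrative, not the InnoDB definitions):

enum lock_type_t { LOCK_ORDINARY_, LOCK_REC_NOT_GAP_ };
enum iso_level_t { ISO_READ_UNCOMMITTED = 0, ISO_READ_COMMITTED = 1,
                   ISO_REPEATABLE_READ = 2, ISO_SERIALIZABLE = 3 };

inline lock_type_t pick_lock_type(bool locks_unsafe_for_binlog,
                                  iso_level_t iso)
{
    /* Record-only lock at READ COMMITTED or lower, or when the legacy
       unsafe-for-binlog option is set; otherwise a next-key lock that
       also covers the gap before the record. */
    if (locks_unsafe_for_binlog || iso <= ISO_READ_COMMITTED)
        return LOCK_REC_NOT_GAP_;
    return LOCK_ORDINARY_;
}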
*/ err = sel_set_rec_lock( btr_pcur_get_block(pcur), diff --git a/storage/xtradb/row/row0upd.cc b/storage/xtradb/row/row0upd.cc index a642f7932b7..ad6f10542cf 100644 --- a/storage/xtradb/row/row0upd.cc +++ b/storage/xtradb/row/row0upd.cc @@ -53,6 +53,7 @@ Created 12/27/1996 Heikki Tuuri #include "pars0sym.h" #include "eval0eval.h" #include "buf0lru.h" +#include <algorithm> #include <mysql/plugin.h> #include <mysql/service_wsrep.h> @@ -140,12 +141,10 @@ row_upd_index_is_referenced( trx_t* trx) /*!< in: transaction */ { dict_table_t* table = index->table; - dict_foreign_t* foreign; ibool froze_data_dict = FALSE; ibool is_referenced = FALSE; - if (!UT_LIST_GET_FIRST(table->referenced_list)) { - + if (table->referenced_set.empty()) { return(FALSE); } @@ -154,19 +153,13 @@ row_upd_index_is_referenced( froze_data_dict = TRUE; } - foreign = UT_LIST_GET_FIRST(table->referenced_list); + dict_foreign_set::iterator it + = std::find_if(table->referenced_set.begin(), + table->referenced_set.end(), + dict_foreign_with_index(index)); - while (foreign) { - if (foreign->referenced_index == index) { + is_referenced = (it != table->referenced_set.end()); - is_referenced = TRUE; - goto func_exit; - } - - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); - } - -func_exit: if (froze_data_dict) { row_mysql_unfreeze_data_dictionary(trx); } @@ -187,7 +180,7 @@ wsrep_row_upd_index_is_foreign( ibool froze_data_dict = FALSE; ibool is_referenced = FALSE; - if (!UT_LIST_GET_FIRST(table->foreign_list)) { + if (table->foreign_set.empty()) { return(FALSE); } @@ -197,16 +190,18 @@ wsrep_row_upd_index_is_foreign( froze_data_dict = TRUE; } - foreign = UT_LIST_GET_FIRST(table->foreign_list); + for (dict_foreign_set::iterator it= table->foreign_set.begin(); + it != table->foreign_set.end(); + ++ it) + { + foreign= *it; - while (foreign) { if (foreign->foreign_index == index) { is_referenced = TRUE; goto func_exit; } - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); } func_exit: @@ -248,7 +243,7 @@ row_upd_check_references_constraints( dberr_t err; ibool got_s_lock = FALSE; - if (UT_LIST_GET_FIRST(table->referenced_list) == NULL) { + if (table->referenced_set.empty()) { return(DB_SUCCESS); } @@ -275,9 +270,13 @@ row_upd_check_references_constraints( } run_again: - foreign = UT_LIST_GET_FIRST(table->referenced_list); - while (foreign) { + for (dict_foreign_set::iterator it = table->referenced_set.begin(); + it != table->referenced_set.end(); + ++it) { + + foreign = *it; + /* Note that we may have an update which updates the index record, but does NOT update the first fields which are referenced in a foreign key constraint. Then the update does @@ -330,8 +329,6 @@ run_again: goto func_exit; } } - - foreign = UT_LIST_GET_NEXT(referenced_list, foreign); } err = DB_SUCCESS; @@ -368,7 +365,7 @@ wsrep_row_upd_check_foreign_constraints( ibool got_s_lock = FALSE; ibool opened = FALSE; - if (UT_LIST_GET_FIRST(table->foreign_list) == NULL) { + if (table->foreign_set.empty()) { return(DB_SUCCESS); } @@ -395,9 +392,13 @@ wsrep_row_upd_check_foreign_constraints( row_mysql_freeze_data_dictionary(trx); } - foreign = UT_LIST_GET_FIRST(table->foreign_list); + for (dict_foreign_set::iterator it= table->foreign_set.begin(); + it != table->foreign_set.end(); + ++ it) + { + foreign= *it; + - while (foreign) { /* Note that we may have an update which updates the index record, but does NOT update the first fields which are referenced in a foreign key constraint. 
Then the update does @@ -448,7 +449,6 @@ wsrep_row_upd_check_foreign_constraints( } } - foreign = UT_LIST_GET_NEXT(foreign_list, foreign); } err = DB_SUCCESS; diff --git a/storage/xtradb/srv/srv0mon.cc b/storage/xtradb/srv/srv0mon.cc index f276efdc021..5880e03073e 100644 --- a/storage/xtradb/srv/srv0mon.cc +++ b/storage/xtradb/srv/srv0mon.cc @@ -41,8 +41,8 @@ Created 12/9/2009 Jimmy Yang /* Macro to standardize the counter names for counters in the "monitor_buf_page" module as they have very structured defines */ #define MONITOR_BUF_PAGE(name, description, code, op, op_code) \ - {"buffer_page_"op"_"name, "buffer_page_io", \ - "Number of "description" Pages "op, \ + {"buffer_page_" op "_" name, "buffer_page_io", \ + "Number of " description " Pages " op, \ MONITOR_GROUP_MODULE, MONITOR_DEFAULT_START, \ MONITOR_##code##_##op_code} diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index a12a8b197fb..16df97edd64 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -515,7 +515,12 @@ UNIV_INTERN ulong srv_log_checksum_algorithm = SRV_CHECKSUM_ALGORITHM_INNODB; /*-------------------------------------------*/ +#ifdef HAVE_MEMORY_BARRIER +/* No idea to wait long with memory barriers */ +UNIV_INTERN ulong srv_n_spin_wait_rounds = 15; +#else UNIV_INTERN ulong srv_n_spin_wait_rounds = 30; +#endif UNIV_INTERN ulong srv_spin_wait_delay = 6; UNIV_INTERN ibool srv_priority_boost = TRUE; @@ -671,6 +676,9 @@ current_time % 5 != 0. */ ? thd_lock_wait_timeout((trx)->mysql_thd) \ : 0) +/** Simulate compression failures. */ +UNIV_INTERN uint srv_simulate_comp_failures = 0; + /* IMPLEMENTATION OF THE SERVER MAIN PROGRAM ========================================= @@ -798,7 +806,9 @@ static const ulint SRV_MASTER_SLOT = 0; UNIV_INTERN os_event_t srv_checkpoint_completed_event; -UNIV_INTERN os_event_t srv_redo_log_thread_finished_event; +UNIV_INTERN os_event_t srv_redo_log_tracked_event; + +UNIV_INTERN bool srv_redo_log_thread_started = false; /*********************************************************************//** Prints counters for work done by srv_master_thread. 
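The MONITOR_BUF_PAGE and UINT64PF changes throughout this patch all add whitespace between adjacent string literals and macros. The reason is C++11: a string literal immediately followed by an identifier is parsed as a user-defined-literal suffix, so the old spelling stops compiling with newer compilers. A small illustration (the format macro here is a stand-in for InnoDB's UINT64PF):

#include <cinttypes>
#include <cstdio>

#define U64_FMT "%" PRIu64   /* stand-in for UINT64PF */

static void report_offset(uint64_t ofs)
{
    /* Fine: the macro expands first, then the adjacent string literals,
       separated by whitespace, are concatenated by the compiler. */
    std::printf("failed to read merge block at " U64_FMT "\n", ofs);

    /* The pre-patch spelling, "at "U64_FMT"\n", is treated by C++11
       compilers as a user-defined literal suffix and rejected. */
}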
*/ @@ -1152,7 +1162,10 @@ srv_init(void) srv_checkpoint_completed_event = os_event_create(); - srv_redo_log_thread_finished_event = os_event_create(); + if (srv_track_changed_pages) { + srv_redo_log_tracked_event = os_event_create(); + os_event_set(srv_redo_log_tracked_event); + } UT_LIST_INIT(srv_sys->tasks); } @@ -2164,9 +2177,10 @@ loop: /* Try to track a strange bug reported by Harald Fuchs and others, where the lsn seems to decrease at times */ - new_lsn = log_get_lsn(); + /* We have to use nowait to ensure we don't block */ + new_lsn= log_get_lsn_nowait(); - if (new_lsn < old_lsn) { + if (new_lsn && new_lsn < old_lsn) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Error: old log sequence number " LSN_PF @@ -2178,7 +2192,8 @@ loop: ut_ad(0); } - old_lsn = new_lsn; + if (new_lsn) + old_lsn = new_lsn; if (difftime(time(NULL), srv_last_monitor_time) > 60) { /* We referesh InnoDB Monitor values so that averages are @@ -2391,6 +2406,7 @@ DECLARE_THREAD(srv_redo_log_follow_thread)( #endif my_thread_init(); + srv_redo_log_thread_started = true; do { os_event_wait(srv_checkpoint_completed_event); @@ -2410,13 +2426,15 @@ DECLARE_THREAD(srv_redo_log_follow_thread)( "stopping log tracking thread!\n"); break; } + os_event_set(srv_redo_log_tracked_event); } } while (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE); srv_track_changed_pages = FALSE; log_online_read_shutdown(); - os_event_set(srv_redo_log_thread_finished_event); + os_event_set(srv_redo_log_tracked_event); + srv_redo_log_thread_started = false; /* Defensive, not required */ my_thread_end(); os_thread_exit(NULL); diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index 86b0764d948..86bc8ce041e 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -1576,6 +1576,7 @@ innobase_start_or_create_for_mysql(void) char logfilename[10000]; char* logfile0 = NULL; size_t dirnamelen; + bool sys_datafiles_created = false; /* This should be initialized early */ ut_init_timer(); @@ -1725,6 +1726,19 @@ innobase_start_or_create_for_mysql(void) "" IB_ATOMICS_STARTUP_MSG ""); ib_logf(IB_LOG_LEVEL_INFO, + "" IB_MEMORY_BARRIER_STARTUP_MSG ""); + +#ifndef HAVE_MEMORY_BARRIER +#if defined __i386__ || defined __x86_64__ || defined _M_IX86 || defined _M_X64 || defined __WIN__ +#else + ib_logf(IB_LOG_LEVEL_WARN, + "MySQL was built without a memory barrier capability on this" + " architecture, which might allow a mutex/rw_lock violation" + " under high thread concurrency. 
This may cause a hang."); +#endif /* IA32 or AMD64 */ +#endif /* HAVE_MEMORY_BARRIER */ + + ib_logf(IB_LOG_LEVEL_INFO, "Compressed tables use zlib " ZLIB_VERSION #ifdef UNIV_ZIP_DEBUG " with validation" @@ -2274,9 +2288,9 @@ innobase_start_or_create_for_mysql(void) } else if (size != srv_log_file_size) { ib_logf(IB_LOG_LEVEL_ERROR, "Log file %s is" - " of different size "UINT64PF" bytes" + " of different size " UINT64PF " bytes" " than other log" - " files "UINT64PF" bytes!", + " files " UINT64PF " bytes!", logfilename, size << UNIV_PAGE_SIZE_SHIFT, (os_offset_t) srv_log_file_size @@ -2528,6 +2542,15 @@ files_checked: dict_check = DICT_CHECK_NONE_LOADED; } + /* Create the SYS_TABLESPACES and SYS_DATAFILES system table */ + err = dict_create_or_check_sys_tablespace(); + if (err != DB_SUCCESS) { + return(err); + } + + sys_datafiles_created = true; + + /* This function assumes that SYS_DATAFILES exists */ dict_check_tablespaces_and_store_max_id(dict_check); } @@ -2709,13 +2732,6 @@ files_checked: srv_undo_logs = ULONG_UNDEFINED; } - /* Flush the changes made to TRX_SYS_PAGE by trx_sys_create_rsegs()*/ - if (!srv_force_recovery && !srv_read_only_mode) { - bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL); - ut_a(success); - buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); - } - if (!srv_read_only_mode) { /* Create the thread which watches the timeouts for lock waits */ @@ -2740,10 +2756,13 @@ files_checked: return(err); } - /* Create the SYS_TABLESPACES system table */ - err = dict_create_or_check_sys_tablespace(); - if (err != DB_SUCCESS) { - return(err); + /* Create the SYS_TABLESPACES and SYS_DATAFILES system tables if we + have not done that already on crash recovery. */ + if (sys_datafiles_created == false) { + err = dict_create_or_check_sys_tablespace(); + if (err != DB_SUCCESS) { + return(err); + } } srv_is_being_started = FALSE; diff --git a/storage/xtradb/sync/sync0arr.cc b/storage/xtradb/sync/sync0arr.cc index 126cf8de0d5..7ad9fe8d40b 100644 --- a/storage/xtradb/sync/sync0arr.cc +++ b/storage/xtradb/sync/sync0arr.cc @@ -2,6 +2,7 @@ Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. +Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -183,6 +184,33 @@ sync_array_get_nth_cell( } /******************************************************************//** +Looks for a cell with the given thread id. +@return pointer to cell or NULL if not found */ +static +sync_cell_t* +sync_array_find_thread( +/*===================*/ + sync_array_t* arr, /*!< in: wait array */ + os_thread_id_t thread) /*!< in: thread id */ +{ + ulint i; + sync_cell_t* cell; + + for (i = 0; i < arr->n_cells; i++) { + + cell = sync_array_get_nth_cell(arr, i); + + if (cell->wait_object != NULL + && os_thread_eq(cell->thread, thread)) { + + return(cell); /* Found */ + } + } + + return(NULL); /* Not found */ +} + +/******************************************************************//** Reserves the mutex semaphore protecting a sync array. 
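The srv0srv.cc monitor hunk further up switches the periodic LSN sanity check to log_get_lsn_nowait(), which returns 0 instead of blocking when the log mutex is contended, and the monitor then simply skips that sample. A rough equivalent of the pattern, with a std::mutex standing in for the log system mutex:

#include <cstdint>
#include <mutex>

static std::mutex log_mutex;          /* stands in for the log system mutex */
static uint64_t   current_lsn = 0;

static uint64_t get_lsn_nowait()
{
    if (!log_mutex.try_lock())
        return 0;                     /* busy: caller skips this sample */
    uint64_t lsn = current_lsn;
    log_mutex.unlock();
    return lsn;
}

static void monitor_round(uint64_t& old_lsn)
{
    uint64_t new_lsn = get_lsn_nowait();
    if (new_lsn != 0 && new_lsn < old_lsn) {
        /* the real code reports the "log sequence number decreased" error */
    }
    if (new_lsn != 0)
        old_lsn = new_lsn;            /* only advance on a successful read */
}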
*/ static void @@ -441,8 +469,10 @@ static void sync_array_cell_print( /*==================*/ - FILE* file, /*!< in: file where to print */ - sync_cell_t* cell) /*!< in: sync cell */ + FILE* file, /*!< in: file where to print */ + sync_cell_t* cell, /*!< in: sync cell */ + os_thread_id_t* reserver) /*!< out: write reserver or + 0 */ { ib_mutex_t* mutex; ib_prio_mutex_t* prio_mutex; @@ -460,16 +490,9 @@ sync_array_cell_print( innobase_basename(cell->file), (ulong) cell->line, difftime(time(NULL), cell->reservation_time)); - /* If stacktrace feature is enabled we will send a SIGUSR2 - signal to thread waiting for the semaphore. Signal handler - will then dump the current stack to error log. */ - if (srv_use_stacktrace) { -#ifdef __linux__ - pthread_kill(cell->thread, SIGUSR2); -#endif - } if (type == SYNC_MUTEX || type == SYNC_PRIO_MUTEX) { + /* We use old_wait_mutex in case the cell has already been freed meanwhile */ if (type == SYNC_MUTEX) { @@ -483,18 +506,29 @@ sync_array_cell_print( } - fprintf(file, - "Mutex at %p '%s', lock var %lu\n" + if (mutex) { + fprintf(file, + "Mutex at %p '%s', lock var %lu\n" #ifdef UNIV_SYNC_DEBUG - "Last time reserved in file %s line %lu, " + "Last time reserved in file %s line %lu, " #endif /* UNIV_SYNC_DEBUG */ - "waiters flag %lu\n", - (void*) mutex, mutex->cmutex_name, - (ulong) mutex->lock_word, + "waiters flag %lu\n", + (void*) mutex, mutex->cmutex_name, + (ulong) mutex->lock_word, #ifdef UNIV_SYNC_DEBUG - mutex->file_name, (ulong) mutex->line, + mutex->file_name, (ulong) mutex->line, #endif /* UNIV_SYNC_DEBUG */ - (ulong) mutex->waiters); + (ulong) mutex->waiters); + } + + /* If stacktrace feature is enabled we will send a SIGUSR2 + signal to thread waiting for the semaphore. Signal handler + will then dump the current stack to error log. */ + if (srv_use_stacktrace && cell && cell->thread) { +#ifdef __linux__ + pthread_kill(cell->thread, SIGUSR2); +#endif + } if (type == SYNC_PRIO_MUTEX) { @@ -529,40 +563,47 @@ sync_array_cell_print( rwlock = &prio_rwlock->base_lock; } - fprintf(file, - " RW-latch at %p '%s'\n", - (void*) rwlock, rwlock->lock_name); - writer = rw_lock_get_writer(rwlock); - if (writer != RW_LOCK_NOT_LOCKED) { + if (rwlock) { fprintf(file, - "a writer (thread id %lu) has" - " reserved it in mode %s", - (ulong) os_thread_pf(rwlock->writer_thread), - writer == RW_LOCK_EX - ? " exclusive\n" - : " wait exclusive\n"); - } + " RW-latch at %p '%s'\n", + (void*) rwlock, rwlock->lock_name); - fprintf(file, - "number of readers %lu, waiters flag %lu, " - "lock_word: %lx\n" - "Last time read locked in file %s line %lu\n" - "Last time write locked in file %s line %lu\n", - (ulong) rw_lock_get_reader_count(rwlock), - (ulong) rwlock->waiters, - rwlock->lock_word, - innobase_basename(rwlock->last_s_file_name), - (ulong) rwlock->last_s_line, - rwlock->last_x_file_name, - (ulong) rwlock->last_x_line); + writer = rw_lock_get_writer(rwlock); - /* If stacktrace feature is enabled we will send a SIGUSR2 - signal to thread that has locked RW-latch with write mode. - Signal handler will then dump the current stack to error log. */ - if (writer != RW_LOCK_NOT_LOCKED && srv_use_stacktrace) { + if (writer && writer != RW_LOCK_NOT_LOCKED) { + fprintf(file, + "a writer (thread id %lu) has" + " reserved it in mode %s", + (ulong) os_thread_pf(rwlock->writer_thread), + writer == RW_LOCK_EX + ? 
" exclusive\n" + : " wait exclusive\n"); + + *reserver = rwlock->writer_thread; + } + + fprintf(file, + "number of readers %lu, waiters flag %lu, " + "lock_word: %lx\n" + "Last time read locked in file %s line %lu\n" + "Last time write locked in file %s line %lu\n", + (ulong) rw_lock_get_reader_count(rwlock), + (ulong) rwlock->waiters, + rwlock->lock_word, + innobase_basename(rwlock->last_s_file_name), + (ulong) rwlock->last_s_line, + rwlock->last_x_file_name, + (ulong) rwlock->last_x_line); + + /* If stacktrace feature is enabled we will send a SIGUSR2 + signal to thread that has locked RW-latch with write mode. + Signal handler will then dump the current stack to error log. */ + if (writer != RW_LOCK_NOT_LOCKED && srv_use_stacktrace && + rwlock && rwlock->writer_thread) { #ifdef __linux__ - pthread_kill(rwlock->writer_thread, SIGUSR2); + pthread_kill(rwlock->writer_thread, SIGUSR2); #endif + } } if (prio_rwlock) { @@ -584,32 +625,6 @@ sync_array_cell_print( } #ifdef UNIV_SYNC_DEBUG -/******************************************************************//** -Looks for a cell with the given thread id. -@return pointer to cell or NULL if not found */ -static -sync_cell_t* -sync_array_find_thread( -/*===================*/ - sync_array_t* arr, /*!< in: wait array */ - os_thread_id_t thread) /*!< in: thread id */ -{ - ulint i; - sync_cell_t* cell; - - for (i = 0; i < arr->n_cells; i++) { - - cell = sync_array_get_nth_cell(arr, i); - - if (cell->wait_object != NULL - && os_thread_eq(cell->thread, thread)) { - - return(cell); /* Found */ - } - } - - return(NULL); /* Not found */ -} /******************************************************************//** Recursion step for deadlock detection. @@ -671,6 +686,7 @@ sync_array_detect_deadlock( os_thread_id_t thread; ibool ret; rw_lock_debug_t*debug; + os_thread_id_t r = 0; ut_a(arr); ut_a(start); @@ -715,7 +731,7 @@ sync_array_detect_deadlock( "Mutex %p owned by thread %lu file %s line %lu\n", mutex, (ulong) os_thread_pf(mutex->thread_id), mutex->file_name, (ulong) mutex->line); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &r); return(TRUE); } @@ -754,7 +770,7 @@ sync_array_detect_deadlock( print: fprintf(stderr, "rw-lock %p ", (void*) lock); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &r); rw_lock_debug_print(stderr, debug); return(TRUE); } @@ -823,6 +839,7 @@ sync_arr_cell_can_wake_up( cell->wait_object))->base_mutex; } + os_rmb; if (mutex_get_lock_word(mutex) == 0) { return(TRUE); @@ -833,6 +850,7 @@ sync_arr_cell_can_wake_up( lock = static_cast<rw_lock_t*>(cell->wait_object); + os_rmb; if (lock->lock_word > 0) { /* Either unlocked or only read locked. 
*/ @@ -844,6 +862,7 @@ sync_arr_cell_can_wake_up( lock = static_cast<rw_lock_t*>(cell->wait_object); /* lock_word == 0 means all readers have left */ + os_rmb; if (lock->lock_word == 0) { return(TRUE); @@ -853,6 +872,7 @@ sync_arr_cell_can_wake_up( lock = static_cast<rw_lock_t*>(cell->wait_object); /* lock_word > 0 means no writer or reserved writer */ + os_rmb; if (lock->lock_word > 0) { return(TRUE); @@ -1009,6 +1029,7 @@ sync_array_print_long_waits_low( double diff; sync_cell_t* cell; void* wait_object; + os_thread_id_t reserver=0; cell = sync_array_get_nth_cell(arr, i); @@ -1024,7 +1045,7 @@ sync_array_print_long_waits_low( if (diff > SYNC_ARRAY_TIMEOUT) { fputs("InnoDB: Warning: a long semaphore wait:\n", stderr); - sync_array_cell_print(stderr, cell); + sync_array_cell_print(stderr, cell, &reserver); *noticed = TRUE; } @@ -1039,6 +1060,57 @@ sync_array_print_long_waits_low( } } + /* We found a long semaphore wait, wait all threads that are + waiting for a semaphore. */ + if (*noticed) { + for (i = 0; i < arr->n_cells; i++) { + void* wait_object; + sync_cell_t* cell; + os_thread_id_t reserver=(os_thread_id_t)ULINT_UNDEFINED; + ulint loop=0; + + cell = sync_array_get_nth_cell(arr, i); + + wait_object = cell->wait_object; + + if (wait_object == NULL || !cell->waiting) { + + continue; + } + + fputs("InnoDB: Warning: semaphore wait:\n", + stderr); + sync_array_cell_print(stderr, cell, &reserver); + + /* Try to output cell information for writer recursive way */ + while (reserver != (os_thread_id_t)ULINT_UNDEFINED) { + sync_cell_t* reserver_wait; + + reserver_wait = sync_array_find_thread(arr, reserver); + + if (reserver_wait && + reserver_wait->wait_object != NULL && + reserver_wait->waiting) { + fputs("InnoDB: Warning: Writer thread is waiting this semaphore:\n", + stderr); + sync_array_cell_print(stderr, reserver_wait, &reserver); + + if (reserver_wait->thread == reserver) { + reserver = (os_thread_id_t)ULINT_UNDEFINED; + } + } else { + reserver = (os_thread_id_t)ULINT_UNDEFINED; + } + + /* This is protection against loop */ + if (loop > 100) { + fputs("InnoDB: Warning: Too many waiting threads.\n", stderr); + break; + } + } + } + } + #undef SYNC_ARRAY_TIMEOUT return(fatal); @@ -1125,12 +1197,13 @@ sync_array_print_info_low( for (i = 0; count < arr->n_reserved; ++i) { sync_cell_t* cell; + os_thread_id_t r = 0; cell = sync_array_get_nth_cell(arr, i); if (cell->wait_object != NULL) { count++; - sync_array_cell_print(file, cell); + sync_array_cell_print(file, cell, &r); } } } diff --git a/storage/xtradb/sync/sync0rw.cc b/storage/xtradb/sync/sync0rw.cc index 2ff75b55cf6..7fad78ea577 100644 --- a/storage/xtradb/sync/sync0rw.cc +++ b/storage/xtradb/sync/sync0rw.cc @@ -41,6 +41,7 @@ Created 9/11/1995 Heikki Tuuri #include "srv0srv.h" #include "os0sync.h" /* for INNODB_RW_LOCKS_USE_ATOMICS */ #include "ha_prototypes.h" +#include "my_cpu.h" /* IMPLEMENTATION OF THE RW_LOCK @@ -151,18 +152,12 @@ UNIV_INTERN mysql_pfs_key_t rw_lock_mutex_key; To modify the debug info list of an rw-lock, this mutex has to be acquired in addition to the mutex protecting the lock. 
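The enlarged long-semaphore-wait report in sync0arr.cc above now prints every waiting cell and then follows the thread that currently holds the blocking rw-lock, in case that holder is itself parked on another semaphore; a step cap protects against cycles. A simplified sketch of that chain walk with stand-in cell bookkeeping:

#include <cstddef>
#include <cstdio>

struct wait_cell {
    const void*   wait_object;  /* latch the thread is waiting for, or NULL */
    unsigned long thread;       /* id of the waiting thread                 */
    bool          waiting;
    unsigned long holder;       /* id of the thread holding that latch      */
};

/* Linear scan over the wait array, mirroring sync_array_find_thread(). */
static const wait_cell* find_cell_of(const wait_cell* cells, size_t n,
                                     unsigned long thread)
{
    for (size_t i = 0; i < n; i++)
        if (cells[i].wait_object != NULL && cells[i].thread == thread)
            return &cells[i];
    return NULL;
}

static void follow_blocking_chain(const wait_cell* cells, size_t n,
                                  unsigned long holder)
{
    for (int depth = 0; depth < 100; depth++) {   /* protection against loops */
        const wait_cell* c = find_cell_of(cells, n, holder);
        if (c == NULL || !c->waiting)
            break;                                /* holder is not blocked    */
        std::printf("thread %lu holding the latch is itself waiting\n",
                    c->thread);
        if (c->holder == holder)
            break;                                /* waiting on its own latch */
        holder = c->holder;
    }
}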
*/ -UNIV_INTERN ib_mutex_t rw_lock_debug_mutex; +UNIV_INTERN os_fast_mutex_t rw_lock_debug_mutex; # ifdef UNIV_PFS_MUTEX UNIV_INTERN mysql_pfs_key_t rw_lock_debug_mutex_key; # endif -/* If deadlock detection does not get immediately the mutex, -it may wait for this event */ -UNIV_INTERN os_event_t rw_lock_debug_event; -/* This is set to TRUE, if there may be waiters for the event */ -UNIV_INTERN ibool rw_lock_debug_waiters; - /******************************************************************//** Creates a debug info struct. */ static @@ -454,6 +449,8 @@ lock_loop: lock)) { /* Spin waiting for the writer field to become free */ + os_rmb; + HMT_low(); while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) { if (srv_spin_wait_delay) { ut_delay(ut_rnd_interval(0, @@ -461,9 +458,11 @@ lock_loop: } i++; + os_rmb; } - if (i == SYNC_SPIN_ROUNDS) { + HMT_medium(); + if (i >= SYNC_SPIN_ROUNDS) { os_thread_yield(); } @@ -609,16 +608,26 @@ rw_lock_x_lock_wait( counter_index = (size_t) os_thread_get_curr_id(); + os_rmb; ut_ad(lock->lock_word <= 0); + HMT_low(); + if (high_priority) { + + prio_rw_lock = reinterpret_cast<prio_rw_lock_t *>(lock); + prio_rw_lock->high_priority_wait_ex_waiter = 1; + } + while (lock->lock_word < 0) { if (srv_spin_wait_delay) { ut_delay(ut_rnd_interval(0, srv_spin_wait_delay)); } if(i < SYNC_SPIN_ROUNDS) { i++; + os_rmb; continue; } + HMT_medium(); /* If there is still a reader, then go to sleep.*/ rw_lock_stats.rw_x_spin_round_count.add(counter_index, i); @@ -628,13 +637,6 @@ rw_lock_x_lock_wait( file_name, line, &index); - if (high_priority) { - - prio_rw_lock - = reinterpret_cast<prio_rw_lock_t *>(lock); - prio_rw_lock->high_priority_wait_ex_waiter = 1; - } - i = 0; /* Check lock_word to ensure wake-up isn't missed.*/ @@ -661,12 +663,16 @@ rw_lock_x_lock_wait( We must pass the while-loop check to proceed.*/ } else { sync_array_free_cell(sync_arr, index); - if (prio_rw_lock) { - - prio_rw_lock->high_priority_wait_ex_waiter = 0; - } } + HMT_low(); } + HMT_medium(); + + if (prio_rw_lock) { + + prio_rw_lock->high_priority_wait_ex_waiter = 0; + } + rw_lock_stats.rw_x_spin_round_count.add(counter_index, i); } @@ -708,6 +714,10 @@ rw_lock_x_lock_low( } else { os_thread_id_t thread_id = os_thread_get_curr_id(); + if (!pass) { + os_rmb; + } + /* Decrement failed: relock or failed lock */ if (!pass && lock->recursive && os_thread_eq(lock->writer_thread, thread_id)) { @@ -798,6 +808,8 @@ lock_loop: } /* Spin waiting for the lock_word to become free */ + os_rmb; + HMT_low(); while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) { if (srv_spin_wait_delay) { @@ -806,8 +818,10 @@ lock_loop: } i++; + os_rmb; } - if (i == SYNC_SPIN_ROUNDS) { + HMT_medium(); + if (i >= SYNC_SPIN_ROUNDS) { os_thread_yield(); } else { goto lock_loop; @@ -920,22 +934,7 @@ void rw_lock_debug_mutex_enter(void) /*===========================*/ { -loop: - if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { - return; - } - - os_event_reset(rw_lock_debug_event); - - rw_lock_debug_waiters = TRUE; - - if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) { - return; - } - - os_event_wait(rw_lock_debug_event); - - goto loop; + os_fast_mutex_lock(&rw_lock_debug_mutex); } /******************************************************************//** @@ -945,12 +944,7 @@ void rw_lock_debug_mutex_exit(void) /*==========================*/ { - mutex_exit(&rw_lock_debug_mutex); - - if (rw_lock_debug_waiters) { - rw_lock_debug_waiters = FALSE; - os_event_set(rw_lock_debug_event); - } + os_fast_mutex_unlock(&rw_lock_debug_mutex); } 
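The rw-lock spin loops above gain an os_rmb read barrier before every re-read of lock_word, HMT_low()/HMT_medium() hints that lower and restore SMT thread priority while spinning (meaningful on POWER, a no-op elsewhere), and a defensive ">=" test on the spin counter before yielding. The overall shape, approximated with C++11 primitives:

#include <atomic>
#include <thread>

static const int SPIN_ROUNDS = 30;   /* stand-in for SYNC_SPIN_ROUNDS */

inline void spin_then_yield(const std::atomic<long>& lock_word)
{
    int i = 0;
    std::atomic_thread_fence(std::memory_order_acquire);        /* ~ os_rmb */
    /* HMT_low() would drop SMT priority here. */
    while (i < SPIN_ROUNDS &&
           lock_word.load(std::memory_order_relaxed) <= 0) {
        /* ut_delay(ut_rnd_interval(0, srv_spin_wait_delay)) goes here */
        ++i;
        std::atomic_thread_fence(std::memory_order_acquire);    /* ~ os_rmb */
    }
    /* HMT_medium() restores priority before possibly sleeping. */
    if (i >= SPIN_ROUNDS)
        std::this_thread::yield();
}

Separately, the UNIV_SYNC_DEBUG list mutex no longer needs the hand-rolled nowait/event handshake: a plain OS mutex (os_fast_mutex) already blocks and wakes waiters on its own.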
/******************************************************************//** diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc index 1c5b144eb24..46343a71d28 100644 --- a/storage/xtradb/sync/sync0sync.cc +++ b/storage/xtradb/sync/sync0sync.cc @@ -46,6 +46,7 @@ Created 9/5/1995 Heikki Tuuri # include "srv0start.h" /* srv_is_being_started */ #endif /* UNIV_SYNC_DEBUG */ #include "ha_prototypes.h" +#include "my_cpu.h" /* REASONS FOR IMPLEMENTING THE SPIN LOCK MUTEX @@ -535,6 +536,8 @@ mutex_set_waiters( ptr = &(mutex->waiters); + os_wmb; + *ptr = n; /* Here we assume that the write of a single word in memory is atomic */ } @@ -587,15 +590,17 @@ mutex_loop: spin_loop: + HMT_low(); + os_rmb; while (mutex_get_lock_word(mutex) != 0 && i < SYNC_SPIN_ROUNDS) { if (srv_spin_wait_delay) { ut_delay(ut_rnd_interval(0, srv_spin_wait_delay)); } - i++; } + HMT_medium(); - if (i == SYNC_SPIN_ROUNDS) { + if (i >= SYNC_SPIN_ROUNDS) { os_thread_yield(); } @@ -1599,11 +1604,7 @@ sync_init(void) SYNC_NO_ORDER_CHECK); #ifdef UNIV_SYNC_DEBUG - mutex_create(rw_lock_debug_mutex_key, &rw_lock_debug_mutex, - SYNC_NO_ORDER_CHECK); - - rw_lock_debug_event = os_event_create(); - rw_lock_debug_waiters = FALSE; + os_fast_mutex_init(rw_lock_debug_mutex_key, &rw_lock_debug_mutex); #endif /* UNIV_SYNC_DEBUG */ } @@ -1677,6 +1678,7 @@ sync_close(void) sync_order_checks_on = FALSE; sync_thread_level_arrays_free(); + os_fast_mutex_free(&rw_lock_debug_mutex); #endif /* UNIV_SYNC_DEBUG */ sync_initialized = FALSE; @@ -1691,12 +1693,12 @@ sync_print_wait_info( FILE* file) /*!< in: file where to print */ { fprintf(file, - "Mutex spin waits "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n" - "RW-shared spins "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n" - "RW-excl spins "UINT64PF", rounds "UINT64PF", " - "OS waits "UINT64PF"\n", + "Mutex spin waits " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n" + "RW-shared spins " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n" + "RW-excl spins " UINT64PF ", rounds " UINT64PF ", " + "OS waits " UINT64PF "\n", (ib_uint64_t) mutex_spin_wait_count, (ib_uint64_t) mutex_spin_round_count, (ib_uint64_t) mutex_os_wait_count, diff --git a/storage/xtradb/trx/trx0i_s.cc b/storage/xtradb/trx/trx0i_s.cc index f5d4a6c862f..794ee432ca4 100644 --- a/storage/xtradb/trx/trx0i_s.cc +++ b/storage/xtradb/trx/trx0i_s.cc @@ -1653,7 +1653,7 @@ trx_i_s_create_lock_id( } else { /* table lock */ res_len = ut_snprintf(lock_id, lock_id_size, - TRX_ID_FMT":"UINT64PF, + TRX_ID_FMT":" UINT64PF, row->lock_trx_id, row->lock_table_id); } diff --git a/storage/xtradb/trx/trx0sys.cc b/storage/xtradb/trx/trx0sys.cc index 9b59ae14278..32948d6847c 100644 --- a/storage/xtradb/trx/trx0sys.cc +++ b/storage/xtradb/trx/trx0sys.cc @@ -1097,7 +1097,7 @@ trx_sys_print_mysql_binlog_offset_from_page( == TRX_SYS_MYSQL_LOG_MAGIC_N) { fprintf(stderr, - "ibbackup: Last MySQL binlog file position %lu %lu," + "mysqlbackup: Last MySQL binlog file position %lu %lu," " file name %s\n", (ulong) mach_read_from_4( sys_header + TRX_SYS_MYSQL_LOG_INFO @@ -1148,9 +1148,9 @@ trx_sys_read_file_format_id( ut_print_timestamp(stderr); fprintf(stderr, - " ibbackup: Error: trying to read system tablespace " - "file format,\n" - " ibbackup: but could not open the tablespace " + " mysqlbackup: Error: trying to read system " + "tablespace file format,\n" + " mysqlbackup: but could not open the tablespace " "file %s!\n", pathname); return(FALSE); } @@ -1167,9 +1167,9 @@ 
trx_sys_read_file_format_id( ut_print_timestamp(stderr); fprintf(stderr, - " ibbackup: Error: trying to read system tablespace " - "file format,\n" - " ibbackup: but failed to read the tablespace " + " mysqlbackup: Error: trying to read system " + "tablespace file format,\n" + " mysqlbackup: but failed to read the tablespace " "file %s!\n", pathname); os_file_close(file); @@ -1228,9 +1228,9 @@ trx_sys_read_pertable_file_format_id( ut_print_timestamp(stderr); fprintf(stderr, - " ibbackup: Error: trying to read per-table " + " mysqlbackup: Error: trying to read per-table " "tablespace format,\n" - " ibbackup: but could not open the tablespace " + " mysqlbackup: but could not open the tablespace " "file %s!\n", pathname); return(FALSE); @@ -1247,9 +1247,9 @@ trx_sys_read_pertable_file_format_id( ut_print_timestamp(stderr); fprintf(stderr, - " ibbackup: Error: trying to per-table data file " + " mysqlbackup: Error: trying to per-table data file " "format,\n" - " ibbackup: but failed to read the tablespace " + " mysqlbackup: but failed to read the tablespace " "file %s!\n", pathname); os_file_close(file); diff --git a/storage/xtradb/trx/trx0trx.cc b/storage/xtradb/trx/trx0trx.cc index 30d4b7f6546..12f680bd7e2 100644 --- a/storage/xtradb/trx/trx0trx.cc +++ b/storage/xtradb/trx/trx0trx.cc @@ -53,6 +53,9 @@ Created 3/26/1996 Heikki Tuuri #include<set> +extern "C" +int thd_deadlock_victim_preference(const MYSQL_THD thd1, const MYSQL_THD thd2); + /** Set of table_id */ typedef std::set<table_id_t> table_id_set; @@ -1725,6 +1728,38 @@ trx_assign_read_view( return(trx->read_view); } +/********************************************************************//** +Clones the read view from another transaction. All consistent reads within +the receiver transaction will get the same read view as the donor transaction +@return read view clone */ +UNIV_INTERN +read_view_t* +trx_clone_read_view( +/*================*/ + trx_t* trx, /*!< in: receiver transaction */ + trx_t* from_trx) /*!< in: donor transaction */ +{ + ut_ad(lock_mutex_own()); + ut_ad(mutex_own(&trx_sys->mutex)); + ut_ad(trx_mutex_own(from_trx)); + ut_ad(trx->read_view == NULL); + + if (from_trx->state != TRX_STATE_ACTIVE || + from_trx->read_view == NULL) { + + return(NULL); + } + + trx->read_view = read_view_clone(from_trx->read_view, + trx->prebuilt_view); + + read_view_add(trx->read_view); + + trx->global_read_view = trx->read_view; + + return(trx->read_view); +} + /****************************************************************//** Prepares a transaction for commit/rollback. */ UNIV_INTERN @@ -2071,7 +2106,7 @@ state_ok: if (trx->undo_no != 0) { newline = TRUE; - fprintf(f, ", undo log entries "TRX_ID_FMT, trx->undo_no); + fprintf(f, ", undo log entries " TRX_ID_FMT, trx->undo_no); } if (newline) { @@ -2174,9 +2209,8 @@ trx_assert_started( #endif /* UNIV_DEBUG */ /*******************************************************************//** -Compares the "weight" (or size) of two transactions. Transactions that -have edited non-transactional tables are considered heavier than ones -that have not. +Compares the "weight" (or size) of two transactions. The heavier the weight, +the more reluctant we will be to choose the transaction as a deadlock victim. 
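As the updated comment above describes, trx_weight_ge() now first asks the server layer via thd_deadlock_victim_preference() and only falls back to comparing altered/locked row counts when the server has no preference, replacing the old "has edited non-transactional tables" heuristic. A sketch of that ordering with stand-in types (the callback parameter stands in for the real extern "C" hook):

typedef int (*victim_pref_fn)(const void* thd_a, const void* thd_b);

/* Returns true when transaction a is considered at least as heavy as
   transaction b, and therefore less likely to be picked as the victim. */
inline bool trx_weighs_at_least(victim_pref_fn pref,
                                const void* thd_a, const void* thd_b,
                                unsigned long long rows_a,
                                unsigned long long rows_b)
{
    int p = pref(thd_a, thd_b);
    if (p < 0)  return false;      /* server layer: a is the lighter one */
    if (p > 0)  return true;       /* server layer: a is the heavier one */
    return rows_a >= rows_b;       /* tie: compare altered/locked rows   */
}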
@return TRUE if weight(a) >= weight(b) */ UNIV_INTERN ibool @@ -2185,26 +2219,19 @@ trx_weight_ge( const trx_t* a, /*!< in: the first transaction to be compared */ const trx_t* b) /*!< in: the second transaction to be compared */ { - ibool a_notrans_edit; - ibool b_notrans_edit; - - /* If mysql_thd is NULL for a transaction we assume that it has - not edited non-transactional tables. */ - - a_notrans_edit = a->mysql_thd != NULL - && thd_has_edited_nontrans_tables(a->mysql_thd); - - b_notrans_edit = b->mysql_thd != NULL - && thd_has_edited_nontrans_tables(b->mysql_thd); - - if (a_notrans_edit != b_notrans_edit) { + int pref; - return(a_notrans_edit); + /* First ask the upper server layer if it has any preference for which + to prefer as a deadlock victim. */ + pref= thd_deadlock_victim_preference(a->mysql_thd, b->mysql_thd); + if (pref < 0) { + return FALSE; + } else if (pref > 0) { + return TRUE; } - /* Either both had edited non-transactional tables or both had - not, we fall back to comparing the number of altered/locked - rows. */ + /* Upper server layer had no preference, we fall back to comparing the + number of altered/locked rows. */ #if 0 fprintf(stderr, @@ -2371,7 +2398,7 @@ trx_recover_for_mysql( ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Transaction contains changes" - " to "TRX_ID_FMT" rows\n", + " to " TRX_ID_FMT " rows\n", trx->undo_no); count++; diff --git a/strings/CMakeLists.txt b/strings/CMakeLists.txt index e1cee8d9824..6291d107d90 100644 --- a/strings/CMakeLists.txt +++ b/strings/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates +# Copyright (c) 2006, 2014, Oracle and/or its affiliates # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -36,7 +36,6 @@ ADD_CONVENIENCE_LIBRARY(strings ${STRINGS_SOURCES}) ADD_EXECUTABLE(conf_to_src EXCLUDE_FROM_ALL conf_to_src.c) TARGET_LINK_LIBRARIES(conf_to_src strings) -INSTALL_DEBUG_SYMBOLS(strings) IF(MSVC) INSTALL_DEBUG_TARGET(strings DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() diff --git a/strings/ctype-bin.c b/strings/ctype-bin.c index 6e861f38ae4..3ca4ba2b430 100644 --- a/strings/ctype-bin.c +++ b/strings/ctype-bin.c @@ -1,5 +1,6 @@ /* Copyright (c) 2002-2007 MySQL AB & tommy@valley.ne.jp - Copyright (c) 2009-2011, Monty Program Ab + Copyright (c) 2002, 2014, Oracle and/or its affiliates. + Copyright (c) 2009, 2014, SkySQL Ab. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public @@ -287,9 +288,7 @@ void my_hash_sort_8bit_bin(CHARSET_INFO *cs __attribute__((unused)), for (; key < end ; key++) { - tmp1^= (ulong) ((((uint) tmp1 & 63) + tmp2) * - ((uint) *key)) + (tmp1 << 8); - tmp2+= 3; + MY_HASH_ADD(tmp1, tmp2, (uint) *key); } *nr1= tmp1; @@ -306,9 +305,7 @@ void my_hash_sort_bin(CHARSET_INFO *cs __attribute__((unused)), for (; key < end ; key++) { - tmp1^= (ulong) ((((uint) tmp1 & 63) + tmp2) * - ((uint) *key)) + (tmp1 << 8); - tmp2+= 3; + MY_HASH_ADD(tmp1, tmp2, (uint) *key); } *nr1= tmp1; diff --git a/strings/ctype-eucjpms.c b/strings/ctype-eucjpms.c index d9033a234c4..0ce179b3a2d 100644 --- a/strings/ctype-eucjpms.c +++ b/strings/ctype-eucjpms.c @@ -1,5 +1,6 @@ -/* Copyright (c) 2005, 2011, Oracle and/or its affiliates. - Copyright (c) 2009-2011, Monty Program Ab +/* Copyright (c) 2002 MySQL AB & tommy@valley.ne.jp + Copyright (c) 2002, 2014, Oracle and/or its affiliates. + Copyright (c) 2009, 2014, SkySQL Ab. 
This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public @@ -67417,10 +67418,10 @@ my_wc_mb_eucjpms(CHARSET_INFO *cs __attribute__((unused)), /* EUCJPMS encoding subcomponents: - [x00-x7F] # ASCII/JIS-Roman (one-byte/character) - [x8E][xA0-xDF] # half-width katakana (two bytes/char) - [x8F][xA1-xFE][xA1-xFE] # JIS X 0212-1990 (three bytes/char) - [xA1-xFE][xA1-xFE] # JIS X 0208:1997 (two bytes/char) + [x00-x7F] # ASCII/JIS-Roman (one-byte/character) + [x8E][xA1-xDF] # half-width katakana (two bytes/char) + [x8F][xA1-xFE][xA1-xFE] # JIS X 0212-1990 (three bytes/char) + [xA1-xFE][xA1-xFE] # JIS X 0208:1997 (two bytes/char) */ static @@ -67443,15 +67444,15 @@ size_t my_well_formed_len_eucjpms(CHARSET_INFO *cs __attribute__((unused)), if (b >= (uchar *) end) /* need more bytes */ return (uint) (chbeg - beg); /* unexpected EOL */ - if (ch == 0x8E) /* [x8E][xA0-xDF] */ + if (iseucjpms_ss2(ch)) /* [x8E][xA1-xDF] */ { - if (*b >= 0xA0 && *b <= 0xDF) + if (iskata(*b)) continue; *error=1; return (uint) (chbeg - beg); /* invalid sequence */ } - if (ch == 0x8F) /* [x8F][xA1-xFE][xA1-xFE] */ + if (iseucjpms_ss3(ch)) /* [x8F][xA1-xFE][xA1-xFE] */ { ch= *b++; if (b >= (uchar*) end) @@ -67461,8 +67462,7 @@ size_t my_well_formed_len_eucjpms(CHARSET_INFO *cs __attribute__((unused)), } } - if (ch >= 0xA1 && ch <= 0xFE && - *b >= 0xA1 && *b <= 0xFE) /* [xA1-xFE][xA1-xFE] */ + if (iseucjpms(ch) && iseucjpms(*b)) /* [xA1-xFE][xA1-xFE] */ continue; *error=1; return (size_t) (chbeg - beg); /* invalid sequence */ diff --git a/strings/ctype-latin1.c b/strings/ctype-latin1.c index 2c84f86fad0..babf74599ea 100644 --- a/strings/ctype-latin1.c +++ b/strings/ctype-latin1.c @@ -691,6 +691,8 @@ void my_hash_sort_latin1_de(CHARSET_INFO *cs __attribute__((unused)), ulong *nr1, ulong *nr2) { const uchar *end; + register ulong m1= *nr1, m2= *nr2; + /* Remove end space. We have to do this to be able to compare 'AE' and 'Ä' as identical @@ -700,14 +702,14 @@ void my_hash_sort_latin1_de(CHARSET_INFO *cs __attribute__((unused)), for (; key < end ; key++) { uint X= (uint) combo1map[(uint) *key]; - nr1[0]^=(ulong) ((((uint) nr1[0] & 63)+nr2[0]) * X) + (nr1[0] << 8); - nr2[0]+=3; + MY_HASH_ADD(m1, m2, X); if ((X= combo2map[*key])) { - nr1[0]^=(ulong) ((((uint) nr1[0] & 63)+nr2[0]) * X) + (nr1[0] << 8); - nr2[0]+=3; + MY_HASH_ADD(m1, m2, X); } } + *nr1= m1; + *nr2= m2; } diff --git a/strings/ctype-mb.c b/strings/ctype-mb.c index f4e70fd1dd5..1ef772e1d5e 100644 --- a/strings/ctype-mb.c +++ b/strings/ctype-mb.c @@ -680,6 +680,8 @@ void my_hash_sort_mb_bin(CHARSET_INFO *cs __attribute__((unused)), const uchar *key, size_t len,ulong *nr1, ulong *nr2) { + register ulong m1= *nr1, m2= *nr2; + /* Remove trailing spaces. We have to do this to be able to compare 'A ' and 'A' as identical @@ -688,10 +690,10 @@ my_hash_sort_mb_bin(CHARSET_INFO *cs __attribute__((unused)), for (; key < end ; key++) { - nr1[0]^=(ulong) ((((uint) nr1[0] & 63)+nr2[0]) * - ((uint)*key)) + (nr1[0] << 8); - nr2[0]+=3; + MY_HASH_ADD(m1, m2, (uint)*key); } + *nr1= m1; + *nr2= m2; } diff --git a/strings/ctype-simple.c b/strings/ctype-simple.c index f5484965314..0785ba35700 100644 --- a/strings/ctype-simple.c +++ b/strings/ctype-simple.c @@ -306,24 +306,48 @@ void my_hash_sort_simple(CHARSET_INFO *cs, { register const uchar *sort_order=cs->sort_order; const uchar *end; - ulong n1, n2; + register ulong m1= *nr1, m2= *nr2; + uint16 space_weight= sort_order[' ']; /* - Remove end space. 
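Every my_hash_sort_* rewrite in this patch follows the same recipe: copy *nr1/*nr2 into register locals, push each weight through the new MY_HASH_ADD macro (defined in strings/strings_def.h later in this patch), and write the locals back once at the end. Spelled out as an inline function, one mixing step and a typical caller look like this:

typedef unsigned long ulong_t;   /* stand-in for MySQL's ulong */

/* Equivalent of MY_HASH_ADD(A, B, value): the same mixing step the old
   code open-coded at every call site. */
static inline void hash_add(ulong_t& a, ulong_t& b, unsigned value)
{
    a ^= (((a & 63) + b) * value) + (a << 8);
    b += 3;
}

/* Caller shape after the refactor: accumulate in locals, store back once. */
static void hash_bytes(const unsigned char* key, const unsigned char* end,
                       ulong_t* nr1, ulong_t* nr2)
{
    ulong_t m1 = *nr1, m2 = *nr2;
    for (; key < end; key++)
        hash_add(m1, m2, (unsigned) *key);
    *nr1 = m1;
    *nr2 = m2;
}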
We have to do this to be able to compare - 'A ' and 'A' as identical + Remove all trailing characters that are equal to space. + We have to do this to be able to compare 'A ' and 'A' as identical. + + If the key is long enough, cut the trailing spaces (0x20) using an + optimized function implemented in skip_trailing_spaces(). + + "len > 16" is just some heuristic here. + Calling skip_triling_space() for short values is not desirable, + because its initialization block may be more expensive than the + performance gained. + */ + + end= len > 16 ? skip_trailing_space(key, len) : key + len; + + /* + We removed all trailing characters that are binary equal to space 0x20. + Now remove all trailing characters that have weights equal to space. + Some 8bit simple collations may have such characters: + - cp1250_general_ci 0xA0 NO-BREAK SPACE == 0x20 SPACE + - cp1251_ukrainian_ci 0x60 GRAVE ACCENT == 0x20 SPACE + - koi8u_general_ci 0x60 GRAVE ACCENT == 0x20 SPACE */ - end= skip_trailing_space(key, len); - n1= *nr1; - n2= *nr2; + for ( ; key < end ; ) + { + if (sort_order[*--end] != space_weight) + { + end++; + break; + } + } + for (; key < (uchar*) end ; key++) { - n1^=(ulong) ((((uint) n1 & 63)+n2) * - ((uint) sort_order[(uint) *key])) + (n1 << 8); - n2+=3; + MY_HASH_ADD(m1, m2, (uint) sort_order[(uint) *key]); } - *nr1= n1; - *nr2= n2; + *nr1= m1; + *nr2= m2; } diff --git a/strings/ctype-uca.c b/strings/ctype-uca.c index 5165a43e852..60de0a106a1 100644 --- a/strings/ctype-uca.c +++ b/strings/ctype-uca.c @@ -20873,21 +20873,54 @@ static int my_strnncollsp_uca(CHARSET_INFO *cs, static void my_hash_sort_uca(CHARSET_INFO *cs, my_uca_scanner_handler *scanner_handler, const uchar *s, size_t slen, - ulong *n1, ulong *n2) + ulong *nr1, ulong *nr2) { int s_res; my_uca_scanner scanner; - - slen= cs->cset->lengthsp(cs, (char*) s, slen); + int space_weight= my_space_weight(cs); + register ulong m1= *nr1, m2= *nr2; + scanner_handler->init(&scanner, cs, &cs->uca->level[0], s, slen); while ((s_res= scanner_handler->next(&scanner)) >0) { - n1[0]^= (((n1[0] & 63)+n2[0])*(s_res >> 8))+ (n1[0] << 8); - n2[0]+=3; - n1[0]^= (((n1[0] & 63)+n2[0])*(s_res & 0xFF))+ (n1[0] << 8); - n2[0]+=3; + if (s_res == space_weight) + { + /* Combine all spaces to be able to skip end spaces */ + uint count= 0; + do + { + count++; + if ((s_res= scanner_handler->next(&scanner)) <= 0) + { + /* Skip strings at end of string */ + goto end; + } + } + while (s_res == space_weight); + + /* Add back that has for the space characters */ + do + { + /* + We can't use MY_HASH_ADD_16() here as we, because of a misstake + in the original code, where we added the 16 byte variable the + opposite way. Changing this would cause old partitioned tables + to fail. 
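my_hash_sort_simple() above now trims the tail in two passes: literal 0x20 spaces first (through skip_trailing_space(), used only when the value is long enough to amortize its setup cost), and then any remaining trailing characters whose collation weight equals the weight of space, which matters for collations such as cp1250_general_ci where NO-BREAK SPACE sorts as space. A condensed sketch of that trimming logic (sort_order is the 8-bit weight table):

#include <stddef.h>

/* Returns a pointer one past the last byte that should be hashed. */
static const unsigned char* trim_for_hash(const unsigned char* key, size_t len,
                                          const unsigned char* sort_order)
{
    const unsigned char* end = key + len;
    const unsigned char  space_weight = sort_order[(unsigned char) ' '];

    /* Pass 1: drop literal 0x20 bytes (the real code calls an optimized
       skip_trailing_space() when len > 16). */
    while (end > key && end[-1] == 0x20)
        end--;

    /* Pass 2: drop remaining bytes whose weight equals the space weight,
       e.g. 0xA0 NO-BREAK SPACE in cp1250_general_ci. */
    while (end > key && sort_order[end[-1]] == space_weight)
        end--;

    return end;
}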
+ */ + MY_HASH_ADD(m1, m2, space_weight >> 8); + MY_HASH_ADD(m1, m2, space_weight & 0xFF); + } + while (--count != 0); + + } + /* See comment above why we can't use MY_HASH_ADD_16() */ + MY_HASH_ADD(m1, m2, s_res >> 8); + MY_HASH_ADD(m1, m2, s_res & 0xFF); } +end: + *nr1= m1; + *nr2= m2; } diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c index a7f948ebe3a..593f9a12950 100644 --- a/strings/ctype-ucs2.c +++ b/strings/ctype-ucs2.c @@ -1222,23 +1222,23 @@ my_caseup_utf16(CHARSET_INFO *cs, char *src, size_t srclen, static void my_hash_sort_utf16(CHARSET_INFO *cs, const uchar *s, size_t slen, - ulong *n1, ulong *n2) + ulong *nr1, ulong *nr2) { my_wc_t wc; my_charset_conv_mb_wc mb_wc= cs->cset->mb_wc; int res; const uchar *e= s + cs->cset->lengthsp(cs, (const char *) s, slen); MY_UNICASE_INFO *uni_plane= cs->caseinfo; + register ulong m1= *nr1, m2= *nr2; while ((s < e) && (res= mb_wc(cs, &wc, (uchar *) s, (uchar *) e)) > 0) { my_tosort_utf16(uni_plane, &wc); - n1[0]^= (((n1[0] & 63) + n2[0]) * (wc & 0xFF)) + (n1[0] << 8); - n2[0]+= 3; - n1[0]^= (((n1[0] & 63) + n2[0]) * (wc >> 8)) + (n1[0] << 8); - n2[0]+= 3; + MY_HASH_ADD_16(m1, m2, wc); s+= res; } + *nr1= m1; + *nr2= m2; } @@ -1611,12 +1611,14 @@ my_hash_sort_utf16_bin(CHARSET_INFO *cs, const uchar *pos, size_t len, ulong *nr1, ulong *nr2) { const uchar *end= pos + cs->cset->lengthsp(cs, (const char *) pos, len); + register ulong m1= *nr1, m2= *nr2; + for ( ; pos < end ; pos++) { - nr1[0]^= (ulong) ((((uint) nr1[0] & 63) + nr2[0]) * - ((uint)*pos)) + (nr1[0] << 8); - nr2[0]+= 3; + MY_HASH_ADD(m1, m2, (uint)*pos); } + *nr1= m1; + *nr2= m2; } @@ -2007,22 +2009,15 @@ my_caseup_utf32(CHARSET_INFO *cs, char *src, size_t srclen, } -static inline void -my_hash_add(ulong *n1, ulong *n2, uint ch) -{ - n1[0]^= (((n1[0] & 63) + n2[0]) * (ch)) + (n1[0] << 8); - n2[0]+= 3; -} - - static void my_hash_sort_utf32(CHARSET_INFO *cs, const uchar *s, size_t slen, - ulong *n1, ulong *n2) + ulong *nr1, ulong *nr2) { my_wc_t wc; int res; const uchar *e= s + slen; MY_UNICASE_INFO *uni_plane= cs->caseinfo; + register ulong m1= *nr1, m2= *nr2; /* Skip trailing spaces */ while (e > s + 3 && e[-1] == ' ' && !e[-2] && !e[-3] && !e[-4]) @@ -2031,12 +2026,14 @@ my_hash_sort_utf32(CHARSET_INFO *cs, const uchar *s, size_t slen, while ((res= my_utf32_uni(cs, &wc, (uchar*) s, (uchar*) e)) > 0) { my_tosort_utf32(uni_plane, &wc); - my_hash_add(n1, n2, (uint) (wc >> 24)); - my_hash_add(n1, n2, (uint) (wc >> 16) & 0xFF); - my_hash_add(n1, n2, (uint) (wc >> 8) & 0xFF); - my_hash_add(n1, n2, (uint) (wc & 0xFF)); + MY_HASH_ADD(m1, m2, (uint) (wc >> 24)); + MY_HASH_ADD(m1, m2, (uint) (wc >> 16) & 0xFF); + MY_HASH_ADD(m1, m2, (uint) (wc >> 8) & 0xFF); + MY_HASH_ADD(m1, m2, (uint) (wc & 0xFF)); s+= res; } + *nr1= m1; + *nr2= m2; } @@ -2202,10 +2199,10 @@ my_strnncollsp_utf32(CHARSET_INFO *cs, static uint my_ismbchar_utf32(CHARSET_INFO *cs __attribute__((unused)), - const char *b __attribute__((unused)), - const char *e __attribute__((unused))) + const char *b, + const char *e) { - return 4; + return b + 4 > e ? 
0 : 4; } @@ -2976,12 +2973,13 @@ static size_t my_caseup_ucs2(CHARSET_INFO *cs, char *src, size_t srclen, static void my_hash_sort_ucs2(CHARSET_INFO *cs, const uchar *s, size_t slen, - ulong *n1, ulong *n2) + ulong *nr1, ulong *nr2) { my_wc_t wc; int res; const uchar *e=s+slen; MY_UNICASE_INFO *uni_plane= cs->caseinfo; + register ulong m1= *nr1, m2= *nr2; while (e > s+1 && e[-1] == ' ' && e[-2] == '\0') e-= 2; @@ -2989,12 +2987,11 @@ static void my_hash_sort_ucs2(CHARSET_INFO *cs, const uchar *s, size_t slen, while ((s < e) && (res=my_ucs2_uni(cs,&wc, (uchar *)s, (uchar*)e)) >0) { my_tosort_ucs2(uni_plane, &wc); - n1[0]^= (((n1[0] & 63)+n2[0])*(wc & 0xFF))+ (n1[0] << 8); - n2[0]+=3; - n1[0]^= (((n1[0] & 63)+n2[0])*(wc >> 8))+ (n1[0] << 8); - n2[0]+=3; + MY_HASH_ADD_16(m1, m2, wc); s+=res; } + *nr1= m1; + *nr2= m2; } @@ -3159,10 +3156,10 @@ static int my_strnncollsp_ucs2(CHARSET_INFO *cs __attribute__((unused)), static uint my_ismbchar_ucs2(CHARSET_INFO *cs __attribute__((unused)), - const char *b __attribute__((unused)), - const char *e __attribute__((unused))) + const char *b, + const char *e) { - return 2; + return b + 2 > e ? 0 : 2; } @@ -3312,16 +3309,17 @@ void my_hash_sort_ucs2_bin(CHARSET_INFO *cs __attribute__((unused)), const uchar *key, size_t len,ulong *nr1, ulong *nr2) { const uchar *end = key + len; + register ulong m1= *nr1, m2= *nr2; while (end > key+1 && end[-1] == ' ' && end[-2] == '\0') end-= 2; for (; key < (uchar*) end ; key++) { - nr1[0]^=(ulong) ((((uint) nr1[0] & 63)+nr2[0]) * - ((uint)*key)) + (nr1[0] << 8); - nr2[0]+=3; + MY_HASH_ADD(m1, m2, (uint)*key); } + *nr1= m1; + *nr2= m2; } diff --git a/strings/ctype-ujis.c b/strings/ctype-ujis.c index 9a69995b278..e7dbefe6c1d 100644 --- a/strings/ctype-ujis.c +++ b/strings/ctype-ujis.c @@ -1,6 +1,6 @@ -/* Copyright tommy@valley.ne.jp. - Copyright (c) 2002, 2011, Oracle and/or its affiliates. - Copyright (c) 2009-2011, Monty Program Ab +/* Copyright (c) 2002 MySQL AB & tommy@valley.ne.jp + Copyright (c) 2002, 2014, Oracle and/or its affiliates. + Copyright (c) 2009, 2014, SkySQL Ab. 
This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public @@ -203,10 +203,10 @@ static uint mbcharlen_ujis(CHARSET_INFO *cs __attribute__((unused)),uint c) /* EUC-JP encoding subcomponents: - [x00-x7F] # ASCII/JIS-Roman (one-byte/character) - [x8E][xA0-xDF] # half-width katakana (two bytes/char) - [x8F][xA1-xFE][xA1-xFE] # JIS X 0212-1990 (three bytes/char) - [xA1-xFE][xA1-xFE] # JIS X 0208:1997 (two bytes/char) + [x00-x7F] # ASCII/JIS-Roman (one-byte/character) + [x8E][xA1-xDF] # half-width katakana (two bytes/char) + [x8F][xA1-xFE][xA1-xFE] # JIS X 0212-1990 (three bytes/char) + [xA1-xFE][xA1-xFE] # JIS X 0208:1997 (two bytes/char) */ static @@ -231,15 +231,15 @@ size_t my_well_formed_len_ujis(CHARSET_INFO *cs __attribute__((unused)), return (size_t) (chbeg - beg); /* unexpected EOL */ } - if (ch == 0x8E) /* [x8E][xA0-xDF] */ + if (isujis_ss2(ch)) /* [x8E][xA1-xDF] */ { - if (*b >= 0xA0 && *b <= 0xDF) + if (iskata(*b)) continue; *error= 1; return (size_t) (chbeg - beg); /* invalid sequence */ } - if (ch == 0x8F) /* [x8F][xA1-xFE][xA1-xFE] */ + if (isujis_ss3(ch)) /* [x8F][xA1-xFE][xA1-xFE] */ { ch= *b++; if (b >= (uchar*) end) @@ -249,8 +249,7 @@ size_t my_well_formed_len_ujis(CHARSET_INFO *cs __attribute__((unused)), } } - if (ch >= 0xA1 && ch <= 0xFE && - *b >= 0xA1 && *b <= 0xFE) /* [xA1-xFE][xA1-xFE] */ + if (isujis(ch) && isujis(*b)) /* [xA1-xFE][xA1-xFE] */ continue; *error= 1; return (size_t) (chbeg - beg); /* invalid sequence */ diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c index 96d5ea26a3c..d0a64d11c84 100644 --- a/strings/ctype-utf8.c +++ b/strings/ctype-utf8.c @@ -5087,12 +5087,13 @@ static size_t my_caseup_utf8(CHARSET_INFO *cs, char *src, size_t srclen, static void my_hash_sort_utf8(CHARSET_INFO *cs, const uchar *s, size_t slen, - ulong *n1, ulong *n2) + ulong *nr1, ulong *nr2) { my_wc_t wc; int res; const uchar *e=s+slen; MY_UNICASE_INFO *uni_plane= cs->caseinfo; + register ulong m1= *nr1, m2= *nr2; /* Remove end space. We have to do this to be able to compare @@ -5104,12 +5105,11 @@ static void my_hash_sort_utf8(CHARSET_INFO *cs, const uchar *s, size_t slen, while ((s < e) && (res=my_utf8_uni(cs,&wc, (uchar *)s, (uchar*)e))>0 ) { my_tosort_unicode(uni_plane, &wc, cs->state); - n1[0]^= (((n1[0] & 63)+n2[0])*(wc & 0xFF))+ (n1[0] << 8); - n2[0]+=3; - n1[0]^= (((n1[0] & 63)+n2[0])*(wc >> 8))+ (n1[0] << 8); - n2[0]+=3; + MY_HASH_ADD_16(m1, m2, wc); s+=res; } + *nr1= m1; + *nr2= m2; } @@ -7597,22 +7597,15 @@ my_caseup_utf8mb4(CHARSET_INFO *cs, char *src, size_t srclen, } -static inline void -my_hash_add(ulong *n1, ulong *n2, uint ch) -{ - n1[0]^= (((n1[0] & 63) + n2[0]) * (ch)) + (n1[0] << 8); - n2[0]+= 3; -} - - static void my_hash_sort_utf8mb4(CHARSET_INFO *cs, const uchar *s, size_t slen, - ulong *n1, ulong *n2) + ulong *nr1, ulong *nr2) { my_wc_t wc; int res; const uchar *e= s + slen; MY_UNICASE_INFO *uni_plane= cs->caseinfo; + register ulong m1= *nr1, m2= *nr2; /* Remove end space. 
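The ujis and eucjpms validity checks above are now expressed through classification macros, and the half-width katakana trail byte range is tightened from [xA0-xDF] to [xA1-xDF]. A stand-alone sketch of the per-character check described by the encoding table in the comment (helper names mirror isujis_ss2()/iskata()/isujis() but are local stand-ins):

#include <stddef.h>

static bool is_ss2(unsigned char c)  { return c == 0x8E; }
static bool is_ss3(unsigned char c)  { return c == 0x8F; }
static bool is_kata(unsigned char c) { return c >= 0xA1 && c <= 0xDF; }
static bool is_94(unsigned char c)   { return c >= 0xA1 && c <= 0xFE; }

/* Length of one well-formed EUC-JP character starting at p (p < end),
   or 0 for an invalid or truncated sequence. */
static size_t euc_char_len(const unsigned char* p, const unsigned char* end)
{
    size_t left = (size_t)(end - p);

    if (p[0] < 0x80)                                        /* ASCII        */
        return 1;
    if (is_ss2(p[0]))                                       /* [x8E][xA1-xDF] */
        return (left >= 2 && is_kata(p[1])) ? 2 : 0;
    if (is_ss3(p[0]))                                       /* [x8F][xA1-xFE][xA1-xFE] */
        return (left >= 3 && is_94(p[1]) && is_94(p[2])) ? 3 : 0;
    if (is_94(p[0]))                                        /* [xA1-xFE][xA1-xFE] */
        return (left >= 2 && is_94(p[1])) ? 2 : 0;
    return 0;                                               /* invalid lead */
}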
We do this to be able to compare @@ -7624,8 +7617,7 @@ my_hash_sort_utf8mb4(CHARSET_INFO *cs, const uchar *s, size_t slen, while ((res= my_mb_wc_utf8mb4(cs, &wc, (uchar*) s, (uchar*) e)) > 0) { my_tosort_unicode(uni_plane, &wc, cs->state); - my_hash_add(n1, n2, (uint) (wc & 0xFF)); - my_hash_add(n1, n2, (uint) (wc >> 8) & 0xFF); + MY_HASH_ADD_16(m1, m2, (uint) (wc & 0xFFFF)); if (wc > 0xFFFF) { /* @@ -7635,10 +7627,12 @@ my_hash_sort_utf8mb4(CHARSET_INFO *cs, const uchar *s, size_t slen, This is useful to keep order of records in test results, e.g. for "SHOW GRANTS". */ - my_hash_add(n1, n2, (uint) (wc >> 16) & 0xFF); + MY_HASH_ADD(m1, m2, (uint) ((wc >> 16) & 0xFF)); } s+= res; } + *nr1= m1; + *nr2= m2; } diff --git a/strings/ctype.c b/strings/ctype.c index 566dc0331c4..048fbe3d368 100644 --- a/strings/ctype.c +++ b/strings/ctype.c @@ -818,23 +818,102 @@ my_parse_charset_xml(MY_CHARSET_LOADER *loader, const char *buf, size_t len) } +uint +my_string_repertoire_8bit(CHARSET_INFO *cs, const char *str, ulong length) +{ + const char *strend; + if ((cs->state & MY_CS_NONASCII) && length > 0) + return MY_REPERTOIRE_UNICODE30; + for (strend= str + length; str < strend; str++) + { + if (((uchar) *str) > 0x7F) + return MY_REPERTOIRE_UNICODE30; + } + return MY_REPERTOIRE_ASCII; +} + + +static void +my_string_metadata_init(MY_STRING_METADATA *metadata) +{ + metadata->repertoire= MY_REPERTOIRE_ASCII; + metadata->char_length= 0; +} + + +/** + This should probably eventually go as a virtual function into + MY_CHARSET_HANDLER or MY_COLLATION_HANDLER. +*/ +static void +my_string_metadata_get_mb(MY_STRING_METADATA *metadata, + CHARSET_INFO *cs, const char *str, ulong length) +{ + const char *strend= str + length; + for (my_string_metadata_init(metadata) ; + str < strend; + metadata->char_length++) + { + my_wc_t wc; + int mblen= cs->cset->mb_wc(cs, &wc, (const uchar *) str, + (const uchar *) strend); + if (mblen > 0) /* Assigned character */ + { + if (wc > 0x7F) + metadata->repertoire|= MY_REPERTOIRE_EXTENDED; + str+= mblen; + } + else if (mblen == MY_CS_ILSEQ) /* Bad byte sequence */ + { + metadata->repertoire|= MY_REPERTOIRE_EXTENDED; + str++; + } + else if (mblen > MY_CS_TOOSMALL) /* Unassigned character */ + { + metadata->repertoire|= MY_REPERTOIRE_EXTENDED; + str+= (-mblen); + } + else /* Incomplete character, premature end-of-line */ + { + metadata->repertoire|= MY_REPERTOIRE_EXTENDED; /* Just in case */ + break; + } + } +} + + +/** + Collect string metadata: length in characters and repertoire. 
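The my_string_metadata_get() helper whose body follows collects both the character (not byte) length and the repertoire of a string in one pass, with a fast path for single-byte charsets that have no non-ASCII mappings. A minimal usage sketch, assuming the declarations end up in the usual charset headers of this source tree:

#include <stddef.h>
#include <m_ctype.h>   /* assumed home of CHARSET_INFO and MY_STRING_METADATA */

static void classify_string(CHARSET_INFO *cs, const char *str, size_t length)
{
    MY_STRING_METADATA meta;
    my_string_metadata_get(&meta, cs, str, length);

    /* meta.char_length holds the length in characters;
       meta.repertoire stays MY_REPERTOIRE_ASCII for pure-ASCII input and
       gains the MY_REPERTOIRE_EXTENDED bit as soon as a non-ASCII,
       unassigned or ill-formed sequence is seen. */
}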
+*/ +void +my_string_metadata_get(MY_STRING_METADATA *metadata, + CHARSET_INFO *cs, const char *str, size_t length) +{ + if (cs->mbmaxlen == 1 && !(cs->state & MY_CS_NONASCII)) + { + metadata->char_length= length; + metadata->repertoire= my_string_repertoire_8bit(cs, str, length); + } + else + { + my_string_metadata_get_mb(metadata, cs, str, length); + } +} + + /* Check repertoire: detect pure ascii strings */ uint my_string_repertoire(CHARSET_INFO *cs, const char *str, ulong length) { - const char *strend= str + length; - if (cs->mbminlen == 1) + if (cs->mbminlen == 1 && !(cs->state & MY_CS_NONASCII)) { - for ( ; str < strend; str++) - { - if (((uchar) *str) > 0x7F) - return MY_REPERTOIRE_UNICODE30; - } + return my_string_repertoire_8bit(cs, str, length); } else { + const char *strend= str + length; my_wc_t wc; int chlen; for (; @@ -987,7 +1066,14 @@ my_convert_internal(char *to, uint32 to_length, wc= '?'; } else - break; // Not enough characters + { + if ((uchar *) from >= from_end) + break; /* End of line */ + /* Incomplete byte sequence */ + error_count++; + from++; + wc= '?'; + } outp: if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0) diff --git a/strings/strings_def.h b/strings/strings_def.h index 6bd8e8f5575..fb280b6bb6b 100644 --- a/strings/strings_def.h +++ b/strings/strings_def.h @@ -100,4 +100,13 @@ static inline const uchar *skip_trailing_space(const uchar *ptr,size_t len) end--; return (end); } + +/* Macros for hashing characters */ + +#define MY_HASH_ADD(A, B, value) \ + do { A^= (((A & 63)+B)*((value)))+ (A << 8); B+=3; } while(0) + +#define MY_HASH_ADD_16(A, B, value) \ + do { MY_HASH_ADD(A, B, ((value) & 0xFF)) ; MY_HASH_ADD(A, B, ((value >>8 ))); } while(0) + #endif diff --git a/support-files/CMakeLists.txt b/support-files/CMakeLists.txt index 67f6209ade2..75ebd4af3cd 100644 --- a/support-files/CMakeLists.txt +++ b/support-files/CMakeLists.txt @@ -1,5 +1,5 @@ -# Copyright (c) 2006, 2010, Oracle and/or its affiliates. -# Copyright (c) 2012, Monty Program Ab +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. +# Copyright (c) 2012, 2014, SkySQL Ab. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -53,14 +53,8 @@ IF(UNIX) FOREACH(script mysqld_multi.server mysql-log-rotate binary-configure wsrep_notify) CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/${script}.sh ${CMAKE_CURRENT_BINARY_DIR}/${script} @ONLY ) - - IF(script MATCHES ".ini") - SET(comp IniFiles) - ELSE() - SET(comp Server_Scripts) - ENDIF() INSTALL(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${script} - DESTINATION ${inst_location} COMPONENT ${comp}) + DESTINATION ${inst_location} COMPONENT Server_Scripts) ENDFOREACH() IF(INSTALL_SUPPORTFILESDIR) INSTALL(FILES magic DESTINATION ${inst_location} COMPONENT SupportFiles) diff --git a/support-files/build-tags b/support-files/build-tags index a7fca4b42f4..44bd2a02593 100755 --- a/support-files/build-tags +++ b/support-files/build-tags @@ -1,12 +1,9 @@ #! /bin/sh rm -f TAGS -filter='\.cc$\|\.c$\|\.h$\|sql_yacc\.yy$' +filter='\.cc$\|\.c$\|\.h$\|sql_yacc\.yy$\|\.ic$\|errmsg-utf8\.txt$' list="find . 
-type f" bzr root >/dev/null 2>/dev/null && list="bzr ls --from-root -R --kind=file --versioned" -$list |grep $filter | grep -v gen-cpp |while read f; -do - etags -o TAGS --append $f -done +$list |grep $filter | xargs etags -o TAGS --append diff --git a/support-files/mysql.server.sh b/support-files/mysql.server.sh index caeec3f610f..56502ae0717 100644 --- a/support-files/mysql.server.sh +++ b/support-files/mysql.server.sh @@ -259,6 +259,11 @@ wait_for_ready () { if $bindir/mysqladmin ping >/dev/null 2>&1; then log_success_msg return 0 + elif kill -0 $! 2>/dev/null ; then + : # mysqld_safe is still running + else + # mysqld_safe is no longer running, abort the wait loop + break fi echo $echo_n ".$echo_c" diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 898d67c5058..51e549d9d1b 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -18616,7 +18616,7 @@ static void test_bug56976() const char* query = "SELECT LENGTH(?)"; char *long_buffer; unsigned long i, packet_len = 256 * 1024L; - unsigned long dos_len = 2 * 1024 * 1024L; + unsigned long dos_len = 35000000; DBUG_ENTER("test_bug56976"); myheader("test_bug56976"); @@ -18707,13 +18707,31 @@ static void test_progress_reporting() rc= mysql_query(conn, "insert into t1 (f2) select f2 from t2"); myquery(rc); } - rc= mysql_query(conn, "alter table t1 add f1 int primary key auto_increment, add key (f2), order by f2"); + + progress_stage= progress_max_stage= progress_count= 0; + rc= mysql_query(conn, "alter table t1 add f1 int primary key auto_increment, order by f2"); myquery(rc); if (!opt_silent) printf("Got progress_count: %u stage: %u max_stage: %u\n", progress_count, progress_stage, progress_max_stage); DIE_UNLESS(progress_count > 0 && progress_stage >=2 && progress_max_stage == 3); + + progress_stage= progress_max_stage= progress_count= 0; + rc= mysql_query(conn, "create index f2 on t1 (f2)"); + myquery(rc); + if (!opt_silent) + printf("Got progress_count: %u stage: %u max_stage: %u\n", + progress_count, progress_stage, progress_max_stage); + DIE_UNLESS(progress_count > 0 && progress_stage >=2 && progress_max_stage == 2); + + progress_stage= progress_max_stage= progress_count= 0; + rc= mysql_query(conn, "drop index f2 on t1"); myquery(rc); + if (!opt_silent) + printf("Got progress_count: %u stage: %u max_stage: %u\n", + progress_count, progress_stage, progress_max_stage); + DIE_UNLESS(progress_count > 0 && progress_stage >=2 && progress_max_stage == 2); + rc= mysql_query(conn, "set @@global.progress_report_time=@save"); myquery(rc); mysql_close(conn); @@ -19255,6 +19273,106 @@ static void test_mdev4326() myquery(rc); } + +/* + Check compressed protocol +*/ + +static void test_compressed_protocol() +{ + MYSQL *mysql_local; + char query[4096], *end; + int i; + myheader("test_compressed_protocol"); + + if (!(mysql_local= mysql_client_init(NULL))) + { + fprintf(stderr, "\n mysql_client_init() failed"); + exit(1); + } + + if (!(mysql_real_connect(mysql_local, opt_host, opt_user, + opt_password, current_db, opt_port, + opt_unix_socket, CLIENT_COMPRESS))) + { + fprintf(stderr, "\n connection failed(%s)", mysql_error(mysql_local)); + exit(1); + } + mysql_options(mysql_local,MYSQL_OPT_COMPRESS,NullS); + + end= strmov(strfill(strmov(query, "select length(\""),1000,'a'),"\")"); + + for (i=0 ; i < 2 ; i++) + { + MYSQL_RES *res; + + int rc= mysql_real_query(mysql, query, (int) (end-query)); + myquery(rc); + res= mysql_store_result(mysql); + DBUG_ASSERT(res != 0); + mysql_free_result(res); + } + + 
mysql_close(mysql_local); +} + +/* + Check big packets +*/ + +static void test_big_packet() +{ + MYSQL *mysql_local; + char *query, *end; + /* We run the tests with a server with max packet size of 3200000 */ + size_t big_packet= 31000000L; + int i; + MYSQL_PARAMETERS *mysql_params= mysql_get_parameters(); + long org_max_allowed_packet= *mysql_params->p_max_allowed_packet; + long opt_net_buffer_length= *mysql_params->p_net_buffer_length; + + myheader("test_big_packet"); + + query= (char*) my_malloc(big_packet+1024, MYF(MY_WME)); + DIE_UNLESS(query); + + if (!(mysql_local= mysql_client_init(NULL))) + { + fprintf(stderr, "\n mysql_client_init() failed"); + exit(1); + } + + if (!(mysql_real_connect(mysql_local, opt_host, opt_user, + opt_password, current_db, opt_port, + opt_unix_socket, 0))) + { + fprintf(stderr, "\n connection failed(%s)", mysql_error(mysql_local)); + exit(1); + } + + *mysql_params->p_max_allowed_packet= big_packet+1000; + *mysql_params->p_net_buffer_length= 8L*256L*256L; + + end= strmov(strfill(strmov(query, "select length(\""), big_packet,'a'),"\")"); + + for (i=0 ; i < 2 ; i++) + { + MYSQL_RES *res; + int rc= mysql_real_query(mysql, query, (int) (end-query)); + myquery(rc); + res= mysql_store_result(mysql); + DBUG_ASSERT(res != 0); + mysql_free_result(res); + } + + mysql_close(mysql_local); + my_free(query); + + *mysql_params->p_max_allowed_packet= org_max_allowed_packet; + *mysql_params->p_net_buffer_length = opt_net_buffer_length; +} + + static struct my_tests_st my_tests[]= { { "disable_query_logs", disable_query_logs }, { "test_view_sp_list_fields", test_view_sp_list_fields }, @@ -19526,6 +19644,8 @@ static struct my_tests_st my_tests[]= { { "test_bug13001491", test_bug13001491 }, { "test_mdev4326", test_mdev4326 }, { "test_ps_sp_out_params", test_ps_sp_out_params }, + { "test_compressed_protocol", test_compressed_protocol }, + { "test_big_packet", test_big_packet }, { 0, 0 } }; diff --git a/vio/CMakeLists.txt b/vio/CMakeLists.txt index 72059f1ec08..2fb82ef9dd2 100644 --- a/vio/CMakeLists.txt +++ b/vio/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -21,7 +21,6 @@ SET(VIO_SOURCES vio.c viosocket.c viossl.c viopipe.c vioshm.c viosslfactories.c) ADD_CONVENIENCE_LIBRARY(vio ${VIO_SOURCES}) TARGET_LINK_LIBRARIES(vio ${LIBSOCKET}) -INSTALL_DEBUG_SYMBOLS(vio) IF(MSVC) INSTALL_DEBUG_TARGET(vio DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() diff --git a/zlib/CMakeLists.txt b/zlib/CMakeLists.txt index 0be1f976b39..7668ce723b8 100644 --- a/zlib/CMakeLists.txt +++ b/zlib/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -23,7 +23,6 @@ SET(ZLIB_SOURCES adler32.c compress.c crc32.c crc32.h deflate.c deflate.h gzio. ADD_CONVENIENCE_LIBRARY(zlib ${ZLIB_SOURCES}) RESTRICT_SYMBOL_EXPORTS(zlib) -INSTALL_DEBUG_SYMBOLS(zlib) IF(MSVC) INSTALL_DEBUG_TARGET(zlib DESTINATION ${INSTALL_LIBDIR}/debug) ENDIF() |
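
The strings/ctype-ujis.c hunk corrects the half-width katakana range in the comment (after the 0x8E lead byte the trail byte is [xA1-xDF], not [xA0-xDF]) and replaces the open-coded byte comparisons with the charset's isujis()/iskata()/isujis_ss2()/isujis_ss3() macros. Below is a minimal sketch of the same EUC-JP well-formedness rules written as plain range checks, assuming nothing from the server headers; the euc_* helper names are illustrative only, not the macros used by the server.

#include <stddef.h>

/* Byte classes from the EUC-JP comment in strings/ctype-ujis.c */
static int euc_is_ascii(unsigned char c) { return c <= 0x7F; }              /* [x00-x7F] */
static int euc_is_kata(unsigned char c)  { return c >= 0xA1 && c <= 0xDF; } /* after SS2 */
static int euc_is_gr(unsigned char c)    { return c >= 0xA1 && c <= 0xFE; } /* GR byte   */

/* Length in bytes of one well-formed character at s, or 0 on error/truncation */
static size_t euc_char_len(const unsigned char *s, const unsigned char *end)
{
  if (s >= end)
    return 0;
  if (euc_is_ascii(s[0]))                 /* ASCII/JIS-Roman, one byte */
    return 1;
  if (s[0] == 0x8E)                       /* SS2: [x8E][xA1-xDF], half-width katakana */
    return (s + 1 < end && euc_is_kata(s[1])) ? 2 : 0;
  if (s[0] == 0x8F)                       /* SS3: [x8F][xA1-xFE][xA1-xFE], JIS X 0212 */
    return (s + 2 < end && euc_is_gr(s[1]) && euc_is_gr(s[2])) ? 3 : 0;
  if (euc_is_gr(s[0]))                    /* [xA1-xFE][xA1-xFE], JIS X 0208 */
    return (s + 1 < end && euc_is_gr(s[1])) ? 2 : 0;
  return 0;                               /* lone 0x80-0xA0, 0xFF, etc. are invalid */
}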
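
The MY_HASH_ADD / MY_HASH_ADD_16 macros added to strings/strings_def.h replace the open-coded hashing in my_hash_sort_utf8() and my_hash_sort_utf8mb4(): each code point is split into bytes and folded into the two hash accumulators, which the patched functions now keep in local copies and write back once at the end. The standalone sketch below shows the macros in isolation; the decoding loop, the demo_hash_16() name and the seed values are stand-ins for the server's wide-character machinery, not taken from the patch.

#include <stdio.h>
#include <stddef.h>

typedef unsigned long ulong;

/* Macros as introduced in strings/strings_def.h */
#define MY_HASH_ADD(A, B, value) \
  do { A^= (((A & 63)+B)*((value)))+ (A << 8); B+=3; } while(0)

#define MY_HASH_ADD_16(A, B, value) \
  do { MY_HASH_ADD(A, B, ((value) & 0xFF)) ; MY_HASH_ADD(A, B, ((value >>8 ))); } while(0)

/* Fold a sequence of (already case-folded) 16-bit code points into the
   accumulator pair, working on local copies as the patched functions do. */
static void demo_hash_16(const unsigned *wc, size_t n, ulong *nr1, ulong *nr2)
{
  ulong m1= *nr1, m2= *nr2;
  size_t i;
  for (i= 0; i < n; i++)
  {
    unsigned v= wc[i] & 0xFFFFu;   /* low 16 bits, as in my_hash_sort_utf8mb4() */
    MY_HASH_ADD_16(m1, m2, v);
  }
  *nr1= m1;
  *nr2= m2;
}

int main(void)
{
  unsigned codepoints[]= { 'A', 0x00E9, 0x3042 }; /* 'A', e-acute, hiragana A */
  ulong nr1= 1, nr2= 4;                           /* arbitrary demo seeds */
  demo_hash_16(codepoints, 3, &nr1, &nr2);
  printf("nr1=%lu nr2=%lu\n", nr1, nr2);
  return 0;
}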
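
strings/ctype.c gains my_string_repertoire_8bit() and my_string_metadata_get(), which determine in one pass whether a string is pure ASCII or needs an extended repertoire and, in the metadata variant, how many characters (not bytes) it contains. A caller-side sketch follows, assuming these functions and the MY_STRING_METADATA struct are exported through m_ctype.h like the rest of the charset API; the is_ascii_only() wrapper is illustrative and not part of the patch.

#include <string.h>
#include "m_ctype.h"  /* assumed to declare MY_STRING_METADATA and my_string_metadata_get() */

/* Return 1 if every character in str is US-ASCII, 0 otherwise, and
   report the length in characters through *char_length. */
static int is_ascii_only(CHARSET_INFO *cs, const char *str, size_t *char_length)
{
  MY_STRING_METADATA md;
  my_string_metadata_get(&md, cs, str, strlen(str));
  *char_length= md.char_length;
  return md.repertoire == MY_REPERTOIRE_ASCII;
}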
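
The new test_compressed_protocol() client test opens a connection with CLIENT_COMPRESS and runs a moderately large SELECT so the traffic goes over the compressed protocol. Below is a stripped-down sketch of the same idea against the public C API; host, credentials, database and the query text are placeholders, and error handling is reduced to a message.

#include <stdio.h>
#include <mysql.h>

int main(void)
{
  MYSQL *con= mysql_init(NULL);
  if (!con)
    return 1;

  /* CLIENT_COMPRESS asks for the compressed client/server protocol */
  if (!mysql_real_connect(con, "localhost", "root", "", "test",
                          0, NULL, CLIENT_COMPRESS))
  {
    fprintf(stderr, "connect failed: %s\n", mysql_error(con));
    mysql_close(con);
    return 1;
  }

  if (mysql_query(con, "SELECT LENGTH(REPEAT('a', 1000))"))
    fprintf(stderr, "query failed: %s\n", mysql_error(con));
  else
  {
    MYSQL_RES *res= mysql_store_result(con);
    if (res)
      mysql_free_result(res);
  }

  mysql_close(con);
  return 0;
}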